From 1c649ec871947345d2099636c20f850e9b737321 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:04:46 -0700 Subject: [PATCH 001/239] Bump version in preparation for new changes. --- doc/src/release_notes.rst | 13 +++++++++++++ src/oracledb/version.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2193817a..e337300d 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,6 +11,19 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. +oracledb 3.1.0 (TBD) +-------------------- + +Thin Mode Changes ++++++++++++++++++ + +Thick Mode Changes +++++++++++++++++++ + +Common Changes +++++++++++++++ + + oracledb 3.0.0 (March 2025) --------------------------- diff --git a/src/oracledb/version.py b/src/oracledb/version.py index 755262da..34b0487b 100644 --- a/src/oracledb/version.py +++ b/src/oracledb/version.py @@ -30,4 +30,4 @@ # file doc/src/conf.py both reference this file directly. # ----------------------------------------------------------------------------- -__version__ = "3.0.0" +__version__ = "3.1.0b1" From e001f665cbb0ed1e9d9b9fdf5b3d670151d31d6b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:05:30 -0700 Subject: [PATCH 002/239] Added support for AQ using asyncio. --- doc/src/api_manual/async_aq.rst | 105 +++++++ doc/src/api_manual/async_connection.rst | 26 ++ doc/src/index.rst | 1 + doc/src/release_notes.rst | 3 + doc/src/user_guide/aq.rst | 34 +- samples/multi_consumer_aq_async.py | 96 ++++++ samples/object_aq_async.py | 101 ++++++ samples/raw_aq_async.py | 86 +++++ src/oracledb/aq.py | 130 ++++---- src/oracledb/connection.py | 164 +++++----- src/oracledb/impl/thin/connection.pyx | 18 +- src/oracledb/impl/thin/messages.pyx | 4 +- src/oracledb/impl/thin/queue.pyx | 95 ++++-- tests/test_7900_aq_raw_async.py | 398 ++++++++++++++++++++++++ tests/test_env.py | 18 ++ utils/templates/connection.py | 164 +++++----- 16 files changed, 1190 insertions(+), 253 deletions(-) create mode 100644 doc/src/api_manual/async_aq.rst create mode 100644 samples/multi_consumer_aq_async.py create mode 100644 samples/object_aq_async.py create mode 100644 samples/raw_aq_async.py create mode 100644 tests/test_7900_aq_raw_async.py diff --git a/doc/src/api_manual/async_aq.rst b/doc/src/api_manual/async_aq.rst new file mode 100644 index 00000000..bf9c7458 --- /dev/null +++ b/doc/src/api_manual/async_aq.rst @@ -0,0 +1,105 @@ +.. _asyncaq: + +******************************** +API: Async Advanced Queuing (AQ) +******************************** + +See :ref:`aqusermanual` for more information about using AQ in python-oracledb. + +.. versionadded:: 3.1.0 + +.. note:: + + AsyncQueue objects are only supported in python-oracledb Thin mode. + +.. _asyncqueue: + +AsyncQueue Objects +================== + +These objects are created using the :meth:`AsyncConnection.queue()` method and +are used to enqueue and dequeue messages. + +AsyncQueue Methods +------------------ + +.. method:: AsyncQueue.deqone() + + Dequeues at most one message from the queue. If a message is dequeued, it + will be a :ref:`message property ` object; otherwise, + the value *None* will be returned. + +.. method:: AsyncQueue.enqone(message) + + Enqueues a single message into the queue. 
The message must be a + :ref:`message property ` object which has had its + payload attribute set to a value that the queue supports. + +AsyncQueue Attributes +--------------------- + +.. attribute:: AsyncQueue.connection + + This read-only attribute returns a reference to the connection object on + which the queue was created. + +.. attribute:: AsyncQueue.deqoptions + + This read-only attribute returns a reference to the :ref:`options + ` that will be used when dequeuing messages from the queue. + +.. attribute:: AsyncQueue.enqoptions + + This read-only attribute returns a reference to the :ref:`options + ` that will be used when enqueuing messages into the queue. + +.. attribute:: AsyncQueue.name + + This read-only attribute returns the name of the queue. + +.. attribute:: AsyncQueue.payload_type + + This read-only attribute returns the object type for payloads that can be + enqueued and dequeued. If using a JSON queue, this returns the value + ``"JSON"``. If using a raw queue, this returns the value *None*. + +.. _asyncdeqoptions: + +Dequeue Options +=============== + +.. note:: + + These objects are used to configure how messages are dequeued from queues. + An instance of this object is found in the attribute + :attr:`AsyncQueue.deqoptions`. + +See :ref:`deqoptions` for information on the supported attributes. + +.. _asyncenqoptions: + +Enqueue Options +=============== + +.. note:: + + These objects are used to configure how messages are enqueued into queues. + An instance of this object is found in the attribute + :attr:`AsyncQueue.enqoptions`. + +See :ref:`enqoptions` for information on the supported attributes. + +.. _asyncmsgproperties: + +Message Properties +================== + +.. note:: + + These objects are used to identify the properties of messages that are + enqueued and dequeued in queues. They are created by the method + :meth:`AsyncConnection.msgproperties()`. They are used by the method + :meth:`AsyncQueue.enqone()` and returned by the method + :meth:`AsyncQueue.deqone()`. + +See :ref:`msgproperties` for information on the supported attributes. diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index f08e31da..913aa2a1 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -253,10 +253,36 @@ AsyncConnection Methods use :meth:`AsyncConnection.ping()` which performs a :ref:`round-trip ` to the database. +.. method:: AsyncConnection.msgproperties(payload, correlation, delay, exceptionq, expiration, priority) + + Creates and returns a message properties object that contains the + properties of messages used in advanced queuing. See + :ref:`asyncmsgproperties` for more information. + + Each of the parameters are optional. If specified, they act as a shortcut + for setting each of the equivalently named properties. + + .. versionadded:: 3.1.0 + .. method:: AsyncConnection.ping() Pings the database to verify if the connection is valid. +.. method:: AsyncConnection.queue(name, payload_type=None) + + Creates a :ref:`queue ` which is used to enqueue and dequeue + messages in Advanced Queuing. + + The ``name`` parameter is expected to be a string identifying the queue in + which messages are to be enqueued or dequeued. + + The ``payload_type`` parameter, if specified, is expected to be an + :ref:`object type ` that identifies the type of payload the + queue expects. If the string "JSON" is specified, JSON data is enqueued and + dequeued. If not specified, RAW data is enqueued and dequeued. 
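+
+   A minimal sketch of enqueuing to a RAW queue (the queue name
+   ``DEMO_RAW_QUEUE`` is an assumption taken from the sample setup scripts;
+   any existing queue name can be used):
+
+   .. code-block:: python
+
+       queue = connection.queue("DEMO_RAW_QUEUE")
+       await queue.enqone(connection.msgproperties(payload="some raw data"))
+       await connection.commit()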
+ + .. versionadded:: 3.1.0 + .. method:: AsyncConnection.rollback() Rolls back any pending transaction. diff --git a/doc/src/index.rst b/doc/src/index.rst index cd2c9c28..97f535d4 100644 --- a/doc/src/index.rst +++ b/doc/src/index.rst @@ -74,6 +74,7 @@ API Manual api_manual/async_connection_pool.rst api_manual/async_cursor.rst api_manual/async_lob.rst + api_manual/async_aq.rst api_manual/pipeline.rst api_manual/deprecations.rst diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e337300d..9a252329 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,6 +17,9 @@ oracledb 3.1.0 (TBD) Thin Mode Changes +++++++++++++++++ +#) Added Async :ref:`Oracle Advanced Queuing ` support for single + enqueue and dequeue of RAW and Oracle object payload types. + Thick Mode Changes ++++++++++++++++++ diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index 5d06e7fc..03a5a916 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -15,7 +15,6 @@ receiving of various payloads, such as RAW values, JSON, JMS, and objects. Transactional Event Queues use a highly optimized implementation of Advanced Queuing. They were previously called AQ Sharded Queues. - Python-oracledb API calls are the same for Transactional Event Queues and Classic Queues, however there are differences in support for some payload types. @@ -113,14 +112,15 @@ queue can then be used for enqueuing, dequeuing, or for both. **Enqueuing RAW Payloads** You can connect to the database and get the queue that was created with RAW -payload type by using: +payload type by using :meth:`Connection.queue()` or +:meth:`AsyncConnection.queue()`. For example: .. code-block:: python queue = connection.queue("DEMO_RAW_QUEUE") -Now messages can be queued using :meth:`~Queue.enqone()`. To send three -messages: +Now messages can be queued using :meth:`Queue.enqone()` or +:meth:`AsyncQueue.enqone()`. To send three messages: .. code-block:: python @@ -137,9 +137,9 @@ Since the queue is a RAW queue, strings are internally encoded to bytes using `encode() `__ before being enqueued. -The use of :meth:`~Connection.commit()` allows messages to be sent only when -any database transaction related to them is committed. This default behavior -can be altered, see :ref:`aqoptions`. +The use of :meth:`Connection.commit()` or :meth:`AsyncConnection.commit()` +allows messages to be sent only when any database transaction related to them +is committed. This default behavior can be altered, see :ref:`aqoptions`. **Enqueuing JSON Payloads** @@ -173,13 +173,15 @@ Now the message can be enqueued using :meth:`~Queue.enqone()`. Dequeuing Messages ================== -Dequeuing is performed similarly. To dequeue a message call the method -:meth:`~Queue.deqone()` as shown in the examples below. This returns a +Dequeuing is performed similarly as shown in the examples below. This returns a :ref:`MessageProperties ` object containing the message payload and related attributes. **Dequeuing RAW Payloads** +To dequeue a message, call the method :meth:`Queue.deqone()` or +:meth:`AsyncQueue.deqone()`. For example: + .. code-block:: python queue = connection.queue("DEMO_RAW_QUEUE") @@ -191,9 +193,9 @@ Note that if the message is expected to be a string, the bytes must be decoded by the application using `decode() `__, as shown. -If there are no messages in the queue, :meth:`~Queue.deqone()` will wait for -one to be enqueued. This default behavior can be altered, see -:ref:`aqoptions`. 
+If there are no messages in the queue, :meth:`Queue.deqone()` or +:meth:`AsyncQueue.deqone()` will wait for one to be enqueued. This default +behavior can be altered, see :ref:`aqoptions`. Various :ref:`message properties ` can be accessed. For example to show the :attr:`~MessageProperties.msgid` of a dequeued message: @@ -204,6 +206,8 @@ to show the :attr:`~MessageProperties.msgid` of a dequeued message: **Dequeuing JSON Payloads** +To dequeue a message, call the method :meth:`Queue.deqone()`, for example: + .. code-block:: python queue = connection.queue("DEMO_JSON_QUEUE", "JSON") @@ -236,7 +240,8 @@ And a queue that accepts this type: end; / -You can enqueue messages: +You can enqueue messages using :meth:`Queue.enqone()` or +:meth:`AsyncQueue.enqone()`, for example: .. code-block:: python @@ -251,7 +256,8 @@ You can enqueue messages: queue.enqone(connection.msgproperties(payload=book)) connection.commit() -Dequeuing can be done like this: +Dequeuing can be done with :meth:`Queue.deqone()` or +:meth:`AsyncQueue.deqone()` like this: .. code-block:: python diff --git a/samples/multi_consumer_aq_async.py b/samples/multi_consumer_aq_async.py new file mode 100644 index 00000000..12c405e9 --- /dev/null +++ b/samples/multi_consumer_aq_async.py @@ -0,0 +1,96 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. +# +# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta, +# Canada. All rights reserved. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# multi_consumer_aq.py +# +# Demonstrates how to use multi-consumer advanced queuing. It makes use of a +# RAW queue created in the sample setup. 
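+#
+# The queue DEMO_RAW_QUEUE_MULTI and its subscribers SUBSCRIBER_A and
+# SUBSCRIBER_B used below are assumed to have been created beforehand by the
+# sample setup scripts.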
+# ----------------------------------------------------------------------------- + +import asyncio + +import oracledb +import sample_env + +QUEUE_NAME = "DEMO_RAW_QUEUE_MULTI" +PAYLOAD_DATA = [ + "The first message", + "The second message", + "The third message", + "The fourth and final message", +] + + +async def main(): + + # connect to database + connection = await oracledb.connect_async( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + ) + + # create a queue + queue = connection.queue(QUEUE_NAME) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + + # enqueue a few messages + print("Enqueuing messages...") + for data in PAYLOAD_DATA: + print(data) + await queue.enqone(connection.msgproperties(payload=data)) + await connection.commit() + print() + + # dequeue the messages for consumer A + print("Dequeuing the messages for consumer A...") + queue.deqoptions.consumername = "SUBSCRIBER_A" + while True: + props = await queue.deqone() + if not props: + break + print(props.payload.decode()) + await connection.commit() + print() + + # dequeue the message for consumer B + print("Dequeuing the messages for consumer B...") + queue.deqoptions.consumername = "SUBSCRIBER_B" + while True: + props = await queue.deqone() + if not props: + break + print(props.payload.decode()) + await connection.commit() + print("\nDone.") + + +asyncio.run(main()) diff --git a/samples/object_aq_async.py b/samples/object_aq_async.py new file mode 100644 index 00000000..1dda7de0 --- /dev/null +++ b/samples/object_aq_async.py @@ -0,0 +1,101 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. +# +# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta, +# Canada. All rights reserved. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# object_aq.py +# +# Demonstrates how to use advanced queuing with objects. It makes use of a +# simple type and queue created in the sample setup. 
+# ----------------------------------------------------------------------------- + +import asyncio +import decimal + +import oracledb +import sample_env + +BOOK_TYPE_NAME = "UDT_BOOK" +QUEUE_NAME = "DEMO_BOOK_QUEUE" +BOOK_DATA = [ + ( + "The Fellowship of the Ring", + "Tolkien, J.R.R.", + decimal.Decimal("10.99"), + ), + ( + "Harry Potter and the Philosopher's Stone", + "Rowling, J.K.", + decimal.Decimal("7.99"), + ), +] + + +async def main(): + + # connect to database + connection = await oracledb.connect_async( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + ) + + # create a queue + books_type = await connection.gettype(BOOK_TYPE_NAME) + queue = connection.queue(QUEUE_NAME, payload_type=books_type) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + + # dequeue all existing messages to ensure the queue is empty, just so that + # the results are consistent + while await queue.deqone(): + pass + + # enqueue a few messages + print("Enqueuing messages...") + for title, authors, price in BOOK_DATA: + book = books_type.newobject() + book.TITLE = title + book.AUTHORS = authors + book.PRICE = price + print(title) + await queue.enqone(connection.msgproperties(payload=book)) + await connection.commit() + + # dequeue the messages + print("\nDequeuing messages...") + while True: + props = await queue.deqone() + if not props: + break + print(props.payload.TITLE) + await connection.commit() + print("\nDone.") + + +asyncio.run(main()) diff --git a/samples/raw_aq_async.py b/samples/raw_aq_async.py new file mode 100644 index 00000000..8d25b614 --- /dev/null +++ b/samples/raw_aq_async.py @@ -0,0 +1,86 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. +# +# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta, +# Canada. All rights reserved. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# raw_aq.py +# +# Demonstrates how to use advanced queuing with RAW data. It makes use of a +# RAW queue created in the sample setup. 
+# ----------------------------------------------------------------------------- + +import asyncio + +import oracledb +import sample_env + +QUEUE_NAME = "DEMO_RAW_QUEUE" +PAYLOAD_DATA = [ + "The first message", + "The second message", + "The third message", + "The fourth and final message", +] + + +async def main(): + connection = await oracledb.connect_async( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + ) + + # create a queue + queue = connection.queue(QUEUE_NAME) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + + # dequeue all existing messages to ensure the queue is empty, just so + # that the results are consistent + while await queue.deqone(): + pass + + # enqueue a few messages + print("Enqueuing messages...") + for data in PAYLOAD_DATA: + print(data) + await queue.enqone(connection.msgproperties(payload=data)) + await connection.commit() + + # dequeue the messages + print("\nDequeuing messages...") + while True: + props = await queue.deqone() + if not props: + break + print(props.payload.decode()) + await connection.commit() + print("\nDone.") + + +asyncio.run(main()) diff --git a/src/oracledb/aq.py b/src/oracledb/aq.py index 9aa4f384..7144dab6 100644 --- a/src/oracledb/aq.py +++ b/src/oracledb/aq.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2023, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -37,7 +37,7 @@ from .dbobject import DbObject, DbObjectType -class Queue: +class BaseQueue: @classmethod def _from_impl(cls, connection, impl): queue = cls.__new__(cls) @@ -64,6 +64,68 @@ def connection(self) -> "connection_module.Connection": """ return self._connection + @property + def deqoptions(self) -> "DeqOptions": + """ + Returns the options that will be used when dequeuing messages from the + queue. + """ + return self._deq_options + + @property + def deqOptions(self) -> "DeqOptions": + """ + Deprecated: use deqoptions instead. + """ + return self.deqoptions + + @property + def enqoptions(self) -> "EnqOptions": + """ + Returns the options that will be used when enqueuing messages into the + queue. + """ + return self._enq_options + + @property + def enqOptions(self) -> "EnqOptions": + """ + Deprecated: use enqoptions() instead. + """ + return self.enqoptions + + @property + def name(self) -> str: + """ + Returns the name of the queue. + """ + return self._impl.name + + @property + def payload_type(self) -> Union[DbObjectType, None]: + """ + Returns the object type for payloads that can be enqueued and dequeued. + If using a raw queue, this returns the value None. + """ + if self._payload_type is None: + if self._impl.is_json: + self._payload_type = "JSON" + elif self._impl.payload_type is not None: + self._payload_type = DbObjectType._from_impl( + self._impl.payload_type + ) + return self._payload_type + + @property + def payloadType(self) -> Union[DbObjectType, None]: + """ + Deprecated: use payload_type instead. 
+ """ + return self.payload_type + + +class Queue(BaseQueue): + def deqmany(self, max_num_messages: int) -> list: """ Dequeues up to the specified number of messages from the queue and @@ -93,21 +155,6 @@ def deqOne(self) -> Union["MessageProperties", None]: """ return self.deqone() - @property - def deqoptions(self) -> "DeqOptions": - """ - Returns the options that will be used when dequeuing messages from the - queue. - """ - return self._deq_options - - @property - def deqOptions(self) -> "DeqOptions": - """ - Deprecated: use deqoptions instead. - """ - return self.deqoptions - def enqmany(self, messages: list) -> None: """ Enqueues multiple messages into the queue. The messages parameter must @@ -146,49 +193,26 @@ def enqOne(self, message: "MessageProperties") -> None: """ return self.enqone(message) - @property - def enqoptions(self) -> "EnqOptions": - """ - Returns the options that will be used when enqueuing messages into the - queue. - """ - return self._enq_options - - @property - def enqOptions(self) -> "EnqOptions": - """ - Deprecated: use enqoptions() instead. - """ - return self.enqoptions - @property - def name(self) -> str: - """ - Returns the name of the queue. - """ - return self._impl.name +class AsyncQueue(BaseQueue): - @property - def payload_type(self) -> Union[DbObjectType, None]: + async def deqone(self) -> Union["MessageProperties", None]: """ - Returns the object type for payloads that can be enqueued and dequeued. - If using a raw queue, this returns the value None. + Dequeues at most one message from the queue and returns it. If no + message is dequeued, None is returned. """ - if self._payload_type is None: - if self._impl.is_json: - self._payload_type = "JSON" - elif self._impl.payload_type is not None: - self._payload_type = DbObjectType._from_impl( - self._impl.payload_type - ) - return self._payload_type + message_impl = await self._impl.deq_one() + if message_impl is not None: + return MessageProperties._from_impl(message_impl) - @property - def payloadType(self) -> Union[DbObjectType, None]: + async def enqone(self, message: "MessageProperties") -> None: """ - Deprecated: use payload_type instead. + Enqueues a single message into the queue. The message must be a message + property object which has had its payload attribute set to a value that + the queue supports. """ - return self.payload_type + self._verify_message(message) + await self._impl.enq_one(message._impl) class DeqOptions: diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 06b2b49a..e7c1c635 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -44,7 +44,7 @@ from . import base_impl, constants, driver_mode, errors, thick_impl, thin_impl from . import pool as pool_module -from .aq import Queue, MessageProperties +from .aq import AsyncQueue, Queue, MessageProperties from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType from .connect_params import ConnectParams from .cursor import AsyncCursor, Cursor @@ -381,6 +381,80 @@ def module(self, value: str) -> None: self._verify_connected() self._impl.set_module(value) + def msgproperties( + self, + payload: Optional[Union[bytes, str, DbObject]] = None, + correlation: Optional[str] = None, + delay: Optional[int] = None, + exceptionq: Optional[str] = None, + expiration: Optional[int] = None, + priority: Optional[int] = None, + recipients: Optional[list] = None, + ) -> MessageProperties: + """ + Create and return a message properties object. 
If the parameters are + not None, they act as a shortcut for setting each of the equivalently + named properties. + """ + impl = self._impl.create_msg_props_impl() + props = MessageProperties._from_impl(impl) + if payload is not None: + props.payload = payload + if correlation is not None: + props.correlation = correlation + if delay is not None: + props.delay = delay + if exceptionq is not None: + props.exceptionq = exceptionq + if expiration is not None: + props.expiration = expiration + if priority is not None: + props.priority = priority + if recipients is not None: + props.recipients = recipients + return props + + def queue( + self, + name: str, + payload_type: Optional[Union[DbObjectType, str]] = None, + *, + payloadType: Optional[DbObjectType] = None, + ) -> Queue: + """ + Creates and returns a queue which is used to enqueue and dequeue + messages in Advanced Queueing (AQ). + + The name parameter is expected to be a string identifying the queue in + which messages are to be enqueued or dequeued. + + The payload_type parameter, if specified, is expected to be an + object type that identifies the type of payload the queue expects. + If the string "JSON" is specified, JSON data is enqueued and dequeued. + If not specified, RAW data is enqueued and dequeued. + """ + self._verify_connected() + payload_type_impl = None + is_json = False + if payloadType is not None: + if payload_type is not None: + errors._raise_err( + errors.ERR_DUPLICATED_PARAMETER, + deprecated_name="payloadType", + new_name="payload_type", + ) + payload_type = payloadType + if payload_type is not None: + if payload_type == "JSON": + is_json = True + elif not isinstance(payload_type, DbObjectType): + raise TypeError("expecting DbObjectType") + else: + payload_type_impl = payload_type._impl + impl = self._impl.create_queue_impl() + impl.initialize(self._impl, name, payload_type_impl, is_json) + return self._create_queue(impl) + @property def outputtypehandler(self) -> Callable: """ @@ -602,6 +676,13 @@ def __exit__(self, exc_type, exc_value, exc_tb): self._impl.close(in_del=True) self._impl = None + def _create_queue(self, impl): + """ + Returns a queue object that the user can use to dequeue and enqueue + messages. + """ + return Queue._from_impl(self, impl) + def _get_oci_attr( self, handle_type: int, attr_num: int, attr_type: int ) -> Any: @@ -786,39 +867,6 @@ def maxBytesPerCharacter(self) -> int: """ return 4 - def msgproperties( - self, - payload: Optional[Union[bytes, str, DbObject]] = None, - correlation: Optional[str] = None, - delay: Optional[int] = None, - exceptionq: Optional[str] = None, - expiration: Optional[int] = None, - priority: Optional[int] = None, - recipients: Optional[list] = None, - ) -> MessageProperties: - """ - Create and return a message properties object. If the parameters are - not None, they act as a shortcut for setting each of the equivalently - named properties. - """ - impl = self._impl.create_msg_props_impl() - props = MessageProperties._from_impl(impl) - if payload is not None: - props.payload = payload - if correlation is not None: - props.correlation = correlation - if delay is not None: - props.delay = delay - if exceptionq is not None: - props.exceptionq = exceptionq - if expiration is not None: - props.expiration = expiration - if priority is not None: - props.priority = priority - if recipients is not None: - props.recipients = recipients - return props - def ping(self) -> None: """ Pings the database to verify the connection is valid. 
@@ -840,47 +888,6 @@ def proxy_user(self) -> Union[str, None]: self._verify_connected() return self._impl.proxy_user - def queue( - self, - name: str, - payload_type: Optional[Union[DbObjectType, str]] = None, - *, - payloadType: Optional[DbObjectType] = None, - ) -> Queue: - """ - Creates and returns a queue which is used to enqueue and dequeue - messages in Advanced Queueing (AQ). - - The name parameter is expected to be a string identifying the queue in - which messages are to be enqueued or dequeued. - - The payload_type parameter, if specified, is expected to be an - object type that identifies the type of payload the queue expects. - If the string "JSON" is specified, JSON data is enqueued and dequeued. - If not specified, RAW data is enqueued and dequeued. - """ - self._verify_connected() - payload_type_impl = None - is_json = False - if payloadType is not None: - if payload_type is not None: - errors._raise_err( - errors.ERR_DUPLICATED_PARAMETER, - deprecated_name="payloadType", - new_name="payload_type", - ) - payload_type = payloadType - if payload_type is not None: - if payload_type == "JSON": - is_json = True - elif not isinstance(payload_type, DbObjectType): - raise TypeError("expecting DbObjectType") - else: - payload_type_impl = payload_type._impl - impl = self._impl.create_queue_impl() - impl.initialize(self._impl, name, payload_type_impl, is_json) - return Queue._from_impl(self, impl) - def rollback(self) -> None: """ Rolls back any pending transactions. @@ -1638,6 +1645,13 @@ async def _connect(self, dsn, pool, params, kwargs): return self + def _create_queue(self, impl): + """ + Returns a queue object that the user can use to dequeue and enqueue + messages. + """ + return AsyncQueue._from_impl(self, impl) + def _verify_can_execute( self, parameters: Any, keyword_parameters: Any ) -> Any: diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index 90f10306..4c905220 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -206,6 +206,12 @@ cdef class BaseThinConnImpl(BaseConnImpl): def cancel(self): self._protocol._break_external() + def create_msg_props_impl(self): + cdef ThinMsgPropsImpl impl + impl = ThinMsgPropsImpl() + impl._conn_impl = self + return impl + def get_call_timeout(self): return self._call_timeout @@ -438,12 +444,6 @@ cdef class ThinConnImpl(BaseThinConnImpl): self._force_close() raise - def create_msg_props_impl(self): - cdef ThinMsgPropsImpl impl - impl = ThinMsgPropsImpl() - impl._conn_impl = self - return impl - def create_queue_impl(self): return ThinQueueImpl.__new__(ThinQueueImpl) @@ -960,6 +960,12 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): self._force_close() raise + def create_queue_impl(self): + """ + Create and return the implementation object to use for AQ queuing. 
+ """ + return AsyncThinQueueImpl.__new__(AsyncThinQueueImpl) + async def create_temp_lob_impl(self, DbType dbtype): cdef AsyncThinLobImpl lob_impl = self._create_lob_impl(dbtype) await lob_impl.create_temp() diff --git a/src/oracledb/impl/thin/messages.pyx b/src/oracledb/impl/thin/messages.pyx index 2a31e50c..da5fe7e7 100644 --- a/src/oracledb/impl/thin/messages.pyx +++ b/src/oracledb/impl/thin/messages.pyx @@ -2347,7 +2347,7 @@ cdef class FetchMessage(MessageWithData): @cython.final cdef class DeqMessage(Message): cdef: - ThinQueueImpl queue_impl + BaseThinQueueImpl queue_impl ThinDeqOptionsImpl deq_options_impl ThinMsgPropsImpl props_impl bint no_msg_found @@ -2566,7 +2566,7 @@ cdef class DeqMessage(Message): @cython.final cdef class EnqMessage(Message): cdef: - ThinQueueImpl queue_impl + BaseThinQueueImpl queue_impl ThinEnqOptionsImpl enq_options_impl ThinMsgPropsImpl props_impl diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index 552d9953..59790f9e 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -29,13 +29,28 @@ # thin_impl.pyx). #------------------------------------------------------------------------------ -cdef class ThinQueueImpl(BaseQueueImpl): +cdef class BaseThinQueueImpl(BaseQueueImpl): cdef: - ThinConnImpl _conn_impl + BaseThinConnImpl _conn_impl bytes payload_toid - cdef Message _create_enq_message(self, ThinMsgPropsImpl props_impl): + cdef DeqMessage _create_deq_message(self): + """ + Create the message for dequeuing a payload. + """ + cdef: + ThinMsgPropsImpl props_impl + DeqMessage message + props_impl = ThinMsgPropsImpl() + props_impl._initialize(self) + message = self._conn_impl._create_message(DeqMessage) + message.queue_impl = self + message.deq_options_impl = self.deq_options_impl + message.props_impl = props_impl + return message + + cdef EnqMessage _create_enq_message(self, ThinMsgPropsImpl props_impl): """ Create the message for enqueuing the provided payload. """ @@ -46,6 +61,27 @@ cdef class ThinQueueImpl(BaseQueueImpl): message.props_impl = props_impl return message + def initialize(self, BaseThinConnImpl conn_impl, str name, + ThinDbObjectTypeImpl payload_type, bint is_json): + """ + Internal method for initializing the queue. + """ + self._conn_impl = conn_impl + self.is_json = is_json + self.deq_options_impl = ThinDeqOptionsImpl() + self.enq_options_impl = ThinEnqOptionsImpl() + self.payload_type = payload_type + if self.is_json: + errors._raise_not_supported("JSON payload in AQ") + elif self.payload_type is not None: + self.payload_toid = payload_type.oid + else: + self.payload_toid = bytes([0]*15+[0x17]) + self.name = name + + +cdef class ThinQueueImpl(BaseThinQueueImpl): + def deq_one(self): """ Internal method for dequeuing a single message from a queue. 
@@ -53,13 +89,7 @@ cdef class ThinQueueImpl(BaseQueueImpl): cdef: Protocol protocol = self._conn_impl._protocol DeqMessage message - ThinMsgPropsImpl props_impl - props_impl = ThinMsgPropsImpl() - props_impl._initialize(self) - message = self._conn_impl._create_message(DeqMessage) - message.queue_impl = self - message.deq_options_impl = self.deq_options_impl - message.props_impl = props_impl + message = self._create_deq_message() protocol._process_single_message(message) if not message.no_msg_found: return message.props_impl @@ -70,27 +100,36 @@ cdef class ThinQueueImpl(BaseQueueImpl): """ cdef: Protocol protocol = self._conn_impl._protocol - Message message + EnqMessage message message = self._create_enq_message(props_impl) protocol._process_single_message(message) - def initialize(self, ThinConnImpl conn_impl, str name, - ThinDbObjectTypeImpl payload_type, bint is_json): + +cdef class AsyncThinQueueImpl(BaseThinQueueImpl): + + async def deq_one(self): """ - Internal method for initializing the queue. + Internal method for dequeuing a single message from a queue. """ - self._conn_impl = conn_impl - self.is_json = is_json - self.deq_options_impl = ThinDeqOptionsImpl() - self.enq_options_impl = ThinEnqOptionsImpl() - self.payload_type = payload_type - if self.is_json: - errors._raise_not_supported("JSON payload in AQ") - elif self.payload_type is not None: - self.payload_toid = payload_type.oid - else: - self.payload_toid = bytes([0]*15+[0x17]) - self.name = name + cdef: + BaseAsyncProtocol protocol + DeqMessage message + protocol = self._conn_impl._protocol + message = self._create_deq_message() + await protocol._process_single_message(message) + if not message.no_msg_found: + return message.props_impl + + async def enq_one(self, ThinMsgPropsImpl props_impl): + """ + Internal method for enqueuing a single message into a queue. + """ + cdef: + BaseAsyncProtocol protocol + EnqMessage message + protocol = self._conn_impl._protocol + message = self._create_enq_message(props_impl) + await protocol._process_single_message(message) cdef class ThinDeqOptionsImpl(BaseDeqOptionsImpl): @@ -286,7 +325,7 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): object payloadObject bytes toid int32_t version - ThinConnImpl _conn_impl + BaseThinConnImpl _conn_impl bytes enq_txn_id bytes sender_agent_name bytes sender_agent_address @@ -300,7 +339,7 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): self.version = 1 self.sender_agent_protocol = 0 - cdef int _initialize(self, ThinQueueImpl queue_impl) except -1: + cdef int _initialize(self, BaseThinQueueImpl queue_impl) except -1: """ Internal method to initialize the message properties. """ diff --git a/tests/test_7900_aq_raw_async.py b/tests/test_7900_aq_raw_async.py new file mode 100644 index 00000000..941d7a78 --- /dev/null +++ b/tests/test_7900_aq_raw_async.py @@ -0,0 +1,398 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +7900 - Module for testing AQ with raw queues with asyncio +""" + +import unittest + +import oracledb +import test_env + + +@unittest.skipUnless( + test_env.get_is_thin(), "asyncio not supported in thick mode" +) +class TestCase(test_env.BaseAsyncTestCase): + raw_data = [ + b"sample raw data 1", + b"sample raw data 2", + b"sample raw data 3", + b"sample raw data 4", + b"sample raw data 5", + b"sample raw data 6", + ] + + def __verify_attr(self, obj, attrName, value): + setattr(obj, attrName, value) + self.assertEqual(getattr(obj, attrName), value) + + async def test_7900(self): + "7900 - test dequeuing an empty RAW queue" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + + async def test_7901(self): + "7901 - test enqueuing and dequeuing multiple RAW messages" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + props = self.conn.msgproperties() + for value in self.raw_data: + props.payload = value + await queue.enqone(props) + await self.conn.commit() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: + props = await queue.deqone() + if props is None: + break + value = props.payload + results.append(value) + await self.conn.commit() + self.assertEqual(results, self.raw_data) + + async def test_7902(self): + "7902 - test dequeuing with DEQ_REMOVE_NODATA in RAW queue" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[1] + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = await queue.deqone() + self.assertIsNotNone(props) + self.assertEqual(props.payload, b"") + + async def test_7903(self): + "7903 - test getting/setting dequeue options attributes" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + options = queue.deqoptions + self.__verify_attr(options, "condition", "TEST_CONDITION") + self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") + self.__verify_attr(options, "correlation", "TEST_CORRELATION") + self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) + self.__verify_attr( + options, "navigation", oracledb.DEQ_NEXT_TRANSACTION + ) + self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") + self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + self.__verify_attr(options, "wait", 1287) + self.__verify_attr(options, "msgid", b"mID") + + async def test_7904(self): + "7904 - test enqueue options attributes RAW queue" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + options = queue.enqoptions + self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + async def test_7905(self): + "7905 - test errors for invalid values for enqueue" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = 
self.raw_data[0] + with self.assertRaises(TypeError): + await queue.enqone(value) + + async def test_7906(self): + "7906 - test getting/setting message properties attributes" + props = self.conn.msgproperties() + self.__verify_attr(props, "correlation", "TEST_CORRELATION") + self.__verify_attr(props, "delay", 60) + self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + self.__verify_attr(props, "expiration", 30) + self.assertEqual(props.attempts, 0) + self.__verify_attr(props, "priority", 1) + self.assertEqual(props.state, oracledb.MSG_READY) + self.assertEqual(props.deliverymode, 0) + + async def test_7907(self): + "7907 - test enqueue visibility option - ENQ_ON_COMMIT" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + await self.conn.commit() + props = await queue.deqone() + self.assertIsNotNone(props) + + async def test_7908(self): + "7908 - test enqueue visibility option - ENQ_IMMEDIATE" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + value = props.payload + results = value + await other_conn.commit() + self.assertEqual(results, self.raw_data[0]) + + async def test_7909(self): + "7909 - test enqueue/dequeue delivery modes identical - buffered" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + value = props.payload + results = value + await other_conn.commit() + self.assertEqual(results, self.raw_data[0]) + + async def test_7910(self): + "7910 - test enqueue/dequeue delivery modes identical - persistent" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + value 
= props.payload + results = value + await other_conn.commit() + self.assertEqual(results, self.raw_data[0]) + + async def test_7911(self): + "7911 - test enqueue/dequeue delivery modes the same" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + value = props.payload + results = value + await other_conn.commit() + self.assertEqual(results, self.raw_data[0]) + + async def test_7912(self): + "7912 - test enqueue/dequeue delivery modes different" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + + async def test_7913(self): + "7913 - test error for message with no payload" + queue = self.conn.queue("TEST_RAW_QUEUE") + props = self.conn.msgproperties() + with self.assertRaisesFullCode("DPY-2000"): + await queue.enqone(props) + + async def test_7914(self): + "7914 - verify that the msgid property is returned correctly" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + props = self.conn.msgproperties(payload=value) + self.assertIsNone(props.msgid) + await queue.enqone(props) + await self.cursor.execute("select msgid from RAW_QUEUE_TAB") + (actual_msgid,) = await self.cursor.fetchone() + self.assertEqual(props.msgid, actual_msgid) + props = await queue.deqone() + self.assertEqual(props.msgid, actual_msgid) + + async def test_7915(self): + "7915 - test message props enqtime" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + await self.cursor.execute("select current_timestamp from dual") + (start_date,) = await self.cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + props = await queue.deqone() + await self.cursor.execute("select current_timestamp from dual") + (end_date,) = await self.cursor.fetchone() + end_date = end_date.replace(microsecond=0) + self.assertTrue(start_date <= props.enqtime <= end_date) + + async def test_7916(self): + "7916 - test message props declared attributes" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + values = dict( + payload=value, + correlation="TEST_CORRELATION", + delay=0, + exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", + expiration=15, + priority=1, + ) + props = self.conn.msgproperties(**values) + for attr_name in values: + self.assertEqual(getattr(props, attr_name), 
values[attr_name]) + await queue.enqone(props) + await self.conn.commit() + prop = await queue.deqone() + for attr_name in values: + self.assertEqual(getattr(prop, attr_name), values[attr_name]) + + async def test_7917(self): + "7917 - test getting queue attributes" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + self.assertEqual(queue.name, "TEST_RAW_QUEUE") + self.assertEqual(queue.connection, self.conn) + + async def test_7918(self): + "7918 - test getting write-only attributes" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + for options in (queue.enqoptions, queue.deqoptions): + with self.assertRaises(AttributeError): + options.deliverymode + + async def test_7919(self): + "7919 - test deqoption condition with priority" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] + for priority in priorities: + value = self.raw_data[0] + props = self.conn.msgproperties(payload=value, priority=priority) + await queue.enqone(props) + + queue.deqoptions.condition = "priority = 9" + results = [] + while True: + props = await queue.deqone() + if props is None: + break + results.append(props.payload) + await self.conn.commit() + self.assertEqual(len(results), 3) + + async def test_7920(self): + "7920 - test deqoption correlation" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + correlations = [ + "sample", + "sample correlation", + "sample", + "sample", + "sample correlation", + ] + for correlation in correlations: + value = self.raw_data[0] + props = self.conn.msgproperties( + payload=value, correlation=correlation + ) + await queue.enqone(props) + await self.conn.commit() + queue.deqoptions.correlation = "sample correlation" + results = [] + while True: + props = await queue.deqone() + if props is None: + break + results.append(props.payload) + await self.conn.commit() + self.assertEqual(len(results), 2) + + async def test_7921(self): + "7921 - test deqoption msgid" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + props = self.conn.msgproperties(payload=value) + await queue.enqone(props) + await queue.enqone(props) + await self.conn.commit() + msgid = props.msgid + await queue.enqone(props) + await self.conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.msgid = msgid + prop = await queue.deqone() + await self.conn.commit() + self.assertEqual(prop.msgid, msgid) + + async def test_7922(self): + "7922 - test payload_type returns the correct value" + queue = self.conn.queue("TEST_RAW_QUEUE") + self.assertIsNone(queue.payload_type) + + async def test_7923(self): + "7923 - test deprecated attributes (enqOptions, deqOptions)" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + self.assertEqual(queue.enqOptions, queue.enqoptions) + self.assertEqual(queue.deqOptions, queue.deqoptions) + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_env.py b/tests/test_env.py index fdb8191b..1e8bcd04 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -733,6 +733,24 @@ async def asyncTearDown(self): del self.cursor del self.conn + async def get_and_clear_queue( + self, + queue_name, + payload_type=None, + message="not supported with this client/server combination", + ): + if payload_type == "JSON": + self.skipTest(message) + elif isinstance(payload_type, str): + payload_type = await self.conn.gettype(payload_type) + queue = 
self.conn.queue(queue_name, payload_type) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + while await queue.deqone(): + pass + return self.conn.queue(queue_name, payload_type) + async def get_db_object_as_plain_object(self, obj): if obj.type.iscollection: element_values = [] diff --git a/utils/templates/connection.py b/utils/templates/connection.py index 5548b3fb..7b2f7ae6 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -42,7 +42,7 @@ from . import base_impl, constants, driver_mode, errors, thick_impl, thin_impl from . import pool as pool_module -from .aq import Queue, MessageProperties +from .aq import AsyncQueue, Queue, MessageProperties from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType from .connect_params import ConnectParams from .cursor import AsyncCursor, Cursor @@ -379,6 +379,80 @@ def module(self, value: str) -> None: self._verify_connected() self._impl.set_module(value) + def msgproperties( + self, + payload: Optional[Union[bytes, str, DbObject]] = None, + correlation: Optional[str] = None, + delay: Optional[int] = None, + exceptionq: Optional[str] = None, + expiration: Optional[int] = None, + priority: Optional[int] = None, + recipients: Optional[list] = None, + ) -> MessageProperties: + """ + Create and return a message properties object. If the parameters are + not None, they act as a shortcut for setting each of the equivalently + named properties. + """ + impl = self._impl.create_msg_props_impl() + props = MessageProperties._from_impl(impl) + if payload is not None: + props.payload = payload + if correlation is not None: + props.correlation = correlation + if delay is not None: + props.delay = delay + if exceptionq is not None: + props.exceptionq = exceptionq + if expiration is not None: + props.expiration = expiration + if priority is not None: + props.priority = priority + if recipients is not None: + props.recipients = recipients + return props + + def queue( + self, + name: str, + payload_type: Optional[Union[DbObjectType, str]] = None, + *, + payloadType: Optional[DbObjectType] = None, + ) -> Queue: + """ + Creates and returns a queue which is used to enqueue and dequeue + messages in Advanced Queueing (AQ). + + The name parameter is expected to be a string identifying the queue in + which messages are to be enqueued or dequeued. + + The payload_type parameter, if specified, is expected to be an + object type that identifies the type of payload the queue expects. + If the string "JSON" is specified, JSON data is enqueued and dequeued. + If not specified, RAW data is enqueued and dequeued. 
+ """ + self._verify_connected() + payload_type_impl = None + is_json = False + if payloadType is not None: + if payload_type is not None: + errors._raise_err( + errors.ERR_DUPLICATED_PARAMETER, + deprecated_name="payloadType", + new_name="payload_type", + ) + payload_type = payloadType + if payload_type is not None: + if payload_type == "JSON": + is_json = True + elif not isinstance(payload_type, DbObjectType): + raise TypeError("expecting DbObjectType") + else: + payload_type_impl = payload_type._impl + impl = self._impl.create_queue_impl() + impl.initialize(self._impl, name, payload_type_impl, is_json) + return self._create_queue(impl) + @property def outputtypehandler(self) -> Callable: """ @@ -600,6 +674,13 @@ def __exit__(self, exc_type, exc_value, exc_tb): self._impl.close(in_del=True) self._impl = None + def _create_queue(self, impl): + """ + Returns a queue object that the user can use to dequeue and enqueue + messages. + """ + return Queue._from_impl(self, impl) + def _get_oci_attr( self, handle_type: int, attr_num: int, attr_type: int ) -> Any: @@ -784,39 +865,6 @@ def maxBytesPerCharacter(self) -> int: """ return 4 - def msgproperties( - self, - payload: Optional[Union[bytes, str, DbObject]] = None, - correlation: Optional[str] = None, - delay: Optional[int] = None, - exceptionq: Optional[str] = None, - expiration: Optional[int] = None, - priority: Optional[int] = None, - recipients: Optional[list] = None, - ) -> MessageProperties: - """ - Create and return a message properties object. If the parameters are - not None, they act as a shortcut for setting each of the equivalently - named properties. - """ - impl = self._impl.create_msg_props_impl() - props = MessageProperties._from_impl(impl) - if payload is not None: - props.payload = payload - if correlation is not None: - props.correlation = correlation - if delay is not None: - props.delay = delay - if exceptionq is not None: - props.exceptionq = exceptionq - if expiration is not None: - props.expiration = expiration - if priority is not None: - props.priority = priority - if recipients is not None: - props.recipients = recipients - return props - def ping(self) -> None: """ Pings the database to verify the connection is valid. @@ -838,47 +886,6 @@ def proxy_user(self) -> Union[str, None]: self._verify_connected() return self._impl.proxy_user - def queue( - self, - name: str, - payload_type: Optional[Union[DbObjectType, str]] = None, - *, - payloadType: Optional[DbObjectType] = None, - ) -> Queue: - """ - Creates and returns a queue which is used to enqueue and dequeue - messages in Advanced Queueing (AQ). - - The name parameter is expected to be a string identifying the queue in - which messages are to be enqueued or dequeued. - - The payload_type parameter, if specified, is expected to be an - object type that identifies the type of payload the queue expects. - If the string "JSON" is specified, JSON data is enqueued and dequeued. - If not specified, RAW data is enqueued and dequeued. 
- """ - self._verify_connected() - payload_type_impl = None - is_json = False - if payloadType is not None: - if payload_type is not None: - errors._raise_err( - errors.ERR_DUPLICATED_PARAMETER, - deprecated_name="payloadType", - new_name="payload_type", - ) - payload_type = payloadType - if payload_type is not None: - if payload_type == "JSON": - is_json = True - elif not isinstance(payload_type, DbObjectType): - raise TypeError("expecting DbObjectType") - else: - payload_type_impl = payload_type._impl - impl = self._impl.create_queue_impl() - impl.initialize(self._impl, name, payload_type_impl, is_json) - return Queue._from_impl(self, impl) - def rollback(self) -> None: """ Rolls back any pending transactions. @@ -1388,6 +1395,13 @@ async def _connect(self, dsn, pool, params, kwargs): return self + def _create_queue(self, impl): + """ + Returns a queue object that the user can use to dequeue and enqueue + messages. + """ + return AsyncQueue._from_impl(self, impl) + def _verify_can_execute( self, parameters: Any, keyword_parameters: Any ) -> Any: From eb71405743b5d028cb39906140a7922e2ad22f9c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:06:05 -0700 Subject: [PATCH 003/239] Improve error message when the cryptography package cannot be imported (#455). --- doc/src/release_notes.rst | 2 ++ src/oracledb/errors.py | 2 +- src/oracledb/impl/thin/connection.pyx | 3 +-- src/oracledb/impl/thin/crypto.pyx | 6 +++--- src/oracledb/impl/thin/pool.pyx | 3 +-- src/oracledb/impl/thin/utils.pyx | 11 ++++++++++- src/oracledb/thin_impl.pyx | 2 +- 7 files changed, 19 insertions(+), 10 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 9a252329..002f3911 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -19,6 +19,8 @@ Thin Mode Changes #) Added Async :ref:`Oracle Advanced Queuing ` support for single enqueue and dequeue of RAW and Oracle object payload types. +#) Improved error message when the cryptography package cannot be imported + (`issue 455 `__). Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 4f46deec..2672452d 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -710,7 +710,7 @@ def _raise_not_supported(feature: str) -> None: ERR_NO_CREDENTIALS: "no credentials specified", ERR_NO_CRYPTOGRAPHY_PACKAGE: ( "python-oracledb thin mode cannot be used because the " - "cryptography package is not installed" + "cryptography package cannot be imported" ), ERR_NO_STATEMENT: "no statement specified and no prior statement prepared", ERR_NO_STATEMENT_EXECUTED: "no statement executed", diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index 4c905220..54a0a3ab 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -76,8 +76,7 @@ cdef class BaseThinConnImpl(BaseConnImpl): uint8_t _session_state_desired def __init__(self, str dsn, ConnectParamsImpl params): - if not HAS_CRYPTOGRAPHY: - errors._raise_err(errors.ERR_NO_CRYPTOGRAPHY_PACKAGE) + _check_cryptography() BaseConnImpl.__init__(self, dsn, params) self.thin = True diff --git a/src/oracledb/impl/thin/crypto.pyx b/src/oracledb/impl/thin/crypto.pyx index b00d0ff7..b9d7c9c6 100644 --- a/src/oracledb/impl/thin/crypto.pyx +++ b/src/oracledb/impl/thin/crypto.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2021, 2023, Oracle and/or its affiliates. 
+# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,8 +35,8 @@ try: from cryptography.hazmat.primitives.ciphers import algorithms, modes, Cipher from cryptography.hazmat.primitives.asymmetric import padding from cryptography.hazmat.primitives.kdf import pbkdf2 -except ImportError: - HAS_CRYPTOGRAPHY = False +except Exception as e: + CRYPTOGRAPHY_IMPORT_ERROR = e DN_REGEX = '(?:^|,\s?)(?:(?P[A-Z]+)=(?P"(?:[^"]|"")+"|[^,]+))+' diff --git a/src/oracledb/impl/thin/pool.pyx b/src/oracledb/impl/thin/pool.pyx index 9ca08644..beee9c53 100644 --- a/src/oracledb/impl/thin/pool.pyx +++ b/src/oracledb/impl/thin/pool.pyx @@ -56,8 +56,7 @@ cdef class BaseThinPoolImpl(BasePoolImpl): bint _open def __init__(self, str dsn, PoolParamsImpl params): - if not HAS_CRYPTOGRAPHY: - errors._raise_err(errors.ERR_NO_CRYPTOGRAPHY_PACKAGE) + _check_cryptography() params._check_credentials() self.connect_params = params self.username = params.user diff --git a/src/oracledb/impl/thin/utils.pyx b/src/oracledb/impl/thin/utils.pyx index 62476188..e0984c92 100644 --- a/src/oracledb/impl/thin/utils.pyx +++ b/src/oracledb/impl/thin/utils.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2020, 2023, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -89,6 +89,15 @@ cdef str _get_connect_data(Description description, str connection_id, ConnectPa return description.build_connect_string(cid) +cdef int _check_cryptography() except -1: + """ + Checks to see that the cryptography package was imported successfully. + """ + if CRYPTOGRAPHY_IMPORT_ERROR is not None: + errors._raise_err(errors.ERR_NO_CRYPTOGRAPHY_PACKAGE, + str(CRYPTOGRAPHY_IMPORT_ERROR)) + + def init_thin_impl(package): """ Initializes globals after the package has been completely initialized. This diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index c08a24f6..7b305e6a 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -195,7 +195,7 @@ from .base_impl import ( ctypedef unsigned char char_type # flag whether the cryptography package exists -cdef bint HAS_CRYPTOGRAPHY = True +cdef object CRYPTOGRAPHY_IMPORT_ERROR = None include "impl/thin/constants.pxi" include "impl/thin/utils.pyx" From 8d280fc36ca747ce56bd91a00ebf664c972e121f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:07:13 -0700 Subject: [PATCH 004/239] Decode nested records correctly (#456). --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/dbobject.pyx | 14 +++++++---- tests/sql/create_schema.sql | 36 +++++++++++++++++++++++++++++ tests/test_2300_object_var.py | 17 ++++++++++++++ tests/test_5600_dbobject_async.py | 17 ++++++++++++++ 5 files changed, 81 insertions(+), 5 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 002f3911..26452a58 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -21,6 +21,8 @@ Thin Mode Changes enqueue and dequeue of RAW and Oracle object payload types. #) Improved error message when the cryptography package cannot be imported (`issue 455 `__). +#) Fixed decoding of nested PL/SQL records + (`issue 456 `__). 
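As a sketch of what the nested-record fix above affects, assuming an open connection conn and cursor cursor, and the pkg_TestNestedRecords package that the test schema later in this patch creates:

    typ = conn.gettype("PKG_TESTNESTEDRECORDS.UDT_OUTER")
    obj = cursor.callfunc("pkg_TestNestedRecords.GetOuter", typ, (1, 2))
    print(obj.INNER1.ATTR2, obj.INNER2.ATTR2)  # 1 2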
Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/dbobject.pyx b/src/oracledb/impl/thin/dbobject.pyx index 81508712..b9d147c8 100644 --- a/src/oracledb/impl/thin/dbobject.pyx +++ b/src/oracledb/impl/thin/dbobject.pyx @@ -53,14 +53,16 @@ cdef class DbObjectPickleBuffer(GrowableBuffer): self.write_length(num_bytes) self.write_raw(ptr, num_bytes) - cdef int get_is_atomic_null(self, bint* is_null) except -1: + cdef int get_is_atomic_null(self, bint is_collection, + bint* is_null) except -1: """ Reads the next byte and checks to see if the value is atomically null. If not, the byte is returned to the buffer for further processing. """ cdef uint8_t value self.read_ub1(&value) - if value in (TNS_OBJ_ATOMIC_NULL, TNS_NULL_LENGTH_INDICATOR): + if value == TNS_OBJ_ATOMIC_NULL \ + or (is_collection and value == TNS_NULL_LENGTH_INDICATOR): is_null[0] = True else: is_null[0] = False @@ -352,12 +354,12 @@ cdef class ThinDbObjectImpl(BaseDbObjectImpl): uint8_t ora_type_num = metadata.dbtype._ora_type_num uint8_t csfrm = metadata.dbtype._csfrm DbObjectPickleBuffer xml_buf + bint is_null, is_collection BaseThinConnImpl conn_impl ThinDbObjectImpl obj_impl BaseThinLobImpl lob_impl OracleData data bytes locator - bint is_null type cls if ora_type_num in (ORA_TYPE_NUM_CLOB, ORA_TYPE_NUM_BLOB, @@ -372,7 +374,9 @@ cdef class ThinDbObjectImpl(BaseDbObjectImpl): else PY_TYPE_LOB return cls._from_impl(lob_impl) elif ora_type_num == ORA_TYPE_NUM_OBJECT: - buf.get_is_atomic_null(&is_null) + is_collection = \ + metadata.objtype.is_collection or self.type.is_collection + buf.get_is_atomic_null(is_collection, &is_null) if is_null: return None if metadata.objtype is None: @@ -381,7 +385,7 @@ cdef class ThinDbObjectImpl(BaseDbObjectImpl): return xml_buf.read_xmltype(self.type._conn_impl) obj_impl = ThinDbObjectImpl.__new__(ThinDbObjectImpl) obj_impl.type = metadata.objtype - if metadata.objtype.is_collection or self.type.is_collection: + if is_collection: obj_impl.packed_data = buf.read_bytes() else: obj_impl._unpack_data_from_buf(buf) diff --git a/tests/sql/create_schema.sql b/tests/sql/create_schema.sql index b342d5af..8885b34e 100644 --- a/tests/sql/create_schema.sql +++ b/tests/sql/create_schema.sql @@ -1321,6 +1321,42 @@ create or replace package body &main_user..pkg_TestRecords as end; / +create or replace package &main_user..pkg_TestNestedRecords as + + type udt_Inner is record ( + Attr1 number, + Attr2 number + ); + + type udt_Outer is record ( + Inner1 udt_Inner, + Inner2 udt_Inner + ); + + function GetOuter ( + a_Value1 number, + a_Value2 number + ) return udt_Outer; + +end; +/ + +create or replace package body &main_user..pkg_TestNestedRecords as + + function GetOuter ( + a_Value1 number, + a_Value2 number + ) return udt_Outer is + t_Outer udt_Outer; + begin + t_Outer.Inner1.Attr2 := a_Value1; + t_Outer.Inner2.Attr2 := a_Value2; + return t_Outer; + end; + +end; +/ + create or replace package &main_user..pkg_SessionCallback as procedure TheCallback ( diff --git a/tests/test_2300_object_var.py b/tests/test_2300_object_var.py index ca74b986..87d8ce32 100644 --- a/tests/test_2300_object_var.py +++ b/tests/test_2300_object_var.py @@ -849,6 +849,23 @@ def test_2341(self): with self.assertRaisesFullCode("DPY-2035"): conn.gettype(f"{main_user}.UDT_OBJECTARRAY") + def test_2342(self): + "2342 - test nested records" + options = [(None, None), (1, None), (None, 2), (1, 2)] + typ = self.conn.gettype("PKG_TESTNESTEDRECORDS.UDT_OUTER") + for option in options: + with self.subTest(option=option): + 
value1, value2 = option + obj = self.cursor.callfunc( + "pkg_TestNestedRecords.GetOuter", typ, (value1, value2) + ) + self.assertIsNotNone(obj.INNER1) + self.assertIsNone(obj.INNER1.ATTR1) + self.assertEqual(obj.INNER1.ATTR2, value1) + self.assertIsNotNone(obj.INNER2) + self.assertIsNone(obj.INNER2.ATTR1) + self.assertEqual(obj.INNER2.ATTR2, value2) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_5600_dbobject_async.py b/tests/test_5600_dbobject_async.py index 03ecd645..b376f727 100644 --- a/tests/test_5600_dbobject_async.py +++ b/tests/test_5600_dbobject_async.py @@ -629,6 +629,23 @@ async def test_5618(self): with self.assertRaisesFullCode("DPY-2035"): await conn.gettype(f"{main_user}.UDT_OBJECTARRAY") + async def test_5619(self): + "5619 - test nested records" + options = [(None, None), (1, None), (None, 2), (1, 2)] + typ = await self.conn.gettype("PKG_TESTNESTEDRECORDS.UDT_OUTER") + for option in options: + with self.subTest(option=option): + value1, value2 = option + obj = await self.cursor.callfunc( + "pkg_TestNestedRecords.GetOuter", typ, (value1, value2) + ) + self.assertIsNotNone(obj.INNER1) + self.assertIsNone(obj.INNER1.ATTR1) + self.assertEqual(obj.INNER1.ATTR2, value1) + self.assertIsNotNone(obj.INNER2) + self.assertIsNone(obj.INNER2.ATTR1) + self.assertEqual(obj.INNER2.ATTR2, value2) + if __name__ == "__main__": test_env.run_test_cases() From a3606d4946970a5a41e12ef2022456dc00270c39 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:07:46 -0700 Subject: [PATCH 005/239] Avoid memory allocation/free cycles for decimal data using Arrow; put methods in alphabetical order; eliminate Cython warnings. --- doc/src/release_notes.rst | 4 ++ src/oracledb/interchange/nanoarrow_bridge.pxd | 9 +-- src/oracledb/interchange/nanoarrow_bridge.pyx | 61 +++++++++---------- 3 files changed, 35 insertions(+), 39 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 26452a58..9453d6e8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -30,6 +30,10 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Internal change: avoid memory allocation/free cycles for decimal data when + calling :meth:`Connection.fetch_df_all()` and + :meth:`Connection.fetch_df_batches()`. 
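A minimal sketch of consuming these data frame APIs through PyArrow, mirroring the interoperability pattern the tests in this series use; the query, table and column names are placeholders and conn is assumed to be an open connection:

    import pyarrow

    ora_df = conn.fetch_df_all("select employee_id, salary from employees")
    table = pyarrow.Table.from_arrays(
        ora_df.column_arrays(), names=ora_df.column_names()
    )
    df = table.to_pandas()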
+ oracledb 3.0.0 (March 2025) --------------------------- diff --git a/src/oracledb/interchange/nanoarrow_bridge.pxd b/src/oracledb/interchange/nanoarrow_bridge.pxd index 806f660e..f8c52872 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pxd +++ b/src/oracledb/interchange/nanoarrow_bridge.pxd @@ -61,13 +61,6 @@ cdef extern from "nanoarrow.h": NANOARROW_TIME_UNIT_MICRO NANOARROW_TIME_UNIT_NANO - cdef struct ArrowStringView: - const char* data - int64_t size_bytes - - cdef struct ArrowDecimal: - pass - cdef class OracleArrowArray: """ @@ -94,9 +87,9 @@ cdef class OracleArrowArray: cdef str _schema_to_string(self) cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 + cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1 cdef int append_double(self, double value) except -1 cdef int append_float(self, float value) except -1 cdef int append_int64(self, int64_t value) except -1 cdef int append_null(self) except -1 - cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1 cdef int finish_building(self) except -1 diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index 05705c5e..46a5441c 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -49,9 +49,16 @@ cdef extern from "nanoarrow/nanoarrow.c": cdef struct ArrowArrayView: ArrowBufferView *buffer_views + cdef struct ArrowDecimal: + pass + cdef struct ArrowError: pass + cdef struct ArrowStringView: + const char* data + int64_t size_bytes + cdef ArrowErrorCode NANOARROW_OK void ArrowArrayRelease(ArrowArray *array) @@ -64,13 +71,13 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowErrorCode ArrowArrayAppendDouble(ArrowArray* array, double value) ArrowErrorCode ArrowArrayAppendNull(ArrowArray* array, int64_t n) ArrowErrorCode ArrowArrayAppendInt(ArrowArray* array, int64_t value) - ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray * array, - const ArrowDecimal * value) + ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* array, + const ArrowDecimal* value) ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* array, ArrowError* error) ArrowErrorCode ArrowArrayReserve(ArrowArray* array, int64_t additional_size_elements) - inline ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) + ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) ArrowErrorCode ArrowArrayViewInitFromSchema(ArrowArrayView* array_view, const ArrowSchema* schema, ArrowError* error) @@ -90,9 +97,9 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowErrorCode ArrowSchemaSetName(ArrowSchema* schema, const char* name) int64_t ArrowSchemaToString(const ArrowSchema* schema, char* out, int64_t n, char recursive) - void ArrowDecimalInit(ArrowDecimal * decimal, int32_t bitwidth, + void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, int32_t precision, int32_t scale) - ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal * decimal, + ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal* decimal, ArrowStringView value) @@ -200,6 +207,24 @@ cdef class OracleArrowArray: data.size_bytes = num_bytes _check_nanoarrow(ArrowArrayAppendBytes(self.arrow_array, data)) + cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1: + """ + Append a value of type ArrowDecimal to the array + + Arrow decimals are fixed-point decimal numbers encoded as a + scaled integer. 
decimal128(7, 3) can exactly represent the numbers + 1234.567 and -1234.567 encoded internally as the 128-bit integers + 1234567 and -1234567, respectively. + """ + cdef: + ArrowStringView decimal_view + ArrowDecimal decimal + decimal_view.data = ptr + decimal_view.size_bytes = num_bytes + ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + _check_nanoarrow(ArrowDecimalSetDigits(&decimal, decimal_view)) + _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, &decimal)) + cdef int append_double(self, double value) except -1: """ Append a value of type double to the array. @@ -224,32 +249,6 @@ cdef class OracleArrowArray: """ _check_nanoarrow(ArrowArrayAppendNull(self.arrow_array, 1)) - cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1: - """ - Append a value of type ArrowDecimal to the array - - Arrow decimals are fixed-point decimal numbers encoded as a - scaled integer. decimal128(7, 3) can exactly represent the numbers - 1234.567 and -1234.567 encoded internally as the 128-bit integers - 1234567 and -1234567, respectively - - """ - cdef: - int64_t i = 0, j = 0 - char* digits = ptr - ArrowStringView decimal_view - ArrowDecimal * decimal = \ - cpython.PyMem_Malloc(sizeof(ArrowDecimal)) - - try: - decimal_view.data = digits - decimal_view.size_bytes = num_bytes - ArrowDecimalInit(decimal, 128, self.precision, self.scale) - _check_nanoarrow(ArrowDecimalSetDigits(decimal, decimal_view)) - _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, decimal)) - finally: - cpython.PyMem_Free(decimal) - cdef int finish_building(self) except -1: """ Finish building the array. No more data will be added to it. From c4bd99560a46f0129da62534f87466cc13be8061 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:08:12 -0700 Subject: [PATCH 006/239] Improved performance when a significant amount of data is duplicated from one row to the next. --- doc/src/release_notes.rst | 9 ++- src/oracledb/impl/thin/messages.pyx | 27 ++++++--- src/oracledb/impl/thin/var.pyx | 1 + src/oracledb/interchange/nanoarrow_bridge.pxd | 1 + src/oracledb/interchange/nanoarrow_bridge.pyx | 58 ++++++++++++++++++- src/oracledb/thin_impl.pyx | 4 ++ 6 files changed, 88 insertions(+), 12 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 9453d6e8..e7f12670 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -30,9 +30,12 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Internal change: avoid memory allocation/free cycles for decimal data when - calling :meth:`Connection.fetch_df_all()` and - :meth:`Connection.fetch_df_batches()`. 
+#) Improved the performance of :meth:`Connection.fetch_df_all()` and + :meth:`Connection.fetch_df_batches()`: + + - more efficient processing when a significant amount of data is duplicated + from one row to the next + - avoid memory allocation/free cycles for decimal data oracledb 3.0.0 (March 2025) diff --git a/src/oracledb/impl/thin/messages.pyx b/src/oracledb/impl/thin/messages.pyx index da5fe7e7..bf2a872e 100644 --- a/src/oracledb/impl/thin/messages.pyx +++ b/src/oracledb/impl/thin/messages.pyx @@ -1163,6 +1163,15 @@ cdef class MessageWithData(Message): for j in range(num_rows): values[j] = self._process_column_data(buf, var_impl, j) var_impl._values[self.row_index] = values + elif self.cursor_impl.fetching_arrow: + if self._is_duplicate_data(i): + if var_impl._last_arrow_array is None: + var_impl._last_arrow_array = var_impl._arrow_array + var_impl._arrow_array.append_last_value( + var_impl._last_arrow_array + ) + else: + self._process_column_data(buf, var_impl, self.row_index) elif self._is_duplicate_data(i): if self.row_index == 0 and var_impl.outconverter is not None: value = var_impl._last_raw_value @@ -1397,8 +1406,11 @@ cdef class MessageWithData(Message): for var_impl in self.out_var_impls: if var_impl is None or var_impl.outconverter is None: continue - var_impl._last_raw_value = \ - var_impl._values[self.cursor_impl._last_row_index] + if self.cursor_impl.fetching_arrow: + var_impl._last_arrow_array = var_impl._arrow_array + else: + var_impl._last_raw_value = \ + var_impl._values[self.cursor_impl._last_row_index] if var_impl.is_array: num_elements = var_impl.num_elements_in_array else: @@ -1431,8 +1443,11 @@ cdef class MessageWithData(Message): for var_impl in self.out_var_impls: if var_impl is None or var_impl.outconverter is None: continue - var_impl._last_raw_value = \ - var_impl._values[self.cursor_impl._last_row_index] + if self.cursor_impl.fetching_arrow: + var_impl._last_arrow_array = var_impl._arrow_array + else: + var_impl._last_raw_value = \ + var_impl._values[self.cursor_impl._last_row_index] if var_impl.is_array: num_elements = var_impl.num_elements_in_array else: @@ -2125,8 +2140,6 @@ cdef class ExecuteMessage(MessageWithData): self.cursor_impl._set_fetch_array_size(num_iters) if num_iters > 0 and not stmt._no_prefetch: options |= TNS_EXEC_OPTION_FETCH - if self.cursor_impl.fetching_arrow: - options |= TNS_EXEC_OPTION_NO_COMPRESSED_FETCH if not stmt._is_plsql and not self.parse_only: options |= TNS_EXEC_OPTION_NOT_PLSQL elif stmt._is_plsql and num_params > 0: @@ -2250,8 +2263,6 @@ cdef class ExecuteMessage(MessageWithData): and not info._is_return_bind] if self.function_code == TNS_FUNC_REEXECUTE_AND_FETCH: exec_flags_1 |= TNS_EXEC_OPTION_EXECUTE - if self.cursor_impl.fetching_arrow: - exec_flags_1 |= TNS_EXEC_OPTION_NO_COMPRESSED_FETCH num_iters = self.cursor_impl.prefetchrows self.cursor_impl._set_fetch_array_size(num_iters) else: diff --git a/src/oracledb/impl/thin/var.pyx b/src/oracledb/impl/thin/var.pyx index ab08882f..61cc8e66 100644 --- a/src/oracledb/impl/thin/var.pyx +++ b/src/oracledb/impl/thin/var.pyx @@ -32,6 +32,7 @@ cdef class ThinVarImpl(BaseVarImpl): cdef: object _last_raw_value + OracleArrowArray _last_arrow_array list _coroutine_indexes cdef int _bind(self, object conn, BaseCursorImpl cursor_impl, diff --git a/src/oracledb/interchange/nanoarrow_bridge.pxd b/src/oracledb/interchange/nanoarrow_bridge.pxd index f8c52872..94dea92a 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pxd +++ b/src/oracledb/interchange/nanoarrow_bridge.pxd @@ -91,5 
+91,6 @@ cdef class OracleArrowArray: cdef int append_double(self, double value) except -1 cdef int append_float(self, float value) except -1 cdef int append_int64(self, int64_t value) except -1 + cdef int append_last_value(self, OracleArrowArray array) except -1 cdef int append_null(self) except -1 cdef int finish_building(self) except -1 diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index 46a5441c..9a526e60 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -30,7 +30,7 @@ cimport cpython from libc.stdint cimport uintptr_t -from libc.string cimport strlen, strchr +from libc.string cimport memcpy, strlen, strchr from cpython.pycapsule cimport PyCapsule_New from .. import errors @@ -49,6 +49,10 @@ cdef extern from "nanoarrow/nanoarrow.c": cdef struct ArrowArrayView: ArrowBufferView *buffer_views + cdef struct ArrowBuffer: + uint8_t *data + int64_t size_bytes + cdef struct ArrowDecimal: pass @@ -73,6 +77,7 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowErrorCode ArrowArrayAppendInt(ArrowArray* array, int64_t value) ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* array, const ArrowDecimal* value) + ArrowBuffer* ArrowArrayBuffer(ArrowArray* array, int64_t i) ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* array, ArrowError* error) ArrowErrorCode ArrowArrayReserve(ArrowArray* array, @@ -99,6 +104,7 @@ cdef extern from "nanoarrow/nanoarrow.c": int64_t n, char recursive) void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, int32_t precision, int32_t scale) + void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal* decimal, ArrowStringView value) @@ -243,6 +249,56 @@ cdef class OracleArrowArray: """ _check_nanoarrow(ArrowArrayAppendInt(self.arrow_array, value)) + cdef int append_last_value(self, OracleArrowArray array) except -1: + """ + Appends the last value of the given array to this array. 
+ """ + cdef: + int32_t start_offset, end_offset + ArrowBuffer *offsets_buffer + ArrowBuffer *data_buffer + ArrowDecimal decimal + int64_t *as_int64 + int32_t *as_int32 + double *as_double + float *as_float + int64_t index + uint8_t *ptr + void* temp + index = array.arrow_array.length - 1 + if array.arrow_type in (NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP): + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_int64 = data_buffer.data + self.append_int64(as_int64[index]) + elif array.arrow_type == NANOARROW_TYPE_DOUBLE: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_double = data_buffer.data + self.append_double(as_double[index]) + elif array.arrow_type == NANOARROW_TYPE_FLOAT: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_float = data_buffer.data + self.append_double(as_float[index]) + elif array.arrow_type == NANOARROW_TYPE_DECIMAL128: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + ptr = data_buffer.data + index * 16 + ArrowDecimalSetBytes(&decimal, ptr) + _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, + &decimal)) + elif array.arrow_type == NANOARROW_TYPE_STRING: + offsets_buffer = ArrowArrayBuffer(array.arrow_array, 1) + data_buffer = ArrowArrayBuffer(array.arrow_array, 2) + as_int32 = offsets_buffer.data + start_offset = as_int32[index] + end_offset = as_int32[index + 1] + temp = cpython.PyMem_Malloc(end_offset - start_offset) + memcpy(temp, &data_buffer.data[start_offset], + end_offset - start_offset) + try: + self.append_bytes(temp, end_offset - start_offset) + finally: + cpython.PyMem_Free(temp) + cdef int append_null(self) except -1: """ Append a null value to the array. diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index 7b305e6a..81cc703e 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -192,6 +192,10 @@ from .base_impl import ( DB_TYPE_XMLTYPE, ) +from .interchange.nanoarrow_bridge cimport ( + OracleArrowArray, +) + ctypedef unsigned char char_type # flag whether the cryptography package exists From 00cb68e2eabdbf1f4253cdebcee925e2d0ac4bb4 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:09:02 -0700 Subject: [PATCH 007/239] Improve error message when payload type doesn't match the payload type supported by the queue. --- doc/src/release_notes.rst | 7 +++++++ src/oracledb/aq.py | 12 ++++++++++++ src/oracledb/errors.py | 5 +++++ tests/test_2700_aq.py | 16 +++++++++++++++- tests/test_2800_bulk_aq.py | 2 +- tests/test_7800_aq_raw.py | 9 +++++++++ tests/test_7900_aq_raw_async.py | 9 +++++++++ 7 files changed, 58 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e7f12670..c6b2575c 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -37,6 +37,13 @@ Common Changes from one row to the next - avoid memory allocation/free cycles for decimal data +#) Error ``DPY-2062: payload cannot be enqueued since it does not match the + payload type supported by the queue`` is now raised when the payload of a + message being enqueued is not supported by the queue. Previously, + python-oracledb Thick mode raised the error ``DPI-1071: payload type in + message properties must match the payload type of the queue`` and thin mode + raised an internal error. 
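A sketch of the condition the new error covers, using the TEST_RAW_QUEUE queue and UDT_BOOK type referenced by the test suite; conn is assumed to be an open connection:

    queue = conn.queue("TEST_RAW_QUEUE")        # queue expects RAW payloads
    book = conn.gettype("UDT_BOOK").newobject()
    props = conn.msgproperties(payload=book)
    queue.enqone(props)   # raises DPY-2062: payload cannot be enqueued since it
                          # does not match the payload type supported by the queue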
+ oracledb 3.0.0 (March 2025) --------------------------- diff --git a/src/oracledb/aq.py b/src/oracledb/aq.py index 7144dab6..7bc94257 100644 --- a/src/oracledb/aq.py +++ b/src/oracledb/aq.py @@ -56,6 +56,18 @@ def _verify_message(self, message: "MessageProperties") -> None: raise TypeError("expecting MessageProperties object") if message.payload is None: errors._raise_err(errors.ERR_MESSAGE_HAS_NO_PAYLOAD) + if isinstance(self.payload_type, DbObjectType): + if ( + not isinstance(message.payload, DbObject) + or message.payload.type != self.payload_type + ): + errors._raise_err(errors.ERR_PAYLOAD_CANNOT_BE_ENQUEUED) + elif self.payload_type == "JSON": + if not isinstance(message.payload, (dict, list)): + errors._raise_err(errors.ERR_PAYLOAD_CANNOT_BE_ENQUEUED) + else: + if not isinstance(message.payload, (str, bytes)): + errors._raise_err(errors.ERR_PAYLOAD_CANNOT_BE_ENQUEUED) @property def connection(self) -> "connection_module.Connection": diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 2672452d..82c95be4 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -281,6 +281,7 @@ def _raise_not_supported(feature: str) -> None: ERR_MISSING_CONNECT_DESCRIPTOR = 2059 ERR_ARROW_C_API_ERROR = 2060 ERR_PARAMS_HOOK_HANDLER_FAILED = 2061 +ERR_PAYLOAD_CANNOT_BE_ENQUEUED = 2062 # error numbers that result in NotSupportedError ERR_TIME_NOT_SUPPORTED = 3000 @@ -749,6 +750,10 @@ def _raise_not_supported(feature: str) -> None: ERR_PASSWORD_TYPE_HANDLER_FAILED: ( 'registered handler for password type "{password_type}" failed' ), + ERR_PAYLOAD_CANNOT_BE_ENQUEUED: ( + "payload cannot be enqueued since it does not match the payload type " + "supported by the queue" + ), ERR_PLAINTEXT_PASSWORD_IN_CONFIG: ( "password in configuration must specify a type" ), diff --git a/tests/test_2700_aq.py b/tests/test_2700_aq.py index b24c3df0..17487f27 100644 --- a/tests/test_2700_aq.py +++ b/tests/test_2700_aq.py @@ -567,7 +567,7 @@ def test_2722(self): queue = self.get_and_clear_queue(self.json_queue_name, "JSON") string_message = "This is a string message" props = self.conn.msgproperties(payload=string_message) - with self.assertRaisesFullCode("DPI-1071"): + with self.assertRaisesFullCode("DPY-2062"): queue.enqone(props) def test_2723(self): @@ -767,6 +767,20 @@ def test_2735(self): props = queue.deqOne() self.assertEqual(props.payload, value) + def test_2736(self): + "2736 - test enqueuing to an object queue with the wrong payload" + queue = self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + props = self.conn.msgproperties(payload="A string") + with self.assertRaisesFullCode("DPY-2062"): + queue.enqone(props) + typ = self.conn.gettype("UDT_SUBOBJECT") + obj = typ.newobject() + props = self.conn.msgproperties(payload=obj) + with self.assertRaisesFullCode("DPY-2062"): + queue.enqone(props) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_2800_bulk_aq.py b/tests/test_2800_bulk_aq.py index 16f4cf69..45cf5f32 100644 --- a/tests/test_2800_bulk_aq.py +++ b/tests/test_2800_bulk_aq.py @@ -200,7 +200,7 @@ def test_2808(self): "2808 - test enqueuing to a JSON queue without a JSON payload" queue = self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") props = self.conn.msgproperties(payload="string message") - with self.assertRaisesFullCode("DPI-1071"): + with self.assertRaisesFullCode("DPY-2062"): queue.enqmany([props, props]) def test_2809(self): diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index 6bfab15a..44adbdfe 100644 --- 
a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -395,6 +395,15 @@ def test_7824(self): props = queue.deqOne() self.assertEqual(props.payload, value) + def test_7825(self): + "7825 - test wrong payload type" + queue = self.get_and_clear_queue("TEST_RAW_QUEUE") + typ = self.conn.gettype("UDT_BOOK") + obj = typ.newobject() + props = self.conn.msgproperties(payload=obj) + with self.assertRaisesFullCode("DPY-2062"): + queue.enqone(props) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_7900_aq_raw_async.py b/tests/test_7900_aq_raw_async.py index 941d7a78..fd403a79 100644 --- a/tests/test_7900_aq_raw_async.py +++ b/tests/test_7900_aq_raw_async.py @@ -393,6 +393,15 @@ async def test_7923(self): self.assertEqual(queue.enqOptions, queue.enqoptions) self.assertEqual(queue.deqOptions, queue.deqoptions) + async def test_7924(self): + "7924 - test wrong payload type" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + typ = await self.conn.gettype("UDT_BOOK") + obj = typ.newobject() + props = self.conn.msgproperties(payload=obj) + with self.assertRaisesFullCode("DPY-2062"): + await queue.enqone(props) + if __name__ == "__main__": test_env.run_test_cases() From fae1b1735fa69ac4b5573bcc178fd2e436b8239a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 3 Mar 2025 16:10:11 -0700 Subject: [PATCH 008/239] Fix formatting and simplify code a bit. --- src/oracledb/impl/thin/messages.pyx | 4 ++-- src/oracledb/impl/thin/queue.pyx | 11 ----------- 2 files changed, 2 insertions(+), 13 deletions(-) diff --git a/src/oracledb/impl/thin/messages.pyx b/src/oracledb/impl/thin/messages.pyx index bf2a872e..fbdbca3a 100644 --- a/src/oracledb/impl/thin/messages.pyx +++ b/src/oracledb/impl/thin/messages.pyx @@ -2487,7 +2487,7 @@ cdef class DeqMessage(Message): else: self.props_impl.payload = b'' ptr = buf._get_raw(TNS_AQ_MESSAGE_ID_LENGTH) - self.props_impl.msgid =ptr[:TNS_AQ_MESSAGE_ID_LENGTH] + self.props_impl.msgid = ptr[:TNS_AQ_MESSAGE_ID_LENGTH] cdef int _write_message(self, WriteBuffer buf) except -1: """ @@ -2702,7 +2702,7 @@ cdef class EnqMessage(Message): buf.write_uint8(0) # JSON payload (pointer) buf.write_bytes_with_length(queue_name_bytes) - buf.write_bytes(self.props_impl.toid) + buf.write_bytes(self.queue_impl.payload_toid) if not self.queue_impl.is_json: if self.queue_impl.payload_type is not None: buf.write_dbobject(self.props_impl.payloadObject) diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index 59790f9e..6d84744c 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -43,7 +43,6 @@ cdef class BaseThinQueueImpl(BaseQueueImpl): ThinMsgPropsImpl props_impl DeqMessage message props_impl = ThinMsgPropsImpl() - props_impl._initialize(self) message = self._conn_impl._create_message(DeqMessage) message.queue_impl = self message.deq_options_impl = self.deq_options_impl @@ -323,7 +322,6 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): bytes msgid int32_t state object payloadObject - bytes toid int32_t version BaseThinConnImpl _conn_impl bytes enq_txn_id @@ -339,13 +337,6 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): self.version = 1 self.sender_agent_protocol = 0 - cdef int _initialize(self, BaseThinQueueImpl queue_impl) except -1: - """ - Internal method to initialize the message properties. - """ - self._conn_impl = queue_impl._conn_impl - self.toid = queue_impl.payload_toid - def get_num_attempts(self): """ Internal method for getting the number of attempts made. 
@@ -435,7 +426,6 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): Internal method for setting the payload from bytes. """ self.payloadObject = value - self.toid = bytes([0]*15+[0x17]) def set_payload_object(self, ThinDbObjectImpl value): """ @@ -444,7 +434,6 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): if not isinstance(value, ThinDbObjectImpl): raise TypeError("Expected ThinDbObjectImpl instance.") self.payloadObject = value - self.toid = value.toid[4:20] def set_priority(self, int32_t value): """ From d61334900d547ee2e146b2fbcfd4f61dd645c07b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 4 Mar 2025 20:20:06 -0700 Subject: [PATCH 009/239] Added support for enqueuing and dequeuing JSON payloads. --- doc/src/release_notes.rst | 2 ++ doc/src/user_guide/appendix_a.rst | 2 +- doc/src/user_guide/aq.rst | 8 ++++---- src/oracledb/impl/thin/messages.pyx | 14 ++++++++++---- src/oracledb/impl/thin/packet.pyx | 11 +++++++---- src/oracledb/impl/thin/queue.pyx | 8 +++++++- tests/test_2700_aq.py | 8 -------- 7 files changed, 31 insertions(+), 22 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index c6b2575c..2097514e 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,6 +17,8 @@ oracledb 3.1.0 (TBD) Thin Mode Changes +++++++++++++++++ +#) Added :ref:`Oracle Advanced Queuing ` support for single + enqueue and dequeue of JSON payloads. #) Added Async :ref:`Oracle Advanced Queuing ` support for single enqueue and dequeue of RAW and Oracle object payload types. #) Improved error message when the cryptography package cannot be imported diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 6f65dfee..26209b22 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -252,7 +252,7 @@ see :ref:`driverdiff` and :ref:`compatibility`. - Yes - Yes * - Oracle Transactional Event Queues and Advanced Queuing (AQ) (see :ref:`aqusermanual`) - - Yes - RAW and named Oracle object payloads + - Yes - only "Classic" queue is supported (RAW, named Oracle object, and JSON payloads) - Yes - Yes * - Call timeouts (see :attr:`Connection.call_timeout`) diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index 03a5a916..3491640f 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -21,12 +21,12 @@ types. **Classic Queue Support** -- RAW, named Oracle objects, and JMS payloads are supported. +- RAW, named Oracle objects, JSON, and JMS payloads are supported. -- The JSON payload requires Oracle Client libraries 21c (or later) and Oracle - Database 21c (or later). +- JSON payloads require Oracle Database 21c (or later). In python-oracle Thick + mode, Oracle Client libraries 21c (or later) are also needed. -JSON and JMS payloads, array message queuing and dequeuing operations, and +JMS payloads, array message queuing and dequeuing operations, and :ref:`Recipient Lists ` are only supported in python-oracledb :ref:`Thick mode `. 
diff --git a/src/oracledb/impl/thin/messages.pyx b/src/oracledb/impl/thin/messages.pyx index fbdbca3a..dde7645d 100644 --- a/src/oracledb/impl/thin/messages.pyx +++ b/src/oracledb/impl/thin/messages.pyx @@ -2462,14 +2462,13 @@ cdef class DeqMessage(Message): buf.read_ub4(&num_bytes) # num recipients if num_bytes > 0: errors._raise_err(errors.ERR_NOT_IMPLEMENTED) - if (not self.queue_impl.is_json and - self.queue_impl.payload_type is not None): + if self.queue_impl.payload_type is not None: type_impl = self.queue_impl.payload_type obj_impl = buf.read_dbobject(type_impl) if obj_impl is None: obj_impl = type_impl.create_new_object() self.props_impl.payload = PY_TYPE_DB_OBJECT._from_impl(obj_impl) - elif self.queue_impl.payload_type is None: + else: buf.read_ub4(&num_bytes) # TOID len if num_bytes > 0: buf.skip_raw_bytes(num_bytes) @@ -2484,8 +2483,12 @@ cdef class DeqMessage(Message): buf.skip_ub2() # flags if imageLength > 0: self.props_impl.payload = buf.read_bytes()[4:imageLength] + if self.queue_impl.is_json: + self.props_impl.payload = \ + self.conn_impl.decode_oson(self.props_impl.payload) else: - self.props_impl.payload = b'' + if not self.queue_impl.is_json: + self.props_impl.payload = b'' ptr = buf._get_raw(TNS_AQ_MESSAGE_ID_LENGTH) self.props_impl.msgid = ptr[:TNS_AQ_MESSAGE_ID_LENGTH] @@ -2708,6 +2711,9 @@ cdef class EnqMessage(Message): buf.write_dbobject(self.props_impl.payloadObject) else: buf.write_bytes(self.props_impl.payloadObject) + if self.queue_impl.is_json: + buf.write_oson(self.props_impl.payloadObject, + self.conn_impl._oson_max_fname_size, False) @cython.final diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index ba895b66..d68b51f9 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -906,12 +906,14 @@ cdef class WriteBuffer(Buffer): self.write_ub4(len(lob_impl._locator)) self.write_bytes_with_length(lob_impl._locator) - cdef int write_qlocator(self, uint64_t data_length) except -1: + cdef int write_qlocator(self, uint64_t data_length, + bint write_length=True) except -1: """ Writes a QLocator. QLocators are always 40 bytes in length. """ self.write_ub4(40) # QLocator length - self.write_uint8(40) # chunk length + if write_length: + self.write_uint8(40) # chunk length self.write_uint16be(38) # QLocator length less 2 bytes self.write_uint16be(TNS_LOB_QLOCATOR_VERSION) self.write_uint8(TNS_LOB_LOC_FLAGS_VALUE_BASED | \ @@ -927,14 +929,15 @@ cdef class WriteBuffer(Buffer): self.write_uint64be(0) # unused self.write_uint64be(0) # unused - cdef object write_oson(self, value, ssize_t max_fname_size): + cdef object write_oson(self, value, ssize_t max_fname_size, + bint write_length=True): """ Encodes the given value to OSON and then writes that to the buffer. it. 
""" cdef OsonEncoder encoder = OsonEncoder.__new__(OsonEncoder) encoder.encode(value, max_fname_size) - self.write_qlocator(encoder._pos) + self.write_qlocator(encoder._pos, write_length) self._write_raw_bytes_and_length(encoder._data, encoder._pos) cdef int write_seq_num(self) except -1: diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index 6d84744c..fdf75ccc 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -71,7 +71,7 @@ cdef class BaseThinQueueImpl(BaseQueueImpl): self.enq_options_impl = ThinEnqOptionsImpl() self.payload_type = payload_type if self.is_json: - errors._raise_not_supported("JSON payload in AQ") + self.payload_toid = bytes([0]*15+[0x47]) elif self.payload_type is not None: self.payload_toid = payload_type.oid else: @@ -435,6 +435,12 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): raise TypeError("Expected ThinDbObjectImpl instance.") self.payloadObject = value + def set_payload_json(self, object json_val): + """ + Internal method for setting the payload from a JSON object + """ + self.payloadObject = json_val + def set_priority(self, int32_t value): """ Internal method for setting the priority. diff --git a/tests/test_2700_aq.py b/tests/test_2700_aq.py index 17487f27..e8139fa9 100644 --- a/tests/test_2700_aq.py +++ b/tests/test_2700_aq.py @@ -536,10 +536,6 @@ def notification_callback(message): self.assertTrue(condition.wait(5)) conn.unsubscribe(sub) - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support JSON payload for AQ yet", - ) def test_2721(self): "2721 - test enqueuing and dequeuing JSON payloads" queue = self.get_and_clear_queue(self.json_queue_name, "JSON") @@ -558,10 +554,6 @@ def test_2721(self): self.conn.commit() self.assertEqual(results, self.json_data) - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support JSON payload for AQ yet", - ) def test_2722(self): "2722 - test enqueuing to a JSON queue without a JSON payload" queue = self.get_and_clear_queue(self.json_queue_name, "JSON") From 72636ffaedfab9fed00edd01475f26959f8019b1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 4 Mar 2025 20:20:55 -0700 Subject: [PATCH 010/239] Test improvements. --- tests/test_8000_dataframe.py | 1 + tests/test_8100_dataframe_async.py | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 251b1ea4..8be8a603 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -273,6 +273,7 @@ def __convert_to_df(self, data): "LASTUPDATED", ] pa_tab = pyarrow.Table.from_arrays(arrays, names=names) + pa_tab.validate(full=True) return pa_tab.to_pandas() def __get_data_from_df(self, df): diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 0b0fd9a2..c389b2d9 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -278,6 +278,7 @@ def __convert_to_df(self, data): "LASTUPDATED", ] pa_tab = pyarrow.Table.from_arrays(arrays, names=names) + pa_tab.validate(full=True) return pa_tab.to_pandas() def __get_data_from_df(self, df): From eee8e9d24ee071d8c3fbb462a642323977c506fb Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 4 Mar 2025 20:21:10 -0700 Subject: [PATCH 011/239] Correct memory management of OracleArrowArray. 
--- doc/src/release_notes.rst | 5 ++- src/oracledb/base_impl.pxd | 1 + src/oracledb/impl/base/cursor.pyx | 4 +- src/oracledb/impl/base/var.pyx | 11 ++++++ src/oracledb/impl/thin/messages.pyx | 11 ++---- src/oracledb/impl/thin/var.pyx | 11 ++++++ src/oracledb/interchange/nanoarrow_bridge.pxd | 3 +- src/oracledb/interchange/nanoarrow_bridge.pyx | 37 ++++++++++++++----- tests/test_8000_dataframe.py | 4 ++ tests/test_8100_dataframe_async.py | 6 +++ 10 files changed, 71 insertions(+), 22 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2097514e..8e0a7e4b 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -32,12 +32,15 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Improved the performance of :meth:`Connection.fetch_df_all()` and +#) Improved the performance and memory management of + :meth:`Connection.fetch_df_all()` and :meth:`Connection.fetch_df_batches()`: - more efficient processing when a significant amount of data is duplicated from one row to the next - avoid memory allocation/free cycles for decimal data + - eliminated memory leak if OracleDataFrame is not converted to an external + data frame #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 1d1e1d2b..fe72f0a1 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -736,6 +736,7 @@ cdef class BaseVarImpl: cdef DbType _check_fetch_conversion(self) cdef int _create_arrow_array(self) except -1 cdef int _finalize_init(self) except -1 + cdef OracleArrowArray _finish_building_arrow_array(self) cdef DbType _get_adjusted_type(self, uint8_t ora_type_num) cdef list _get_array_value(self) cdef object _get_scalar_value(self, uint32_t pos) diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index a4ae7c78..ed82659b 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -518,9 +518,7 @@ cdef class BaseCursorImpl: BaseVarImpl var_impl list columns = [] for var_impl in self.fetch_var_impls: - var_impl._arrow_array.finish_building() - columns.append(var_impl._arrow_array) - var_impl._arrow_array = None + columns.append(var_impl._finish_building_arrow_array()) return PY_TYPE_DATAFRAME(columns) def close(self, bint in_del=False): diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index 0cf47f93..564b5729 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -277,6 +277,17 @@ cdef class BaseVarImpl: if self.num_elements == 0: self.num_elements = 1 + cdef OracleArrowArray _finish_building_arrow_array(self): + """ + Finish building the Arrow array associated with the variable and then + return that array (after clearing it in the variable so that a new + array will be built if more rows are fetched). 
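As a sketch of converting OracleDataFrame results to an external data frame, the situation the memory-management fix in this patch is concerned with; conn is assumed to be an open connection, the query is a placeholder, and fetch_df_batches() is understood here to yield one OracleDataFrame per batch:

    import pyarrow

    for batch in conn.fetch_df_batches("select * from employees"):
        tab = pyarrow.Table.from_arrays(
            batch.column_arrays(), names=batch.column_names()
        )
        print(tab.num_rows)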
+ """ + cdef OracleArrowArray array = self._arrow_array + array.finish_building() + self._arrow_array = None + return array + cdef DbType _get_adjusted_type(self, uint8_t ora_type_num): """ Returns an adjusted type based on the desired Oracle type and the diff --git a/src/oracledb/impl/thin/messages.pyx b/src/oracledb/impl/thin/messages.pyx index dde7645d..59cf5c1e 100644 --- a/src/oracledb/impl/thin/messages.pyx +++ b/src/oracledb/impl/thin/messages.pyx @@ -1165,13 +1165,12 @@ cdef class MessageWithData(Message): var_impl._values[self.row_index] = values elif self.cursor_impl.fetching_arrow: if self._is_duplicate_data(i): - if var_impl._last_arrow_array is None: - var_impl._last_arrow_array = var_impl._arrow_array var_impl._arrow_array.append_last_value( var_impl._last_arrow_array ) else: self._process_column_data(buf, var_impl, self.row_index) + var_impl._last_arrow_array = None elif self._is_duplicate_data(i): if self.row_index == 0 and var_impl.outconverter is not None: value = var_impl._last_raw_value @@ -1406,9 +1405,7 @@ cdef class MessageWithData(Message): for var_impl in self.out_var_impls: if var_impl is None or var_impl.outconverter is None: continue - if self.cursor_impl.fetching_arrow: - var_impl._last_arrow_array = var_impl._arrow_array - else: + if not self.cursor_impl.fetching_arrow: var_impl._last_raw_value = \ var_impl._values[self.cursor_impl._last_row_index] if var_impl.is_array: @@ -1443,9 +1440,7 @@ cdef class MessageWithData(Message): for var_impl in self.out_var_impls: if var_impl is None or var_impl.outconverter is None: continue - if self.cursor_impl.fetching_arrow: - var_impl._last_arrow_array = var_impl._arrow_array - else: + if not self.cursor_impl.fetching_arrow: var_impl._last_raw_value = \ var_impl._values[self.cursor_impl._last_row_index] if var_impl.is_array: diff --git a/src/oracledb/impl/thin/var.pyx b/src/oracledb/impl/thin/var.pyx index 61cc8e66..0fa7c613 100644 --- a/src/oracledb/impl/thin/var.pyx +++ b/src/oracledb/impl/thin/var.pyx @@ -113,6 +113,17 @@ cdef class ThinVarImpl(BaseVarImpl): BaseVarImpl._finalize_init(self) self._values = [None] * self.num_elements + cdef OracleArrowArray _finish_building_arrow_array(self): + """ + Finish building the Arrow array associated with the variable and then + return that array (after clearing it in the variable so that a new + array will be built if more rows are fetched). In thin mode, the + duplicate row handling requires the last array to be retained, so do + that here. + """ + self._last_arrow_array = BaseVarImpl._finish_building_arrow_array(self) + return self._last_arrow_array + cdef list _get_array_value(self): """ Internal method to return the value of the array. 
diff --git a/src/oracledb/interchange/nanoarrow_bridge.pxd b/src/oracledb/interchange/nanoarrow_bridge.pxd index 94dea92a..cc6fd842 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pxd +++ b/src/oracledb/interchange/nanoarrow_bridge.pxd @@ -41,7 +41,7 @@ cdef extern from "nanoarrow.h": int64_t null_count int64_t offset int64_t n_buffers - void (*release)(ArrowSchema *) + void (*release)(ArrowArray*) cdef struct ArrowSchema: void (*release)(ArrowSchema*) @@ -84,6 +84,7 @@ cdef class OracleArrowArray: double factor ArrowArray *arrow_array ArrowSchema *arrow_schema + void (*actual_array_release)(ArrowArray*) noexcept cdef str _schema_to_string(self) cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index 9a526e60..4fd9ff56 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -118,23 +118,32 @@ cdef int _check_nanoarrow(int code) except -1: errors._raise_err(errors.ERR_ARROW_C_API_ERROR, code=code) -cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: - cdef ArrowSchema * schema = cpython.PyCapsule_GetPointer( - schema_capsule, 'arrow_schema' - ) - if schema.release != NULL: - ArrowSchemaRelease(schema) +cdef void array_deleter(ArrowArray *array) noexcept: + """ + Called when an external library calls the release for an Arrow array. This + method simply marks the release as completed but doesn't actually do it, so + that the handling of duplicate rows can still make use of the array, even + if the external library no longer requires it! + """ + array.release = NULL cdef void pycapsule_array_deleter(object array_capsule) noexcept: - cdef ArrowArray * array = cpython.PyCapsule_GetPointer( - array_capsule, 'arrow_array' + cdef ArrowArray* array = cpython.PyCapsule_GetPointer( + array_capsule, "arrow_array" ) - # Do not invoke the deleter on a used/moved capsule if array.release != NULL: ArrowArrayRelease(array) +cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: + cdef ArrowSchema* schema = cpython.PyCapsule_GetPointer( + schema_capsule, "arrow_schema" + ) + if schema.release != NULL: + ArrowSchemaRelease(schema) + + cdef class OracleArrowArray: def __cinit__(self, ArrowType arrow_type, str name, int8_t precision, @@ -179,8 +188,14 @@ cdef class OracleArrowArray: def __dealloc__(self): if self.arrow_array != NULL: + if self.arrow_array.release == NULL: + self.arrow_array.release = self.actual_array_release + if self.arrow_array.release != NULL: + ArrowArrayRelease(self.arrow_array) cpython.PyMem_Free(self.arrow_array) if self.arrow_schema != NULL: + if self.arrow_schema.release != NULL: + ArrowSchemaRelease(self.arrow_schema) cpython.PyMem_Free(self.arrow_schema) def __len__(self): @@ -265,6 +280,8 @@ cdef class OracleArrowArray: int64_t index uint8_t *ptr void* temp + if array is None: + array = self index = array.arrow_array.length - 1 if array.arrow_type in (NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP): data_buffer = ArrowArrayBuffer(array.arrow_array, 1) @@ -383,6 +400,8 @@ cdef class OracleArrowArray: array_capsule = PyCapsule_New( self.arrow_array, 'arrow_array', &pycapsule_array_deleter ) + self.actual_array_release = self.arrow_array.release + self.arrow_array.release = array_deleter schema_capsule = PyCapsule_New( self.arrow_schema, "arrow_schema", &pycapsule_schema_deleter ) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 
8be8a603..0de568b9 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -521,6 +521,10 @@ def test_8020(self): self.assertEqual(metadata, ora_col.metadata) self.assertEqual(ora_col.null_count, 1) + def test_8021(self): + "8021 - batches with size that has duplicate rows across batches" + self.__test_df_batches_interop(DATASET_4, batch_size=3, num_batches=2) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index c389b2d9..65ba8d79 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -489,6 +489,12 @@ async def test_8117(self): DATASET_2, batch_size=len(DATASET_2), num_batches=1 ) + async def test_8118(self): + "8118 - batches with size that has duplicate rows across batches" + await self.__test_df_batches_interop( + DATASET_4, batch_size=3, num_batches=2 + ) + if __name__ == "__main__": test_env.run_test_cases() From 3bc10dd1941ab8c565bb67e8cdef66335ca61771 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 15:44:03 -0600 Subject: [PATCH 012/239] Fixed bug when NUMBER data is fetched with fetch_df_all() or fetch_df_batches() that does not have precision or scale specified and oracledb.defaults.fetch_decimals is set to True. --- doc/src/release_notes.rst | 4 ++++ src/oracledb/impl/base/metadata.pyx | 2 +- tests/test_8000_dataframe.py | 13 +++++++++++++ tests/test_8100_dataframe_async.py | 13 +++++++++++++ 4 files changed, 31 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 8e0a7e4b..24320ab8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -42,6 +42,10 @@ Common Changes - eliminated memory leak if OracleDataFrame is not converted to an external data frame +#) Fixed bug when NUMBER data is fetched with + :meth:`Connection.fetch_df_all()` or :meth:`Connection.fetch_df_batches()` + that does not have precision or scale specified and + :attr:`defaults.fetch_decimals` is set to *True*. #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a message being enqueued is not supported by the queue. 
Previously, diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index e4bda9ca..a4b3c4c0 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -62,7 +62,7 @@ cdef class OracleMetadata: uint8_t py_type_num = self._py_type_num uint32_t db_type_num = self.dbtype.num if db_type_num == DB_TYPE_NUM_NUMBER: - if py_type_num == PY_TYPE_NUM_DECIMAL: + if py_type_num == PY_TYPE_NUM_DECIMAL and self.precision > 0: self._arrow_type = NANOARROW_TYPE_DECIMAL128 elif py_type_num == PY_TYPE_NUM_STR: self._arrow_type = NANOARROW_TYPE_STRING diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 0de568b9..8c399e29 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -525,6 +525,19 @@ def test_8021(self): "8021 - batches with size that has duplicate rows across batches" self.__test_df_batches_interop(DATASET_4, batch_size=3, num_batches=2) + def test_8022(self): + "8022 - fetch_decimals without precision and scale specified" + data = [(1.0,)] + self.__check_interop() + with test_env.DefaultsContextManager("fetch_decimals", True): + ora_df = self.conn.fetch_df_all("select 1.0 from dual") + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 65ba8d79..76ea43b7 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -495,6 +495,19 @@ async def test_8118(self): DATASET_4, batch_size=3, num_batches=2 ) + async def test_8119(self): + "8119 - fetch_decimals without precision and scale specified" + data = [(1.0,)] + self.__check_interop() + with test_env.DefaultsContextManager("fetch_decimals", True): + ora_df = await self.conn.fetch_df_all("select 1.0 from dual") + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() From 9a06595df90754f8339da2bf4beb7332b40f3619 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 15:59:52 -0600 Subject: [PATCH 013/239] Remove trailing double period. --- src/oracledb/connect_params.py | 12 ++++++------ src/oracledb/connection.py | 8 ++++---- src/oracledb/pool.py | 8 ++++---- src/oracledb/pool_params.py | 8 ++++---- utils/fields.cfg | 4 ++-- 5 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/oracledb/connect_params.py b/src/oracledb/connect_params.py index 52e2ccbf..8e5bd5eb 100644 --- a/src/oracledb/connect_params.py +++ b/src/oracledb/connect_params.py @@ -196,7 +196,7 @@ def __init__( matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the - hostname will be used. (default: None) + hostname will be used (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet @@ -229,7 +229,7 @@ def __init__( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. 
This value is only used in - thick mode. (default: False) + thick mode (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. @@ -540,7 +540,7 @@ def machine(self) -> str: def matchanytag(self) -> bool: """ Boolean specifying whether any tag can be used when acquiring a - connection from the pool. This value is only used in thick mode.. + connection from the pool. This value is only used in thick mode. """ return self._impl.matchanytag @@ -695,7 +695,7 @@ def ssl_server_cert_dn(self) -> Union[list, str]: The distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. - Otherwise the hostname will be used.. + Otherwise the hostname will be used. """ return [ d.ssl_server_cert_dn for d in self._impl.description_list.children @@ -998,7 +998,7 @@ def set( matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the - hostname will be used. + hostname will be used - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet @@ -1029,7 +1029,7 @@ def set( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in - thick mode. + thick mode - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index e7c1c635..56424ab8 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1437,7 +1437,7 @@ def connect( - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. Otherwise the hostname will be used. (default: None) + for any verfication. Otherwise the hostname will be used (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file @@ -1469,7 +1469,7 @@ def connect( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode. (default: False) + mode (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use @@ -2273,7 +2273,7 @@ def connect_async( - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. Otherwise the hostname will be used. (default: None) + for any verfication. Otherwise the hostname will be used (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file @@ -2305,7 +2305,7 @@ def connect_async( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode. 
(default: False) + mode (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index ea411da4..216a2ad8 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -844,7 +844,7 @@ def create_pool( - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. Otherwise the hostname will be used. (default: None) + for any verfication. Otherwise the hostname will be used (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file @@ -876,7 +876,7 @@ def create_pool( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode. (default: False) + mode (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use @@ -1376,7 +1376,7 @@ def create_pool_async( - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. Otherwise the hostname will be used. (default: None) + for any verfication. Otherwise the hostname will be used (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file @@ -1408,7 +1408,7 @@ def create_pool_async( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode. (default: False) + mode (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use diff --git a/src/oracledb/pool_params.py b/src/oracledb/pool_params.py index 919e379b..fd9e9b2d 100644 --- a/src/oracledb/pool_params.py +++ b/src/oracledb/pool_params.py @@ -265,7 +265,7 @@ def __init__( matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the - hostname will be used. (default: None) + hostname will be used (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet @@ -298,7 +298,7 @@ def __init__( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in - thick mode. (default: False) + thick mode (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. @@ -790,7 +790,7 @@ def set( matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the - hostname will be used. + hostname will be used - wallet_location: the directory where the wallet can be found. 
In thin mode this must be the directory containing the PEM-encoded wallet @@ -821,7 +821,7 @@ def set( - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in - thick mode. + thick mode - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. diff --git a/utils/fields.cfg b/utils/fields.cfg index ac0f6ac7..30ea95a3 100644 --- a/utils/fields.cfg +++ b/utils/fields.cfg @@ -315,7 +315,7 @@ description = the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. - Otherwise the hostname will be used. + Otherwise the hostname will be used [wallet_location] type = str @@ -376,7 +376,7 @@ type = bool default: False description = boolean specifying whether any tag can be used when acquiring a connection - from the pool. This value is only used in thick mode. + from the pool. This value is only used in thick mode [config_dir] type = str From 86c989a896703ef71b30e199ca00d500d09901f6 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:00:21 -0600 Subject: [PATCH 014/239] Fixed bug when binding a variable that was previously bound as an output variable in a DML RETURNING statement. --- doc/src/release_notes.rst | 2 ++ src/oracledb/base_impl.pxd | 1 + src/oracledb/impl/base/bind_var.pyx | 6 +++++- src/oracledb/impl/base/var.pyx | 1 + src/oracledb/impl/thick/var.pyx | 5 ++--- src/oracledb/impl/thin/messages.pyx | 1 + tests/test_1600_dml_returning.py | 16 +++++++++++++++- tests/test_5900_dml_returning_async.py | 16 +++++++++++++++- 8 files changed, 42 insertions(+), 6 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 24320ab8..774cd785 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -46,6 +46,8 @@ Common Changes :meth:`Connection.fetch_df_all()` or :meth:`Connection.fetch_df_batches()` that does not have precision or scale specified and :attr:`defaults.fetch_decimals` is set to *True*. +#) Fixed bug when binding a variable that was previously bound as an output + variable in a DML RETURNING statement. #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a message being enqueued is not supported by the queue. Previously, diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index fe72f0a1..2811da5f 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -725,6 +725,7 @@ cdef class BaseVarImpl: OracleMetadata _fetch_metadata list _values OracleArrowArray _arrow_array + bint _has_returned_data bint _is_value_set cdef int _bind(self, object conn, BaseCursorImpl cursor, diff --git a/src/oracledb/impl/base/bind_var.pyx b/src/oracledb/impl/base/bind_var.pyx index 66fa802f..a3896255 100644 --- a/src/oracledb/impl/base/bind_var.pyx +++ b/src/oracledb/impl/base/bind_var.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2022, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -97,6 +97,8 @@ cdef class BindVar: if isinstance(typ, PY_TYPE_VAR): self.var = typ self.var_impl = typ._impl + if self.var_impl._has_returned_data: + self.var_impl._finalize_init() else: self._create_var_from_type(conn, cursor_impl, typ) @@ -121,6 +123,8 @@ cdef class BindVar: if value is not self.var: self.var = value self.var_impl = value._impl + if self.var_impl._has_returned_data: + self.var_impl._finalize_init() return 0 # if a variable already exists check to see if the value can be set on diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index 564b5729..5565972f 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -276,6 +276,7 @@ cdef class BaseVarImpl: self.metadata._finalize_init() if self.num_elements == 0: self.num_elements = 1 + self._has_returned_data = False cdef OracleArrowArray _finish_building_arrow_array(self): """ diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 25584df0..74c7481c 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -34,7 +34,6 @@ cdef class ThickVarImpl(BaseVarImpl): dpiVar *_handle dpiData *_data StringBuffer _buf - bint _get_returned_data object _conn def __dealloc__(self): @@ -71,7 +70,7 @@ cdef class ThickVarImpl(BaseVarImpl): self._handle) < 0: _raise_from_odpi() if thick_cursor_impl._stmt_info.isReturning and not self._is_value_set: - self._get_returned_data = True + self._has_returned_data = True self._is_value_set = False cdef int _create_handle(self) except -1: @@ -180,7 +179,7 @@ cdef class ThickVarImpl(BaseVarImpl): cdef: uint32_t num_returned_rows dpiData *returned_data - if self._get_returned_data: + if self._has_returned_data: if dpiVar_getReturnedData(self._handle, pos, &num_returned_rows, &returned_data) < 0: _raise_from_odpi() diff --git a/src/oracledb/impl/thin/messages.pyx b/src/oracledb/impl/thin/messages.pyx index 59cf5c1e..9353d34e 100644 --- a/src/oracledb/impl/thin/messages.pyx +++ b/src/oracledb/impl/thin/messages.pyx @@ -1163,6 +1163,7 @@ cdef class MessageWithData(Message): for j in range(num_rows): values[j] = self._process_column_data(buf, var_impl, j) var_impl._values[self.row_index] = values + var_impl._has_returned_data = True elif self.cursor_impl.fetching_arrow: if self._is_duplicate_data(i): var_impl._arrow_array.append_last_value( diff --git a/tests/test_1600_dml_returning.py b/tests/test_1600_dml_returning.py index 151468bf..d19f20b0 100644 --- a/tests/test_1600_dml_returning.py +++ b/tests/test_1600_dml_returning.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -540,6 +540,20 @@ def test_1623(self): with self.assertRaisesFullCode("DPY-2048"): self.cursor.execute(sql, id_val=1, str_val=str_val) + def test_1624(self): + "1624 - use bind variable in new statement after RETURNING statement" + self.cursor.execute("truncate table TestTempTable") + sql = ( + "insert into TestTempTable (IntCol) values (:in_val)" + "returning IntCol + 15 into :out_val" + ) + out_val = self.cursor.var(int, arraysize=5) + self.cursor.execute(sql, in_val=25, out_val=out_val) + self.assertEqual(out_val.getvalue(), [40]) + sql = "begin :out_val := :in_val + 35; end;" + self.cursor.execute(sql, in_val=35, out_val=out_val) + self.assertEqual(out_val.getvalue(), 70) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_5900_dml_returning_async.py b/tests/test_5900_dml_returning_async.py index a1617c04..6603b5ca 100644 --- a/tests/test_5900_dml_returning_async.py +++ b/tests/test_5900_dml_returning_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -518,6 +518,20 @@ async def test_5921(self): await self.cursor.execute(sql, in_val=25, out_val=out_val) self.assertEqual(out_val.getvalue(), [25]) + async def test_5922(self): + "5922 - use bind variable in new statement after RETURNING statement" + await self.cursor.execute("truncate table TestTempTable") + sql = ( + "insert into TestTempTable (IntCol) values (:in_val)" + "returning IntCol + 15 into :out_val" + ) + out_val = self.cursor.var(int, arraysize=5) + await self.cursor.execute(sql, in_val=25, out_val=out_val) + self.assertEqual(out_val.getvalue(), [40]) + sql = "begin :out_val := :in_val + 35; end;" + await self.cursor.execute(sql, in_val=35, out_val=out_val) + self.assertEqual(out_val.getvalue(), 70) + if __name__ == "__main__": test_env.run_test_cases() From c9368a8232dd77e04d7f95c5247db1a66922686c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:01:16 -0600 Subject: [PATCH 015/239] Refactor messages.pyx into multiple files (one per message class) for ease of maintenance. 
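The thin driver embeds these sources directly in thin_impl.pyx (as the header of the
old messages.pyx notes), so splitting the file only changes which sources are pulled
in; the wire protocol and public behaviour are unchanged. A rough sketch of the
resulting layout follows, with file names taken from the diffstat below; the exact
include directives used by thin_impl.pyx may differ:

    # thin_impl.pyx (illustrative excerpt only, not the actual file contents)
    include "impl/thin/messages/base.pyx"     # Message and MessageWithData
    include "impl/thin/messages/auth.pyx"     # AuthMessage (also used for
                                              #   password changes)
    include "impl/thin/messages/execute.pyx"
    include "impl/thin/messages/fetch.pyx"
    # ...one include per message class, replacing the former
    # include "impl/thin/messages.pyx"
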
--- src/oracledb/impl/thin/connection.pyx | 33 +- src/oracledb/impl/thin/messages.pyx | 3161 ----------------- src/oracledb/impl/thin/messages/auth.pyx | 454 +++ src/oracledb/impl/thin/messages/base.pyx | 1482 ++++++++ src/oracledb/impl/thin/messages/commit.pyx | 40 + src/oracledb/impl/thin/messages/connect.pyx | 140 + .../impl/thin/{ => messages}/data_types.pyx | 50 +- src/oracledb/impl/thin/messages/deq.pyx | 252 ++ .../impl/thin/messages/end_pipeline.pyx | 46 + src/oracledb/impl/thin/messages/enq.pyx | 169 + src/oracledb/impl/thin/messages/execute.pyx | 263 ++ src/oracledb/impl/thin/messages/fast_auth.pyx | 74 + src/oracledb/impl/thin/messages/fetch.pyx | 48 + src/oracledb/impl/thin/messages/lob_op.pyx | 147 + src/oracledb/impl/thin/messages/logoff.pyx | 40 + src/oracledb/impl/thin/messages/ping.pyx | 40 + src/oracledb/impl/thin/messages/protocol.pyx | 89 + src/oracledb/impl/thin/messages/rollback.pyx | 40 + .../impl/thin/messages/session_release.pyx | 53 + .../impl/thin/messages/tpc_change_state.pyx | 103 + .../impl/thin/messages/tpc_switch.pyx | 134 + src/oracledb/thin_impl.pyx | 21 +- 22 files changed, 3705 insertions(+), 3174 deletions(-) delete mode 100644 src/oracledb/impl/thin/messages.pyx create mode 100644 src/oracledb/impl/thin/messages/auth.pyx create mode 100644 src/oracledb/impl/thin/messages/base.pyx create mode 100644 src/oracledb/impl/thin/messages/commit.pyx create mode 100644 src/oracledb/impl/thin/messages/connect.pyx rename src/oracledb/impl/thin/{ => messages}/data_types.pyx (95%) create mode 100644 src/oracledb/impl/thin/messages/deq.pyx create mode 100644 src/oracledb/impl/thin/messages/end_pipeline.pyx create mode 100644 src/oracledb/impl/thin/messages/enq.pyx create mode 100644 src/oracledb/impl/thin/messages/execute.pyx create mode 100644 src/oracledb/impl/thin/messages/fast_auth.pyx create mode 100644 src/oracledb/impl/thin/messages/fetch.pyx create mode 100644 src/oracledb/impl/thin/messages/lob_op.pyx create mode 100644 src/oracledb/impl/thin/messages/logoff.pyx create mode 100644 src/oracledb/impl/thin/messages/ping.pyx create mode 100644 src/oracledb/impl/thin/messages/protocol.pyx create mode 100644 src/oracledb/impl/thin/messages/rollback.pyx create mode 100644 src/oracledb/impl/thin/messages/session_release.pyx create mode 100644 src/oracledb/impl/thin/messages/tpc_change_state.pyx create mode 100644 src/oracledb/impl/thin/messages/tpc_switch.pyx diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index 54a0a3ab..a92f8f85 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -117,6 +117,25 @@ cdef class BaseThinConnImpl(BaseConnImpl): message._initialize(self) return message + cdef AuthMessage _create_change_password_message(self, str old_password, + str new_password): + """ + Creates a change password message which is an authentication message + with different attributes set. 
+ """ + cdef AuthMessage message + message = self._create_message(AuthMessage) + message.change_password = True + message.function_code = TNS_FUNC_AUTH_PHASE_TWO + message.user_bytes = self.username.encode() + message.user_bytes_len = len(message.user_bytes) + message.auth_mode = TNS_AUTH_MODE_WITH_PASSWORD | \ + TNS_AUTH_MODE_CHANGE_PASSWORD + message.password = old_password.encode() + message.newpassword = new_password.encode() + message.resend = False + return message + cdef TransactionChangeStateMessage _create_tpc_commit_message( self, object xid, bint one_phase ): @@ -410,10 +429,9 @@ cdef class ThinConnImpl(BaseThinConnImpl): def change_password(self, str old_password, str new_password): cdef: Protocol protocol = self._protocol - ChangePasswordMessage message - message = self._create_message(ChangePasswordMessage) - message.password = old_password.encode() - message.newpassword = new_password.encode() + Message message + message = self._create_change_password_message(old_password, + new_password) protocol._process_single_message(message) def close(self, bint in_del=False): @@ -919,10 +937,9 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): async def change_password(self, str old_password, str new_password): cdef: BaseAsyncProtocol protocol = self._protocol - ChangePasswordMessage message - message = self._create_message(ChangePasswordMessage) - message.password = old_password.encode() - message.newpassword = new_password.encode() + Message message + message = self._create_change_password_message(old_password, + new_password) await protocol._process_single_message(message) async def close(self, bint in_del=False): diff --git a/src/oracledb/impl/thin/messages.pyx b/src/oracledb/impl/thin/messages.pyx deleted file mode 100644 index 9353d34e..00000000 --- a/src/oracledb/impl/thin/messages.pyx +++ /dev/null @@ -1,3161 +0,0 @@ -#------------------------------------------------------------------------------ -# Copyright (c) 2020, 2025, Oracle and/or its affiliates. -# -# This software is dual-licensed to you under the Universal Permissive License -# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License -# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose -# either license. -# -# If you elect to accept the software under the Apache License, Version 2.0, -# the following applies: -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#------------------------------------------------------------------------------ - -#------------------------------------------------------------------------------ -# messages.pyx -# -# Cython file defining the various messages that are sent to the database and -# the responses that are received by the client (embedded in thin_impl.pyx). 
-#------------------------------------------------------------------------------ - -@cython.freelist(20) -cdef class _OracleErrorInfo: - cdef: - uint32_t num - uint16_t cursor_id - uint64_t pos - uint64_t rowcount - str message - Rowid rowid - list batcherrors - - -cdef class Message: - cdef: - BaseThinConnImpl conn_impl - PipelineOpResultImpl pipeline_result_impl - _OracleErrorInfo error_info - uint8_t message_type - uint8_t function_code - uint32_t call_status - uint16_t end_to_end_seq_num - uint64_t token_num - bint end_of_response - bint error_occurred - bint flush_out_binds - bint resend - bint retry - object warning - - cdef int _check_and_raise_exception(self) except -1: - """ - Checks to see if an error has occurred. If one has, an error object is - created and then the appropriate exception raised. Note that if a "dead - connection" error is detected, the connection is forced closed - immediately. - """ - cdef bint is_recoverable = False - if self.error_occurred: - if self.error_info.num in ( - 28, # session has been terminated - 31, # session marked for kill - 376, # file %s cannot be read at this time - 603, # ORACLE server session terminated - 1012, # not logged on - 1033, # ORACLE initialization or shutdown in progress - 1034, # the Oracle instance is not available for use - 1089, # immediate shutdown or close in progress - 1090, # shutdown in progress - 1092, # ORACLE instance terminated - 1115, # IO error reading block from file %s (block # %s) - 2396, # exceeded maximum idle time - 3113, # end-of-file on communication channel - 3114, # not connected to ORACLE - 3135, # connection lost contact - 12153, # TNS:not connected - 12514, # Service %s is not registered with the listener - 12537, # TNS:connection closed - 12547, # TNS:lost contact - 12570, # TNS:packet reader failure - 12571, # TNS:packet writer failure - 12583, # TNS:no reader - 12757, # instance does not currently know of requested service - 16456, # missing or invalid value - ): - is_recoverable = True - error = errors._Error(self.error_info.message, - code=self.error_info.num, - offset=self.error_info.pos, - isrecoverable=is_recoverable) - if error.is_session_dead: - self.conn_impl._protocol._force_close() - raise error.exc_type(error) - - cdef int _initialize(self, BaseThinConnImpl conn_impl) except -1: - """ - Initializes the message to contain the connection and a place to store - error information. For DRCP, the status of the connection may change - after the first round-trip to the database so this information needs to - be preserved. Child classes may have their own initialization. In order - to avoid overhead using the constructor, a special hook method is used - instead. - """ - conn_impl._protocol._read_buf._check_connected() - self.conn_impl = conn_impl - self.message_type = TNS_MSG_TYPE_FUNCTION - self.error_info = _OracleErrorInfo.__new__(_OracleErrorInfo) - self._initialize_hook() - - cdef int _initialize_hook(self) except -1: - """ - A hook that is used by subclasses to perform any necessary - initialization specific to that class. 
- """ - pass - - cdef int _process_error_info(self, ReadBuffer buf) except -1: - cdef: - uint32_t num_bytes, i, offset, num_offsets - _OracleErrorInfo info = self.error_info - uint16_t temp16, num_errors, error_code - uint8_t first_byte, flags - int16_t error_pos - str error_msg - buf.read_ub4(&self.call_status) # end of call status - buf.skip_ub2() # end to end seq# - buf.skip_ub4() # current row number - buf.skip_ub2() # error number - buf.skip_ub2() # array elem error - buf.skip_ub2() # array elem error - buf.read_ub2(&info.cursor_id) # cursor id - buf.read_sb2(&error_pos) # error position - buf.skip_ub1() # sql type (19c and earlier) - buf.skip_ub1() # fatal? - buf.skip_ub1() # flags - buf.skip_ub1() # user cursor options - buf.skip_ub1() # UPI parameter - buf.read_ub1(&flags) - if flags & 0x20: - self.warning = errors._create_warning(errors.WRN_COMPILATION_ERROR) - buf.read_rowid(&info.rowid) # rowid - buf.skip_ub4() # OS error - buf.skip_ub1() # statement number - buf.skip_ub1() # call number - buf.skip_ub2() # padding - buf.skip_ub4() # success iters - buf.read_ub4(&num_bytes) # oerrdd (logical rowid) - if num_bytes > 0: - buf.skip_raw_bytes_chunked() - - # batch error codes - buf.read_ub2(&num_errors) # batch error codes array - if num_errors > 0: - info.batcherrors = [] - buf.read_ub1(&first_byte) - for i in range(num_errors): - if first_byte == TNS_LONG_LENGTH_INDICATOR: - buf.skip_ub4() # chunk length ignored - buf.read_ub2(&error_code) - info.batcherrors.append(errors._Error(code=error_code)) - if first_byte == TNS_LONG_LENGTH_INDICATOR: - buf.skip_raw_bytes(1) # ignore end marker - - # batch error offsets - buf.read_ub4(&num_offsets) # batch error row offset array - if num_offsets > 0: - if num_offsets > 65535: - errors._raise_err(errors.ERR_TOO_MANY_BATCH_ERRORS) - buf.read_ub1(&first_byte) - for i in range(num_offsets): - if first_byte == TNS_LONG_LENGTH_INDICATOR: - buf.skip_ub4() # chunk length ignored - buf.read_ub4(&offset) - if i < num_errors: - info.batcherrors[i].offset = offset - if first_byte == TNS_LONG_LENGTH_INDICATOR: - buf.skip_raw_bytes(1) # ignore end marker - - # batch error messages - buf.read_ub2(&temp16) # batch error messages array - if temp16 > 0: - buf.skip_raw_bytes(1) # ignore packed size - for i in range(temp16): - buf.skip_ub2() # skip chunk length - info.batcherrors[i].message = \ - buf.read_str(CS_FORM_IMPLICIT).rstrip() - info.batcherrors[i]._make_adjustments() - buf.skip_raw_bytes(2) # ignore end marker - - buf.read_ub4(&info.num) # error number (extended) - buf.read_ub8(&info.rowcount) # row number (extended) - - # fields added in Oracle Database 20c - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_20_1: - buf.skip_ub4() # sql type - buf.skip_ub4() # server checksum - - # error message - if info.num != 0: - self.error_occurred = True - if error_pos > 0: - info.pos = error_pos - info.message = buf.read_str(CS_FORM_IMPLICIT).rstrip() - - # an error message marks the end of a response if no explicit end of - # response is available - if not buf._caps.supports_end_of_response: - self.end_of_response = True - - cdef int _process_message(self, ReadBuffer buf, - uint8_t message_type) except -1: - cdef uint64_t token_num - if message_type == TNS_MSG_TYPE_ERROR: - self._process_error_info(buf) - elif message_type == TNS_MSG_TYPE_WARNING: - self._process_warning_info(buf) - elif message_type == TNS_MSG_TYPE_TOKEN: - buf.read_ub8(&token_num) - if token_num != self.token_num: - errors._raise_err(errors.ERR_MISMATCHED_TOKEN, - 
token_num=token_num, - expected_token_num=self.token_num) - elif message_type == TNS_MSG_TYPE_STATUS: - buf.read_ub4(&self.call_status) - buf.read_ub2(&self.end_to_end_seq_num) - if not buf._caps.supports_end_of_response: - self.end_of_response = True - elif message_type == TNS_MSG_TYPE_PARAMETER: - self._process_return_parameters(buf) - elif message_type == TNS_MSG_TYPE_SERVER_SIDE_PIGGYBACK: - self._process_server_side_piggyback(buf) - elif message_type == TNS_MSG_TYPE_END_OF_RESPONSE: - self.end_of_response = True - else: - errors._raise_err(errors.ERR_MESSAGE_TYPE_UNKNOWN, - message_type=message_type, - position=buf._pos - 1) - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - raise NotImplementedError() - - cdef int _process_server_side_piggyback(self, ReadBuffer buf) except -1: - cdef: - uint16_t num_elements, i, temp16 - uint32_t num_bytes, flags - uint8_t opcode - buf.read_ub1(&opcode) - if opcode == TNS_SERVER_PIGGYBACK_LTXID: - buf.read_ub4(&num_bytes) - if num_bytes > 0: - self.conn_impl._ltxid = buf.read_bytes() - elif opcode == TNS_SERVER_PIGGYBACK_QUERY_CACHE_INVALIDATION \ - or opcode == TNS_SERVER_PIGGYBACK_TRACE_EVENT: - pass - elif opcode == TNS_SERVER_PIGGYBACK_OS_PID_MTS: - buf.read_ub2(&temp16) - buf.skip_raw_bytes_chunked() - elif opcode == TNS_SERVER_PIGGYBACK_SYNC: - buf.skip_ub2() # skip number of DTYs - buf.skip_ub1() # skip length of DTYs - buf.read_ub2(&num_elements) - buf.skip_ub1() # skip length - for i in range(num_elements): - buf.read_ub2(&temp16) - if temp16 > 0: # skip key - buf.skip_raw_bytes_chunked() - buf.read_ub2(&temp16) - if temp16 > 0: # skip value - buf.skip_raw_bytes_chunked() - buf.skip_ub2() # skip flags - buf.skip_ub4() # skip overall flags - elif opcode == TNS_SERVER_PIGGYBACK_EXT_SYNC: - buf.skip_ub2() # skip number of DTYs - buf.skip_ub1() # skip length of DTYs - elif opcode == TNS_SERVER_PIGGYBACK_AC_REPLAY_CONTEXT: - buf.skip_ub2() # skip number of DTYs - buf.skip_ub1() # skip length of DTYs - buf.skip_ub4() # skip flags - buf.skip_ub4() # skip error code - buf.skip_ub1() # skip queue - buf.read_ub4(&num_bytes) # skip replay context - if num_bytes > 0: - buf.skip_raw_bytes_chunked() - elif opcode == TNS_SERVER_PIGGYBACK_SESS_RET: - buf.skip_ub2() - buf.skip_ub1() - buf.read_ub2(&num_elements) - if num_elements > 0: - buf.skip_ub1() - for i in range(num_elements): - buf.read_ub2(&temp16) - if temp16 > 0: # skip key - buf.skip_raw_bytes_chunked() - buf.read_ub2(&temp16) - if temp16 > 0: # skip value - buf.skip_raw_bytes_chunked() - buf.skip_ub2() # skip flags - buf.read_ub4(&flags) # session flags - if flags & TNS_SESSGET_SESSION_CHANGED: - if self.conn_impl._drcp_establish_session: - self.conn_impl._statement_cache.clear_open_cursors() - self.conn_impl._drcp_establish_session = False - buf.read_ub4(&self.conn_impl._session_id) - buf.read_ub2(&self.conn_impl._serial_num) - else: - errors._raise_err(errors.ERR_UNKNOWN_SERVER_PIGGYBACK, - opcode=opcode) - - cdef int _process_warning_info(self, ReadBuffer buf) except -1: - cdef: - uint16_t num_bytes, error_num - str message - buf.read_ub2(&error_num) # error number - buf.read_ub2(&num_bytes) # length of error message - buf.skip_ub2() # flags - if error_num != 0 and num_bytes > 0: - message = buf.read_str(CS_FORM_IMPLICIT).rstrip() - self.warning = errors._Error(message, code=error_num, - iswarning=True) - - cdef int _write_begin_pipeline_piggyback(self, WriteBuffer buf) except -1: - """ - Writes the piggyback to the server that informs the server that a - pipeline is 
beginning. - """ - buf._data_flags |= TNS_DATA_FLAGS_BEGIN_PIPELINE - self._write_piggyback_code(buf, TNS_FUNC_PIPELINE_BEGIN) - buf.write_ub2(0) # error set ID - buf.write_uint8(0) # error set mode - buf.write_uint8(self.conn_impl.pipeline_mode) - - cdef int _write_close_cursors_piggyback(self, WriteBuffer buf) except -1: - """ - Writes the piggyback that informs the server of the cursors that can be - closed. - """ - self._write_piggyback_code(buf, TNS_FUNC_CLOSE_CURSORS) - buf.write_uint8(1) # pointer - self.conn_impl._statement_cache.write_cursors_to_close(buf) - - cdef int _write_current_schema_piggyback(self, WriteBuffer buf) except -1: - """ - Writes the piggyback that informs the server that a new current schema - is desired. - """ - cdef bytes schema_bytes - self._write_piggyback_code(buf, TNS_FUNC_SET_SCHEMA) - buf.write_uint8(1) # pointer - schema_bytes = self.conn_impl._current_schema.encode() - buf.write_ub4(len(schema_bytes)) - buf.write_bytes_with_length(schema_bytes) - - cdef int _write_close_temp_lobs_piggyback(self, - WriteBuffer buf) except -1: - """ - Writes the piggyback that informs the server of the temporary LOBs that - can be closed. - """ - cdef: - list lobs_to_close = self.conn_impl._temp_lobs_to_close - uint64_t total_size = 0 - self._write_piggyback_code(buf, TNS_FUNC_LOB_OP) - op_code = TNS_LOB_OP_FREE_TEMP | TNS_LOB_OP_ARRAY - - # temp lob data - buf.write_uint8(1) # pointer - buf.write_ub4(self.conn_impl._temp_lobs_total_size) - buf.write_uint8(0) # dest lob locator - buf.write_ub4(0) - buf.write_ub4(0) # source lob locator - buf.write_ub4(0) - buf.write_uint8(0) # source lob offset - buf.write_uint8(0) # dest lob offset - buf.write_uint8(0) # charset - buf.write_ub4(op_code) - buf.write_uint8(0) # scn - buf.write_ub4(0) # losbscn - buf.write_ub8(0) # lobscnl - buf.write_ub8(0) - buf.write_uint8(0) - - # array lob fields - buf.write_uint8(0) - buf.write_ub4(0) - buf.write_uint8(0) - buf.write_ub4(0) - buf.write_uint8(0) - buf.write_ub4(0) - for i in range(len(lobs_to_close)): - buf.write_bytes(lobs_to_close[i]) - - # reset values - self.conn_impl._temp_lobs_to_close = None - self.conn_impl._temp_lobs_total_size = 0 - - cdef int _write_end_to_end_piggyback(self, WriteBuffer buf) except -1: - """ - Writes the piggyback that informs the server of end-to-end attributes - that are being changed. 
- """ - cdef: - bytes action_bytes, client_identifier_bytes, client_info_bytes - BaseThinConnImpl conn_impl = self.conn_impl - bytes module_bytes, dbop_bytes - uint32_t flags = 0 - - # determine which flags to send - if conn_impl._action_modified: - flags |= TNS_END_TO_END_ACTION - if conn_impl._client_identifier_modified: - flags |= TNS_END_TO_END_CLIENT_IDENTIFIER - if conn_impl._client_info_modified: - flags |= TNS_END_TO_END_CLIENT_INFO - if conn_impl._module_modified: - flags |= TNS_END_TO_END_MODULE - if conn_impl._dbop_modified: - flags |= TNS_END_TO_END_DBOP - - # write initial packet data - self._write_piggyback_code(buf, TNS_FUNC_SET_END_TO_END_ATTR) - buf.write_uint8(0) # pointer (cidnam) - buf.write_uint8(0) # pointer (cidser) - buf.write_ub4(flags) - - # write client identifier header info - if conn_impl._client_identifier_modified: - buf.write_uint8(1) # pointer (client identifier) - if conn_impl._client_identifier is None: - buf.write_ub4(0) - else: - client_identifier_bytes = conn_impl._client_identifier.encode() - buf.write_ub4(len(client_identifier_bytes)) - else: - buf.write_uint8(0) # pointer (client identifier) - buf.write_ub4(0) # length of client identifier - - # write module header info - if conn_impl._module_modified: - buf.write_uint8(1) # pointer (module) - if conn_impl._module is None: - buf.write_ub4(0) - else: - module_bytes = conn_impl._module.encode() - buf.write_ub4(len(module_bytes)) - else: - buf.write_uint8(0) # pointer (module) - buf.write_ub4(0) # length of module - - # write action header info - if conn_impl._action_modified: - buf.write_uint8(1) # pointer (action) - if conn_impl._action is None: - buf.write_ub4(0) - else: - action_bytes = conn_impl._action.encode() - buf.write_ub4(len(action_bytes)) - else: - buf.write_uint8(0) # pointer (action) - buf.write_ub4(0) # length of action - - # write unsupported bits - buf.write_uint8(0) # pointer (cideci) - buf.write_ub4(0) # length (cideci) - buf.write_uint8(0) # cidcct - buf.write_ub4(0) # cidecs - - # write client info header info - if conn_impl._client_info_modified: - buf.write_uint8(1) # pointer (client info) - if conn_impl._client_info is None: - buf.write_ub4(0) - else: - client_info_bytes = conn_impl._client_info.encode() - buf.write_ub4(len(client_info_bytes)) - else: - buf.write_uint8(0) # pointer (client info) - buf.write_ub4(0) # length of client info - - # write more unsupported bits - buf.write_uint8(0) # pointer (cidkstk) - buf.write_ub4(0) # length (cidkstk) - buf.write_uint8(0) # pointer (cidktgt) - buf.write_ub4(0) # length (cidktgt) - - # write dbop header info - if conn_impl._dbop_modified: - buf.write_uint8(1) # pointer (dbop) - if conn_impl._dbop is None: - buf.write_ub4(0) - else: - dbop_bytes = conn_impl._dbop.encode() - buf.write_ub4(len(dbop_bytes)) - else: - buf.write_uint8(0) # pointer (dbop) - buf.write_ub4(0) # length of dbop - - # write strings - if conn_impl._client_identifier_modified \ - and conn_impl._client_identifier is not None: - buf.write_bytes_with_length(client_identifier_bytes) - if conn_impl._module_modified and conn_impl._module is not None: - buf.write_bytes_with_length(module_bytes) - if conn_impl._action_modified and conn_impl._action is not None: - buf.write_bytes_with_length(action_bytes) - if conn_impl._client_info_modified \ - and conn_impl._client_info is not None: - buf.write_bytes_with_length(client_info_bytes) - if conn_impl._dbop_modified and conn_impl._dbop is not None: - buf.write_bytes_with_length(dbop_bytes) - - # reset flags and values - 
conn_impl._action_modified = False - conn_impl._action = None - conn_impl._client_identifier_modified = False - conn_impl._client_identifier = None - conn_impl._client_info_modified = False - conn_impl._client_info = None - conn_impl._dbop_modified = False - conn_impl._dbop = None - conn_impl._module_modified = False - conn_impl._module = None - - cdef int _write_function_code(self, WriteBuffer buf) except -1: - self._write_piggybacks(buf) - buf.write_uint8(self.message_type) - buf.write_uint8(self.function_code) - buf.write_seq_num() - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_1: - buf.write_ub8(self.token_num) - - cdef int _write_message(self, WriteBuffer buf) except -1: - self._write_function_code(buf) - - cdef int _write_piggyback_code(self, WriteBuffer buf, - uint8_t code) except -1: - """ - Writes the header for piggybacks for the specified function code. - """ - buf.write_uint8(TNS_MSG_TYPE_PIGGYBACK) - buf.write_uint8(code) - buf.write_seq_num() - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_1: - buf.write_ub8(self.token_num) - - cdef int _write_piggybacks(self, WriteBuffer buf) except -1: - """ - Writes all of the piggybacks to the server. - """ - if self.conn_impl.pipeline_mode != 0: - self._write_begin_pipeline_piggyback(buf) - self.conn_impl.pipeline_mode = 0 - if self.conn_impl._current_schema_modified: - self._write_current_schema_piggyback(buf) - if self.conn_impl._statement_cache is not None \ - and self.conn_impl._statement_cache._num_cursors_to_close > 0 \ - and not self.conn_impl._drcp_establish_session: - self._write_close_cursors_piggyback(buf) - if self.conn_impl._action_modified \ - or self.conn_impl._client_identifier_modified \ - or self.conn_impl._client_info_modified \ - or self.conn_impl._dbop_modified \ - or self.conn_impl._module_modified: - self._write_end_to_end_piggyback(buf) - if self.conn_impl._temp_lobs_total_size > 0: - self._write_close_temp_lobs_piggyback(buf) - if self.conn_impl._session_state_desired != 0: - self._write_session_state_piggyback(buf) - - cdef int _write_session_state_piggyback(self, WriteBuffer buf) except -1: - """ - Write the session state piggyback. This is used to let the database - know when the client is beginning and ending a request. The database - uses this information to optimise its resources. 
- """ - cdef uint8_t state = self.conn_impl._session_state_desired - self._write_piggyback_code(buf, TNS_FUNC_SESSION_STATE) - buf.write_ub8(state | TNS_SESSION_STATE_EXPLICIT_BOUNDARY) - self.conn_impl._session_state_desired = 0 - - cdef int postprocess(self) except -1: - pass - - async def postprocess_async(self): - pass - - cdef int preprocess(self) except -1: - pass - - cdef int process(self, ReadBuffer buf) except -1: - cdef uint8_t message_type - self.end_of_response = False - self.flush_out_binds = False - while not self.end_of_response: - buf.save_point() - buf.read_ub1(&message_type) - self._process_message(buf, message_type) - - cdef int send(self, WriteBuffer buf) except -1: - buf.start_request(TNS_PACKET_TYPE_DATA) - self._write_message(buf) - if self.pipeline_result_impl is not None: - buf._data_flags |= TNS_DATA_FLAGS_END_OF_REQUEST - buf.end_request() - - -cdef class MessageWithData(Message): - cdef: - BaseThinDbObjectTypeCache type_cache - BaseThinCursorImpl cursor_impl - array.array bit_vector_buf - const char_type *bit_vector - bint arraydmlrowcounts - uint32_t row_index - uint32_t num_execs - uint16_t num_columns_sent - list dmlrowcounts - bint batcherrors - list out_var_impls - bint in_fetch - bint parse_only - object cursor - uint32_t offset - - cdef int _adjust_metadata(self, ThinVarImpl prev_var_impl, - OracleMetadata metadata) except -1: - """ - When a query is re-executed but the data type of a column has changed - the server returns the type information of the new type. However, if - the data type returned now is a CLOB or BLOB and the data type - previously returned was CHAR/VARCHAR/RAW (or the equivalent long - types), then the server returns the data as LONG (RAW), similarly to - what happens when a define is done to return CLOB/BLOB as string/bytes. - Detect these situations and adjust the fetch type appropriately. - """ - cdef uint8_t type_num, prev_type_num, csfrm - type_num = metadata.dbtype._ora_type_num - prev_type_num = prev_var_impl._fetch_metadata.dbtype._ora_type_num - if type_num == ORA_TYPE_NUM_CLOB \ - and prev_type_num in (ORA_TYPE_NUM_CHAR, - ORA_TYPE_NUM_LONG, - ORA_TYPE_NUM_VARCHAR): - type_num = ORA_TYPE_NUM_LONG - csfrm = prev_var_impl._fetch_metadata.dbtype._csfrm - metadata.dbtype = DbType._from_ora_type_and_csfrm(type_num, csfrm) - elif type_num == ORA_TYPE_NUM_BLOB \ - and prev_type_num in (ORA_TYPE_NUM_RAW, ORA_TYPE_NUM_LONG_RAW): - type_num = ORA_TYPE_NUM_LONG_RAW - metadata.dbtype = DbType._from_ora_type_and_csfrm(type_num, 0) - - cdef object _create_cursor_from_describe(self, ReadBuffer buf, - object cursor=None): - cdef BaseThinCursorImpl cursor_impl - if cursor is None: - cursor = self.cursor.connection.cursor() - cursor_impl = cursor._impl - cursor_impl._statement = self.conn_impl._get_statement() - cursor_impl._more_rows_to_fetch = True - cursor_impl._statement._is_query = True - self._process_describe_info(buf, cursor, cursor_impl) - return cursor - - cdef int _get_bit_vector(self, ReadBuffer buf, - ssize_t num_bytes) except -1: - """ - Gets the bit vector from the buffer and stores it for later use by the - row processing code. Since it is possible that the packet buffer may be - overwritten by subsequent packet retrieval, the bit vector must be - copied. An array is stored and a pointer to the underlying memory is - used for performance reasons. 
- """ - cdef const char_type *ptr = buf.read_raw_bytes(num_bytes) - if self.bit_vector_buf is None: - self.bit_vector_buf = array.array('B') - array.resize(self.bit_vector_buf, num_bytes) - self.bit_vector = self.bit_vector_buf.data.as_chars - memcpy( self.bit_vector, ptr, num_bytes) - - cdef bint _is_duplicate_data(self, uint32_t column_num): - """ - Returns a boolean indicating if the given column contains data - duplicated from the previous row. When duplicate data exists, the - server sends a bit vector. Bits that are set indicate that data is sent - with the row data; bits that are not set indicate that data should be - duplicated from the previous row. - """ - cdef int byte_num, bit_num - if self.bit_vector == NULL: - return False - byte_num = column_num // 8 - bit_num = column_num % 8 - return self.bit_vector[byte_num] & (1 << bit_num) == 0 - - cdef int _write_bind_params(self, WriteBuffer buf, list params) except -1: - cdef: - bint has_data = False - list bind_var_impls - BindInfo bind_info - bind_var_impls = [] - for bind_info in params: - if not bind_info._is_return_bind: - has_data = True - bind_var_impls.append(bind_info._bind_var_impl) - self._write_column_metadata(buf, bind_var_impls) - - # write parameter values unless statement contains only returning binds - if has_data: - for i in range(self.num_execs): - buf.write_uint8(TNS_MSG_TYPE_ROW_DATA) - self._write_bind_params_row(buf, params, i) - - cdef int _preprocess_query(self) except -1: - """ - Actions that takes place before query data is processed. - """ - cdef: - BaseThinCursorImpl cursor_impl = self.cursor_impl - Statement statement = cursor_impl._statement - object type_handler, conn - ThinVarImpl var_impl - ssize_t i, num_vals - bint uses_metadata - - # set values to indicate the start of a new fetch operation - self.in_fetch = True - cursor_impl._more_rows_to_fetch = True - cursor_impl._buffer_rowcount = cursor_impl._buffer_index = 0 - self.row_index = 0 - - # if no fetch variables exist, nothing further to do at this point; the - # processing that follows will take the metadata returned by the server - # and use it to create new fetch variables - if statement._fetch_var_impls is None: - return 0 - - # if the type handler set on the cursor or connection does not match - # the one that was used during the last fetch, rebuild the fetch - # variables in order to take the new type handler into account - conn = self.cursor.connection - type_handler = cursor_impl._get_output_type_handler(&uses_metadata) - if type_handler is not statement._last_output_type_handler: - for i, var_impl in enumerate(cursor_impl.fetch_var_impls): - cursor_impl._create_fetch_var(conn, self.cursor, type_handler, - uses_metadata, i, - var_impl._fetch_metadata) - statement._last_output_type_handler = type_handler - - # Create OracleArrowArray if fetching arrow is enabled - if cursor_impl.fetching_arrow: - cursor_impl._create_arrow_arrays() - - # the list of output variables is equivalent to the fetch variables - self.out_var_impls = cursor_impl.fetch_var_impls - - cdef int _process_bit_vector(self, ReadBuffer buf) except -1: - cdef ssize_t num_bytes - buf.read_ub2(&self.num_columns_sent) - num_bytes = self.cursor_impl._num_columns // 8 - if self.cursor_impl._num_columns % 8 > 0: - num_bytes += 1 - self._get_bit_vector(buf, num_bytes) - - cdef object _process_column_data(self, ReadBuffer buf, - ThinVarImpl var_impl, uint32_t pos): - cdef: - uint8_t num_bytes, ora_type_num, csfrm - ThinDbObjectTypeImpl typ_impl - BaseThinCursorImpl cursor_impl - 
object column_value = None - ThinDbObjectImpl obj_impl - int32_t actual_num_bytes - OracleMetadata metadata - OracleData data - Rowid rowid - if self.in_fetch: - metadata = var_impl._fetch_metadata - else: - metadata = var_impl.metadata - ora_type_num = metadata.dbtype._ora_type_num - csfrm = metadata.dbtype._csfrm - if var_impl.bypass_decode: - ora_type_num = ORA_TYPE_NUM_RAW - if metadata.buffer_size == 0 and self.in_fetch \ - and ora_type_num not in (ORA_TYPE_NUM_LONG, - ORA_TYPE_NUM_LONG_RAW, - ORA_TYPE_NUM_UROWID): - column_value = None # column is null by describe - elif ora_type_num == ORA_TYPE_NUM_ROWID: - if not self.in_fetch: - column_value = buf.read_str(CS_FORM_IMPLICIT) - else: - buf.read_ub1(&num_bytes) - if num_bytes == 0 or num_bytes == TNS_NULL_LENGTH_INDICATOR: - column_value = None - else: - buf.read_rowid(&rowid) - column_value = _encode_rowid(&rowid) - elif ora_type_num == ORA_TYPE_NUM_UROWID: - if not self.in_fetch: - column_value = buf.read_str(CS_FORM_IMPLICIT) - else: - column_value = buf.read_urowid() - elif ora_type_num == ORA_TYPE_NUM_CURSOR: - buf.skip_ub1() # length (fixed value) - if not self.in_fetch: - column_value = var_impl._values[pos] - column_value = self._create_cursor_from_describe(buf, column_value) - cursor_impl = column_value._impl - buf.read_ub2(&cursor_impl._statement._cursor_id) - elif ora_type_num in (ORA_TYPE_NUM_CLOB, - ORA_TYPE_NUM_BLOB, - ORA_TYPE_NUM_BFILE): - column_value = buf.read_lob_with_length(self.conn_impl, - metadata.dbtype) - elif ora_type_num == ORA_TYPE_NUM_JSON: - column_value = buf.read_oson() - elif ora_type_num == ORA_TYPE_NUM_VECTOR: - column_value = buf.read_vector() - elif ora_type_num == ORA_TYPE_NUM_OBJECT: - typ_impl = metadata.objtype - if typ_impl is None: - column_value = buf.read_xmltype(self.conn_impl) - else: - obj_impl = buf.read_dbobject(typ_impl) - if obj_impl is not None: - if not self.in_fetch: - column_value = var_impl._values[pos] - if column_value is not None: - column_value._impl = obj_impl - else: - column_value = PY_TYPE_DB_OBJECT._from_impl(obj_impl) - else: - buf.read_oracle_data(metadata, &data, from_dbobject=False) - if metadata.dbtype._csfrm == CS_FORM_NCHAR: - buf._caps._check_ncharset_id() - if self.cursor_impl.fetching_arrow: - convert_oracle_data_to_arrow( - metadata, var_impl.metadata, &data, var_impl._arrow_array - ) - else: - column_value = convert_oracle_data_to_python( - metadata, var_impl.metadata, &data, - var_impl._encoding_errors, from_dbobject=False - ) - if not self.in_fetch: - buf.read_sb4(&actual_num_bytes) - if actual_num_bytes < 0 and ora_type_num == ORA_TYPE_NUM_BOOLEAN: - column_value = None - elif actual_num_bytes != 0 and column_value is not None: - unit_type = "bytes" if isinstance(column_value, bytes) \ - else "characters" - errors._raise_err(errors.ERR_COLUMN_TRUNCATED, - col_value_len=len(column_value), - unit=unit_type, actual_len=actual_num_bytes) - elif ora_type_num == ORA_TYPE_NUM_LONG \ - or ora_type_num == ORA_TYPE_NUM_LONG_RAW: - buf.skip_sb4() # null indicator - buf.skip_ub4() # return code - return column_value - - cdef OracleMetadata _process_column_info(self, ReadBuffer buf, - BaseThinCursorImpl cursor_impl): - cdef: - uint32_t num_bytes, uds_flags, num_annotations, i - ThinDbObjectTypeImpl typ_impl - str schema, name, key, value - uint8_t ora_type_num, csfrm - OracleMetadata metadata - uint8_t nulls_allowed - int cache_num - bytes oid - buf.read_ub1(&ora_type_num) - metadata = OracleMetadata.__new__(OracleMetadata) - buf.skip_ub1() # flags - 
buf.read_sb1(&metadata.precision) - buf.read_sb1(&metadata.scale) - buf.read_ub4(&metadata.buffer_size) - buf.skip_ub4() # max number of array elements - buf.skip_ub8() # cont flags - buf.read_ub4(&num_bytes) # OID - if num_bytes > 0: - oid = buf.read_bytes() - buf.skip_ub2() # version - buf.skip_ub2() # character set id - buf.read_ub1(&csfrm) # character set form - metadata.dbtype = DbType._from_ora_type_and_csfrm(ora_type_num, csfrm) - buf.read_ub4(&metadata.max_size) - if ora_type_num == ORA_TYPE_NUM_RAW: - metadata.max_size = metadata.buffer_size - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: - buf.skip_ub4() # oaccolid - buf.read_ub1(&nulls_allowed) - metadata.nulls_allowed = nulls_allowed - buf.skip_ub1() # v7 length of name - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.name = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - schema = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - name = buf.read_str(CS_FORM_IMPLICIT) - buf.skip_ub2() # column position - buf.read_ub4(&uds_flags) - metadata.is_json = uds_flags & TNS_UDS_FLAGS_IS_JSON - metadata.is_oson = uds_flags & TNS_UDS_FLAGS_IS_OSON - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1: - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.domain_schema = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.domain_name = buf.read_str(CS_FORM_IMPLICIT) - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_3: - buf.read_ub4(&num_annotations) - if num_annotations > 0: - buf.skip_ub1() - metadata.annotations = {} - buf.read_ub4(&num_annotations) - buf.skip_ub1() - for i in range(num_annotations): - buf.skip_ub4() # length of key - key = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - value = buf.read_str(CS_FORM_IMPLICIT) - else: - value = "" - metadata.annotations[key] = value - buf.skip_ub4() # flags - buf.skip_ub4() # flags - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_4: - buf.read_ub4(&metadata.vector_dimensions) - buf.read_ub1(&metadata.vector_format) - buf.read_ub1(&metadata.vector_flags) - if ora_type_num == ORA_TYPE_NUM_OBJECT: - if self.type_cache is None: - cache_num = self.conn_impl._dbobject_type_cache_num - self.type_cache = get_dbobject_type_cache(cache_num) - typ_impl = self.type_cache.get_type_for_info(oid, schema, None, - name) - if typ_impl.is_xml_type: - metadata.dbtype = DB_TYPE_XMLTYPE - else: - metadata.objtype = typ_impl - return metadata - - cdef int _process_describe_info(self, ReadBuffer buf, - object cursor, - BaseThinCursorImpl cursor_impl) except -1: - cdef: - Statement stmt = cursor_impl._statement - list prev_fetch_var_impls - object type_handler, conn - OracleMetadata metadata - uint32_t num_bytes, i - bint uses_metadata - str message - buf.skip_ub4() # max row size - buf.read_ub4(&cursor_impl._num_columns) - prev_fetch_var_impls = stmt._fetch_var_impls - cursor_impl._init_fetch_vars(cursor_impl._num_columns) - if cursor_impl._num_columns > 0: - buf.skip_ub1() - type_handler = cursor_impl._get_output_type_handler(&uses_metadata) - conn = self.cursor.connection - for i in range(cursor_impl._num_columns): - metadata = self._process_column_info(buf, cursor_impl) - if prev_fetch_var_impls is not None \ - and i < len(prev_fetch_var_impls): - self._adjust_metadata(prev_fetch_var_impls[i], metadata) - if metadata.dbtype._ora_type_num in (ORA_TYPE_NUM_BLOB, - ORA_TYPE_NUM_CLOB, - ORA_TYPE_NUM_JSON, - 
ORA_TYPE_NUM_VECTOR): - stmt._requires_define = True - stmt._no_prefetch = True - cursor_impl._create_fetch_var(conn, self.cursor, type_handler, - uses_metadata, i, metadata) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.skip_raw_bytes_chunked() # current date - buf.skip_ub4() # dcbflag - buf.skip_ub4() # dcbmdbz - buf.skip_ub4() # dcbmnpr - buf.skip_ub4() # dcbmxpr - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.skip_raw_bytes_chunked() # dcbqcky - stmt._fetch_metadata = cursor_impl.fetch_metadata - stmt._fetch_vars = cursor_impl.fetch_vars - stmt._fetch_var_impls = cursor_impl.fetch_var_impls - stmt._num_columns = cursor_impl._num_columns - stmt._last_output_type_handler = type_handler - - cdef int _process_error_info(self, ReadBuffer buf) except -1: - cdef: - BaseThinCursorImpl cursor_impl = self.cursor_impl - BaseThinConnImpl conn_impl = self.conn_impl - object exc_type - Message._process_error_info(self, buf) - if self.error_info.cursor_id != 0: - cursor_impl._statement._cursor_id = self.error_info.cursor_id - if not cursor_impl._statement._is_plsql and not self.in_fetch: - cursor_impl.rowcount = self.error_info.rowcount - elif self.in_fetch and self.row_index > 0: - cursor_impl._statement._requires_define = False - cursor_impl._lastrowid = self.error_info.rowid - cursor_impl._batcherrors = self.error_info.batcherrors - if self.batcherrors and cursor_impl._batcherrors is None: - cursor_impl._batcherrors = [] - if self.error_info.num == TNS_ERR_NO_DATA_FOUND and self.in_fetch: - self.error_info.num = 0 - cursor_impl._more_rows_to_fetch = False - cursor_impl._last_row_index = 0 - cursor_impl._statement._requires_define = False - self.error_occurred = False - elif self.error_info.num == TNS_ERR_ARRAY_DML_ERRORS: - self.error_info.num = 0 - self.error_occurred = False - elif self.retry: - self.retry = False - elif cursor_impl._statement._is_query \ - and self.error_info.num in (TNS_ERR_VAR_NOT_IN_SELECT_LIST, - TNS_ERR_INCONSISTENT_DATA_TYPES): - self.retry = True - conn_impl._statement_cache.clear_cursor(cursor_impl._statement) - elif self.error_info.num != 0 and self.error_info.cursor_id != 0: - if self.error_info.num not in errors.ERR_INTEGRITY_ERROR_CODES: - conn_impl._statement_cache.clear_cursor(cursor_impl._statement) - - cdef int _process_implicit_result(self, ReadBuffer buf) except -1: - cdef: - BaseThinCursorImpl child_cursor_impl - uint32_t i, num_results - object child_cursor - uint8_t num_bytes - self.cursor_impl._implicit_resultsets = [] - buf.read_ub4(&num_results) - for i in range(num_results): - buf.read_ub1(&num_bytes) - buf.skip_raw_bytes(num_bytes) - child_cursor = self._create_cursor_from_describe(buf) - child_cursor_impl = child_cursor._impl - buf.read_ub2(&child_cursor_impl._statement._cursor_id) - self.cursor_impl._implicit_resultsets.append(child_cursor) - - cdef int _process_io_vector(self, ReadBuffer buf) except -1: - """ - An I/O vector is sent by the database in response to a PL/SQL execute. - It indicates whether binds are IN only, IN/OUT or OUT only. 
- """ - cdef: - uint16_t i, num_bytes, temp16 - uint32_t temp32, num_binds - BindInfo bind_info - buf.skip_ub1() # flag - buf.read_ub2(&temp16) # num requests - buf.read_ub4(&temp32) # num iters - num_binds = temp32 * 256 + temp16 - buf.skip_ub4() # num iters this time - buf.skip_ub2() # uac buffer length - buf.read_ub2(&num_bytes) # bit vector for fast fetch - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.read_ub2(&num_bytes) # rowid - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - self.out_var_impls = [] - for i in range(num_binds): # bind directions - bind_info = self.cursor_impl._statement._bind_info_list[i] - buf.read_ub1(&bind_info.bind_dir) - if bind_info.bind_dir == TNS_BIND_DIR_INPUT: - continue - self.out_var_impls.append(bind_info._bind_var_impl) - - cdef int _process_message(self, ReadBuffer buf, - uint8_t message_type) except -1: - if message_type == TNS_MSG_TYPE_ROW_HEADER: - self._process_row_header(buf) - elif message_type == TNS_MSG_TYPE_ROW_DATA: - self._process_row_data(buf) - elif message_type == TNS_MSG_TYPE_FLUSH_OUT_BINDS: - self.flush_out_binds = True - self.end_of_response = True - elif message_type == TNS_MSG_TYPE_DESCRIBE_INFO: - buf.skip_raw_bytes_chunked() - self._process_describe_info(buf, self.cursor, self.cursor_impl) - self.out_var_impls = self.cursor_impl.fetch_var_impls - elif message_type == TNS_MSG_TYPE_ERROR: - self._process_error_info(buf) - elif message_type == TNS_MSG_TYPE_BIT_VECTOR: - self._process_bit_vector(buf) - elif message_type == TNS_MSG_TYPE_IO_VECTOR: - self._process_io_vector(buf) - elif message_type == TNS_MSG_TYPE_IMPLICIT_RESULTSET: - self._process_implicit_result(buf) - else: - Message._process_message(self, buf, message_type) - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - cdef: - uint16_t keyword_num, num_params, num_bytes - uint32_t num_rows, i - uint64_t rowcount - bytes key_value - list rowcounts - buf.read_ub2(&num_params) # al8o4l (ignored) - for i in range(num_params): - buf.skip_ub4() - buf.read_ub2(&num_bytes) # al8txl (ignored) - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.read_ub2(&num_params) # num key/value pairs - for i in range(num_params): - buf.read_ub2(&num_bytes) # key - if num_bytes > 0: - key_value = buf.read_bytes() - buf.read_ub2(&num_bytes) # value - if num_bytes > 0: - buf.skip_raw_bytes_chunked() - buf.read_ub2(&keyword_num) # keyword num - if keyword_num == TNS_KEYWORD_NUM_CURRENT_SCHEMA: - self.conn_impl._current_schema = key_value.decode() - elif keyword_num == TNS_KEYWORD_NUM_EDITION: - self.conn_impl._edition = key_value.decode() - buf.read_ub2(&num_bytes) # registration - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - if self.arraydmlrowcounts: - buf.read_ub4(&num_rows) - rowcounts = self.cursor_impl._dmlrowcounts = [] - for i in range(num_rows): - buf.read_ub8(&rowcount) - rowcounts.append(rowcount) - - cdef int _process_row_data(self, ReadBuffer buf) except -1: - cdef: - uint32_t num_rows, pos - ThinVarImpl var_impl - ssize_t i, j - object value - list values - for i, var_impl in enumerate(self.out_var_impls): - if var_impl.is_array: - buf.read_ub4(&var_impl.num_elements_in_array) - for pos in range(var_impl.num_elements_in_array): - value = self._process_column_data(buf, var_impl, pos) - var_impl._values[pos] = value - elif self.cursor_impl._statement._is_returning: - buf.read_ub4(&num_rows) - values = [None] * num_rows - for j in range(num_rows): - values[j] = self._process_column_data(buf, var_impl, j) - var_impl._values[self.row_index] = 
values - var_impl._has_returned_data = True - elif self.cursor_impl.fetching_arrow: - if self._is_duplicate_data(i): - var_impl._arrow_array.append_last_value( - var_impl._last_arrow_array - ) - else: - self._process_column_data(buf, var_impl, self.row_index) - var_impl._last_arrow_array = None - elif self._is_duplicate_data(i): - if self.row_index == 0 and var_impl.outconverter is not None: - value = var_impl._last_raw_value - else: - value = var_impl._values[self.cursor_impl._last_row_index] - var_impl._values[self.row_index] = value - else: - value = self._process_column_data(buf, var_impl, - self.row_index) - var_impl._values[self.row_index] = value - self.row_index += 1 - if self.in_fetch: - self.cursor_impl._last_row_index = self.row_index - 1 - self.cursor_impl._buffer_rowcount = self.row_index - self.bit_vector = NULL - - cdef int _process_row_header(self, ReadBuffer buf) except -1: - cdef uint32_t num_bytes - buf.skip_ub1() # flags - buf.skip_ub2() # num requests - buf.skip_ub4() # iteration number - buf.skip_ub4() # num iters - buf.skip_ub2() # buffer length - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.skip_ub1() # skip repeated length - self._get_bit_vector(buf, num_bytes) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.skip_raw_bytes_chunked() # rxhrid - - cdef int _write_column_metadata(self, WriteBuffer buf, - list bind_var_impls) except -1: - cdef: - uint32_t buffer_size, cont_flag, lob_prefetch_length - ThinDbObjectTypeImpl typ_impl - uint8_t ora_type_num, flag - OracleMetadata metadata - ThinVarImpl var_impl - for var_impl in bind_var_impls: - metadata = var_impl.metadata - ora_type_num = metadata.dbtype._ora_type_num - buffer_size = metadata.buffer_size - if ora_type_num in (ORA_TYPE_NUM_ROWID, ORA_TYPE_NUM_UROWID): - ora_type_num = ORA_TYPE_NUM_VARCHAR - buffer_size = TNS_MAX_UROWID_LENGTH - flag = TNS_BIND_USE_INDICATORS - if var_impl.is_array: - flag |= TNS_BIND_ARRAY - cont_flag = 0 - lob_prefetch_length = 0 - if ora_type_num in (ORA_TYPE_NUM_BLOB, - ORA_TYPE_NUM_CLOB): - cont_flag = TNS_LOB_PREFETCH_FLAG - elif ora_type_num == ORA_TYPE_NUM_JSON: - cont_flag = TNS_LOB_PREFETCH_FLAG - buffer_size = lob_prefetch_length = TNS_JSON_MAX_LENGTH - elif ora_type_num == ORA_TYPE_NUM_VECTOR: - cont_flag = TNS_LOB_PREFETCH_FLAG - buffer_size = lob_prefetch_length = TNS_VECTOR_MAX_LENGTH - buf.write_uint8(ora_type_num) - buf.write_uint8(flag) - # precision and scale are always written as zero as the server - # expects that and complains if any other value is sent! 
- buf.write_uint8(0) - buf.write_uint8(0) - buf.write_ub4(buffer_size) - if var_impl.is_array: - buf.write_ub4(var_impl.num_elements) - else: - buf.write_ub4(0) # max num elements - buf.write_ub8(cont_flag) - if metadata.objtype is not None: - typ_impl = metadata.objtype - buf.write_ub4(len(typ_impl.oid)) - buf.write_bytes_with_length(typ_impl.oid) - buf.write_ub4(typ_impl.version) - else: - buf.write_ub4(0) # OID - buf.write_ub2(0) # version - if metadata.dbtype._csfrm != 0: - buf.write_ub2(TNS_CHARSET_UTF8) - else: - buf.write_ub2(0) - buf.write_uint8(metadata.dbtype._csfrm) - buf.write_ub4(lob_prefetch_length) # max chars (LOB prefetch) - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: - buf.write_ub4(0) # oaccolid - - cdef int _write_bind_params_column(self, WriteBuffer buf, - OracleMetadata metadata, - object value) except -1: - cdef: - uint8_t ora_type_num = metadata.dbtype._ora_type_num - ThinDbObjectTypeImpl typ_impl - BaseThinCursorImpl cursor_impl - BaseThinLobImpl lob_impl - uint32_t num_bytes - bytes temp_bytes - if value is None: - if ora_type_num == ORA_TYPE_NUM_BOOLEAN: - buf.write_uint8(TNS_ESCAPE_CHAR) - buf.write_uint8(1) - elif ora_type_num == ORA_TYPE_NUM_OBJECT: - buf.write_ub4(0) # TOID - buf.write_ub4(0) # OID - buf.write_ub4(0) # snapshot - buf.write_ub2(0) # version - buf.write_ub4(0) # packed data length - buf.write_ub4(TNS_OBJ_TOP_LEVEL) # flags - else: - buf.write_uint8(0) - elif ora_type_num == ORA_TYPE_NUM_VARCHAR \ - or ora_type_num == ORA_TYPE_NUM_CHAR \ - or ora_type_num == ORA_TYPE_NUM_LONG: - if metadata.dbtype._csfrm == CS_FORM_IMPLICIT: - temp_bytes = ( value).encode() - else: - buf._caps._check_ncharset_id() - temp_bytes = ( value).encode(ENCODING_UTF16) - buf.write_bytes_with_length(temp_bytes) - elif ora_type_num == ORA_TYPE_NUM_RAW \ - or ora_type_num == ORA_TYPE_NUM_LONG_RAW: - buf.write_bytes_with_length(value) - elif ora_type_num == ORA_TYPE_NUM_NUMBER \ - or ora_type_num == ORA_TYPE_NUM_BINARY_INTEGER: - if isinstance(value, bool): - temp_bytes = b'1' if value is True else b'0' - else: - temp_bytes = ( cpython.PyObject_Str(value)).encode() - buf.write_oracle_number(temp_bytes) - elif ora_type_num == ORA_TYPE_NUM_DATE \ - or ora_type_num == ORA_TYPE_NUM_TIMESTAMP \ - or ora_type_num == ORA_TYPE_NUM_TIMESTAMP_TZ \ - or ora_type_num == ORA_TYPE_NUM_TIMESTAMP_LTZ: - buf.write_oracle_date(value, metadata.dbtype._buffer_size_factor) - elif ora_type_num == ORA_TYPE_NUM_BINARY_DOUBLE: - buf.write_binary_double(value) - elif ora_type_num == ORA_TYPE_NUM_BINARY_FLOAT: - buf.write_binary_float(value) - elif ora_type_num == ORA_TYPE_NUM_CURSOR: - cursor_impl = value._impl - if cursor_impl is None: - errors._raise_err(errors.ERR_CURSOR_NOT_OPEN) - if cursor_impl._statement is None: - cursor_impl._statement = self.conn_impl._get_statement() - if cursor_impl._statement._cursor_id == 0: - buf.write_uint8(1) - buf.write_uint8(0) - else: - buf.write_ub4(1) - buf.write_ub4(cursor_impl._statement._cursor_id) - cursor_impl.statement = None - elif ora_type_num == ORA_TYPE_NUM_BOOLEAN: - buf.write_bool(value) - elif ora_type_num == ORA_TYPE_NUM_INTERVAL_DS: - buf.write_interval_ds(value) - elif ora_type_num == ORA_TYPE_NUM_INTERVAL_YM: - buf.write_interval_ym(value) - elif ora_type_num in ( - ORA_TYPE_NUM_BLOB, - ORA_TYPE_NUM_CLOB, - ORA_TYPE_NUM_BFILE - ): - buf.write_lob_with_length(value._impl) - elif ora_type_num in (ORA_TYPE_NUM_ROWID, ORA_TYPE_NUM_UROWID): - temp_bytes = ( value).encode() - buf.write_bytes_with_length(temp_bytes) - elif 
ora_type_num == ORA_TYPE_NUM_OBJECT: - buf.write_dbobject(value._impl) - elif ora_type_num == ORA_TYPE_NUM_JSON: - buf.write_oson(value, self.conn_impl._oson_max_fname_size) - elif ora_type_num == ORA_TYPE_NUM_VECTOR: - buf.write_vector(value) - else: - errors._raise_err(errors.ERR_DB_TYPE_NOT_SUPPORTED, - name=metadata.dbtype.name) - - cdef int _write_bind_params_row(self, WriteBuffer buf, list params, - uint32_t pos) except -1: - """ - Write a row of bind parameters. Note that non-LONG values are written - first followed by any LONG values. - """ - cdef: - uint32_t i, num_elements, offset = self.offset - bint found_long = False - OracleMetadata metadata - ThinVarImpl var_impl - BindInfo bind_info - for i, bind_info in enumerate(params): - if bind_info._is_return_bind: - continue - var_impl = bind_info._bind_var_impl - metadata = var_impl.metadata - if var_impl.is_array: - num_elements = var_impl.num_elements_in_array - buf.write_ub4(num_elements) - for value in var_impl._values[:num_elements]: - self._write_bind_params_column(buf, metadata, value) - else: - if not self.cursor_impl._statement._is_plsql \ - and metadata.buffer_size > buf._caps.max_string_size: - found_long = True - continue - self._write_bind_params_column(buf, metadata, - var_impl._values[pos + offset]) - if found_long: - for i, bind_info in enumerate(params): - if bind_info._is_return_bind: - continue - var_impl = bind_info._bind_var_impl - metadata = var_impl.metadata - if metadata.buffer_size <= buf._caps.max_string_size: - continue - self._write_bind_params_column(buf, metadata, - var_impl._values[pos + offset]) - - cdef int postprocess(self) except -1: - """ - Run any variable out converter functions on all non-null values that - were returned in the current database response. This must be done - independently since the out converter function may itself invoke a - database round-trip. - """ - cdef: - uint32_t i, j, num_elements - object value, element_value - ThinVarImpl var_impl - if self.out_var_impls is None: - return 0 - for var_impl in self.out_var_impls: - if var_impl is None or var_impl.outconverter is None: - continue - if not self.cursor_impl.fetching_arrow: - var_impl._last_raw_value = \ - var_impl._values[self.cursor_impl._last_row_index] - if var_impl.is_array: - num_elements = var_impl.num_elements_in_array - else: - num_elements = self.row_index - for i in range(num_elements): - value = var_impl._values[i] - if value is None and not var_impl.convert_nulls: - continue - if isinstance(value, list): - for j, element_value in enumerate(value): - if element_value is None: - continue - value[j] = var_impl.outconverter(element_value) - else: - var_impl._values[i] = var_impl.outconverter(value) - - async def postprocess_async(self): - """ - Run any variable out converter functions on all non-null values that - were returned in the current database response. This must be done - independently since the out converter function may itself invoke a - database round-trip. 
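
As context for the postprocessing above, the pattern these methods implement can be sketched on its own: every non-null fetched value is passed through the variable's out converter (element by element for array values), and the asynchronous variant, continued just below, additionally awaits any awaitable result before storing it. The helper name below is illustrative only and is not part of the patch.

    import inspect

    async def run_outconverter(converter, value):
        # Apply a user-supplied out converter to a fetched value. If the
        # converter is an async function its result is awaitable, so await
        # it; either way the caller ends up with a plain value.
        result = converter(value)
        if inspect.isawaitable(result):
            result = await result
        return result

As the docstrings note, this has to run after the whole response has been processed, since a converter may itself trigger another database round-trip.
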
- """ - cdef: - object value, element_value, fn - uint32_t i, j, num_elements - ThinVarImpl var_impl - if self.out_var_impls is None: - return 0 - for var_impl in self.out_var_impls: - if var_impl is None or var_impl.outconverter is None: - continue - if not self.cursor_impl.fetching_arrow: - var_impl._last_raw_value = \ - var_impl._values[self.cursor_impl._last_row_index] - if var_impl.is_array: - num_elements = var_impl.num_elements_in_array - else: - num_elements = self.row_index - fn = var_impl.outconverter - for i in range(num_elements): - value = var_impl._values[i] - if value is None and not var_impl.convert_nulls: - continue - if isinstance(value, list): - for j, element_value in enumerate(value): - if element_value is None: - continue - element_value = fn(element_value) - if inspect.isawaitable(element_value): - element_value = await element_value - value[j] = element_value - else: - value = fn(value) - if inspect.isawaitable(value): - value = await value - var_impl._values[i] = value - - cdef int preprocess(self) except -1: - cdef: - Statement statement = self.cursor_impl._statement - BindInfo bind_info - if statement._is_returning and not self.parse_only: - self.out_var_impls = [] - for bind_info in statement._bind_info_list: - if not bind_info._is_return_bind: - continue - self.out_var_impls.append(bind_info._bind_var_impl) - elif statement._is_query: - self._preprocess_query() - - -cdef class AuthMessage(Message): - cdef: - str encoded_password - bytes password - bytes newpassword - str encoded_newpassword - str encoded_jdwp_data - str debug_jdwp - str session_key - str speedy_key - str proxy_user - str token - str private_key - str service_name - uint8_t purity - ssize_t user_bytes_len - bytes user_bytes - dict session_data - uint32_t auth_mode - uint32_t verifier_type - bint change_password - str program - str terminal - str machine - str osuser - str driver_name - str edition - list appcontext - str connect_string - - cdef int _encrypt_passwords(self) except -1: - """ - Encrypts the passwords using the session key. - """ - - # encrypt password - salt = secrets.token_bytes(16) - password_with_salt = salt + self.password - encrypted_password = encrypt_cbc(self.conn_impl._combo_key, - password_with_salt) - self.encoded_password = encrypted_password.hex().upper() - - # encrypt new password - if self.newpassword is not None: - newpassword_with_salt = salt + self.newpassword - encrypted_newpassword = encrypt_cbc(self.conn_impl._combo_key, - newpassword_with_salt) - self.encoded_newpassword = encrypted_newpassword.hex().upper() - - cdef int _generate_verifier(self) except -1: - """ - Generate the multi-round verifier. 
- """ - cdef: - bytes jdwp_data - bytearray b - ssize_t i - - # create password hash - verifier_data = bytes.fromhex(self.session_data['AUTH_VFR_DATA']) - if self.verifier_type == TNS_VERIFIER_TYPE_12C: - keylen = 32 - iterations = int(self.session_data['AUTH_PBKDF2_VGEN_COUNT']) - salt = verifier_data + b'AUTH_PBKDF2_SPEEDY_KEY' - password_key = get_derived_key(self.password, salt, 64, - iterations) - h = hashlib.new("sha512") - h.update(password_key) - h.update(verifier_data) - password_hash = h.digest()[:32] - else: - keylen = 24 - h = hashlib.sha1(self.password) - h.update(verifier_data) - password_hash = h.digest() + bytes(4) - - # decrypt first half of session key - encoded_server_key = bytes.fromhex(self.session_data['AUTH_SESSKEY']) - session_key_part_a = decrypt_cbc(password_hash, encoded_server_key) - - # generate second half of session key - session_key_part_b = secrets.token_bytes(len(session_key_part_a)) - encoded_client_key = encrypt_cbc(password_hash, session_key_part_b) - - # create session key and combo key - if len(session_key_part_a) == 48: - self.session_key = encoded_client_key.hex().upper()[:96] - b = bytearray(24) - for i in range(16, 40): - b[i - 16] = session_key_part_a[i] ^ session_key_part_b[i] - part1 = hashlib.md5(b[:16]).digest() - part2 = hashlib.md5(b[16:]).digest() - combo_key = (part1 + part2)[:keylen] - else: - self.session_key = encoded_client_key.hex().upper()[:64] - salt = bytes.fromhex(self.session_data['AUTH_PBKDF2_CSK_SALT']) - iterations = int(self.session_data['AUTH_PBKDF2_SDER_COUNT']) - temp_key = session_key_part_b[:keylen] + session_key_part_a[:keylen] - combo_key = get_derived_key(temp_key.hex().upper().encode(), salt, - keylen, iterations) - - # retain session key for use by the change password API - self.conn_impl._combo_key = combo_key - - # generate speedy key for 12c verifiers - if self.verifier_type == TNS_VERIFIER_TYPE_12C: - salt = secrets.token_bytes(16) - speedy_key = encrypt_cbc(combo_key, salt + password_key) - self.speedy_key = speedy_key[:80].hex().upper() - - # encrypts the passwords - self._encrypt_passwords() - - # check if debug_jdwp is set. if set, encode the data using the - # combo session key with zeros padding - if self.debug_jdwp is not None: - jdwp_data = self.debug_jdwp.encode() - encrypted_jdwp_data = encrypt_cbc(combo_key, jdwp_data, zeros=True) - # Add a "01" at the end of the hex encrypted data to indicate the - # use of AES encryption - self.encoded_jdwp_data = encrypted_jdwp_data.hex().upper() + "01" - - cdef str _get_alter_timezone_statement(self): - """ - Returns the statement required to change the session time zone to match - the time zone in use by the Python interpreter. - """ - cdef: - int tz_hour, tz_minute, timezone - str sign, tz_repr - tz_repr = os.environ.get("ORA_SDTZ") - if tz_repr is None: - timezone = time.localtime().tm_gmtoff - tz_hour = timezone // 3600 - tz_minute = (timezone - (tz_hour * 3600)) // 60 - if tz_hour < 0: - sign = "-" - tz_hour = -tz_hour - else: - sign = "+" - tz_repr = f"{sign}{tz_hour:02}:{tz_minute:02}" - return f"ALTER SESSION SET TIME_ZONE='{tz_repr}'\x00" - - cdef tuple _get_version_tuple(self, ReadBuffer buf): - """ - Return the 5-tuple for the database version. Note that the format - changed with Oracle Database 18. 
- """ - cdef uint32_t full_version_num - full_version_num = int(self.session_data["AUTH_VERSION_NO"]) - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_18_1_EXT_1: - return ((full_version_num >> 24) & 0xFF, - (full_version_num >> 16) & 0xFF, - (full_version_num >> 12) & 0x0F, - (full_version_num >> 4) & 0xFF, - (full_version_num & 0x0F)) - else: - return ((full_version_num >> 24) & 0xFF, - (full_version_num >> 20) & 0x0F, - (full_version_num >> 12) & 0x0F, - (full_version_num >> 8) & 0x0F, - (full_version_num & 0x0F)) - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_AUTH_PHASE_ONE - self.session_data = {} - if self.conn_impl.username is not None: - self.user_bytes = self.conn_impl.username.encode() - self.user_bytes_len = len(self.user_bytes) - self.resend = True - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - cdef: - uint16_t num_params, i - uint32_t num_bytes - str key, value - buf.read_ub2(&num_params) - for i in range(num_params): - buf.skip_ub4() - key = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - value = buf.read_str(CS_FORM_IMPLICIT) - else: - value = "" - if key == "AUTH_VFR_DATA": - buf.read_ub4(&self.verifier_type) - else: - buf.skip_ub4() # skip flags - self.session_data[key] = value - if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: - self.function_code = TNS_FUNC_AUTH_PHASE_TWO - elif not self.change_password: - self.conn_impl._session_id = \ - int(self.session_data["AUTH_SESSION_ID"]) - self.conn_impl._serial_num = \ - int(self.session_data["AUTH_SERIAL_NUM"]) - self.conn_impl._db_domain = \ - self.session_data.get("AUTH_SC_DB_DOMAIN") - self.conn_impl._db_name = \ - self.session_data.get("AUTH_SC_DBUNIQUE_NAME") - self.conn_impl._max_open_cursors = \ - int(self.session_data.get("AUTH_MAX_OPEN_CURSORS", 0)) - self.conn_impl._service_name = \ - self.session_data.get("AUTH_SC_SERVICE_NAME") - self.conn_impl._instance_name = \ - self.session_data.get("AUTH_INSTANCENAME") - self.conn_impl._max_identifier_length = \ - int(self.session_data.get("AUTH_MAX_IDEN_LENGTH", 30)) - self.conn_impl.server_version = self._get_version_tuple(buf) - self.conn_impl.supports_bool = \ - buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1 - self.conn_impl._edition = self.edition - - cdef int _set_params(self, ConnectParamsImpl params, - Description description) except -1: - """ - Sets the parameters to use for the AuthMessage. The user and auth mode - are retained in order to avoid duplicating this effort for both trips - to the server. 
- """ - self.password = params._get_password() - self.newpassword = params._get_new_password() - self.service_name = description.service_name - self.proxy_user = params.proxy_user - self.debug_jdwp = params.debug_jdwp - self.program = params.program - self.terminal = params.terminal - self.machine = params.machine - self.osuser = params.osuser - self.driver_name = params.driver_name - if self.driver_name is None: - self.driver_name = f"{DRIVER_NAME} thn : {DRIVER_VERSION}" - self.edition = params.edition - self.appcontext = params.appcontext - self.connect_string = params._get_connect_string() - - # if drcp is used, use purity = NEW as the default purity for - # standalone connections and purity = SELF for connections that belong - # to a pool - if description.purity == PURITY_DEFAULT \ - and self.conn_impl._drcp_enabled: - if self.conn_impl._pool is None: - self.purity = PURITY_NEW - else: - self.purity = PURITY_SELF - else: - self.purity = description.purity - - # set token parameters; adjust processing so that only phase two is - # sent - if params._token is not None \ - or params.access_token_callback is not None: - self.token = params._get_token() - if params._private_key is not None: - self.private_key = params._get_private_key() - self.function_code = TNS_FUNC_AUTH_PHASE_TWO - self.resend = False - - # set authentication mode - if params._new_password is None: - self.auth_mode = TNS_AUTH_MODE_LOGON - if params.mode & AUTH_MODE_SYSDBA: - self.auth_mode |= TNS_AUTH_MODE_SYSDBA - if params.mode & AUTH_MODE_SYSOPER: - self.auth_mode |= TNS_AUTH_MODE_SYSOPER - if params.mode & AUTH_MODE_SYSASM: - self.auth_mode |= TNS_AUTH_MODE_SYSASM - if params.mode & AUTH_MODE_SYSBKP: - self.auth_mode |= TNS_AUTH_MODE_SYSBKP - if params.mode & AUTH_MODE_SYSDGD: - self.auth_mode |= TNS_AUTH_MODE_SYSDGD - if params.mode & AUTH_MODE_SYSKMT: - self.auth_mode |= TNS_AUTH_MODE_SYSKMT - if params.mode & AUTH_MODE_SYSRAC: - self.auth_mode |= TNS_AUTH_MODE_SYSRAC - if self.private_key is not None: - self.auth_mode |= TNS_AUTH_MODE_IAM_TOKEN - - cdef int _write_key_value(self, WriteBuffer buf, str key, str value, - uint32_t flags=0) except -1: - cdef: - bytes key_bytes = key.encode() - bytes value_bytes = value.encode() - uint32_t key_len = len(key_bytes) - uint32_t value_len = len(value_bytes) - buf.write_ub4(key_len) - buf.write_bytes_with_length(key_bytes) - buf.write_ub4(value_len) - if value_len > 0: - buf.write_bytes_with_length(value_bytes) - buf.write_ub4(flags) - - cdef int _write_message(self, WriteBuffer buf) except -1: - cdef: - uint8_t has_user = 1 if self.user_bytes_len > 0 else 0 - uint32_t num_pairs - - # perform final determination of data to write - if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: - num_pairs = 5 - elif self.change_password: - self._encrypt_passwords() - num_pairs = 2 - else: - num_pairs = 4 - - # token authentication - if self.token is not None: - num_pairs += 1 - - # normal user/password authentication - else: - num_pairs += 2 - self.auth_mode |= TNS_AUTH_MODE_WITH_PASSWORD - if self.verifier_type == TNS_VERIFIER_TYPE_12C: - num_pairs += 1 - elif self.verifier_type not in (TNS_VERIFIER_TYPE_11G_1, - TNS_VERIFIER_TYPE_11G_2): - errors._raise_err(errors.ERR_UNSUPPORTED_VERIFIER_TYPE, - verifier_type=self.verifier_type) - self._generate_verifier() - - # determine which other key/value pairs to write - if self.newpassword is not None: - num_pairs += 1 - self.auth_mode |= TNS_AUTH_MODE_CHANGE_PASSWORD - if self.proxy_user is not None: - num_pairs += 1 - if self.conn_impl._cclass 
is not None: - num_pairs += 1 - if self.purity != 0: - num_pairs += 1 - if self.private_key is not None: - num_pairs += 2 - if self.encoded_jdwp_data is not None: - num_pairs += 1 - if self.edition is not None: - num_pairs += 1 - if self.appcontext is not None: - num_pairs += len(self.appcontext) * 3 - if self.connect_string is not None: - num_pairs += 1 - - # write basic data to packet - self._write_function_code(buf) - buf.write_uint8(has_user) # pointer (authusr) - buf.write_ub4(self.user_bytes_len) - buf.write_ub4(self.auth_mode) # authentication mode - buf.write_uint8(1) # pointer (authivl) - buf.write_ub4(num_pairs) # number of key/value pairs - buf.write_uint8(1) # pointer (authovl) - buf.write_uint8(1) # pointer (authovln) - if has_user: - buf.write_bytes_with_length(self.user_bytes) - - # write key/value pairs - if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: - self._write_key_value(buf, "AUTH_TERMINAL", self.terminal) - self._write_key_value(buf, "AUTH_PROGRAM_NM", self.program) - self._write_key_value(buf, "AUTH_MACHINE", self.machine) - self._write_key_value(buf, "AUTH_PID", _connect_constants.pid) - self._write_key_value(buf, "AUTH_SID", self.osuser) - else: - if self.proxy_user is not None: - self._write_key_value(buf, "PROXY_CLIENT_NAME", - self.proxy_user) - if self.token is not None: - self._write_key_value(buf, "AUTH_TOKEN", self.token) - elif not self.change_password: - self._write_key_value(buf, "AUTH_SESSKEY", self.session_key, 1) - if self.verifier_type == TNS_VERIFIER_TYPE_12C: - self._write_key_value(buf, "AUTH_PBKDF2_SPEEDY_KEY", - self.speedy_key) - if self.encoded_password is not None: - self._write_key_value(buf, "AUTH_PASSWORD", - self.encoded_password) - if self.encoded_newpassword is not None: - self._write_key_value(buf, "AUTH_NEWPASSWORD", - self.encoded_newpassword) - if not self.change_password: - self._write_key_value(buf, "SESSION_CLIENT_CHARSET", "873") - self._write_key_value(buf, "SESSION_CLIENT_DRIVER_NAME", - self.driver_name) - self._write_key_value(buf, "SESSION_CLIENT_VERSION", - str(_connect_constants.full_version_num)) - self._write_key_value(buf, "AUTH_ALTER_SESSION", - self._get_alter_timezone_statement(), 1) - if self.conn_impl._cclass is not None: - self._write_key_value(buf, "AUTH_KPPL_CONN_CLASS", - self.conn_impl._cclass) - if self.purity != 0: - self._write_key_value(buf, "AUTH_KPPL_PURITY", - str(self.purity), 1) - if self.private_key is not None: - date_format = "%a, %d %b %Y %H:%M:%S GMT" - now = datetime.datetime.utcnow().strftime(date_format) - host_info = "%s:%d" % buf._transport.get_host_info() - header = f"date: {now}\n" + \ - f"(request-target): {self.service_name}\n" + \ - f"host: {host_info}" - signature = get_signature(self.private_key, header) - self._write_key_value(buf, "AUTH_HEADER", header) - self._write_key_value(buf, "AUTH_SIGNATURE", signature) - if self.encoded_jdwp_data is not None: - self._write_key_value(buf, "AUTH_ORA_DEBUG_JDWP", - self.encoded_jdwp_data) - if self.edition is not None: - self._write_key_value(buf, "AUTH_ORA_EDITION", self.edition) - if self.appcontext is not None: - # NOTE: these keys require a trailing null character as the - # server expects it! 
- for entry in self.appcontext: - self._write_key_value(buf, "AUTH_APPCTX_NSPACE\0", entry[0]) - self._write_key_value(buf, "AUTH_APPCTX_ATTR\0", entry[1]) - self._write_key_value(buf, "AUTH_APPCTX_VALUE\0", entry[2]) - if self.connect_string is not None: - self._write_key_value(buf, "AUTH_CONNECT_STRING", - self.connect_string) - - -@cython.final -cdef class ChangePasswordMessage(AuthMessage): - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.change_password = True - self.function_code = TNS_FUNC_AUTH_PHASE_TWO - self.user_bytes = self.conn_impl.username.encode() - self.user_bytes_len = len(self.user_bytes) - self.auth_mode = TNS_AUTH_MODE_WITH_PASSWORD | \ - TNS_AUTH_MODE_CHANGE_PASSWORD - - -@cython.final -cdef class CommitMessage(Message): - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_COMMIT - - -@cython.final -cdef class ConnectMessage(Message): - cdef: - bytes connect_string_bytes - uint16_t connect_string_len, redirect_data_len - bint read_redirect_data_len - Description description - uint8_t packet_flags - str redirect_data - str host - int port - - cdef int process(self, ReadBuffer buf) except -1: - cdef: - uint16_t protocol_version, protocol_options - const char_type *redirect_data - uint32_t flags2 = 0 - uint8_t flags1 - bytes db_uuid - if buf._current_packet.packet_type == TNS_PACKET_TYPE_REDIRECT: - if not self.read_redirect_data_len: - buf.read_uint16be(&self.redirect_data_len) - self.read_redirect_data_len = True - buf.wait_for_packets_sync() - redirect_data = buf.read_raw_bytes(self.redirect_data_len) - if self.redirect_data_len > 0: - self.redirect_data = \ - redirect_data[:self.redirect_data_len].decode() - self.read_redirect_data_len = False - elif buf._current_packet.packet_type == TNS_PACKET_TYPE_ACCEPT: - buf.read_uint16be(&protocol_version) - # check if the protocol version supported by the database is high - # enough; if not, reject the connection immediately - if protocol_version < TNS_VERSION_MIN_ACCEPTED: - errors._raise_err(errors.ERR_SERVER_VERSION_NOT_SUPPORTED) - buf.read_uint16be(&protocol_options) - buf.skip_raw_bytes(10) - buf.read_ub1(&flags1) - if flags1 & TNS_NSI_NA_REQUIRED: - feature = "Native Network Encryption and Data Integrity" - errors._raise_not_supported(feature) - buf.skip_raw_bytes(9) - buf.read_uint32be(&buf._caps.sdu) - if protocol_version >= TNS_VERSION_MIN_OOB_CHECK: - buf.skip_raw_bytes(5) - buf.read_uint32be(&flags2) - buf._caps._adjust_for_protocol(protocol_version, protocol_options, - flags2) - buf._transport._full_packet_size = True - elif buf._current_packet.packet_type == TNS_PACKET_TYPE_REFUSE: - response = self.error_info.message - error_code = "unknown" - error_code_int = 0 - if response is not None: - pos = response.find("(ERR=") - if pos > 0: - end_pos = response.find(")", pos) - if end_pos > 0: - error_code = response[pos + 5:end_pos] - error_code_int = int(error_code) - if error_code_int == 0: - errors._raise_err(errors.ERR_UNEXPECTED_REFUSE) - if error_code_int == TNS_ERR_INVALID_SERVICE_NAME: - errors._raise_err(errors.ERR_INVALID_SERVICE_NAME, - service_name=self.description.service_name, - host=self.host, port=self.port) - elif error_code_int == TNS_ERR_INVALID_SID: - errors._raise_err(errors.ERR_INVALID_SID, - sid=self.description.sid, - host=self.host, port=self.port) - errors._raise_err(errors.ERR_LISTENER_REFUSED_CONNECTION, - error_code=error_code) - - cdef int send(self, WriteBuffer buf) except -1: - 
cdef: - uint16_t service_options = TNS_GSO_DONT_CARE - uint32_t connect_flags_1 = 0, connect_flags_2 = 0 - uint8_t nsi_flags = \ - TNS_NSI_SUPPORT_SECURITY_RENEG | TNS_NSI_DISABLE_NA - if buf._caps.supports_oob: - service_options |= TNS_GSO_CAN_RECV_ATTENTION - connect_flags_2 |= TNS_CHECK_OOB - buf.start_request(TNS_PACKET_TYPE_CONNECT, self.packet_flags) - buf.write_uint16be(TNS_VERSION_DESIRED) - buf.write_uint16be(TNS_VERSION_MINIMUM) - buf.write_uint16be(service_options) - buf.write_uint16be(self.description.sdu) - buf.write_uint16be(self.description.sdu) - buf.write_uint16be(TNS_PROTOCOL_CHARACTERISTICS) - buf.write_uint16be(0) # line turnaround - buf.write_uint16be(1) # value of 1 - buf.write_uint16be(self.connect_string_len) - buf.write_uint16be(74) # offset to connect data - buf.write_uint32be(0) # max receivable data - buf.write_uint8(nsi_flags) - buf.write_uint8(nsi_flags) - buf.write_uint64be(0) # obsolete - buf.write_uint64be(0) # obsolete - buf.write_uint64be(0) # obsolete - buf.write_uint32be(self.description.sdu) # SDU (large) - buf.write_uint32be(self.description.sdu) # TDU (large) - buf.write_uint32be(connect_flags_1) - buf.write_uint32be(connect_flags_2) - if self.connect_string_len > TNS_MAX_CONNECT_DATA: - buf.end_request() - buf.start_request(TNS_PACKET_TYPE_DATA) - buf.write_bytes(self.connect_string_bytes) - buf.end_request() - - -@cython.final -cdef class DataTypesMessage(Message): - - cdef int _process_message(self, ReadBuffer buf, - uint8_t message_type) except -1: - cdef uint16_t data_type, conv_data_type - while True: - buf.read_uint16be(&data_type) - if data_type == 0: - break - buf.read_uint16be(&conv_data_type) - if conv_data_type != 0: - buf.skip_raw_bytes(4) - if not buf._caps.supports_end_of_response: - self.end_of_response = True - - cdef int _write_message(self, WriteBuffer buf) except -1: - cdef: - DataType* data_type - int i - - # write character set and capabilities - buf.write_uint8(TNS_MSG_TYPE_DATA_TYPES) - buf.write_uint16le(TNS_CHARSET_UTF8) - buf.write_uint16le(TNS_CHARSET_UTF8) - buf.write_uint8(TNS_ENCODING_MULTI_BYTE | TNS_ENCODING_CONV_LENGTH) - buf.write_bytes_with_length(bytes(buf._caps.compile_caps)) - buf.write_bytes_with_length(bytes(buf._caps.runtime_caps)) - - # write data types - i = 0 - while True: - data_type = &DATA_TYPES[i] - if data_type.data_type == 0: - break - i += 1 - buf.write_uint16be(data_type.data_type) - buf.write_uint16be(data_type.conv_data_type) - buf.write_uint16be(data_type.representation) - buf.write_uint16be(0) - buf.write_uint16be(0) - - -@cython.final -cdef class EndPipelineMessage(Message): - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_PIPELINE_END - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Write the message to the buffer. - """ - self._write_function_code(buf) - buf.write_ub4(0) # ID (unused) - - -@cython.final -cdef class ExecuteMessage(MessageWithData): - - cdef int _write_execute_message(self, WriteBuffer buf) except -1: - """ - Write the message for a full execute. 
- """ - cdef: - uint32_t options, dml_options = 0, num_params = 0, num_iters = 1 - Statement stmt = self.cursor_impl._statement - BaseThinCursorImpl cursor_impl = self.cursor_impl - list params = stmt._bind_info_list - - # determine the options to use for the execute - options = 0 - if not stmt._requires_define and not self.parse_only \ - and params is not None: - num_params = len(params) - if stmt._requires_define: - options |= TNS_EXEC_OPTION_DEFINE - elif not self.parse_only and stmt._sql is not None: - dml_options = TNS_EXEC_OPTION_IMPLICIT_RESULTSET - options |= TNS_EXEC_OPTION_EXECUTE - if stmt._cursor_id == 0 or stmt._is_ddl: - options |= TNS_EXEC_OPTION_PARSE - if stmt._is_query: - if self.parse_only: - options |= TNS_EXEC_OPTION_DESCRIBE - else: - if stmt._cursor_id == 0 or stmt._requires_define: - num_iters = self.cursor_impl.prefetchrows - else: - num_iters = self.cursor_impl.arraysize - self.cursor_impl._set_fetch_array_size(num_iters) - if num_iters > 0 and not stmt._no_prefetch: - options |= TNS_EXEC_OPTION_FETCH - if not stmt._is_plsql and not self.parse_only: - options |= TNS_EXEC_OPTION_NOT_PLSQL - elif stmt._is_plsql and num_params > 0: - options |= TNS_EXEC_OPTION_PLSQL_BIND - if num_params > 0: - options |= TNS_EXEC_OPTION_BIND - if self.batcherrors: - options |= TNS_EXEC_OPTION_BATCH_ERRORS - if self.arraydmlrowcounts: - dml_options = TNS_EXEC_OPTION_DML_ROWCOUNTS - if self.conn_impl.autocommit and not self.parse_only: - options |= TNS_EXEC_OPTION_COMMIT - - # write body of message - self._write_function_code(buf) - buf.write_ub4(options) # execute options - buf.write_ub4(stmt._cursor_id) # cursor id - if stmt._cursor_id == 0 or stmt._is_ddl: - buf.write_uint8(1) # pointer (cursor id) - buf.write_ub4(stmt._sql_length) - else: - buf.write_uint8(0) # pointer (cursor id) - buf.write_ub4(0) - buf.write_uint8(1) # pointer (vector) - buf.write_ub4(13) # al8i4 array length - buf.write_uint8(0) # pointer (al8o4) - buf.write_uint8(0) # pointer (al8o4l) - buf.write_ub4(0) # prefetch buffer size - buf.write_ub4(num_iters) # prefetch number of rows - buf.write_ub4(TNS_MAX_LONG_LENGTH) # maximum long size - if num_params == 0: - buf.write_uint8(0) # pointer (binds) - buf.write_ub4(0) # number of binds - else: - buf.write_uint8(1) # pointer (binds) - buf.write_ub4(num_params) # number of binds - buf.write_uint8(0) # pointer (al8app) - buf.write_uint8(0) # pointer (al8txn) - buf.write_uint8(0) # pointer (al8txl) - buf.write_uint8(0) # pointer (al8kv) - buf.write_uint8(0) # pointer (al8kvl) - if stmt._requires_define: - buf.write_uint8(1) # pointer (al8doac) - buf.write_ub4(len(self.cursor_impl.fetch_vars)) - # number of defines - else: - buf.write_uint8(0) - buf.write_ub4(0) - buf.write_ub4(0) # registration id - buf.write_uint8(0) # pointer (al8objlist) - buf.write_uint8(1) # pointer (al8objlen) - buf.write_uint8(0) # pointer (al8blv) - buf.write_ub4(0) # al8blvl - buf.write_uint8(0) # pointer (al8dnam) - buf.write_ub4(0) # al8dnaml - buf.write_ub4(0) # al8regid_msb - if self.arraydmlrowcounts: - buf.write_uint8(1) # pointer (al8pidmlrc) - buf.write_ub4(self.num_execs) # al8pidmlrcbl - buf.write_uint8(1) # pointer (al8pidmlrcl) - else: - buf.write_uint8(0) # pointer (al8pidmlrc) - buf.write_ub4(0) # al8pidmlrcbl - buf.write_uint8(0) # pointer (al8pidmlrcl) - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: - buf.write_uint8(0) # pointer (al8sqlsig) - buf.write_ub4(0) # SQL signature length - buf.write_uint8(0) # pointer (SQL ID) - buf.write_ub4(0) # allocated size of 
SQL ID - buf.write_uint8(0) # pointer (length of SQL ID) - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2_EXT1: - buf.write_uint8(0) # pointer (chunk ids) - buf.write_ub4(0) # number of chunk ids - if stmt._cursor_id == 0 or stmt._is_ddl: - if stmt._sql_bytes is None: - errors._raise_err(errors.ERR_INVALID_REF_CURSOR) - buf.write_bytes_with_length(stmt._sql_bytes) - buf.write_ub4(1) # al8i4[0] parse - else: - buf.write_ub4(0) # al8i4[0] parse - if stmt._is_query: - if stmt._cursor_id == 0: - buf.write_ub4(0) # al8i4[1] execution count - else: - buf.write_ub4(num_iters) - else: - buf.write_ub4(self.num_execs) # al8i4[1] execution count - buf.write_ub4(0) # al8i4[2] - buf.write_ub4(0) # al8i4[3] - buf.write_ub4(0) # al8i4[4] - buf.write_ub4(0) # al8i4[5] SCN (part 1) - buf.write_ub4(0) # al8i4[6] SCN (part 2) - buf.write_ub4(stmt._is_query) # al8i4[7] is query - buf.write_ub4(0) # al8i4[8] - buf.write_ub4(dml_options) # al8i4[9] DML row counts/implicit - buf.write_ub4(0) # al8i4[10] - buf.write_ub4(0) # al8i4[11] - buf.write_ub4(0) # al8i4[12] - if stmt._requires_define: - self._write_column_metadata(buf, self.cursor_impl.fetch_var_impls) - elif num_params > 0: - self._write_bind_params(buf, params) - - cdef int _write_reexecute_message(self, WriteBuffer buf) except -1: - """ - Write the message for a re-execute. - """ - cdef: - uint32_t i, exec_flags_1 = 0, exec_flags_2 = 0, num_iters - Statement stmt = self.cursor_impl._statement - list params = stmt._bind_info_list - BindInfo info - - if params: - if not stmt._is_query and not stmt._is_returning: - self.out_var_impls = [info._bind_var_impl \ - for info in params \ - if info.bind_dir != TNS_BIND_DIR_INPUT] - params = [info for info in params \ - if info.bind_dir != TNS_BIND_DIR_OUTPUT \ - and not info._is_return_bind] - if self.function_code == TNS_FUNC_REEXECUTE_AND_FETCH: - exec_flags_1 |= TNS_EXEC_OPTION_EXECUTE - num_iters = self.cursor_impl.prefetchrows - self.cursor_impl._set_fetch_array_size(num_iters) - else: - if self.conn_impl.autocommit: - exec_flags_2 |= TNS_EXEC_OPTION_COMMIT_REEXECUTE - num_iters = self.num_execs - - self._write_function_code(buf) - buf.write_ub4(stmt._cursor_id) - buf.write_ub4(num_iters) - buf.write_ub4(exec_flags_1) - buf.write_ub4(exec_flags_2) - if params: - for i in range(self.num_execs): - buf.write_uint8(TNS_MSG_TYPE_ROW_DATA) - self._write_bind_params_row(buf, params, i) - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Write the execute message to the buffer. Two types of execute messages - are possible: one for a full execute and the second, simpler message, - for when an existing cursor is being re-executed. 
A full execute is - required under the following circumstances: - - the statement has never been executed - - the statement refers to a REF cursor (no sql is defined) - - prefetch is not possible (LOB columns fetched) - - bind metadata has changed - - parse is being performed - - define is being performed - - DDL is being executed - - batch errors mode is enabled - """ - cdef: - Statement stmt = self.cursor_impl._statement - if stmt._cursor_id == 0 or not stmt._executed \ - or stmt._sql is None \ - or stmt._no_prefetch \ - or stmt._binds_changed \ - or self.parse_only \ - or stmt._requires_define \ - or stmt._is_ddl \ - or self.batcherrors: - self.function_code = TNS_FUNC_EXECUTE - self._write_execute_message(buf) - elif stmt._is_query and self.cursor_impl.prefetchrows > 0: - self.function_code = TNS_FUNC_REEXECUTE_AND_FETCH - self._write_reexecute_message(buf) - else: - self.function_code = TNS_FUNC_REEXECUTE - self._write_reexecute_message(buf) - stmt._binds_changed = False - - cdef int process(self, ReadBuffer buf) except -1: - """ - Runs after the database response has been processed. If the statement - executed requires define and is not a REF cursor (which would already - have performed the define during its execute), then mark the message as - needing to be resent. If this is after the second time the message has - been sent, mark the statement as no longer needing a define (since this - only needs to happen once). - """ - cdef Statement stmt = self.cursor_impl._statement - MessageWithData.process(self, buf) - if self.error_occurred and self.error_info.pos == 0 and stmt._is_plsql: - self.error_info.pos = self.error_info.rowcount + self.offset - if not self.parse_only: - stmt._executed = True - if stmt._requires_define and stmt._sql is not None: - if self.resend: - stmt._requires_define = False - else: - self.resend = True - - -@cython.final -cdef class FetchMessage(MessageWithData): - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_FETCH - - cdef int _write_message(self, WriteBuffer buf) except -1: - self.cursor_impl._set_fetch_array_size(self.cursor_impl.arraysize) - self._write_function_code(buf) - if self.cursor_impl._statement._cursor_id == 0: - errors._raise_err(errors.ERR_CURSOR_HAS_BEEN_CLOSED) - buf.write_ub4(self.cursor_impl._statement._cursor_id) - buf.write_ub4(self.cursor_impl._fetch_array_size) - - -@cython.final -cdef class DeqMessage(Message): - cdef: - BaseThinQueueImpl queue_impl - ThinDeqOptionsImpl deq_options_impl - ThinMsgPropsImpl props_impl - bint no_msg_found - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization - """ - self.function_code = TNS_FUNC_AQ_DEQ - - cdef int _process_error_info(self, ReadBuffer buf) except -1: - """ - Process error information from the buffer. If the error that indicates - that no messages were received is detected, the error is cleared and - the flag set so that the dequeue can handle that case. - """ - Message._process_error_info(self, buf) - if self.error_info.num == TNS_ERR_NO_MESSAGES_FOUND: - self.error_info.num = 0 - self.error_occurred = False - self.no_msg_found = True - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - """ - Process the return parameters of the AQ Dequeue request. 
- """ - cdef: - uint32_t num_bytes, num_extensions, i - ssize_t temp_num_bytes - const char_type *ptr - uint16_t temp16, keyword - bytes temp - OracleData data - uint32_t imageLength - ThinDbObjectImpl obj_impl - ThinDbObjectTypeImpl type_impl - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.read_sb4(&self.props_impl.priority) # priority - buf.read_sb4(&self.props_impl.delay) # delay - buf.read_sb4(&self.props_impl.expiration) # expiration - # correlation id - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - self.props_impl.correlation = ptr[:temp_num_bytes].decode() - buf.read_sb4(&self.props_impl.num_attempts) - # exception queue name - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - self.props_impl.exceptionq = ptr[:temp_num_bytes].decode() - buf.read_sb4(&self.props_impl.state) - buf.read_ub4(&num_bytes) # enqueue time - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - decode_date(ptr, temp_num_bytes, &data.buffer) - self.props_impl.enq_time = convert_date_to_python(&data.buffer) - buf.read_ub4(&num_bytes) # transaction id - if num_bytes > 0: - ptr = buf._get_raw(num_bytes) - self.props_impl.enq_txn_id = ptr[:num_bytes] - else: - self.props_impl.enq_txn_id = None - buf.read_ub4(&num_extensions) # number of extensions - if num_extensions > 0: - buf.skip_ub1() - for i in range(num_extensions): - temp = None - temp16 = 0 - buf.read_ub4(&num_bytes) # text value length - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - temp = ptr[:temp_num_bytes] - temp16 = temp_num_bytes - buf.read_ub4(&num_bytes) # binary value length - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - temp = ptr[:temp_num_bytes] - buf.read_ub2(&keyword) # extension keyword - if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_NAME and - temp is not None and temp16 > 0): - self.props_impl.sender_agent_name = temp - if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS and - temp is not None and temp16 > 0): - self.props_impl.sender_agent_address = temp - if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL and - temp is not None): - self.props_impl.sender_agent_protocol = temp - if (keyword == TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID and - temp is not None): - self.props_impl.original_msg_id = temp - buf.read_ub4(&num_bytes) # user properties - if num_bytes > 0: - errors._raise_err(errors.ERR_NOT_IMPLEMENTED) - buf.skip_ub4() # csn - buf.skip_ub4() # dsn - buf.skip_ub4() # flags - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: - buf.skip_ub4() # shard number - buf.read_ub4(&num_bytes) # num recipients - if num_bytes > 0: - errors._raise_err(errors.ERR_NOT_IMPLEMENTED) - if self.queue_impl.payload_type is not None: - type_impl = self.queue_impl.payload_type - obj_impl = buf.read_dbobject(type_impl) - if obj_impl is None: - obj_impl = type_impl.create_new_object() - self.props_impl.payload = PY_TYPE_DB_OBJECT._from_impl(obj_impl) - else: - buf.read_ub4(&num_bytes) # TOID len - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.read_ub4(&num_bytes) # OID len - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.read_ub4(&num_bytes) # snapshot - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.skip_ub2() # version no - buf.read_ub4(&imageLength) # image len - buf.skip_ub2() # flags - if imageLength > 0: - self.props_impl.payload = buf.read_bytes()[4:imageLength] - if self.queue_impl.is_json: - self.props_impl.payload = \ - 
self.conn_impl.decode_oson(self.props_impl.payload) - else: - if not self.queue_impl.is_json: - self.props_impl.payload = b'' - ptr = buf._get_raw(TNS_AQ_MESSAGE_ID_LENGTH) - self.props_impl.msgid = ptr[:TNS_AQ_MESSAGE_ID_LENGTH] - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Write message to the network buffers. - """ - cdef: - bytes queue_name_bytes - bytes consumer_name_bytes - bytes correlation_bytes - bytes condition_bytes - uint16_t delivery_mode - int deq_flags - self._write_function_code(buf) - queue_name_bytes = self.queue_impl.name.encode() - buf.write_uint8(1) # queue name (pointer) - buf.write_ub4(len(queue_name_bytes)) # queue name length - buf.write_uint8(1) # message properties - buf.write_uint8(1) # msg props length - buf.write_uint8(1) # recipient list - buf.write_uint8(1) # recipient list length - if self.deq_options_impl.consumer_name: - consumer_name_bytes = self.deq_options_impl.consumer_name.encode() - buf.write_uint8(1) # consumer name - buf.write_ub4(len(consumer_name_bytes)) - else: - consumer_name_bytes = None - buf.write_uint8(0) # consumer name - buf.write_ub4(0) # consumer name length - buf.write_sb4(self.deq_options_impl.mode) # dequeue mode - buf.write_sb4(self.deq_options_impl.navigation) # navigation - buf.write_sb4(self.deq_options_impl.visibility) # visibility - buf.write_sb4(self.deq_options_impl.wait) # wait - if self.deq_options_impl.msgid: - buf.write_uint8(1) # select mesg id - buf.write_ub4(TNS_AQ_MESSAGE_ID_LENGTH) # mesg id len - else: - buf.write_uint8(0) # select mesg id - buf.write_ub4(0) # select mesg id length - if self.deq_options_impl.correlation: - correlation_bytes = self.deq_options_impl.correlation.encode() - buf.write_uint8(1) # correlation id - buf.write_ub4(len(correlation_bytes)) # correlation id len - else: - correlation_bytes = None - buf.write_uint8(0) # correlation id - buf.write_ub4(0) # correlation id len - buf.write_uint8(1) # toid of payload - buf.write_ub4(16) # toid length - buf.write_ub2(self.props_impl.version) # version of type - buf.write_uint8(1) # payload - buf.write_uint8(1) # return msg id - buf.write_ub4(16) # mesg id length - deq_flags = 0 - delivery_mode = self.deq_options_impl.delivery_mode - if (delivery_mode == TNS_AQ_MSG_BUFFERED): - deq_flags |= TNS_KPD_AQ_BUFMSG - elif (delivery_mode == TNS_AQ_MSG_PERSISTENT_OR_BUFFERED): - deq_flags |= TNS_KPD_AQ_EITHER - buf.write_ub4(deq_flags) # dequeue flags - if self.deq_options_impl.condition: - condition_bytes = self.deq_options_impl.condition.encode() - buf.write_uint8(1) # condition (pointer) - buf.write_ub4(len(condition_bytes)) # condition length - else: - condition_bytes = None - buf.write_uint8(0) # condition - buf.write_ub4(0) # condition length - buf.write_uint8(0) # extensions - buf.write_ub4(0) # number of extensions - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_20_1: - buf.write_uint8(0) # JSON payload - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: - buf.write_ub4(-1) # shard id - - buf.write_bytes_with_length(queue_name_bytes) - if consumer_name_bytes is not None: - buf.write_bytes_with_length(consumer_name_bytes) - if self.deq_options_impl.msgid: - buf.write_bytes(self.deq_options_impl.msgid) - if correlation_bytes is not None: - buf.write_bytes_with_length(correlation_bytes) - buf.write_bytes(self.queue_impl.payload_toid) - if condition_bytes is not None: - buf.write_bytes_with_length(condition_bytes) - - -@cython.final -cdef class EnqMessage(Message): - cdef: - BaseThinQueueImpl queue_impl - 
ThinEnqOptionsImpl enq_options_impl - ThinMsgPropsImpl props_impl - - cdef int _initialize_hook(self) except -1: - """ - perform initialization - """ - self.function_code = TNS_FUNC_AQ_ENQ - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - """ - Process the return parameters for the AQ enqueue request. - """ - cdef const char_type *ptr = buf._get_raw(TNS_AQ_MESSAGE_ID_LENGTH) - self.props_impl.msgid = ptr[:TNS_AQ_MESSAGE_ID_LENGTH] - buf.skip_ub2() # extensions length - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Write message to the network buffers. - """ - cdef: - bytes queue_name_bytes - bytes correlation_bytes - bytes exceptionq_bytes - int enq_flags - - self._write_function_code(buf) - queue_name_bytes = self.queue_impl.name.encode() - buf.write_uint8(1) # queue name (pointer) - buf.write_ub4(len(queue_name_bytes)) # queue name length - buf.write_ub4(self.props_impl.priority) - buf.write_ub4(self.props_impl.delay) - buf.write_sb4(self.props_impl.expiration) - if self.props_impl.correlation is None: - buf.write_ub4(0) # correlation - else: - correlation_bytes = self.props_impl.correlation.encode() - buf.write_ub4(len(correlation_bytes)) - buf.write_bytes_with_length(correlation_bytes) - buf.write_ub4(0) # number of attempts - if self.props_impl.exceptionq is None: - buf.write_ub4(0) # exception queue - else: - exceptionq_bytes = self.props_impl.exceptionq.encode() - buf.write_ub4(len(exceptionq_bytes)) - buf.write_bytes_with_length(exceptionq_bytes) - buf.write_ub4(self.props_impl.state) # message state - buf.write_ub4(0) # enqueue time length - if self.props_impl.enq_txn_id is None: - buf.write_ub4(0) # enqueue txn id length - else: - buf.write_ub4(len(self.props_impl.enq_txn_id)) - buf.write_bytes_with_length(self.props_impl.enq_txn_id) - buf.write_ub4(4) # number of extensions - buf.write_uint8(0x0e) # unknown extra byte - buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_NAME) - buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS) - buf.write_extension_values(None, b'\x00', - TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL) - buf.write_extension_values(None, None, - TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID) - buf.write_ub4(0) # user property - buf.write_ub4(0) # cscn - buf.write_ub4(0) # dscn - buf.write_ub4(0) # flags - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: - buf.write_ub4(0xffffffffl) # shard id - - if self.props_impl.recipients is None: - buf.write_uint8(0) # recipients (pointer) - buf.write_ub4(0) # number of key/value pairs - else: - buf.write_uint8(1) - buf.write_ub4(len(self.props_impl.recipients) * 3) - buf.write_ub4(self.enq_options_impl.visibility) - buf.write_uint8(0) # relative message id - buf.write_ub4(0) # relative message length - buf.write_ub4(0) # sequence deviation - buf.write_uint8(1) # TOID of payload (pointer) - buf.write_ub4(16) # TOID of payload length - buf.write_ub2(self.props_impl.version) - if self.queue_impl.is_json: - buf.write_uint8(0) # payload (pointer) - buf.write_uint8(0) # RAW payload (pointer) - buf.write_ub4(0) # RAW payload length - elif self.queue_impl.payload_type is not None: - buf.write_uint8(1) # payload (pointer) - buf.write_uint8(0) # RAW payload (pointer) - buf.write_ub4(0) # RAW payload (length) - else: - buf.write_uint8(0) # payload (pointer) - buf.write_uint8(1) # RAW payload (pointer) - buf.write_ub4(len(self.props_impl.payloadObject)) - buf.write_uint8(1) # return message id (pointer) - buf.write_ub4(TNS_AQ_MESSAGE_ID_LENGTH) # return message id 
length - enq_flags = 0 - if self.enq_options_impl.delivery_mode == TNS_AQ_MSG_BUFFERED: - enq_flags |= TNS_KPD_AQ_BUFMSG - buf.write_ub4(enq_flags) # enqueue flags - buf.write_uint8(0) # extensions 1 (pointer) - buf.write_ub4(0) # number of extensions 1 - buf.write_uint8(0) # extensions 2 (pointer) - buf.write_ub4(0) # number of extensions 2 - buf.write_uint8(0) # source sequence number - buf.write_ub4(0) # source sequence length - buf.write_uint8(0) # max sequence number - buf.write_ub4(0) # max sequence length - buf.write_uint8(0) # output ack length - buf.write_uint8(0) # correlation (pointer) - buf.write_ub4(0) # correlation length - buf.write_uint8(0) # sender name (pointer) - buf.write_ub4(0) # sender name length - buf.write_uint8(0) # sender address (pointer) - buf.write_ub4(0) # sender address length - buf.write_uint8(0) # sender charset id (pointer) - buf.write_uint8(0) # sender ncharset id (pointer) - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_20_1: - if self.queue_impl.is_json: - buf.write_uint8(1) # JSON payload (pointer) - else: - buf.write_uint8(0) # JSON payload (pointer) - - buf.write_bytes_with_length(queue_name_bytes) - buf.write_bytes(self.queue_impl.payload_toid) - if not self.queue_impl.is_json: - if self.queue_impl.payload_type is not None: - buf.write_dbobject(self.props_impl.payloadObject) - else: - buf.write_bytes(self.props_impl.payloadObject) - if self.queue_impl.is_json: - buf.write_oson(self.props_impl.payloadObject, - self.conn_impl._oson_max_fname_size, False) - - -@cython.final -cdef class LobOpMessage(Message): - cdef: - uint32_t operation - BaseThinLobImpl source_lob_impl - BaseThinLobImpl dest_lob_impl - uint64_t source_offset - uint64_t dest_offset - int64_t amount - bint send_amount - bint bool_flag - object data - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. 
- """ - self.function_code = TNS_FUNC_LOB_OP - - cdef int _process_message(self, ReadBuffer buf, - uint8_t message_type) except -1: - cdef: - const char* encoding - const char_type *ptr - ssize_t num_bytes - if message_type == TNS_MSG_TYPE_LOB_DATA: - buf.read_raw_bytes_and_length(&ptr, &num_bytes) - if self.source_lob_impl.dbtype._ora_type_num in \ - (ORA_TYPE_NUM_BLOB, ORA_TYPE_NUM_BFILE): - self.data = ptr[:num_bytes] - else: - encoding = self.source_lob_impl._get_encoding() - self.data = ptr[:num_bytes].decode(encoding) - else: - Message._process_message(self, buf, message_type) - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - cdef: - cdef const char_type *ptr - ssize_t num_bytes - uint8_t temp8 - if self.source_lob_impl is not None: - num_bytes = len(self.source_lob_impl._locator) - ptr = buf.read_raw_bytes(num_bytes) - self.source_lob_impl._locator = ptr[:num_bytes] - if self.dest_lob_impl is not None: - num_bytes = len(self.dest_lob_impl._locator) - ptr = buf.read_raw_bytes(num_bytes) - self.dest_lob_impl._locator = ptr[:num_bytes] - if self.operation == TNS_LOB_OP_CREATE_TEMP: - buf.skip_ub2() # skip character set - buf.skip_raw_bytes(3) # skip trailing flags, amount - elif self.send_amount: - buf.read_sb8(&self.amount) - if self.operation in (TNS_LOB_OP_IS_OPEN, - TNS_LOB_OP_FILE_EXISTS, - TNS_LOB_OP_FILE_ISOPEN): - buf.read_ub1(&temp8) - self.bool_flag = temp8 > 0 - - cdef int _write_message(self, WriteBuffer buf) except -1: - cdef int i - self._write_function_code(buf) - if self.source_lob_impl is None: - buf.write_uint8(0) # source pointer - buf.write_ub4(0) # source length - else: - buf.write_uint8(1) # source pointer - buf.write_ub4(len(self.source_lob_impl._locator)) - if self.dest_lob_impl is None: - buf.write_uint8(0) # dest pointer - buf.write_ub4(0) # dest length - else: - buf.write_uint8(1) # dest pointer - buf.write_ub4(len(self.dest_lob_impl._locator)) - buf.write_ub4(0) # short source offset - buf.write_ub4(0) # short dest offset - if self.operation == TNS_LOB_OP_CREATE_TEMP: - buf.write_uint8(1) # pointer (character set) - else: - buf.write_uint8(0) # pointer (character set) - buf.write_uint8(0) # pointer (short amount) - if self.operation in (TNS_LOB_OP_CREATE_TEMP, - TNS_LOB_OP_IS_OPEN, - TNS_LOB_OP_FILE_EXISTS, - TNS_LOB_OP_FILE_ISOPEN): - buf.write_uint8(1) # pointer (NULL LOB) - else: - buf.write_uint8(0) # pointer (NULL LOB) - buf.write_ub4(self.operation) - buf.write_uint8(0) # pointer (SCN array) - buf.write_uint8(0) # SCN array length - buf.write_ub8(self.source_offset) - buf.write_ub8(self.dest_offset) - if self.send_amount: - buf.write_uint8(1) # pointer (amount) - else: - buf.write_uint8(0) # pointer (amount) - for i in range(3): # array LOB (not used) - buf.write_uint16be(0) - if self.source_lob_impl is not None: - buf.write_bytes(self.source_lob_impl._locator) - if self.dest_lob_impl is not None: - buf.write_bytes(self.dest_lob_impl._locator) - if self.operation == TNS_LOB_OP_CREATE_TEMP: - if self.source_lob_impl.dbtype._csfrm == CS_FORM_NCHAR: - buf._caps._check_ncharset_id() - buf.write_ub4(TNS_CHARSET_UTF16) - else: - buf.write_ub4(TNS_CHARSET_UTF8) - if self.data is not None: - buf.write_uint8(TNS_MSG_TYPE_LOB_DATA) - buf.write_bytes_with_length(self.data) - if self.send_amount: - buf.write_ub8(self.amount) # LOB amount - - -@cython.final -cdef class LogoffMessage(Message): - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. 
- """ - self.function_code = TNS_FUNC_LOGOFF - - -@cython.final -cdef class PingMessage(Message): - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_PING - - -@cython.final -cdef class ProtocolMessage(Message): - cdef: - uint8_t server_version - uint8_t server_flags - bytes server_compile_caps - bytes server_runtime_caps - bytes server_banner - - cdef int _write_message(self, WriteBuffer buf) except -1: - buf.write_uint8(TNS_MSG_TYPE_PROTOCOL) - buf.write_uint8(6) # protocol version (8.1 and higher) - buf.write_uint8(0) # "array" terminator - buf.write_str(DRIVER_NAME) - buf.write_uint8(0) # NULL terminator - - cdef int _process_message(self, ReadBuffer buf, - uint8_t message_type) except -1: - if message_type == TNS_MSG_TYPE_PROTOCOL: - self._process_protocol_info(buf) - if not buf._caps.supports_end_of_response: - self.end_of_response = True - else: - Message._process_message(self, buf, message_type) - - cdef int _process_protocol_info(self, ReadBuffer buf) except -1: - """ - Processes the response to the protocol request. - """ - cdef: - uint16_t num_elem, fdo_length - Capabilities caps = buf._caps - const char_type *fdo - bytearray temp_array - ssize_t ix - buf.read_ub1(&self.server_version) - buf.skip_ub1() # skip zero byte - self.server_banner = buf.read_null_terminated_bytes() - buf.read_uint16le(&caps.charset_id) - buf.read_ub1(&self.server_flags) - buf.read_uint16le(&num_elem) - if num_elem > 0: # skip elements - buf.skip_raw_bytes(num_elem * 5) - buf.read_uint16be(&fdo_length) - fdo = buf.read_raw_bytes(fdo_length) - ix = 6 + fdo[5] + fdo[6] - caps.ncharset_id = (fdo[ix + 3] << 8) + fdo[ix + 4] - self.server_compile_caps = buf.read_bytes() - if self.server_compile_caps is not None: - temp_array = bytearray(self.server_compile_caps) - caps._adjust_for_server_compile_caps(temp_array) - if caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1: - self.conn_impl._oson_max_fname_size = 65535 - self.server_runtime_caps = buf.read_bytes() - if self.server_runtime_caps is not None: - temp_array = bytearray(self.server_runtime_caps) - caps._adjust_for_server_runtime_caps(temp_array) - - -@cython.final -cdef class FastAuthMessage(Message): - cdef: - DataTypesMessage data_types_message - ProtocolMessage protocol_message - AuthMessage auth_message - - cdef int _process_message(self, ReadBuffer buf, - uint8_t message_type) except -1: - """ - Processes the messages returned from the server response. - """ - if message_type == TNS_MSG_TYPE_PROTOCOL: - ProtocolMessage._process_message(self.protocol_message, buf, - message_type) - elif message_type == TNS_MSG_TYPE_DATA_TYPES: - DataTypesMessage._process_message(self.data_types_message, buf, - message_type) - else: - AuthMessage._process_message(self.auth_message, buf, message_type) - self.end_of_response = self.auth_message.end_of_response - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Writes the message to the buffer. This includes not just this message - but also the protocol, data types and auth messages. This reduces the - number of round-trips to the database and thereby increases - performance. 
- """ - buf.write_uint8(TNS_MSG_TYPE_FAST_AUTH) - buf.write_uint8(1) # fast auth version - buf.write_uint8(TNS_SERVER_CONVERTS_CHARS) # flag 1 - buf.write_uint8(0) # flag 2 - ProtocolMessage._write_message(self.protocol_message, buf) - buf.write_uint16be(0) # server charset (unused) - buf.write_uint8(0) # server charset flag (unused) - buf.write_uint16be(0) # server ncharset (unused) - buf._caps.ttc_field_version = TNS_CCAP_FIELD_VERSION_19_1_EXT_1 - buf.write_uint8(buf._caps.ttc_field_version) - DataTypesMessage._write_message(self.data_types_message, buf) - AuthMessage._write_message(self.auth_message, buf) - buf._caps.ttc_field_version = TNS_CCAP_FIELD_VERSION_MAX - - -@cython.final -cdef class RollbackMessage(Message): - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_ROLLBACK - - -@cython.final -cdef class SessionReleaseMessage(Message): - - cdef: - uint32_t release_mode - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.message_type = TNS_MSG_TYPE_ONEWAY_FN - self.function_code = TNS_FUNC_SESSION_RELEASE - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Write the message for a DRCP session release. - """ - self._write_function_code(buf) - buf.write_uint8(0) # pointer (tag name) - buf.write_uint8(0) # tag name length - buf.write_ub4(self.release_mode) # mode - - -@cython.final -cdef class TransactionChangeStateMessage(Message): - """ - Used for two-phase commit (TPC) transaction change state: commit, rollback, - forget, etc. - """ - cdef: - uint32_t operation, state, flags - bytes context - object xid - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_TPC_TXN_CHANGE_STATE - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - """ - Process the parameters returned by the database. - """ - buf.read_ub4(&self.state) - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Writes the message to the database. 
- """ - cdef: - bytes global_transaction_id, branch_qualifier, xid_bytes - uint32_t format_id = 0 - - # acquire data to send to the server - if self.xid is not None: - format_id = self.xid[0] - global_transaction_id = self.xid[1] \ - if isinstance(self.xid[1], bytes) \ - else self.xid[1].encode() - branch_qualifier = self.xid[2] \ - if isinstance(self.xid[2], bytes) \ - else self.xid[2].encode() - xid_bytes = global_transaction_id + branch_qualifier - xid_bytes += bytes(128 - len(xid_bytes)) - - self._write_function_code(buf) - buf.write_ub4(self.operation) - if self.context is not None: - buf.write_uint8(1) # pointer (context) - buf.write_ub4(len(self.context)) - else: - buf.write_uint8(0) # pointer (context) - buf.write_ub4(0) # context length - if self.xid is not None: - buf.write_ub4(format_id) - buf.write_ub4(len(global_transaction_id)) - buf.write_ub4(len(branch_qualifier)) - buf.write_uint8(1) # pointer (xid) - buf.write_ub4(len(xid_bytes)) - else: - buf.write_ub4(0) # format id - buf.write_ub4(0) # global transaction id length - buf.write_ub4(0) # branch qualifier length - buf.write_uint8(0) # pointer (xid) - buf.write_ub4(0) # XID length - buf.write_ub4(0) # timeout - buf.write_ub4(self.state) - buf.write_uint8(1) # pointer (out state) - buf.write_ub4(self.flags) - if self.context is not None: - buf.write_bytes(self.context) - if self.xid is not None: - buf.write_bytes(xid_bytes) - - -@cython.final -cdef class TransactionSwitchMessage(Message): - """ - Used for two-phase commit (TPC) transaction start, attach and detach. - """ - cdef: - uint32_t operation, flags, timeout, application_value - bytes context - object xid - - cdef int _initialize_hook(self) except -1: - """ - Perform initialization. - """ - self.function_code = TNS_FUNC_TPC_TXN_SWITCH - - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: - """ - Process the parameters returned by the database. - """ - cdef: - const char_type* ptr - uint16_t context_len - buf.read_ub4(&self.application_value) - buf.read_ub2(&context_len) - ptr = buf.read_raw_bytes(context_len) - self.context = ptr[:context_len] - - cdef int _write_message(self, WriteBuffer buf) except -1: - """ - Writes the message to the database. 
- """ - cdef: - bytes global_transaction_id, branch_qualifier, xid_bytes - bytes internal_name = None, external_name = None - uint32_t format_id = 0 - - # acquire data to send to the server - if self.xid is not None: - format_id = self.xid[0] - global_transaction_id = self.xid[1] \ - if isinstance(self.xid[1], bytes) \ - else self.xid[1].encode() - branch_qualifier = self.xid[2] \ - if isinstance(self.xid[2], bytes) \ - else self.xid[2].encode() - xid_bytes = global_transaction_id + branch_qualifier - xid_bytes += bytes(128 - len(xid_bytes)) - if self.conn_impl._internal_name is not None: - internal_name = self.conn_impl._internal_name.encode() - if self.conn_impl._external_name is not None: - external_name = self.conn_impl._external_name.encode() - - # write message - self._write_function_code(buf) - buf.write_ub4(self.operation) - if self.context is not None: - buf.write_uint8(1) # pointer (transaction context) - buf.write_ub4(len(self.context)) - else: - buf.write_uint8(0) # pointer (transaction context) - buf.write_ub4(0) # transaction context length - if self.xid is not None: - buf.write_ub4(format_id) - buf.write_ub4(len(global_transaction_id)) - buf.write_ub4(len(branch_qualifier)) - buf.write_uint8(1) # pointer (XID) - buf.write_ub4(len(xid_bytes)) - else: - buf.write_ub4(0) # format id - buf.write_ub4(0) # global transaction id length - buf.write_ub4(0) # branch qualifier length - buf.write_uint8(0) # pointer (XID) - buf.write_ub4(0) # XID length - buf.write_ub4(self.flags) - buf.write_ub4(self.timeout) - buf.write_uint8(1) # pointer (application value) - buf.write_uint8(1) # pointer (return context) - buf.write_uint8(1) # pointer (return context length) - if internal_name is not None: - buf.write_uint8(1) # pointer (internal name) - buf.write_ub4(len(internal_name)) - else: - buf.write_uint8(0) # pointer (internal name) - buf.write_ub4(0) # length of internal name - if external_name is not None: - external_name = self.conn_impl._external_name.encode() - buf.write_uint8(1) # pointer (external name) - buf.write_ub4(len(external_name)) - else: - buf.write_uint8(0) # pointer (external name) - buf.write_ub4(0) # length of external name - if self.context is not None: - buf.write_bytes(self.context) - if self.xid is not None: - buf.write_bytes(xid_bytes) - buf.write_ub4(self.application_value) - if internal_name is not None: - buf.write_bytes(internal_name) - if external_name is not None: - buf.write_bytes(external_name) diff --git a/src/oracledb/impl/thin/messages/auth.pyx b/src/oracledb/impl/thin/messages/auth.pyx new file mode 100644 index 00000000..4597fd7f --- /dev/null +++ b/src/oracledb/impl/thin/messages/auth.pyx @@ -0,0 +1,454 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# auth.pyx +# +# Cython file defining the messages sent to the database and the responses that +# are received by the client for authentication (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class AuthMessage(Message): + cdef: + str encoded_password + bytes password + bytes newpassword + str encoded_newpassword + str encoded_jdwp_data + str debug_jdwp + str session_key + str speedy_key + str proxy_user + str token + str private_key + str service_name + uint8_t purity + ssize_t user_bytes_len + bytes user_bytes + dict session_data + uint32_t auth_mode + uint32_t verifier_type + bint change_password + str program + str terminal + str machine + str osuser + str driver_name + str edition + list appcontext + str connect_string + + cdef int _encrypt_passwords(self) except -1: + """ + Encrypts the passwords using the session key. + """ + + # encrypt password + salt = secrets.token_bytes(16) + password_with_salt = salt + self.password + encrypted_password = encrypt_cbc(self.conn_impl._combo_key, + password_with_salt) + self.encoded_password = encrypted_password.hex().upper() + + # encrypt new password + if self.newpassword is not None: + newpassword_with_salt = salt + self.newpassword + encrypted_newpassword = encrypt_cbc(self.conn_impl._combo_key, + newpassword_with_salt) + self.encoded_newpassword = encrypted_newpassword.hex().upper() + + cdef int _generate_verifier(self) except -1: + """ + Generate the multi-round verifier. 
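+        In outline (as implemented below): a password hash is computed
+        (a PBKDF2-derived key hashed with SHA-512 for 12c verifiers, SHA-1
+        for 11g verifiers), that hash is used to decrypt the server half of
+        the session key, a client half is generated, and the two halves are
+        combined into the "combo" key used to encrypt the password, the
+        optional speedy key and any JDWP debug data.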
+ """ + cdef: + bytes jdwp_data + bytearray b + ssize_t i + + # create password hash + verifier_data = bytes.fromhex(self.session_data['AUTH_VFR_DATA']) + if self.verifier_type == TNS_VERIFIER_TYPE_12C: + keylen = 32 + iterations = int(self.session_data['AUTH_PBKDF2_VGEN_COUNT']) + salt = verifier_data + b'AUTH_PBKDF2_SPEEDY_KEY' + password_key = get_derived_key(self.password, salt, 64, + iterations) + h = hashlib.new("sha512") + h.update(password_key) + h.update(verifier_data) + password_hash = h.digest()[:32] + else: + keylen = 24 + h = hashlib.sha1(self.password) + h.update(verifier_data) + password_hash = h.digest() + bytes(4) + + # decrypt first half of session key + encoded_server_key = bytes.fromhex(self.session_data['AUTH_SESSKEY']) + session_key_part_a = decrypt_cbc(password_hash, encoded_server_key) + + # generate second half of session key + session_key_part_b = secrets.token_bytes(len(session_key_part_a)) + encoded_client_key = encrypt_cbc(password_hash, session_key_part_b) + + # create session key and combo key + if len(session_key_part_a) == 48: + self.session_key = encoded_client_key.hex().upper()[:96] + b = bytearray(24) + for i in range(16, 40): + b[i - 16] = session_key_part_a[i] ^ session_key_part_b[i] + part1 = hashlib.md5(b[:16]).digest() + part2 = hashlib.md5(b[16:]).digest() + combo_key = (part1 + part2)[:keylen] + else: + self.session_key = encoded_client_key.hex().upper()[:64] + salt = bytes.fromhex(self.session_data['AUTH_PBKDF2_CSK_SALT']) + iterations = int(self.session_data['AUTH_PBKDF2_SDER_COUNT']) + temp_key = session_key_part_b[:keylen] + session_key_part_a[:keylen] + combo_key = get_derived_key(temp_key.hex().upper().encode(), salt, + keylen, iterations) + + # retain session key for use by the change password API + self.conn_impl._combo_key = combo_key + + # generate speedy key for 12c verifiers + if self.verifier_type == TNS_VERIFIER_TYPE_12C: + salt = secrets.token_bytes(16) + speedy_key = encrypt_cbc(combo_key, salt + password_key) + self.speedy_key = speedy_key[:80].hex().upper() + + # encrypts the passwords + self._encrypt_passwords() + + # check if debug_jdwp is set. if set, encode the data using the + # combo session key with zeros padding + if self.debug_jdwp is not None: + jdwp_data = self.debug_jdwp.encode() + encrypted_jdwp_data = encrypt_cbc(combo_key, jdwp_data, zeros=True) + # Add a "01" at the end of the hex encrypted data to indicate the + # use of AES encryption + self.encoded_jdwp_data = encrypted_jdwp_data.hex().upper() + "01" + + cdef str _get_alter_timezone_statement(self): + """ + Returns the statement required to change the session time zone to match + the time zone in use by the Python interpreter. + """ + cdef: + int tz_hour, tz_minute, timezone + str sign, tz_repr + tz_repr = os.environ.get("ORA_SDTZ") + if tz_repr is None: + timezone = time.localtime().tm_gmtoff + tz_hour = timezone // 3600 + tz_minute = (timezone - (tz_hour * 3600)) // 60 + if tz_hour < 0: + sign = "-" + tz_hour = -tz_hour + else: + sign = "+" + tz_repr = f"{sign}{tz_hour:02}:{tz_minute:02}" + return f"ALTER SESSION SET TIME_ZONE='{tz_repr}'\x00" + + cdef tuple _get_version_tuple(self, ReadBuffer buf): + """ + Return the 5-tuple for the database version. Note that the format + changed with Oracle Database 18. 
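+        For example (illustrative value only): with AUTH_VERSION_NO equal
+        to 0x17040001 and a TTC field version of 18.1 or later, the decoded
+        tuple is (23, 4, 0, 0, 1).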
+ """ + cdef uint32_t full_version_num + full_version_num = int(self.session_data["AUTH_VERSION_NO"]) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_18_1_EXT_1: + return ((full_version_num >> 24) & 0xFF, + (full_version_num >> 16) & 0xFF, + (full_version_num >> 12) & 0x0F, + (full_version_num >> 4) & 0xFF, + (full_version_num & 0x0F)) + else: + return ((full_version_num >> 24) & 0xFF, + (full_version_num >> 20) & 0x0F, + (full_version_num >> 12) & 0x0F, + (full_version_num >> 8) & 0x0F, + (full_version_num & 0x0F)) + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_AUTH_PHASE_ONE + self.session_data = {} + if self.conn_impl.username is not None: + self.user_bytes = self.conn_impl.username.encode() + self.user_bytes_len = len(self.user_bytes) + self.resend = True + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + cdef: + uint16_t num_params, i + uint32_t num_bytes + str key, value + buf.read_ub2(&num_params) + for i in range(num_params): + buf.skip_ub4() + key = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + value = buf.read_str(CS_FORM_IMPLICIT) + else: + value = "" + if key == "AUTH_VFR_DATA": + buf.read_ub4(&self.verifier_type) + else: + buf.skip_ub4() # skip flags + self.session_data[key] = value + if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: + self.function_code = TNS_FUNC_AUTH_PHASE_TWO + elif not self.change_password: + self.conn_impl._session_id = \ + int(self.session_data["AUTH_SESSION_ID"]) + self.conn_impl._serial_num = \ + int(self.session_data["AUTH_SERIAL_NUM"]) + self.conn_impl._db_domain = \ + self.session_data.get("AUTH_SC_DB_DOMAIN") + self.conn_impl._db_name = \ + self.session_data.get("AUTH_SC_DBUNIQUE_NAME") + self.conn_impl._max_open_cursors = \ + int(self.session_data.get("AUTH_MAX_OPEN_CURSORS", 0)) + self.conn_impl._service_name = \ + self.session_data.get("AUTH_SC_SERVICE_NAME") + self.conn_impl._instance_name = \ + self.session_data.get("AUTH_INSTANCENAME") + self.conn_impl._max_identifier_length = \ + int(self.session_data.get("AUTH_MAX_IDEN_LENGTH", 30)) + self.conn_impl.server_version = self._get_version_tuple(buf) + self.conn_impl.supports_bool = \ + buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1 + self.conn_impl._edition = self.edition + + cdef int _set_params(self, ConnectParamsImpl params, + Description description) except -1: + """ + Sets the parameters to use for the AuthMessage. The user and auth mode + are retained in order to avoid duplicating this effort for both trips + to the server. 
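+        Note that when token authentication is used, only the second phase
+        of authentication is sent (the initial round-trip is skipped).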
+ """ + self.password = params._get_password() + self.newpassword = params._get_new_password() + self.service_name = description.service_name + self.proxy_user = params.proxy_user + self.debug_jdwp = params.debug_jdwp + self.program = params.program + self.terminal = params.terminal + self.machine = params.machine + self.osuser = params.osuser + self.driver_name = params.driver_name + if self.driver_name is None: + self.driver_name = f"{DRIVER_NAME} thn : {DRIVER_VERSION}" + self.edition = params.edition + self.appcontext = params.appcontext + self.connect_string = params._get_connect_string() + + # if drcp is used, use purity = NEW as the default purity for + # standalone connections and purity = SELF for connections that belong + # to a pool + if description.purity == PURITY_DEFAULT \ + and self.conn_impl._drcp_enabled: + if self.conn_impl._pool is None: + self.purity = PURITY_NEW + else: + self.purity = PURITY_SELF + else: + self.purity = description.purity + + # set token parameters; adjust processing so that only phase two is + # sent + if params._token is not None \ + or params.access_token_callback is not None: + self.token = params._get_token() + if params._private_key is not None: + self.private_key = params._get_private_key() + self.function_code = TNS_FUNC_AUTH_PHASE_TWO + self.resend = False + + # set authentication mode + if params._new_password is None: + self.auth_mode = TNS_AUTH_MODE_LOGON + if params.mode & AUTH_MODE_SYSDBA: + self.auth_mode |= TNS_AUTH_MODE_SYSDBA + if params.mode & AUTH_MODE_SYSOPER: + self.auth_mode |= TNS_AUTH_MODE_SYSOPER + if params.mode & AUTH_MODE_SYSASM: + self.auth_mode |= TNS_AUTH_MODE_SYSASM + if params.mode & AUTH_MODE_SYSBKP: + self.auth_mode |= TNS_AUTH_MODE_SYSBKP + if params.mode & AUTH_MODE_SYSDGD: + self.auth_mode |= TNS_AUTH_MODE_SYSDGD + if params.mode & AUTH_MODE_SYSKMT: + self.auth_mode |= TNS_AUTH_MODE_SYSKMT + if params.mode & AUTH_MODE_SYSRAC: + self.auth_mode |= TNS_AUTH_MODE_SYSRAC + if self.private_key is not None: + self.auth_mode |= TNS_AUTH_MODE_IAM_TOKEN + + cdef int _write_key_value(self, WriteBuffer buf, str key, str value, + uint32_t flags=0) except -1: + cdef: + bytes key_bytes = key.encode() + bytes value_bytes = value.encode() + uint32_t key_len = len(key_bytes) + uint32_t value_len = len(value_bytes) + buf.write_ub4(key_len) + buf.write_bytes_with_length(key_bytes) + buf.write_ub4(value_len) + if value_len > 0: + buf.write_bytes_with_length(value_bytes) + buf.write_ub4(flags) + + cdef int _write_message(self, WriteBuffer buf) except -1: + cdef: + uint8_t has_user = 1 if self.user_bytes_len > 0 else 0 + uint32_t num_pairs + + # perform final determination of data to write + if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: + num_pairs = 5 + elif self.change_password: + self._encrypt_passwords() + num_pairs = 2 + else: + num_pairs = 4 + + # token authentication + if self.token is not None: + num_pairs += 1 + + # normal user/password authentication + else: + num_pairs += 2 + self.auth_mode |= TNS_AUTH_MODE_WITH_PASSWORD + if self.verifier_type == TNS_VERIFIER_TYPE_12C: + num_pairs += 1 + elif self.verifier_type not in (TNS_VERIFIER_TYPE_11G_1, + TNS_VERIFIER_TYPE_11G_2): + errors._raise_err(errors.ERR_UNSUPPORTED_VERIFIER_TYPE, + verifier_type=self.verifier_type) + self._generate_verifier() + + # determine which other key/value pairs to write + if self.newpassword is not None: + num_pairs += 1 + self.auth_mode |= TNS_AUTH_MODE_CHANGE_PASSWORD + if self.proxy_user is not None: + num_pairs += 1 + if self.conn_impl._cclass 
is not None: + num_pairs += 1 + if self.purity != 0: + num_pairs += 1 + if self.private_key is not None: + num_pairs += 2 + if self.encoded_jdwp_data is not None: + num_pairs += 1 + if self.edition is not None: + num_pairs += 1 + if self.appcontext is not None: + num_pairs += len(self.appcontext) * 3 + if self.connect_string is not None: + num_pairs += 1 + + # write basic data to packet + self._write_function_code(buf) + buf.write_uint8(has_user) # pointer (authusr) + buf.write_ub4(self.user_bytes_len) + buf.write_ub4(self.auth_mode) # authentication mode + buf.write_uint8(1) # pointer (authivl) + buf.write_ub4(num_pairs) # number of key/value pairs + buf.write_uint8(1) # pointer (authovl) + buf.write_uint8(1) # pointer (authovln) + if has_user: + buf.write_bytes_with_length(self.user_bytes) + + # write key/value pairs + if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: + self._write_key_value(buf, "AUTH_TERMINAL", self.terminal) + self._write_key_value(buf, "AUTH_PROGRAM_NM", self.program) + self._write_key_value(buf, "AUTH_MACHINE", self.machine) + self._write_key_value(buf, "AUTH_PID", _connect_constants.pid) + self._write_key_value(buf, "AUTH_SID", self.osuser) + else: + if self.proxy_user is not None: + self._write_key_value(buf, "PROXY_CLIENT_NAME", + self.proxy_user) + if self.token is not None: + self._write_key_value(buf, "AUTH_TOKEN", self.token) + elif not self.change_password: + self._write_key_value(buf, "AUTH_SESSKEY", self.session_key, 1) + if self.verifier_type == TNS_VERIFIER_TYPE_12C: + self._write_key_value(buf, "AUTH_PBKDF2_SPEEDY_KEY", + self.speedy_key) + if self.encoded_password is not None: + self._write_key_value(buf, "AUTH_PASSWORD", + self.encoded_password) + if self.encoded_newpassword is not None: + self._write_key_value(buf, "AUTH_NEWPASSWORD", + self.encoded_newpassword) + if not self.change_password: + self._write_key_value(buf, "SESSION_CLIENT_CHARSET", "873") + self._write_key_value(buf, "SESSION_CLIENT_DRIVER_NAME", + self.driver_name) + self._write_key_value(buf, "SESSION_CLIENT_VERSION", + str(_connect_constants.full_version_num)) + self._write_key_value(buf, "AUTH_ALTER_SESSION", + self._get_alter_timezone_statement(), 1) + if self.conn_impl._cclass is not None: + self._write_key_value(buf, "AUTH_KPPL_CONN_CLASS", + self.conn_impl._cclass) + if self.purity != 0: + self._write_key_value(buf, "AUTH_KPPL_PURITY", + str(self.purity), 1) + if self.private_key is not None: + date_format = "%a, %d %b %Y %H:%M:%S GMT" + now = datetime.datetime.utcnow().strftime(date_format) + host_info = "%s:%d" % buf._transport.get_host_info() + header = f"date: {now}\n" + \ + f"(request-target): {self.service_name}\n" + \ + f"host: {host_info}" + signature = get_signature(self.private_key, header) + self._write_key_value(buf, "AUTH_HEADER", header) + self._write_key_value(buf, "AUTH_SIGNATURE", signature) + if self.encoded_jdwp_data is not None: + self._write_key_value(buf, "AUTH_ORA_DEBUG_JDWP", + self.encoded_jdwp_data) + if self.edition is not None: + self._write_key_value(buf, "AUTH_ORA_EDITION", self.edition) + if self.appcontext is not None: + # NOTE: these keys require a trailing null character as the + # server expects it! 
+ for entry in self.appcontext: + self._write_key_value(buf, "AUTH_APPCTX_NSPACE\0", entry[0]) + self._write_key_value(buf, "AUTH_APPCTX_ATTR\0", entry[1]) + self._write_key_value(buf, "AUTH_APPCTX_VALUE\0", entry[2]) + if self.connect_string is not None: + self._write_key_value(buf, "AUTH_CONNECT_STRING", + self.connect_string) diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx new file mode 100644 index 00000000..39449d8d --- /dev/null +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -0,0 +1,1482 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# base.pyx +# +# Cython file defining the base classes used for messages sent to the database +# and the responses that are received by the client (embedded in +# thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.freelist(20) +cdef class _OracleErrorInfo: + cdef: + uint32_t num + uint16_t cursor_id + uint64_t pos + uint64_t rowcount + str message + Rowid rowid + list batcherrors + + +cdef class Message: + cdef: + BaseThinConnImpl conn_impl + PipelineOpResultImpl pipeline_result_impl + _OracleErrorInfo error_info + uint8_t message_type + uint8_t function_code + uint32_t call_status + uint16_t end_to_end_seq_num + uint64_t token_num + bint end_of_response + bint error_occurred + bint flush_out_binds + bint resend + bint retry + object warning + + cdef int _check_and_raise_exception(self) except -1: + """ + Checks to see if an error has occurred. If one has, an error object is + created and then the appropriate exception raised. Note that if a "dead + connection" error is detected, the connection is forced closed + immediately. 
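+        Errors with the codes listed below are additionally marked as
+        recoverable (the raised error has isrecoverable set to True).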
+ """ + cdef bint is_recoverable = False + if self.error_occurred: + if self.error_info.num in ( + 28, # session has been terminated + 31, # session marked for kill + 376, # file %s cannot be read at this time + 603, # ORACLE server session terminated + 1012, # not logged on + 1033, # ORACLE initialization or shutdown in progress + 1034, # the Oracle instance is not available for use + 1089, # immediate shutdown or close in progress + 1090, # shutdown in progress + 1092, # ORACLE instance terminated + 1115, # IO error reading block from file %s (block # %s) + 2396, # exceeded maximum idle time + 3113, # end-of-file on communication channel + 3114, # not connected to ORACLE + 3135, # connection lost contact + 12153, # TNS:not connected + 12514, # Service %s is not registered with the listener + 12537, # TNS:connection closed + 12547, # TNS:lost contact + 12570, # TNS:packet reader failure + 12571, # TNS:packet writer failure + 12583, # TNS:no reader + 12757, # instance does not currently know of requested service + 16456, # missing or invalid value + ): + is_recoverable = True + error = errors._Error(self.error_info.message, + code=self.error_info.num, + offset=self.error_info.pos, + isrecoverable=is_recoverable) + if error.is_session_dead: + self.conn_impl._protocol._force_close() + raise error.exc_type(error) + + cdef int _initialize(self, BaseThinConnImpl conn_impl) except -1: + """ + Initializes the message to contain the connection and a place to store + error information. For DRCP, the status of the connection may change + after the first round-trip to the database so this information needs to + be preserved. Child classes may have their own initialization. In order + to avoid overhead using the constructor, a special hook method is used + instead. + """ + conn_impl._protocol._read_buf._check_connected() + self.conn_impl = conn_impl + self.message_type = TNS_MSG_TYPE_FUNCTION + self.error_info = _OracleErrorInfo.__new__(_OracleErrorInfo) + self._initialize_hook() + + cdef int _initialize_hook(self) except -1: + """ + A hook that is used by subclasses to perform any necessary + initialization specific to that class. + """ + pass + + cdef int _process_error_info(self, ReadBuffer buf) except -1: + cdef: + uint32_t num_bytes, i, offset, num_offsets + _OracleErrorInfo info = self.error_info + uint16_t temp16, num_errors, error_code + uint8_t first_byte, flags + int16_t error_pos + str error_msg + buf.read_ub4(&self.call_status) # end of call status + buf.skip_ub2() # end to end seq# + buf.skip_ub4() # current row number + buf.skip_ub2() # error number + buf.skip_ub2() # array elem error + buf.skip_ub2() # array elem error + buf.read_ub2(&info.cursor_id) # cursor id + buf.read_sb2(&error_pos) # error position + buf.skip_ub1() # sql type (19c and earlier) + buf.skip_ub1() # fatal? 
+ buf.skip_ub1() # flags + buf.skip_ub1() # user cursor options + buf.skip_ub1() # UPI parameter + buf.read_ub1(&flags) + if flags & 0x20: + self.warning = errors._create_warning(errors.WRN_COMPILATION_ERROR) + buf.read_rowid(&info.rowid) # rowid + buf.skip_ub4() # OS error + buf.skip_ub1() # statement number + buf.skip_ub1() # call number + buf.skip_ub2() # padding + buf.skip_ub4() # success iters + buf.read_ub4(&num_bytes) # oerrdd (logical rowid) + if num_bytes > 0: + buf.skip_raw_bytes_chunked() + + # batch error codes + buf.read_ub2(&num_errors) # batch error codes array + if num_errors > 0: + info.batcherrors = [] + buf.read_ub1(&first_byte) + for i in range(num_errors): + if first_byte == TNS_LONG_LENGTH_INDICATOR: + buf.skip_ub4() # chunk length ignored + buf.read_ub2(&error_code) + info.batcherrors.append(errors._Error(code=error_code)) + if first_byte == TNS_LONG_LENGTH_INDICATOR: + buf.skip_raw_bytes(1) # ignore end marker + + # batch error offsets + buf.read_ub4(&num_offsets) # batch error row offset array + if num_offsets > 0: + if num_offsets > 65535: + errors._raise_err(errors.ERR_TOO_MANY_BATCH_ERRORS) + buf.read_ub1(&first_byte) + for i in range(num_offsets): + if first_byte == TNS_LONG_LENGTH_INDICATOR: + buf.skip_ub4() # chunk length ignored + buf.read_ub4(&offset) + if i < num_errors: + info.batcherrors[i].offset = offset + if first_byte == TNS_LONG_LENGTH_INDICATOR: + buf.skip_raw_bytes(1) # ignore end marker + + # batch error messages + buf.read_ub2(&temp16) # batch error messages array + if temp16 > 0: + buf.skip_raw_bytes(1) # ignore packed size + for i in range(temp16): + buf.skip_ub2() # skip chunk length + info.batcherrors[i].message = \ + buf.read_str(CS_FORM_IMPLICIT).rstrip() + info.batcherrors[i]._make_adjustments() + buf.skip_raw_bytes(2) # ignore end marker + + buf.read_ub4(&info.num) # error number (extended) + buf.read_ub8(&info.rowcount) # row number (extended) + + # fields added in Oracle Database 20c + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_20_1: + buf.skip_ub4() # sql type + buf.skip_ub4() # server checksum + + # error message + if info.num != 0: + self.error_occurred = True + if error_pos > 0: + info.pos = error_pos + info.message = buf.read_str(CS_FORM_IMPLICIT).rstrip() + + # an error message marks the end of a response if no explicit end of + # response is available + if not buf._caps.supports_end_of_response: + self.end_of_response = True + + cdef int _process_message(self, ReadBuffer buf, + uint8_t message_type) except -1: + cdef uint64_t token_num + if message_type == TNS_MSG_TYPE_ERROR: + self._process_error_info(buf) + elif message_type == TNS_MSG_TYPE_WARNING: + self._process_warning_info(buf) + elif message_type == TNS_MSG_TYPE_TOKEN: + buf.read_ub8(&token_num) + if token_num != self.token_num: + errors._raise_err(errors.ERR_MISMATCHED_TOKEN, + token_num=token_num, + expected_token_num=self.token_num) + elif message_type == TNS_MSG_TYPE_STATUS: + buf.read_ub4(&self.call_status) + buf.read_ub2(&self.end_to_end_seq_num) + if not buf._caps.supports_end_of_response: + self.end_of_response = True + elif message_type == TNS_MSG_TYPE_PARAMETER: + self._process_return_parameters(buf) + elif message_type == TNS_MSG_TYPE_SERVER_SIDE_PIGGYBACK: + self._process_server_side_piggyback(buf) + elif message_type == TNS_MSG_TYPE_END_OF_RESPONSE: + self.end_of_response = True + else: + errors._raise_err(errors.ERR_MESSAGE_TYPE_UNKNOWN, + message_type=message_type, + position=buf._pos - 1) + + cdef int _process_return_parameters(self, 
ReadBuffer buf) except -1: + raise NotImplementedError() + + cdef int _process_server_side_piggyback(self, ReadBuffer buf) except -1: + cdef: + uint16_t num_elements, i, temp16 + uint32_t num_bytes, flags + uint8_t opcode + buf.read_ub1(&opcode) + if opcode == TNS_SERVER_PIGGYBACK_LTXID: + buf.read_ub4(&num_bytes) + if num_bytes > 0: + self.conn_impl._ltxid = buf.read_bytes() + elif opcode == TNS_SERVER_PIGGYBACK_QUERY_CACHE_INVALIDATION \ + or opcode == TNS_SERVER_PIGGYBACK_TRACE_EVENT: + pass + elif opcode == TNS_SERVER_PIGGYBACK_OS_PID_MTS: + buf.read_ub2(&temp16) + buf.skip_raw_bytes_chunked() + elif opcode == TNS_SERVER_PIGGYBACK_SYNC: + buf.skip_ub2() # skip number of DTYs + buf.skip_ub1() # skip length of DTYs + buf.read_ub2(&num_elements) + buf.skip_ub1() # skip length + for i in range(num_elements): + buf.read_ub2(&temp16) + if temp16 > 0: # skip key + buf.skip_raw_bytes_chunked() + buf.read_ub2(&temp16) + if temp16 > 0: # skip value + buf.skip_raw_bytes_chunked() + buf.skip_ub2() # skip flags + buf.skip_ub4() # skip overall flags + elif opcode == TNS_SERVER_PIGGYBACK_EXT_SYNC: + buf.skip_ub2() # skip number of DTYs + buf.skip_ub1() # skip length of DTYs + elif opcode == TNS_SERVER_PIGGYBACK_AC_REPLAY_CONTEXT: + buf.skip_ub2() # skip number of DTYs + buf.skip_ub1() # skip length of DTYs + buf.skip_ub4() # skip flags + buf.skip_ub4() # skip error code + buf.skip_ub1() # skip queue + buf.read_ub4(&num_bytes) # skip replay context + if num_bytes > 0: + buf.skip_raw_bytes_chunked() + elif opcode == TNS_SERVER_PIGGYBACK_SESS_RET: + buf.skip_ub2() + buf.skip_ub1() + buf.read_ub2(&num_elements) + if num_elements > 0: + buf.skip_ub1() + for i in range(num_elements): + buf.read_ub2(&temp16) + if temp16 > 0: # skip key + buf.skip_raw_bytes_chunked() + buf.read_ub2(&temp16) + if temp16 > 0: # skip value + buf.skip_raw_bytes_chunked() + buf.skip_ub2() # skip flags + buf.read_ub4(&flags) # session flags + if flags & TNS_SESSGET_SESSION_CHANGED: + if self.conn_impl._drcp_establish_session: + self.conn_impl._statement_cache.clear_open_cursors() + self.conn_impl._drcp_establish_session = False + buf.read_ub4(&self.conn_impl._session_id) + buf.read_ub2(&self.conn_impl._serial_num) + else: + errors._raise_err(errors.ERR_UNKNOWN_SERVER_PIGGYBACK, + opcode=opcode) + + cdef int _process_warning_info(self, ReadBuffer buf) except -1: + cdef: + uint16_t num_bytes, error_num + str message + buf.read_ub2(&error_num) # error number + buf.read_ub2(&num_bytes) # length of error message + buf.skip_ub2() # flags + if error_num != 0 and num_bytes > 0: + message = buf.read_str(CS_FORM_IMPLICIT).rstrip() + self.warning = errors._Error(message, code=error_num, + iswarning=True) + + cdef int _write_begin_pipeline_piggyback(self, WriteBuffer buf) except -1: + """ + Writes the piggyback to the server that informs the server that a + pipeline is beginning. + """ + buf._data_flags |= TNS_DATA_FLAGS_BEGIN_PIPELINE + self._write_piggyback_code(buf, TNS_FUNC_PIPELINE_BEGIN) + buf.write_ub2(0) # error set ID + buf.write_uint8(0) # error set mode + buf.write_uint8(self.conn_impl.pipeline_mode) + + cdef int _write_close_cursors_piggyback(self, WriteBuffer buf) except -1: + """ + Writes the piggyback that informs the server of the cursors that can be + closed. 
+ """ + self._write_piggyback_code(buf, TNS_FUNC_CLOSE_CURSORS) + buf.write_uint8(1) # pointer + self.conn_impl._statement_cache.write_cursors_to_close(buf) + + cdef int _write_current_schema_piggyback(self, WriteBuffer buf) except -1: + """ + Writes the piggyback that informs the server that a new current schema + is desired. + """ + cdef bytes schema_bytes + self._write_piggyback_code(buf, TNS_FUNC_SET_SCHEMA) + buf.write_uint8(1) # pointer + schema_bytes = self.conn_impl._current_schema.encode() + buf.write_ub4(len(schema_bytes)) + buf.write_bytes_with_length(schema_bytes) + + cdef int _write_close_temp_lobs_piggyback(self, + WriteBuffer buf) except -1: + """ + Writes the piggyback that informs the server of the temporary LOBs that + can be closed. + """ + cdef: + list lobs_to_close = self.conn_impl._temp_lobs_to_close + uint64_t total_size = 0 + self._write_piggyback_code(buf, TNS_FUNC_LOB_OP) + op_code = TNS_LOB_OP_FREE_TEMP | TNS_LOB_OP_ARRAY + + # temp lob data + buf.write_uint8(1) # pointer + buf.write_ub4(self.conn_impl._temp_lobs_total_size) + buf.write_uint8(0) # dest lob locator + buf.write_ub4(0) + buf.write_ub4(0) # source lob locator + buf.write_ub4(0) + buf.write_uint8(0) # source lob offset + buf.write_uint8(0) # dest lob offset + buf.write_uint8(0) # charset + buf.write_ub4(op_code) + buf.write_uint8(0) # scn + buf.write_ub4(0) # losbscn + buf.write_ub8(0) # lobscnl + buf.write_ub8(0) + buf.write_uint8(0) + + # array lob fields + buf.write_uint8(0) + buf.write_ub4(0) + buf.write_uint8(0) + buf.write_ub4(0) + buf.write_uint8(0) + buf.write_ub4(0) + for i in range(len(lobs_to_close)): + buf.write_bytes(lobs_to_close[i]) + + # reset values + self.conn_impl._temp_lobs_to_close = None + self.conn_impl._temp_lobs_total_size = 0 + + cdef int _write_end_to_end_piggyback(self, WriteBuffer buf) except -1: + """ + Writes the piggyback that informs the server of end-to-end attributes + that are being changed. 
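+        A flag is set for each attribute (action, client identifier, client
+        info, module and DBOP) that has been modified; only modified values
+        are written and the modification indicators are reset afterwards.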
+ """ + cdef: + bytes action_bytes, client_identifier_bytes, client_info_bytes + BaseThinConnImpl conn_impl = self.conn_impl + bytes module_bytes, dbop_bytes + uint32_t flags = 0 + + # determine which flags to send + if conn_impl._action_modified: + flags |= TNS_END_TO_END_ACTION + if conn_impl._client_identifier_modified: + flags |= TNS_END_TO_END_CLIENT_IDENTIFIER + if conn_impl._client_info_modified: + flags |= TNS_END_TO_END_CLIENT_INFO + if conn_impl._module_modified: + flags |= TNS_END_TO_END_MODULE + if conn_impl._dbop_modified: + flags |= TNS_END_TO_END_DBOP + + # write initial packet data + self._write_piggyback_code(buf, TNS_FUNC_SET_END_TO_END_ATTR) + buf.write_uint8(0) # pointer (cidnam) + buf.write_uint8(0) # pointer (cidser) + buf.write_ub4(flags) + + # write client identifier header info + if conn_impl._client_identifier_modified: + buf.write_uint8(1) # pointer (client identifier) + if conn_impl._client_identifier is None: + buf.write_ub4(0) + else: + client_identifier_bytes = conn_impl._client_identifier.encode() + buf.write_ub4(len(client_identifier_bytes)) + else: + buf.write_uint8(0) # pointer (client identifier) + buf.write_ub4(0) # length of client identifier + + # write module header info + if conn_impl._module_modified: + buf.write_uint8(1) # pointer (module) + if conn_impl._module is None: + buf.write_ub4(0) + else: + module_bytes = conn_impl._module.encode() + buf.write_ub4(len(module_bytes)) + else: + buf.write_uint8(0) # pointer (module) + buf.write_ub4(0) # length of module + + # write action header info + if conn_impl._action_modified: + buf.write_uint8(1) # pointer (action) + if conn_impl._action is None: + buf.write_ub4(0) + else: + action_bytes = conn_impl._action.encode() + buf.write_ub4(len(action_bytes)) + else: + buf.write_uint8(0) # pointer (action) + buf.write_ub4(0) # length of action + + # write unsupported bits + buf.write_uint8(0) # pointer (cideci) + buf.write_ub4(0) # length (cideci) + buf.write_uint8(0) # cidcct + buf.write_ub4(0) # cidecs + + # write client info header info + if conn_impl._client_info_modified: + buf.write_uint8(1) # pointer (client info) + if conn_impl._client_info is None: + buf.write_ub4(0) + else: + client_info_bytes = conn_impl._client_info.encode() + buf.write_ub4(len(client_info_bytes)) + else: + buf.write_uint8(0) # pointer (client info) + buf.write_ub4(0) # length of client info + + # write more unsupported bits + buf.write_uint8(0) # pointer (cidkstk) + buf.write_ub4(0) # length (cidkstk) + buf.write_uint8(0) # pointer (cidktgt) + buf.write_ub4(0) # length (cidktgt) + + # write dbop header info + if conn_impl._dbop_modified: + buf.write_uint8(1) # pointer (dbop) + if conn_impl._dbop is None: + buf.write_ub4(0) + else: + dbop_bytes = conn_impl._dbop.encode() + buf.write_ub4(len(dbop_bytes)) + else: + buf.write_uint8(0) # pointer (dbop) + buf.write_ub4(0) # length of dbop + + # write strings + if conn_impl._client_identifier_modified \ + and conn_impl._client_identifier is not None: + buf.write_bytes_with_length(client_identifier_bytes) + if conn_impl._module_modified and conn_impl._module is not None: + buf.write_bytes_with_length(module_bytes) + if conn_impl._action_modified and conn_impl._action is not None: + buf.write_bytes_with_length(action_bytes) + if conn_impl._client_info_modified \ + and conn_impl._client_info is not None: + buf.write_bytes_with_length(client_info_bytes) + if conn_impl._dbop_modified and conn_impl._dbop is not None: + buf.write_bytes_with_length(dbop_bytes) + + # reset flags and values + 
conn_impl._action_modified = False + conn_impl._action = None + conn_impl._client_identifier_modified = False + conn_impl._client_identifier = None + conn_impl._client_info_modified = False + conn_impl._client_info = None + conn_impl._dbop_modified = False + conn_impl._dbop = None + conn_impl._module_modified = False + conn_impl._module = None + + cdef int _write_function_code(self, WriteBuffer buf) except -1: + self._write_piggybacks(buf) + buf.write_uint8(self.message_type) + buf.write_uint8(self.function_code) + buf.write_seq_num() + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_1: + buf.write_ub8(self.token_num) + + cdef int _write_message(self, WriteBuffer buf) except -1: + self._write_function_code(buf) + + cdef int _write_piggyback_code(self, WriteBuffer buf, + uint8_t code) except -1: + """ + Writes the header for piggybacks for the specified function code. + """ + buf.write_uint8(TNS_MSG_TYPE_PIGGYBACK) + buf.write_uint8(code) + buf.write_seq_num() + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_1: + buf.write_ub8(self.token_num) + + cdef int _write_piggybacks(self, WriteBuffer buf) except -1: + """ + Writes all of the piggybacks to the server. + """ + if self.conn_impl.pipeline_mode != 0: + self._write_begin_pipeline_piggyback(buf) + self.conn_impl.pipeline_mode = 0 + if self.conn_impl._current_schema_modified: + self._write_current_schema_piggyback(buf) + if self.conn_impl._statement_cache is not None \ + and self.conn_impl._statement_cache._num_cursors_to_close > 0 \ + and not self.conn_impl._drcp_establish_session: + self._write_close_cursors_piggyback(buf) + if self.conn_impl._action_modified \ + or self.conn_impl._client_identifier_modified \ + or self.conn_impl._client_info_modified \ + or self.conn_impl._dbop_modified \ + or self.conn_impl._module_modified: + self._write_end_to_end_piggyback(buf) + if self.conn_impl._temp_lobs_total_size > 0: + self._write_close_temp_lobs_piggyback(buf) + if self.conn_impl._session_state_desired != 0: + self._write_session_state_piggyback(buf) + + cdef int _write_session_state_piggyback(self, WriteBuffer buf) except -1: + """ + Write the session state piggyback. This is used to let the database + know when the client is beginning and ending a request. The database + uses this information to optimise its resources. 
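+        The desired state is combined with the explicit boundary flag and
+        is reset once the piggyback has been written.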
+ """ + cdef uint8_t state = self.conn_impl._session_state_desired + self._write_piggyback_code(buf, TNS_FUNC_SESSION_STATE) + buf.write_ub8(state | TNS_SESSION_STATE_EXPLICIT_BOUNDARY) + self.conn_impl._session_state_desired = 0 + + cdef int postprocess(self) except -1: + pass + + async def postprocess_async(self): + pass + + cdef int preprocess(self) except -1: + pass + + cdef int process(self, ReadBuffer buf) except -1: + cdef uint8_t message_type + self.end_of_response = False + self.flush_out_binds = False + while not self.end_of_response: + buf.save_point() + buf.read_ub1(&message_type) + self._process_message(buf, message_type) + + cdef int send(self, WriteBuffer buf) except -1: + buf.start_request(TNS_PACKET_TYPE_DATA) + self._write_message(buf) + if self.pipeline_result_impl is not None: + buf._data_flags |= TNS_DATA_FLAGS_END_OF_REQUEST + buf.end_request() + + +cdef class MessageWithData(Message): + cdef: + BaseThinDbObjectTypeCache type_cache + BaseThinCursorImpl cursor_impl + array.array bit_vector_buf + const char_type *bit_vector + bint arraydmlrowcounts + uint32_t row_index + uint32_t num_execs + uint16_t num_columns_sent + list dmlrowcounts + bint batcherrors + list out_var_impls + bint in_fetch + bint parse_only + object cursor + uint32_t offset + + cdef int _adjust_metadata(self, ThinVarImpl prev_var_impl, + OracleMetadata metadata) except -1: + """ + When a query is re-executed but the data type of a column has changed + the server returns the type information of the new type. However, if + the data type returned now is a CLOB or BLOB and the data type + previously returned was CHAR/VARCHAR/RAW (or the equivalent long + types), then the server returns the data as LONG (RAW), similarly to + what happens when a define is done to return CLOB/BLOB as string/bytes. + Detect these situations and adjust the fetch type appropriately. + """ + cdef uint8_t type_num, prev_type_num, csfrm + type_num = metadata.dbtype._ora_type_num + prev_type_num = prev_var_impl._fetch_metadata.dbtype._ora_type_num + if type_num == ORA_TYPE_NUM_CLOB \ + and prev_type_num in (ORA_TYPE_NUM_CHAR, + ORA_TYPE_NUM_LONG, + ORA_TYPE_NUM_VARCHAR): + type_num = ORA_TYPE_NUM_LONG + csfrm = prev_var_impl._fetch_metadata.dbtype._csfrm + metadata.dbtype = DbType._from_ora_type_and_csfrm(type_num, csfrm) + elif type_num == ORA_TYPE_NUM_BLOB \ + and prev_type_num in (ORA_TYPE_NUM_RAW, ORA_TYPE_NUM_LONG_RAW): + type_num = ORA_TYPE_NUM_LONG_RAW + metadata.dbtype = DbType._from_ora_type_and_csfrm(type_num, 0) + + cdef object _create_cursor_from_describe(self, ReadBuffer buf, + object cursor=None): + cdef BaseThinCursorImpl cursor_impl + if cursor is None: + cursor = self.cursor.connection.cursor() + cursor_impl = cursor._impl + cursor_impl._statement = self.conn_impl._get_statement() + cursor_impl._more_rows_to_fetch = True + cursor_impl._statement._is_query = True + self._process_describe_info(buf, cursor, cursor_impl) + return cursor + + cdef int _get_bit_vector(self, ReadBuffer buf, + ssize_t num_bytes) except -1: + """ + Gets the bit vector from the buffer and stores it for later use by the + row processing code. Since it is possible that the packet buffer may be + overwritten by subsequent packet retrieval, the bit vector must be + copied. An array is stored and a pointer to the underlying memory is + used for performance reasons. 
+ """ + cdef const char_type *ptr = buf.read_raw_bytes(num_bytes) + if self.bit_vector_buf is None: + self.bit_vector_buf = array.array('B') + array.resize(self.bit_vector_buf, num_bytes) + self.bit_vector = self.bit_vector_buf.data.as_chars + memcpy( self.bit_vector, ptr, num_bytes) + + cdef bint _is_duplicate_data(self, uint32_t column_num): + """ + Returns a boolean indicating if the given column contains data + duplicated from the previous row. When duplicate data exists, the + server sends a bit vector. Bits that are set indicate that data is sent + with the row data; bits that are not set indicate that data should be + duplicated from the previous row. + """ + cdef int byte_num, bit_num + if self.bit_vector == NULL: + return False + byte_num = column_num // 8 + bit_num = column_num % 8 + return self.bit_vector[byte_num] & (1 << bit_num) == 0 + + cdef int _write_bind_params(self, WriteBuffer buf, list params) except -1: + cdef: + bint has_data = False + list bind_var_impls + BindInfo bind_info + bind_var_impls = [] + for bind_info in params: + if not bind_info._is_return_bind: + has_data = True + bind_var_impls.append(bind_info._bind_var_impl) + self._write_column_metadata(buf, bind_var_impls) + + # write parameter values unless statement contains only returning binds + if has_data: + for i in range(self.num_execs): + buf.write_uint8(TNS_MSG_TYPE_ROW_DATA) + self._write_bind_params_row(buf, params, i) + + cdef int _preprocess_query(self) except -1: + """ + Actions that takes place before query data is processed. + """ + cdef: + BaseThinCursorImpl cursor_impl = self.cursor_impl + Statement statement = cursor_impl._statement + object type_handler, conn + ThinVarImpl var_impl + ssize_t i, num_vals + bint uses_metadata + + # set values to indicate the start of a new fetch operation + self.in_fetch = True + cursor_impl._more_rows_to_fetch = True + cursor_impl._buffer_rowcount = cursor_impl._buffer_index = 0 + self.row_index = 0 + + # if no fetch variables exist, nothing further to do at this point; the + # processing that follows will take the metadata returned by the server + # and use it to create new fetch variables + if statement._fetch_var_impls is None: + return 0 + + # if the type handler set on the cursor or connection does not match + # the one that was used during the last fetch, rebuild the fetch + # variables in order to take the new type handler into account + conn = self.cursor.connection + type_handler = cursor_impl._get_output_type_handler(&uses_metadata) + if type_handler is not statement._last_output_type_handler: + for i, var_impl in enumerate(cursor_impl.fetch_var_impls): + cursor_impl._create_fetch_var(conn, self.cursor, type_handler, + uses_metadata, i, + var_impl._fetch_metadata) + statement._last_output_type_handler = type_handler + + # Create OracleArrowArray if fetching arrow is enabled + if cursor_impl.fetching_arrow: + cursor_impl._create_arrow_arrays() + + # the list of output variables is equivalent to the fetch variables + self.out_var_impls = cursor_impl.fetch_var_impls + + cdef int _process_bit_vector(self, ReadBuffer buf) except -1: + cdef ssize_t num_bytes + buf.read_ub2(&self.num_columns_sent) + num_bytes = self.cursor_impl._num_columns // 8 + if self.cursor_impl._num_columns % 8 > 0: + num_bytes += 1 + self._get_bit_vector(buf, num_bytes) + + cdef object _process_column_data(self, ReadBuffer buf, + ThinVarImpl var_impl, uint32_t pos): + cdef: + uint8_t num_bytes, ora_type_num, csfrm + ThinDbObjectTypeImpl typ_impl + BaseThinCursorImpl cursor_impl + 
object column_value = None + ThinDbObjectImpl obj_impl + int32_t actual_num_bytes + OracleMetadata metadata + OracleData data + Rowid rowid + if self.in_fetch: + metadata = var_impl._fetch_metadata + else: + metadata = var_impl.metadata + ora_type_num = metadata.dbtype._ora_type_num + csfrm = metadata.dbtype._csfrm + if var_impl.bypass_decode: + ora_type_num = ORA_TYPE_NUM_RAW + if metadata.buffer_size == 0 and self.in_fetch \ + and ora_type_num not in (ORA_TYPE_NUM_LONG, + ORA_TYPE_NUM_LONG_RAW, + ORA_TYPE_NUM_UROWID): + column_value = None # column is null by describe + elif ora_type_num == ORA_TYPE_NUM_ROWID: + if not self.in_fetch: + column_value = buf.read_str(CS_FORM_IMPLICIT) + else: + buf.read_ub1(&num_bytes) + if num_bytes == 0 or num_bytes == TNS_NULL_LENGTH_INDICATOR: + column_value = None + else: + buf.read_rowid(&rowid) + column_value = _encode_rowid(&rowid) + elif ora_type_num == ORA_TYPE_NUM_UROWID: + if not self.in_fetch: + column_value = buf.read_str(CS_FORM_IMPLICIT) + else: + column_value = buf.read_urowid() + elif ora_type_num == ORA_TYPE_NUM_CURSOR: + buf.skip_ub1() # length (fixed value) + if not self.in_fetch: + column_value = var_impl._values[pos] + column_value = self._create_cursor_from_describe(buf, column_value) + cursor_impl = column_value._impl + buf.read_ub2(&cursor_impl._statement._cursor_id) + elif ora_type_num in (ORA_TYPE_NUM_CLOB, + ORA_TYPE_NUM_BLOB, + ORA_TYPE_NUM_BFILE): + column_value = buf.read_lob_with_length(self.conn_impl, + metadata.dbtype) + elif ora_type_num == ORA_TYPE_NUM_JSON: + column_value = buf.read_oson() + elif ora_type_num == ORA_TYPE_NUM_VECTOR: + column_value = buf.read_vector() + elif ora_type_num == ORA_TYPE_NUM_OBJECT: + typ_impl = metadata.objtype + if typ_impl is None: + column_value = buf.read_xmltype(self.conn_impl) + else: + obj_impl = buf.read_dbobject(typ_impl) + if obj_impl is not None: + if not self.in_fetch: + column_value = var_impl._values[pos] + if column_value is not None: + column_value._impl = obj_impl + else: + column_value = PY_TYPE_DB_OBJECT._from_impl(obj_impl) + else: + buf.read_oracle_data(metadata, &data, from_dbobject=False) + if metadata.dbtype._csfrm == CS_FORM_NCHAR: + buf._caps._check_ncharset_id() + if self.cursor_impl.fetching_arrow: + convert_oracle_data_to_arrow( + metadata, var_impl.metadata, &data, var_impl._arrow_array + ) + else: + column_value = convert_oracle_data_to_python( + metadata, var_impl.metadata, &data, + var_impl._encoding_errors, from_dbobject=False + ) + if not self.in_fetch: + buf.read_sb4(&actual_num_bytes) + if actual_num_bytes < 0 and ora_type_num == ORA_TYPE_NUM_BOOLEAN: + column_value = None + elif actual_num_bytes != 0 and column_value is not None: + unit_type = "bytes" if isinstance(column_value, bytes) \ + else "characters" + errors._raise_err(errors.ERR_COLUMN_TRUNCATED, + col_value_len=len(column_value), + unit=unit_type, actual_len=actual_num_bytes) + elif ora_type_num == ORA_TYPE_NUM_LONG \ + or ora_type_num == ORA_TYPE_NUM_LONG_RAW: + buf.skip_sb4() # null indicator + buf.skip_ub4() # return code + return column_value + + cdef OracleMetadata _process_column_info(self, ReadBuffer buf, + BaseThinCursorImpl cursor_impl): + cdef: + uint32_t num_bytes, uds_flags, num_annotations, i + ThinDbObjectTypeImpl typ_impl + str schema, name, key, value + uint8_t ora_type_num, csfrm + OracleMetadata metadata + uint8_t nulls_allowed + int cache_num + bytes oid + buf.read_ub1(&ora_type_num) + metadata = OracleMetadata.__new__(OracleMetadata) + buf.skip_ub1() # flags + 
buf.read_sb1(&metadata.precision) + buf.read_sb1(&metadata.scale) + buf.read_ub4(&metadata.buffer_size) + buf.skip_ub4() # max number of array elements + buf.skip_ub8() # cont flags + buf.read_ub4(&num_bytes) # OID + if num_bytes > 0: + oid = buf.read_bytes() + buf.skip_ub2() # version + buf.skip_ub2() # character set id + buf.read_ub1(&csfrm) # character set form + metadata.dbtype = DbType._from_ora_type_and_csfrm(ora_type_num, csfrm) + buf.read_ub4(&metadata.max_size) + if ora_type_num == ORA_TYPE_NUM_RAW: + metadata.max_size = metadata.buffer_size + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: + buf.skip_ub4() # oaccolid + buf.read_ub1(&nulls_allowed) + metadata.nulls_allowed = nulls_allowed + buf.skip_ub1() # v7 length of name + buf.read_ub4(&num_bytes) + if num_bytes > 0: + metadata.name = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + schema = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + name = buf.read_str(CS_FORM_IMPLICIT) + buf.skip_ub2() # column position + buf.read_ub4(&uds_flags) + metadata.is_json = uds_flags & TNS_UDS_FLAGS_IS_JSON + metadata.is_oson = uds_flags & TNS_UDS_FLAGS_IS_OSON + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1: + buf.read_ub4(&num_bytes) + if num_bytes > 0: + metadata.domain_schema = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + metadata.domain_name = buf.read_str(CS_FORM_IMPLICIT) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_3: + buf.read_ub4(&num_annotations) + if num_annotations > 0: + buf.skip_ub1() + metadata.annotations = {} + buf.read_ub4(&num_annotations) + buf.skip_ub1() + for i in range(num_annotations): + buf.skip_ub4() # length of key + key = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + value = buf.read_str(CS_FORM_IMPLICIT) + else: + value = "" + metadata.annotations[key] = value + buf.skip_ub4() # flags + buf.skip_ub4() # flags + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_4: + buf.read_ub4(&metadata.vector_dimensions) + buf.read_ub1(&metadata.vector_format) + buf.read_ub1(&metadata.vector_flags) + if ora_type_num == ORA_TYPE_NUM_OBJECT: + if self.type_cache is None: + cache_num = self.conn_impl._dbobject_type_cache_num + self.type_cache = get_dbobject_type_cache(cache_num) + typ_impl = self.type_cache.get_type_for_info(oid, schema, None, + name) + if typ_impl.is_xml_type: + metadata.dbtype = DB_TYPE_XMLTYPE + else: + metadata.objtype = typ_impl + return metadata + + cdef int _process_describe_info(self, ReadBuffer buf, + object cursor, + BaseThinCursorImpl cursor_impl) except -1: + cdef: + Statement stmt = cursor_impl._statement + list prev_fetch_var_impls + object type_handler, conn + OracleMetadata metadata + uint32_t num_bytes, i + bint uses_metadata + str message + buf.skip_ub4() # max row size + buf.read_ub4(&cursor_impl._num_columns) + prev_fetch_var_impls = stmt._fetch_var_impls + cursor_impl._init_fetch_vars(cursor_impl._num_columns) + if cursor_impl._num_columns > 0: + buf.skip_ub1() + type_handler = cursor_impl._get_output_type_handler(&uses_metadata) + conn = self.cursor.connection + for i in range(cursor_impl._num_columns): + metadata = self._process_column_info(buf, cursor_impl) + if prev_fetch_var_impls is not None \ + and i < len(prev_fetch_var_impls): + self._adjust_metadata(prev_fetch_var_impls[i], metadata) + if metadata.dbtype._ora_type_num in (ORA_TYPE_NUM_BLOB, + ORA_TYPE_NUM_CLOB, + ORA_TYPE_NUM_JSON, + 
ORA_TYPE_NUM_VECTOR): + stmt._requires_define = True + stmt._no_prefetch = True + cursor_impl._create_fetch_var(conn, self.cursor, type_handler, + uses_metadata, i, metadata) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.skip_raw_bytes_chunked() # current date + buf.skip_ub4() # dcbflag + buf.skip_ub4() # dcbmdbz + buf.skip_ub4() # dcbmnpr + buf.skip_ub4() # dcbmxpr + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.skip_raw_bytes_chunked() # dcbqcky + stmt._fetch_metadata = cursor_impl.fetch_metadata + stmt._fetch_vars = cursor_impl.fetch_vars + stmt._fetch_var_impls = cursor_impl.fetch_var_impls + stmt._num_columns = cursor_impl._num_columns + stmt._last_output_type_handler = type_handler + + cdef int _process_error_info(self, ReadBuffer buf) except -1: + cdef: + BaseThinCursorImpl cursor_impl = self.cursor_impl + BaseThinConnImpl conn_impl = self.conn_impl + object exc_type + Message._process_error_info(self, buf) + if self.error_info.cursor_id != 0: + cursor_impl._statement._cursor_id = self.error_info.cursor_id + if not cursor_impl._statement._is_plsql and not self.in_fetch: + cursor_impl.rowcount = self.error_info.rowcount + elif self.in_fetch and self.row_index > 0: + cursor_impl._statement._requires_define = False + cursor_impl._lastrowid = self.error_info.rowid + cursor_impl._batcherrors = self.error_info.batcherrors + if self.batcherrors and cursor_impl._batcherrors is None: + cursor_impl._batcherrors = [] + if self.error_info.num == TNS_ERR_NO_DATA_FOUND and self.in_fetch: + self.error_info.num = 0 + cursor_impl._more_rows_to_fetch = False + cursor_impl._last_row_index = 0 + cursor_impl._statement._requires_define = False + self.error_occurred = False + elif self.error_info.num == TNS_ERR_ARRAY_DML_ERRORS: + self.error_info.num = 0 + self.error_occurred = False + elif self.retry: + self.retry = False + elif cursor_impl._statement._is_query \ + and self.error_info.num in (TNS_ERR_VAR_NOT_IN_SELECT_LIST, + TNS_ERR_INCONSISTENT_DATA_TYPES): + self.retry = True + conn_impl._statement_cache.clear_cursor(cursor_impl._statement) + elif self.error_info.num != 0 and self.error_info.cursor_id != 0: + if self.error_info.num not in errors.ERR_INTEGRITY_ERROR_CODES: + conn_impl._statement_cache.clear_cursor(cursor_impl._statement) + + cdef int _process_implicit_result(self, ReadBuffer buf) except -1: + cdef: + BaseThinCursorImpl child_cursor_impl + uint32_t i, num_results + object child_cursor + uint8_t num_bytes + self.cursor_impl._implicit_resultsets = [] + buf.read_ub4(&num_results) + for i in range(num_results): + buf.read_ub1(&num_bytes) + buf.skip_raw_bytes(num_bytes) + child_cursor = self._create_cursor_from_describe(buf) + child_cursor_impl = child_cursor._impl + buf.read_ub2(&child_cursor_impl._statement._cursor_id) + self.cursor_impl._implicit_resultsets.append(child_cursor) + + cdef int _process_io_vector(self, ReadBuffer buf) except -1: + """ + An I/O vector is sent by the database in response to a PL/SQL execute. + It indicates whether binds are IN only, IN/OUT or OUT only. 
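+
+        As an illustrative example (not taken from a real trace), the bind
+        count is carried in two wire fields and reassembled below as
+        num_binds = temp32 * 256 + temp16, so a statement with 300 bind
+        variables would arrive as temp32 = 1 and temp16 = 44
+        (1 * 256 + 44 = 300).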
+ """ + cdef: + uint16_t i, num_bytes, temp16 + uint32_t temp32, num_binds + BindInfo bind_info + buf.skip_ub1() # flag + buf.read_ub2(&temp16) # num requests + buf.read_ub4(&temp32) # num iters + num_binds = temp32 * 256 + temp16 + buf.skip_ub4() # num iters this time + buf.skip_ub2() # uac buffer length + buf.read_ub2(&num_bytes) # bit vector for fast fetch + if num_bytes > 0: + buf.skip_raw_bytes(num_bytes) + buf.read_ub2(&num_bytes) # rowid + if num_bytes > 0: + buf.skip_raw_bytes(num_bytes) + self.out_var_impls = [] + for i in range(num_binds): # bind directions + bind_info = self.cursor_impl._statement._bind_info_list[i] + buf.read_ub1(&bind_info.bind_dir) + if bind_info.bind_dir == TNS_BIND_DIR_INPUT: + continue + self.out_var_impls.append(bind_info._bind_var_impl) + + cdef int _process_message(self, ReadBuffer buf, + uint8_t message_type) except -1: + if message_type == TNS_MSG_TYPE_ROW_HEADER: + self._process_row_header(buf) + elif message_type == TNS_MSG_TYPE_ROW_DATA: + self._process_row_data(buf) + elif message_type == TNS_MSG_TYPE_FLUSH_OUT_BINDS: + self.flush_out_binds = True + self.end_of_response = True + elif message_type == TNS_MSG_TYPE_DESCRIBE_INFO: + buf.skip_raw_bytes_chunked() + self._process_describe_info(buf, self.cursor, self.cursor_impl) + self.out_var_impls = self.cursor_impl.fetch_var_impls + elif message_type == TNS_MSG_TYPE_ERROR: + self._process_error_info(buf) + elif message_type == TNS_MSG_TYPE_BIT_VECTOR: + self._process_bit_vector(buf) + elif message_type == TNS_MSG_TYPE_IO_VECTOR: + self._process_io_vector(buf) + elif message_type == TNS_MSG_TYPE_IMPLICIT_RESULTSET: + self._process_implicit_result(buf) + else: + Message._process_message(self, buf, message_type) + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + cdef: + uint16_t keyword_num, num_params, num_bytes + uint32_t num_rows, i + uint64_t rowcount + bytes key_value + list rowcounts + buf.read_ub2(&num_params) # al8o4l (ignored) + for i in range(num_params): + buf.skip_ub4() + buf.read_ub2(&num_bytes) # al8txl (ignored) + if num_bytes > 0: + buf.skip_raw_bytes(num_bytes) + buf.read_ub2(&num_params) # num key/value pairs + for i in range(num_params): + buf.read_ub2(&num_bytes) # key + if num_bytes > 0: + key_value = buf.read_bytes() + buf.read_ub2(&num_bytes) # value + if num_bytes > 0: + buf.skip_raw_bytes_chunked() + buf.read_ub2(&keyword_num) # keyword num + if keyword_num == TNS_KEYWORD_NUM_CURRENT_SCHEMA: + self.conn_impl._current_schema = key_value.decode() + elif keyword_num == TNS_KEYWORD_NUM_EDITION: + self.conn_impl._edition = key_value.decode() + buf.read_ub2(&num_bytes) # registration + if num_bytes > 0: + buf.skip_raw_bytes(num_bytes) + if self.arraydmlrowcounts: + buf.read_ub4(&num_rows) + rowcounts = self.cursor_impl._dmlrowcounts = [] + for i in range(num_rows): + buf.read_ub8(&rowcount) + rowcounts.append(rowcount) + + cdef int _process_row_data(self, ReadBuffer buf) except -1: + cdef: + uint32_t num_rows, pos + ThinVarImpl var_impl + ssize_t i, j + object value + list values + for i, var_impl in enumerate(self.out_var_impls): + if var_impl.is_array: + buf.read_ub4(&var_impl.num_elements_in_array) + for pos in range(var_impl.num_elements_in_array): + value = self._process_column_data(buf, var_impl, pos) + var_impl._values[pos] = value + elif self.cursor_impl._statement._is_returning: + buf.read_ub4(&num_rows) + values = [None] * num_rows + for j in range(num_rows): + values[j] = self._process_column_data(buf, var_impl, j) + var_impl._values[self.row_index] = 
values + var_impl._has_returned_data = True + elif self.cursor_impl.fetching_arrow: + if self._is_duplicate_data(i): + var_impl._arrow_array.append_last_value( + var_impl._last_arrow_array + ) + else: + self._process_column_data(buf, var_impl, self.row_index) + var_impl._last_arrow_array = None + elif self._is_duplicate_data(i): + if self.row_index == 0 and var_impl.outconverter is not None: + value = var_impl._last_raw_value + else: + value = var_impl._values[self.cursor_impl._last_row_index] + var_impl._values[self.row_index] = value + else: + value = self._process_column_data(buf, var_impl, + self.row_index) + var_impl._values[self.row_index] = value + self.row_index += 1 + if self.in_fetch: + self.cursor_impl._last_row_index = self.row_index - 1 + self.cursor_impl._buffer_rowcount = self.row_index + self.bit_vector = NULL + + cdef int _process_row_header(self, ReadBuffer buf) except -1: + cdef uint32_t num_bytes + buf.skip_ub1() # flags + buf.skip_ub2() # num requests + buf.skip_ub4() # iteration number + buf.skip_ub4() # num iters + buf.skip_ub2() # buffer length + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.skip_ub1() # skip repeated length + self._get_bit_vector(buf, num_bytes) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.skip_raw_bytes_chunked() # rxhrid + + cdef int _write_column_metadata(self, WriteBuffer buf, + list bind_var_impls) except -1: + cdef: + uint32_t buffer_size, cont_flag, lob_prefetch_length + ThinDbObjectTypeImpl typ_impl + uint8_t ora_type_num, flag + OracleMetadata metadata + ThinVarImpl var_impl + for var_impl in bind_var_impls: + metadata = var_impl.metadata + ora_type_num = metadata.dbtype._ora_type_num + buffer_size = metadata.buffer_size + if ora_type_num in (ORA_TYPE_NUM_ROWID, ORA_TYPE_NUM_UROWID): + ora_type_num = ORA_TYPE_NUM_VARCHAR + buffer_size = TNS_MAX_UROWID_LENGTH + flag = TNS_BIND_USE_INDICATORS + if var_impl.is_array: + flag |= TNS_BIND_ARRAY + cont_flag = 0 + lob_prefetch_length = 0 + if ora_type_num in (ORA_TYPE_NUM_BLOB, + ORA_TYPE_NUM_CLOB): + cont_flag = TNS_LOB_PREFETCH_FLAG + elif ora_type_num == ORA_TYPE_NUM_JSON: + cont_flag = TNS_LOB_PREFETCH_FLAG + buffer_size = lob_prefetch_length = TNS_JSON_MAX_LENGTH + elif ora_type_num == ORA_TYPE_NUM_VECTOR: + cont_flag = TNS_LOB_PREFETCH_FLAG + buffer_size = lob_prefetch_length = TNS_VECTOR_MAX_LENGTH + buf.write_uint8(ora_type_num) + buf.write_uint8(flag) + # precision and scale are always written as zero as the server + # expects that and complains if any other value is sent! 
+ buf.write_uint8(0) + buf.write_uint8(0) + buf.write_ub4(buffer_size) + if var_impl.is_array: + buf.write_ub4(var_impl.num_elements) + else: + buf.write_ub4(0) # max num elements + buf.write_ub8(cont_flag) + if metadata.objtype is not None: + typ_impl = metadata.objtype + buf.write_ub4(len(typ_impl.oid)) + buf.write_bytes_with_length(typ_impl.oid) + buf.write_ub4(typ_impl.version) + else: + buf.write_ub4(0) # OID + buf.write_ub2(0) # version + if metadata.dbtype._csfrm != 0: + buf.write_ub2(TNS_CHARSET_UTF8) + else: + buf.write_ub2(0) + buf.write_uint8(metadata.dbtype._csfrm) + buf.write_ub4(lob_prefetch_length) # max chars (LOB prefetch) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: + buf.write_ub4(0) # oaccolid + + cdef int _write_bind_params_column(self, WriteBuffer buf, + OracleMetadata metadata, + object value) except -1: + cdef: + uint8_t ora_type_num = metadata.dbtype._ora_type_num + ThinDbObjectTypeImpl typ_impl + BaseThinCursorImpl cursor_impl + BaseThinLobImpl lob_impl + uint32_t num_bytes + bytes temp_bytes + if value is None: + if ora_type_num == ORA_TYPE_NUM_BOOLEAN: + buf.write_uint8(TNS_ESCAPE_CHAR) + buf.write_uint8(1) + elif ora_type_num == ORA_TYPE_NUM_OBJECT: + buf.write_ub4(0) # TOID + buf.write_ub4(0) # OID + buf.write_ub4(0) # snapshot + buf.write_ub2(0) # version + buf.write_ub4(0) # packed data length + buf.write_ub4(TNS_OBJ_TOP_LEVEL) # flags + else: + buf.write_uint8(0) + elif ora_type_num == ORA_TYPE_NUM_VARCHAR \ + or ora_type_num == ORA_TYPE_NUM_CHAR \ + or ora_type_num == ORA_TYPE_NUM_LONG: + if metadata.dbtype._csfrm == CS_FORM_IMPLICIT: + temp_bytes = ( value).encode() + else: + buf._caps._check_ncharset_id() + temp_bytes = ( value).encode(ENCODING_UTF16) + buf.write_bytes_with_length(temp_bytes) + elif ora_type_num == ORA_TYPE_NUM_RAW \ + or ora_type_num == ORA_TYPE_NUM_LONG_RAW: + buf.write_bytes_with_length(value) + elif ora_type_num == ORA_TYPE_NUM_NUMBER \ + or ora_type_num == ORA_TYPE_NUM_BINARY_INTEGER: + if isinstance(value, bool): + temp_bytes = b'1' if value is True else b'0' + else: + temp_bytes = ( cpython.PyObject_Str(value)).encode() + buf.write_oracle_number(temp_bytes) + elif ora_type_num == ORA_TYPE_NUM_DATE \ + or ora_type_num == ORA_TYPE_NUM_TIMESTAMP \ + or ora_type_num == ORA_TYPE_NUM_TIMESTAMP_TZ \ + or ora_type_num == ORA_TYPE_NUM_TIMESTAMP_LTZ: + buf.write_oracle_date(value, metadata.dbtype._buffer_size_factor) + elif ora_type_num == ORA_TYPE_NUM_BINARY_DOUBLE: + buf.write_binary_double(value) + elif ora_type_num == ORA_TYPE_NUM_BINARY_FLOAT: + buf.write_binary_float(value) + elif ora_type_num == ORA_TYPE_NUM_CURSOR: + cursor_impl = value._impl + if cursor_impl is None: + errors._raise_err(errors.ERR_CURSOR_NOT_OPEN) + if cursor_impl._statement is None: + cursor_impl._statement = self.conn_impl._get_statement() + if cursor_impl._statement._cursor_id == 0: + buf.write_uint8(1) + buf.write_uint8(0) + else: + buf.write_ub4(1) + buf.write_ub4(cursor_impl._statement._cursor_id) + cursor_impl.statement = None + elif ora_type_num == ORA_TYPE_NUM_BOOLEAN: + buf.write_bool(value) + elif ora_type_num == ORA_TYPE_NUM_INTERVAL_DS: + buf.write_interval_ds(value) + elif ora_type_num == ORA_TYPE_NUM_INTERVAL_YM: + buf.write_interval_ym(value) + elif ora_type_num in ( + ORA_TYPE_NUM_BLOB, + ORA_TYPE_NUM_CLOB, + ORA_TYPE_NUM_BFILE + ): + buf.write_lob_with_length(value._impl) + elif ora_type_num in (ORA_TYPE_NUM_ROWID, ORA_TYPE_NUM_UROWID): + temp_bytes = ( value).encode() + buf.write_bytes_with_length(temp_bytes) + elif 
ora_type_num == ORA_TYPE_NUM_OBJECT: + buf.write_dbobject(value._impl) + elif ora_type_num == ORA_TYPE_NUM_JSON: + buf.write_oson(value, self.conn_impl._oson_max_fname_size) + elif ora_type_num == ORA_TYPE_NUM_VECTOR: + buf.write_vector(value) + else: + errors._raise_err(errors.ERR_DB_TYPE_NOT_SUPPORTED, + name=metadata.dbtype.name) + + cdef int _write_bind_params_row(self, WriteBuffer buf, list params, + uint32_t pos) except -1: + """ + Write a row of bind parameters. Note that non-LONG values are written + first followed by any LONG values. + """ + cdef: + uint32_t i, num_elements, offset = self.offset + bint found_long = False + OracleMetadata metadata + ThinVarImpl var_impl + BindInfo bind_info + for i, bind_info in enumerate(params): + if bind_info._is_return_bind: + continue + var_impl = bind_info._bind_var_impl + metadata = var_impl.metadata + if var_impl.is_array: + num_elements = var_impl.num_elements_in_array + buf.write_ub4(num_elements) + for value in var_impl._values[:num_elements]: + self._write_bind_params_column(buf, metadata, value) + else: + if not self.cursor_impl._statement._is_plsql \ + and metadata.buffer_size > buf._caps.max_string_size: + found_long = True + continue + self._write_bind_params_column(buf, metadata, + var_impl._values[pos + offset]) + if found_long: + for i, bind_info in enumerate(params): + if bind_info._is_return_bind: + continue + var_impl = bind_info._bind_var_impl + metadata = var_impl.metadata + if metadata.buffer_size <= buf._caps.max_string_size: + continue + self._write_bind_params_column(buf, metadata, + var_impl._values[pos + offset]) + + cdef int postprocess(self) except -1: + """ + Run any variable out converter functions on all non-null values that + were returned in the current database response. This must be done + independently since the out converter function may itself invoke a + database round-trip. + """ + cdef: + uint32_t i, j, num_elements + object value, element_value + ThinVarImpl var_impl + if self.out_var_impls is None: + return 0 + for var_impl in self.out_var_impls: + if var_impl is None or var_impl.outconverter is None: + continue + if not self.cursor_impl.fetching_arrow: + var_impl._last_raw_value = \ + var_impl._values[self.cursor_impl._last_row_index] + if var_impl.is_array: + num_elements = var_impl.num_elements_in_array + else: + num_elements = self.row_index + for i in range(num_elements): + value = var_impl._values[i] + if value is None and not var_impl.convert_nulls: + continue + if isinstance(value, list): + for j, element_value in enumerate(value): + if element_value is None: + continue + value[j] = var_impl.outconverter(element_value) + else: + var_impl._values[i] = var_impl.outconverter(value) + + async def postprocess_async(self): + """ + Run any variable out converter functions on all non-null values that + were returned in the current database response. This must be done + independently since the out converter function may itself invoke a + database round-trip. 
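+
+        A hypothetical case: a fetch variable defined with an outconverter
+        such as lambda lob: lob.read() on an asyncio connection produces an
+        awaitable for every LOB locator, and awaiting it here issues another
+        request to the database, which could not be done while the response
+        was still being parsed.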
+ """ + cdef: + object value, element_value, fn + uint32_t i, j, num_elements + ThinVarImpl var_impl + if self.out_var_impls is None: + return 0 + for var_impl in self.out_var_impls: + if var_impl is None or var_impl.outconverter is None: + continue + if not self.cursor_impl.fetching_arrow: + var_impl._last_raw_value = \ + var_impl._values[self.cursor_impl._last_row_index] + if var_impl.is_array: + num_elements = var_impl.num_elements_in_array + else: + num_elements = self.row_index + fn = var_impl.outconverter + for i in range(num_elements): + value = var_impl._values[i] + if value is None and not var_impl.convert_nulls: + continue + if isinstance(value, list): + for j, element_value in enumerate(value): + if element_value is None: + continue + element_value = fn(element_value) + if inspect.isawaitable(element_value): + element_value = await element_value + value[j] = element_value + else: + value = fn(value) + if inspect.isawaitable(value): + value = await value + var_impl._values[i] = value + + cdef int preprocess(self) except -1: + cdef: + Statement statement = self.cursor_impl._statement + BindInfo bind_info + if statement._is_returning and not self.parse_only: + self.out_var_impls = [] + for bind_info in statement._bind_info_list: + if not bind_info._is_return_bind: + continue + self.out_var_impls.append(bind_info._bind_var_impl) + elif statement._is_query: + self._preprocess_query() diff --git a/src/oracledb/impl/thin/messages/commit.pyx b/src/oracledb/impl/thin/messages/commit.pyx new file mode 100644 index 00000000..2b2bce78 --- /dev/null +++ b/src/oracledb/impl/thin/messages/commit.pyx @@ -0,0 +1,40 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# commit.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for committing a transaction +# (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class CommitMessage(Message): + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. 
+ """ + self.function_code = TNS_FUNC_COMMIT diff --git a/src/oracledb/impl/thin/messages/connect.pyx b/src/oracledb/impl/thin/messages/connect.pyx new file mode 100644 index 00000000..ee4cead9 --- /dev/null +++ b/src/oracledb/impl/thin/messages/connect.pyx @@ -0,0 +1,140 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# connect.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for the initial connection request +# (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class ConnectMessage(Message): + cdef: + bytes connect_string_bytes + uint16_t connect_string_len, redirect_data_len + bint read_redirect_data_len + Description description + uint8_t packet_flags + str redirect_data + str host + int port + + cdef int process(self, ReadBuffer buf) except -1: + cdef: + uint16_t protocol_version, protocol_options + const char_type *redirect_data + uint32_t flags2 = 0 + uint8_t flags1 + bytes db_uuid + if buf._current_packet.packet_type == TNS_PACKET_TYPE_REDIRECT: + if not self.read_redirect_data_len: + buf.read_uint16be(&self.redirect_data_len) + self.read_redirect_data_len = True + buf.wait_for_packets_sync() + redirect_data = buf.read_raw_bytes(self.redirect_data_len) + if self.redirect_data_len > 0: + self.redirect_data = \ + redirect_data[:self.redirect_data_len].decode() + self.read_redirect_data_len = False + elif buf._current_packet.packet_type == TNS_PACKET_TYPE_ACCEPT: + buf.read_uint16be(&protocol_version) + # check if the protocol version supported by the database is high + # enough; if not, reject the connection immediately + if protocol_version < TNS_VERSION_MIN_ACCEPTED: + errors._raise_err(errors.ERR_SERVER_VERSION_NOT_SUPPORTED) + buf.read_uint16be(&protocol_options) + buf.skip_raw_bytes(10) + buf.read_ub1(&flags1) + if flags1 & TNS_NSI_NA_REQUIRED: + feature = "Native Network Encryption and Data Integrity" + errors._raise_not_supported(feature) + buf.skip_raw_bytes(9) + buf.read_uint32be(&buf._caps.sdu) + if protocol_version >= TNS_VERSION_MIN_OOB_CHECK: + buf.skip_raw_bytes(5) + buf.read_uint32be(&flags2) + buf._caps._adjust_for_protocol(protocol_version, protocol_options, + flags2) + buf._transport._full_packet_size = True + elif buf._current_packet.packet_type == 
TNS_PACKET_TYPE_REFUSE: + response = self.error_info.message + error_code = "unknown" + error_code_int = 0 + if response is not None: + pos = response.find("(ERR=") + if pos > 0: + end_pos = response.find(")", pos) + if end_pos > 0: + error_code = response[pos + 5:end_pos] + error_code_int = int(error_code) + if error_code_int == 0: + errors._raise_err(errors.ERR_UNEXPECTED_REFUSE) + if error_code_int == TNS_ERR_INVALID_SERVICE_NAME: + errors._raise_err(errors.ERR_INVALID_SERVICE_NAME, + service_name=self.description.service_name, + host=self.host, port=self.port) + elif error_code_int == TNS_ERR_INVALID_SID: + errors._raise_err(errors.ERR_INVALID_SID, + sid=self.description.sid, + host=self.host, port=self.port) + errors._raise_err(errors.ERR_LISTENER_REFUSED_CONNECTION, + error_code=error_code) + + cdef int send(self, WriteBuffer buf) except -1: + cdef: + uint16_t service_options = TNS_GSO_DONT_CARE + uint32_t connect_flags_1 = 0, connect_flags_2 = 0 + uint8_t nsi_flags = \ + TNS_NSI_SUPPORT_SECURITY_RENEG | TNS_NSI_DISABLE_NA + if buf._caps.supports_oob: + service_options |= TNS_GSO_CAN_RECV_ATTENTION + connect_flags_2 |= TNS_CHECK_OOB + buf.start_request(TNS_PACKET_TYPE_CONNECT, self.packet_flags) + buf.write_uint16be(TNS_VERSION_DESIRED) + buf.write_uint16be(TNS_VERSION_MINIMUM) + buf.write_uint16be(service_options) + buf.write_uint16be(self.description.sdu) + buf.write_uint16be(self.description.sdu) + buf.write_uint16be(TNS_PROTOCOL_CHARACTERISTICS) + buf.write_uint16be(0) # line turnaround + buf.write_uint16be(1) # value of 1 + buf.write_uint16be(self.connect_string_len) + buf.write_uint16be(74) # offset to connect data + buf.write_uint32be(0) # max receivable data + buf.write_uint8(nsi_flags) + buf.write_uint8(nsi_flags) + buf.write_uint64be(0) # obsolete + buf.write_uint64be(0) # obsolete + buf.write_uint64be(0) # obsolete + buf.write_uint32be(self.description.sdu) # SDU (large) + buf.write_uint32be(self.description.sdu) # TDU (large) + buf.write_uint32be(connect_flags_1) + buf.write_uint32be(connect_flags_2) + if self.connect_string_len > TNS_MAX_CONNECT_DATA: + buf.end_request() + buf.start_request(TNS_PACKET_TYPE_DATA) + buf.write_bytes(self.connect_string_bytes) + buf.end_request() diff --git a/src/oracledb/impl/thin/data_types.pyx b/src/oracledb/impl/thin/messages/data_types.pyx similarity index 95% rename from src/oracledb/impl/thin/data_types.pyx rename to src/oracledb/impl/thin/messages/data_types.pyx index 3400cb65..c0b9addd 100644 --- a/src/oracledb/impl/thin/data_types.pyx +++ b/src/oracledb/impl/thin/messages/data_types.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2021, 2023, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -25,8 +25,9 @@ #------------------------------------------------------------------------------ # data_types.pyx # -# Cython file defining the data type array sent to the database server during -# connect (embedded in thin_impl.pyx). +# Cython file defining the messages sent to the database and the responses that +# are received by the client for establishing data type formats (embedded in +# thin_impl.pyx). 
#------------------------------------------------------------------------------ ctypedef struct DataType: @@ -666,3 +667,46 @@ cdef DataType[319] DATA_TYPES = [ [TNS_DATA_TYPE_PLOP, TNS_DATA_TYPE_PLOP, TNS_TYPE_REP_UNIVERSAL], [0, 0, 0] ] + + +@cython.final +cdef class DataTypesMessage(Message): + + cdef int _process_message(self, ReadBuffer buf, + uint8_t message_type) except -1: + cdef uint16_t data_type, conv_data_type + while True: + buf.read_uint16be(&data_type) + if data_type == 0: + break + buf.read_uint16be(&conv_data_type) + if conv_data_type != 0: + buf.skip_raw_bytes(4) + if not buf._caps.supports_end_of_response: + self.end_of_response = True + + cdef int _write_message(self, WriteBuffer buf) except -1: + cdef: + DataType* data_type + int i + + # write character set and capabilities + buf.write_uint8(TNS_MSG_TYPE_DATA_TYPES) + buf.write_uint16le(TNS_CHARSET_UTF8) + buf.write_uint16le(TNS_CHARSET_UTF8) + buf.write_uint8(TNS_ENCODING_MULTI_BYTE | TNS_ENCODING_CONV_LENGTH) + buf.write_bytes_with_length(bytes(buf._caps.compile_caps)) + buf.write_bytes_with_length(bytes(buf._caps.runtime_caps)) + + # write data types + i = 0 + while True: + data_type = &DATA_TYPES[i] + if data_type.data_type == 0: + break + i += 1 + buf.write_uint16be(data_type.data_type) + buf.write_uint16be(data_type.conv_data_type) + buf.write_uint16be(data_type.representation) + buf.write_uint16be(0) + buf.write_uint16be(0) diff --git a/src/oracledb/impl/thin/messages/deq.pyx b/src/oracledb/impl/thin/messages/deq.pyx new file mode 100644 index 00000000..94d07307 --- /dev/null +++ b/src/oracledb/impl/thin/messages/deq.pyx @@ -0,0 +1,252 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# deq.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for dequeuing an AQ message +# (embedded in thin_impl.pyx). 
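+#
+# Minimal usage sketch of the asyncio AQ API that ultimately results in this
+# message being sent (connection details and the queue name are
+# placeholders, and the queue is assumed to use RAW payloads):
+#
+#     import asyncio
+#     import oracledb
+#
+#     async def main():
+#         conn = await oracledb.connect_async(user="user", password="pw",
+#                                             dsn="host/service")
+#         queue = conn.queue("DEMO_RAW_QUEUE")
+#         queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
+#         props = await queue.deqone()   # None when no message is available
+#         if props is not None:
+#             print(props.payload)
+#         await conn.commit()
+#         await conn.close()
+#
+#     asyncio.run(main())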
+#------------------------------------------------------------------------------ + +@cython.final +cdef class DeqMessage(Message): + cdef: + BaseThinQueueImpl queue_impl + ThinDeqOptionsImpl deq_options_impl + ThinMsgPropsImpl props_impl + bint no_msg_found + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization + """ + self.function_code = TNS_FUNC_AQ_DEQ + + cdef int _process_error_info(self, ReadBuffer buf) except -1: + """ + Process error information from the buffer. If the error that indicates + that no messages were received is detected, the error is cleared and + the flag set so that the dequeue can handle that case. + """ + Message._process_error_info(self, buf) + if self.error_info.num == TNS_ERR_NO_MESSAGES_FOUND: + self.error_info.num = 0 + self.error_occurred = False + self.no_msg_found = True + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the return parameters of the AQ Dequeue request. + """ + cdef: + uint32_t num_bytes, num_extensions, i + ssize_t temp_num_bytes + const char_type *ptr + uint16_t temp16, keyword + bytes temp + OracleData data + uint32_t imageLength + ThinDbObjectImpl obj_impl + ThinDbObjectTypeImpl type_impl + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.read_sb4(&self.props_impl.priority) # priority + buf.read_sb4(&self.props_impl.delay) # delay + buf.read_sb4(&self.props_impl.expiration) # expiration + # correlation id + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) + self.props_impl.correlation = ptr[:temp_num_bytes].decode() + buf.read_sb4(&self.props_impl.num_attempts) + # exception queue name + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) + self.props_impl.exceptionq = ptr[:temp_num_bytes].decode() + buf.read_sb4(&self.props_impl.state) + buf.read_ub4(&num_bytes) # enqueue time + if num_bytes > 0: + buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) + decode_date(ptr, temp_num_bytes, &data.buffer) + self.props_impl.enq_time = convert_date_to_python(&data.buffer) + buf.read_ub4(&num_bytes) # transaction id + if num_bytes > 0: + ptr = buf._get_raw(num_bytes) + self.props_impl.enq_txn_id = ptr[:num_bytes] + else: + self.props_impl.enq_txn_id = None + buf.read_ub4(&num_extensions) # number of extensions + if num_extensions > 0: + buf.skip_ub1() + for i in range(num_extensions): + temp = None + temp16 = 0 + buf.read_ub4(&num_bytes) # text value length + if num_bytes > 0: + buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) + temp = ptr[:temp_num_bytes] + temp16 = temp_num_bytes + buf.read_ub4(&num_bytes) # binary value length + if num_bytes > 0: + buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) + temp = ptr[:temp_num_bytes] + buf.read_ub2(&keyword) # extension keyword + if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_NAME and + temp is not None and temp16 > 0): + self.props_impl.sender_agent_name = temp + if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS and + temp is not None and temp16 > 0): + self.props_impl.sender_agent_address = temp + if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL and + temp is not None): + self.props_impl.sender_agent_protocol = temp + if (keyword == TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID and + temp is not None): + self.props_impl.original_msg_id = temp + buf.read_ub4(&num_bytes) # user properties + if num_bytes > 0: + errors._raise_err(errors.ERR_NOT_IMPLEMENTED) + buf.skip_ub4() # csn + buf.skip_ub4() # dsn + buf.skip_ub4() # flags + if 
buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: + buf.skip_ub4() # shard number + buf.read_ub4(&num_bytes) # num recipients + if num_bytes > 0: + errors._raise_err(errors.ERR_NOT_IMPLEMENTED) + if self.queue_impl.payload_type is not None: + type_impl = self.queue_impl.payload_type + obj_impl = buf.read_dbobject(type_impl) + if obj_impl is None: + obj_impl = type_impl.create_new_object() + self.props_impl.payload = PY_TYPE_DB_OBJECT._from_impl(obj_impl) + else: + buf.read_ub4(&num_bytes) # TOID len + if num_bytes > 0: + buf.skip_raw_bytes(num_bytes) + buf.read_ub4(&num_bytes) # OID len + if num_bytes > 0: + buf.skip_raw_bytes(num_bytes) + buf.read_ub4(&num_bytes) # snapshot + if num_bytes > 0: + buf.skip_raw_bytes(num_bytes) + buf.skip_ub2() # version no + buf.read_ub4(&imageLength) # image len + buf.skip_ub2() # flags + if imageLength > 0: + self.props_impl.payload = buf.read_bytes()[4:imageLength] + if self.queue_impl.is_json: + self.props_impl.payload = \ + self.conn_impl.decode_oson(self.props_impl.payload) + else: + if not self.queue_impl.is_json: + self.props_impl.payload = b'' + ptr = buf._get_raw(TNS_AQ_MESSAGE_ID_LENGTH) + self.props_impl.msgid = ptr[:TNS_AQ_MESSAGE_ID_LENGTH] + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Write message to the network buffers. + """ + cdef: + bytes queue_name_bytes + bytes consumer_name_bytes + bytes correlation_bytes + bytes condition_bytes + uint16_t delivery_mode + int deq_flags + self._write_function_code(buf) + queue_name_bytes = self.queue_impl.name.encode() + buf.write_uint8(1) # queue name (pointer) + buf.write_ub4(len(queue_name_bytes)) # queue name length + buf.write_uint8(1) # message properties + buf.write_uint8(1) # msg props length + buf.write_uint8(1) # recipient list + buf.write_uint8(1) # recipient list length + if self.deq_options_impl.consumer_name: + consumer_name_bytes = self.deq_options_impl.consumer_name.encode() + buf.write_uint8(1) # consumer name + buf.write_ub4(len(consumer_name_bytes)) + else: + consumer_name_bytes = None + buf.write_uint8(0) # consumer name + buf.write_ub4(0) # consumer name length + buf.write_sb4(self.deq_options_impl.mode) # dequeue mode + buf.write_sb4(self.deq_options_impl.navigation) # navigation + buf.write_sb4(self.deq_options_impl.visibility) # visibility + buf.write_sb4(self.deq_options_impl.wait) # wait + if self.deq_options_impl.msgid: + buf.write_uint8(1) # select mesg id + buf.write_ub4(TNS_AQ_MESSAGE_ID_LENGTH) # mesg id len + else: + buf.write_uint8(0) # select mesg id + buf.write_ub4(0) # select mesg id length + if self.deq_options_impl.correlation: + correlation_bytes = self.deq_options_impl.correlation.encode() + buf.write_uint8(1) # correlation id + buf.write_ub4(len(correlation_bytes)) # correlation id len + else: + correlation_bytes = None + buf.write_uint8(0) # correlation id + buf.write_ub4(0) # correlation id len + buf.write_uint8(1) # toid of payload + buf.write_ub4(16) # toid length + buf.write_ub2(self.props_impl.version) # version of type + buf.write_uint8(1) # payload + buf.write_uint8(1) # return msg id + buf.write_ub4(16) # mesg id length + deq_flags = 0 + delivery_mode = self.deq_options_impl.delivery_mode + if (delivery_mode == TNS_AQ_MSG_BUFFERED): + deq_flags |= TNS_KPD_AQ_BUFMSG + elif (delivery_mode == TNS_AQ_MSG_PERSISTENT_OR_BUFFERED): + deq_flags |= TNS_KPD_AQ_EITHER + buf.write_ub4(deq_flags) # dequeue flags + if self.deq_options_impl.condition: + condition_bytes = self.deq_options_impl.condition.encode() + buf.write_uint8(1) # 
condition (pointer) + buf.write_ub4(len(condition_bytes)) # condition length + else: + condition_bytes = None + buf.write_uint8(0) # condition + buf.write_ub4(0) # condition length + buf.write_uint8(0) # extensions + buf.write_ub4(0) # number of extensions + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_20_1: + buf.write_uint8(0) # JSON payload + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: + buf.write_ub4(-1) # shard id + + buf.write_bytes_with_length(queue_name_bytes) + if consumer_name_bytes is not None: + buf.write_bytes_with_length(consumer_name_bytes) + if self.deq_options_impl.msgid: + buf.write_bytes(self.deq_options_impl.msgid) + if correlation_bytes is not None: + buf.write_bytes_with_length(correlation_bytes) + buf.write_bytes(self.queue_impl.payload_toid) + if condition_bytes is not None: + buf.write_bytes_with_length(condition_bytes) diff --git a/src/oracledb/impl/thin/messages/end_pipeline.pyx b/src/oracledb/impl/thin/messages/end_pipeline.pyx new file mode 100644 index 00000000..f0545fe3 --- /dev/null +++ b/src/oracledb/impl/thin/messages/end_pipeline.pyx @@ -0,0 +1,46 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# end_pipeline.pyx +# +# Cython file defining the messages sent to the database and the responses that +# are received by the client for ending a pipeline (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class EndPipelineMessage(Message): + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_PIPELINE_END + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Write the message to the buffer. + """ + self._write_function_code(buf) + buf.write_ub4(0) # ID (unused) diff --git a/src/oracledb/impl/thin/messages/enq.pyx b/src/oracledb/impl/thin/messages/enq.pyx new file mode 100644 index 00000000..c84ae8c1 --- /dev/null +++ b/src/oracledb/impl/thin/messages/enq.pyx @@ -0,0 +1,169 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. 
+# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# enq.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for enqueuing an AQ message +# (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class EnqMessage(Message): + cdef: + BaseThinQueueImpl queue_impl + ThinEnqOptionsImpl enq_options_impl + ThinMsgPropsImpl props_impl + + cdef int _initialize_hook(self) except -1: + """ + perform initialization + """ + self.function_code = TNS_FUNC_AQ_ENQ + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the return parameters for the AQ enqueue request. + """ + cdef const char_type *ptr = buf._get_raw(TNS_AQ_MESSAGE_ID_LENGTH) + self.props_impl.msgid = ptr[:TNS_AQ_MESSAGE_ID_LENGTH] + buf.skip_ub2() # extensions length + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Write message to the network buffers. 
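+
+        For reference, assuming a queue obtained from conn.queue() on an
+        asyncio connection, a user-level enqueue along the lines of
+
+            props = conn.msgproperties(payload=b"some data")
+            await queue.enqone(props)
+            await conn.commit()
+
+        is what causes this message to be built and sent.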
+ """ + cdef: + bytes queue_name_bytes + bytes correlation_bytes + bytes exceptionq_bytes + int enq_flags + + self._write_function_code(buf) + queue_name_bytes = self.queue_impl.name.encode() + buf.write_uint8(1) # queue name (pointer) + buf.write_ub4(len(queue_name_bytes)) # queue name length + buf.write_ub4(self.props_impl.priority) + buf.write_ub4(self.props_impl.delay) + buf.write_sb4(self.props_impl.expiration) + if self.props_impl.correlation is None: + buf.write_ub4(0) # correlation + else: + correlation_bytes = self.props_impl.correlation.encode() + buf.write_ub4(len(correlation_bytes)) + buf.write_bytes_with_length(correlation_bytes) + buf.write_ub4(0) # number of attempts + if self.props_impl.exceptionq is None: + buf.write_ub4(0) # exception queue + else: + exceptionq_bytes = self.props_impl.exceptionq.encode() + buf.write_ub4(len(exceptionq_bytes)) + buf.write_bytes_with_length(exceptionq_bytes) + buf.write_ub4(self.props_impl.state) # message state + buf.write_ub4(0) # enqueue time length + if self.props_impl.enq_txn_id is None: + buf.write_ub4(0) # enqueue txn id length + else: + buf.write_ub4(len(self.props_impl.enq_txn_id)) + buf.write_bytes_with_length(self.props_impl.enq_txn_id) + buf.write_ub4(4) # number of extensions + buf.write_uint8(0x0e) # unknown extra byte + buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_NAME) + buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS) + buf.write_extension_values(None, b'\x00', + TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL) + buf.write_extension_values(None, None, + TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID) + buf.write_ub4(0) # user property + buf.write_ub4(0) # cscn + buf.write_ub4(0) # dscn + buf.write_ub4(0) # flags + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: + buf.write_ub4(0xffffffffl) # shard id + + if self.props_impl.recipients is None: + buf.write_uint8(0) # recipients (pointer) + buf.write_ub4(0) # number of key/value pairs + else: + buf.write_uint8(1) + buf.write_ub4(len(self.props_impl.recipients) * 3) + buf.write_ub4(self.enq_options_impl.visibility) + buf.write_uint8(0) # relative message id + buf.write_ub4(0) # relative message length + buf.write_ub4(0) # sequence deviation + buf.write_uint8(1) # TOID of payload (pointer) + buf.write_ub4(16) # TOID of payload length + buf.write_ub2(self.props_impl.version) + if self.queue_impl.is_json: + buf.write_uint8(0) # payload (pointer) + buf.write_uint8(0) # RAW payload (pointer) + buf.write_ub4(0) # RAW payload length + elif self.queue_impl.payload_type is not None: + buf.write_uint8(1) # payload (pointer) + buf.write_uint8(0) # RAW payload (pointer) + buf.write_ub4(0) # RAW payload (length) + else: + buf.write_uint8(0) # payload (pointer) + buf.write_uint8(1) # RAW payload (pointer) + buf.write_ub4(len(self.props_impl.payloadObject)) + buf.write_uint8(1) # return message id (pointer) + buf.write_ub4(TNS_AQ_MESSAGE_ID_LENGTH) # return message id length + enq_flags = 0 + if self.enq_options_impl.delivery_mode == TNS_AQ_MSG_BUFFERED: + enq_flags |= TNS_KPD_AQ_BUFMSG + buf.write_ub4(enq_flags) # enqueue flags + buf.write_uint8(0) # extensions 1 (pointer) + buf.write_ub4(0) # number of extensions 1 + buf.write_uint8(0) # extensions 2 (pointer) + buf.write_ub4(0) # number of extensions 2 + buf.write_uint8(0) # source sequence number + buf.write_ub4(0) # source sequence length + buf.write_uint8(0) # max sequence number + buf.write_ub4(0) # max sequence length + buf.write_uint8(0) # output ack length + buf.write_uint8(0) # correlation (pointer) + 
buf.write_ub4(0) # correlation length + buf.write_uint8(0) # sender name (pointer) + buf.write_ub4(0) # sender name length + buf.write_uint8(0) # sender address (pointer) + buf.write_ub4(0) # sender address length + buf.write_uint8(0) # sender charset id (pointer) + buf.write_uint8(0) # sender ncharset id (pointer) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_20_1: + if self.queue_impl.is_json: + buf.write_uint8(1) # JSON payload (pointer) + else: + buf.write_uint8(0) # JSON payload (pointer) + + buf.write_bytes_with_length(queue_name_bytes) + buf.write_bytes(self.queue_impl.payload_toid) + if not self.queue_impl.is_json: + if self.queue_impl.payload_type is not None: + buf.write_dbobject(self.props_impl.payloadObject) + else: + buf.write_bytes(self.props_impl.payloadObject) + if self.queue_impl.is_json: + buf.write_oson(self.props_impl.payloadObject, + self.conn_impl._oson_max_fname_size, False) diff --git a/src/oracledb/impl/thin/messages/execute.pyx b/src/oracledb/impl/thin/messages/execute.pyx new file mode 100644 index 00000000..540d4c10 --- /dev/null +++ b/src/oracledb/impl/thin/messages/execute.pyx @@ -0,0 +1,263 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# execute.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for executing a SQL statement +# (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class ExecuteMessage(MessageWithData): + + cdef int _write_execute_message(self, WriteBuffer buf) except -1: + """ + Write the message for a full execute. 
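+
+        As an illustration of the flag logic below (not an exhaustive list),
+        the first execution of a plain SQL query with bind values normally
+        sets TNS_EXEC_OPTION_PARSE, TNS_EXEC_OPTION_EXECUTE,
+        TNS_EXEC_OPTION_BIND and TNS_EXEC_OPTION_NOT_PLSQL, plus
+        TNS_EXEC_OPTION_FETCH when prefetching is enabled and
+        TNS_EXEC_OPTION_COMMIT when autocommit is on.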
+ """ + cdef: + uint32_t options, dml_options = 0, num_params = 0, num_iters = 1 + Statement stmt = self.cursor_impl._statement + BaseThinCursorImpl cursor_impl = self.cursor_impl + list params = stmt._bind_info_list + + # determine the options to use for the execute + options = 0 + if not stmt._requires_define and not self.parse_only \ + and params is not None: + num_params = len(params) + if stmt._requires_define: + options |= TNS_EXEC_OPTION_DEFINE + elif not self.parse_only and stmt._sql is not None: + dml_options = TNS_EXEC_OPTION_IMPLICIT_RESULTSET + options |= TNS_EXEC_OPTION_EXECUTE + if stmt._cursor_id == 0 or stmt._is_ddl: + options |= TNS_EXEC_OPTION_PARSE + if stmt._is_query: + if self.parse_only: + options |= TNS_EXEC_OPTION_DESCRIBE + else: + if stmt._cursor_id == 0 or stmt._requires_define: + num_iters = self.cursor_impl.prefetchrows + else: + num_iters = self.cursor_impl.arraysize + self.cursor_impl._set_fetch_array_size(num_iters) + if num_iters > 0 and not stmt._no_prefetch: + options |= TNS_EXEC_OPTION_FETCH + if not stmt._is_plsql and not self.parse_only: + options |= TNS_EXEC_OPTION_NOT_PLSQL + elif stmt._is_plsql and num_params > 0: + options |= TNS_EXEC_OPTION_PLSQL_BIND + if num_params > 0: + options |= TNS_EXEC_OPTION_BIND + if self.batcherrors: + options |= TNS_EXEC_OPTION_BATCH_ERRORS + if self.arraydmlrowcounts: + dml_options = TNS_EXEC_OPTION_DML_ROWCOUNTS + if self.conn_impl.autocommit and not self.parse_only: + options |= TNS_EXEC_OPTION_COMMIT + + # write body of message + self._write_function_code(buf) + buf.write_ub4(options) # execute options + buf.write_ub4(stmt._cursor_id) # cursor id + if stmt._cursor_id == 0 or stmt._is_ddl: + buf.write_uint8(1) # pointer (cursor id) + buf.write_ub4(stmt._sql_length) + else: + buf.write_uint8(0) # pointer (cursor id) + buf.write_ub4(0) + buf.write_uint8(1) # pointer (vector) + buf.write_ub4(13) # al8i4 array length + buf.write_uint8(0) # pointer (al8o4) + buf.write_uint8(0) # pointer (al8o4l) + buf.write_ub4(0) # prefetch buffer size + buf.write_ub4(num_iters) # prefetch number of rows + buf.write_ub4(TNS_MAX_LONG_LENGTH) # maximum long size + if num_params == 0: + buf.write_uint8(0) # pointer (binds) + buf.write_ub4(0) # number of binds + else: + buf.write_uint8(1) # pointer (binds) + buf.write_ub4(num_params) # number of binds + buf.write_uint8(0) # pointer (al8app) + buf.write_uint8(0) # pointer (al8txn) + buf.write_uint8(0) # pointer (al8txl) + buf.write_uint8(0) # pointer (al8kv) + buf.write_uint8(0) # pointer (al8kvl) + if stmt._requires_define: + buf.write_uint8(1) # pointer (al8doac) + buf.write_ub4(len(self.cursor_impl.fetch_vars)) + # number of defines + else: + buf.write_uint8(0) + buf.write_ub4(0) + buf.write_ub4(0) # registration id + buf.write_uint8(0) # pointer (al8objlist) + buf.write_uint8(1) # pointer (al8objlen) + buf.write_uint8(0) # pointer (al8blv) + buf.write_ub4(0) # al8blvl + buf.write_uint8(0) # pointer (al8dnam) + buf.write_ub4(0) # al8dnaml + buf.write_ub4(0) # al8regid_msb + if self.arraydmlrowcounts: + buf.write_uint8(1) # pointer (al8pidmlrc) + buf.write_ub4(self.num_execs) # al8pidmlrcbl + buf.write_uint8(1) # pointer (al8pidmlrcl) + else: + buf.write_uint8(0) # pointer (al8pidmlrc) + buf.write_ub4(0) # al8pidmlrcbl + buf.write_uint8(0) # pointer (al8pidmlrcl) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: + buf.write_uint8(0) # pointer (al8sqlsig) + buf.write_ub4(0) # SQL signature length + buf.write_uint8(0) # pointer (SQL ID) + buf.write_ub4(0) # allocated size of 
SQL ID + buf.write_uint8(0) # pointer (length of SQL ID) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2_EXT1: + buf.write_uint8(0) # pointer (chunk ids) + buf.write_ub4(0) # number of chunk ids + if stmt._cursor_id == 0 or stmt._is_ddl: + if stmt._sql_bytes is None: + errors._raise_err(errors.ERR_INVALID_REF_CURSOR) + buf.write_bytes_with_length(stmt._sql_bytes) + buf.write_ub4(1) # al8i4[0] parse + else: + buf.write_ub4(0) # al8i4[0] parse + if stmt._is_query: + if stmt._cursor_id == 0: + buf.write_ub4(0) # al8i4[1] execution count + else: + buf.write_ub4(num_iters) + else: + buf.write_ub4(self.num_execs) # al8i4[1] execution count + buf.write_ub4(0) # al8i4[2] + buf.write_ub4(0) # al8i4[3] + buf.write_ub4(0) # al8i4[4] + buf.write_ub4(0) # al8i4[5] SCN (part 1) + buf.write_ub4(0) # al8i4[6] SCN (part 2) + buf.write_ub4(stmt._is_query) # al8i4[7] is query + buf.write_ub4(0) # al8i4[8] + buf.write_ub4(dml_options) # al8i4[9] DML row counts/implicit + buf.write_ub4(0) # al8i4[10] + buf.write_ub4(0) # al8i4[11] + buf.write_ub4(0) # al8i4[12] + if stmt._requires_define: + self._write_column_metadata(buf, self.cursor_impl.fetch_var_impls) + elif num_params > 0: + self._write_bind_params(buf, params) + + cdef int _write_reexecute_message(self, WriteBuffer buf) except -1: + """ + Write the message for a re-execute. + """ + cdef: + uint32_t i, exec_flags_1 = 0, exec_flags_2 = 0, num_iters + Statement stmt = self.cursor_impl._statement + list params = stmt._bind_info_list + BindInfo info + + if params: + if not stmt._is_query and not stmt._is_returning: + self.out_var_impls = [info._bind_var_impl \ + for info in params \ + if info.bind_dir != TNS_BIND_DIR_INPUT] + params = [info for info in params \ + if info.bind_dir != TNS_BIND_DIR_OUTPUT \ + and not info._is_return_bind] + if self.function_code == TNS_FUNC_REEXECUTE_AND_FETCH: + exec_flags_1 |= TNS_EXEC_OPTION_EXECUTE + num_iters = self.cursor_impl.prefetchrows + self.cursor_impl._set_fetch_array_size(num_iters) + else: + if self.conn_impl.autocommit: + exec_flags_2 |= TNS_EXEC_OPTION_COMMIT_REEXECUTE + num_iters = self.num_execs + + self._write_function_code(buf) + buf.write_ub4(stmt._cursor_id) + buf.write_ub4(num_iters) + buf.write_ub4(exec_flags_1) + buf.write_ub4(exec_flags_2) + if params: + for i in range(self.num_execs): + buf.write_uint8(TNS_MSG_TYPE_ROW_DATA) + self._write_bind_params_row(buf, params, i) + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Write the execute message to the buffer. Two types of execute messages + are possible: one for a full execute and the second, simpler message, + for when an existing cursor is being re-executed. 
A full execute is + required under the following circumstances: + - the statement has never been executed + - the statement refers to a REF cursor (no sql is defined) + - prefetch is not possible (LOB columns fetched) + - bind metadata has changed + - parse is being performed + - define is being performed + - DDL is being executed + - batch errors mode is enabled + """ + cdef: + Statement stmt = self.cursor_impl._statement + if stmt._cursor_id == 0 or not stmt._executed \ + or stmt._sql is None \ + or stmt._no_prefetch \ + or stmt._binds_changed \ + or self.parse_only \ + or stmt._requires_define \ + or stmt._is_ddl \ + or self.batcherrors: + self.function_code = TNS_FUNC_EXECUTE + self._write_execute_message(buf) + elif stmt._is_query and self.cursor_impl.prefetchrows > 0: + self.function_code = TNS_FUNC_REEXECUTE_AND_FETCH + self._write_reexecute_message(buf) + else: + self.function_code = TNS_FUNC_REEXECUTE + self._write_reexecute_message(buf) + stmt._binds_changed = False + + cdef int process(self, ReadBuffer buf) except -1: + """ + Runs after the database response has been processed. If the statement + executed requires define and is not a REF cursor (which would already + have performed the define during its execute), then mark the message as + needing to be resent. If this is after the second time the message has + been sent, mark the statement as no longer needing a define (since this + only needs to happen once). + """ + cdef Statement stmt = self.cursor_impl._statement + MessageWithData.process(self, buf) + if self.error_occurred and self.error_info.pos == 0 and stmt._is_plsql: + self.error_info.pos = self.error_info.rowcount + self.offset + if not self.parse_only: + stmt._executed = True + if stmt._requires_define and stmt._sql is not None: + if self.resend: + stmt._requires_define = False + else: + self.resend = True diff --git a/src/oracledb/impl/thin/messages/fast_auth.pyx b/src/oracledb/impl/thin/messages/fast_auth.pyx new file mode 100644 index 00000000..1d3fa770 --- /dev/null +++ b/src/oracledb/impl/thin/messages/fast_auth.pyx @@ -0,0 +1,74 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# fast_auth.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for performing fast authentication +# to the database (embedded in thin_impl.pyx). 
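+#
+# There is no separate user-facing call for fast authentication; it is part
+# of connection establishment, so an ordinary connect (sketch below,
+# credentials are placeholders) is what exercises this message when the
+# database supports it:
+#
+#     conn = oracledb.connect(user="user", password="pw", dsn="host/service")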
+#------------------------------------------------------------------------------ + +@cython.final +cdef class FastAuthMessage(Message): + cdef: + DataTypesMessage data_types_message + ProtocolMessage protocol_message + AuthMessage auth_message + + cdef int _process_message(self, ReadBuffer buf, + uint8_t message_type) except -1: + """ + Processes the messages returned from the server response. + """ + if message_type == TNS_MSG_TYPE_PROTOCOL: + ProtocolMessage._process_message(self.protocol_message, buf, + message_type) + elif message_type == TNS_MSG_TYPE_DATA_TYPES: + DataTypesMessage._process_message(self.data_types_message, buf, + message_type) + else: + AuthMessage._process_message(self.auth_message, buf, message_type) + self.end_of_response = self.auth_message.end_of_response + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Writes the message to the buffer. This includes not just this message + but also the protocol, data types and auth messages. This reduces the + number of round-trips to the database and thereby increases + performance. + """ + buf.write_uint8(TNS_MSG_TYPE_FAST_AUTH) + buf.write_uint8(1) # fast auth version + buf.write_uint8(TNS_SERVER_CONVERTS_CHARS) # flag 1 + buf.write_uint8(0) # flag 2 + ProtocolMessage._write_message(self.protocol_message, buf) + buf.write_uint16be(0) # server charset (unused) + buf.write_uint8(0) # server charset flag (unused) + buf.write_uint16be(0) # server ncharset (unused) + buf._caps.ttc_field_version = TNS_CCAP_FIELD_VERSION_19_1_EXT_1 + buf.write_uint8(buf._caps.ttc_field_version) + DataTypesMessage._write_message(self.data_types_message, buf) + AuthMessage._write_message(self.auth_message, buf) + buf._caps.ttc_field_version = TNS_CCAP_FIELD_VERSION_MAX diff --git a/src/oracledb/impl/thin/messages/fetch.pyx b/src/oracledb/impl/thin/messages/fetch.pyx new file mode 100644 index 00000000..e3630933 --- /dev/null +++ b/src/oracledb/impl/thin/messages/fetch.pyx @@ -0,0 +1,48 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# fetch.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for fetching data (embedded in +# thin_impl.pyx). 
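+# The number of rows requested in each fetch round-trip is taken from the
+# cursor's array size (Cursor.arraysize), as seen in FetchMessage below.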
+#------------------------------------------------------------------------------ + +@cython.final +cdef class FetchMessage(MessageWithData): + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_FETCH + + cdef int _write_message(self, WriteBuffer buf) except -1: + self.cursor_impl._set_fetch_array_size(self.cursor_impl.arraysize) + self._write_function_code(buf) + if self.cursor_impl._statement._cursor_id == 0: + errors._raise_err(errors.ERR_CURSOR_HAS_BEEN_CLOSED) + buf.write_ub4(self.cursor_impl._statement._cursor_id) + buf.write_ub4(self.cursor_impl._fetch_array_size) diff --git a/src/oracledb/impl/thin/messages/lob_op.pyx b/src/oracledb/impl/thin/messages/lob_op.pyx new file mode 100644 index 00000000..99dcfd73 --- /dev/null +++ b/src/oracledb/impl/thin/messages/lob_op.pyx @@ -0,0 +1,147 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# lob_op.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for performing LOB operations +# (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class LobOpMessage(Message): + cdef: + uint32_t operation + BaseThinLobImpl source_lob_impl + BaseThinLobImpl dest_lob_impl + uint64_t source_offset + uint64_t dest_offset + int64_t amount + bint send_amount + bint bool_flag + object data + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. 
+ """ + self.function_code = TNS_FUNC_LOB_OP + + cdef int _process_message(self, ReadBuffer buf, + uint8_t message_type) except -1: + cdef: + const char* encoding + const char_type *ptr + ssize_t num_bytes + if message_type == TNS_MSG_TYPE_LOB_DATA: + buf.read_raw_bytes_and_length(&ptr, &num_bytes) + if self.source_lob_impl.dbtype._ora_type_num in \ + (ORA_TYPE_NUM_BLOB, ORA_TYPE_NUM_BFILE): + self.data = ptr[:num_bytes] + else: + encoding = self.source_lob_impl._get_encoding() + self.data = ptr[:num_bytes].decode(encoding) + else: + Message._process_message(self, buf, message_type) + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + cdef: + cdef const char_type *ptr + ssize_t num_bytes + uint8_t temp8 + if self.source_lob_impl is not None: + num_bytes = len(self.source_lob_impl._locator) + ptr = buf.read_raw_bytes(num_bytes) + self.source_lob_impl._locator = ptr[:num_bytes] + if self.dest_lob_impl is not None: + num_bytes = len(self.dest_lob_impl._locator) + ptr = buf.read_raw_bytes(num_bytes) + self.dest_lob_impl._locator = ptr[:num_bytes] + if self.operation == TNS_LOB_OP_CREATE_TEMP: + buf.skip_ub2() # skip character set + buf.skip_raw_bytes(3) # skip trailing flags, amount + elif self.send_amount: + buf.read_sb8(&self.amount) + if self.operation in (TNS_LOB_OP_IS_OPEN, + TNS_LOB_OP_FILE_EXISTS, + TNS_LOB_OP_FILE_ISOPEN): + buf.read_ub1(&temp8) + self.bool_flag = temp8 > 0 + + cdef int _write_message(self, WriteBuffer buf) except -1: + cdef int i + self._write_function_code(buf) + if self.source_lob_impl is None: + buf.write_uint8(0) # source pointer + buf.write_ub4(0) # source length + else: + buf.write_uint8(1) # source pointer + buf.write_ub4(len(self.source_lob_impl._locator)) + if self.dest_lob_impl is None: + buf.write_uint8(0) # dest pointer + buf.write_ub4(0) # dest length + else: + buf.write_uint8(1) # dest pointer + buf.write_ub4(len(self.dest_lob_impl._locator)) + buf.write_ub4(0) # short source offset + buf.write_ub4(0) # short dest offset + if self.operation == TNS_LOB_OP_CREATE_TEMP: + buf.write_uint8(1) # pointer (character set) + else: + buf.write_uint8(0) # pointer (character set) + buf.write_uint8(0) # pointer (short amount) + if self.operation in (TNS_LOB_OP_CREATE_TEMP, + TNS_LOB_OP_IS_OPEN, + TNS_LOB_OP_FILE_EXISTS, + TNS_LOB_OP_FILE_ISOPEN): + buf.write_uint8(1) # pointer (NULL LOB) + else: + buf.write_uint8(0) # pointer (NULL LOB) + buf.write_ub4(self.operation) + buf.write_uint8(0) # pointer (SCN array) + buf.write_uint8(0) # SCN array length + buf.write_ub8(self.source_offset) + buf.write_ub8(self.dest_offset) + if self.send_amount: + buf.write_uint8(1) # pointer (amount) + else: + buf.write_uint8(0) # pointer (amount) + for i in range(3): # array LOB (not used) + buf.write_uint16be(0) + if self.source_lob_impl is not None: + buf.write_bytes(self.source_lob_impl._locator) + if self.dest_lob_impl is not None: + buf.write_bytes(self.dest_lob_impl._locator) + if self.operation == TNS_LOB_OP_CREATE_TEMP: + if self.source_lob_impl.dbtype._csfrm == CS_FORM_NCHAR: + buf._caps._check_ncharset_id() + buf.write_ub4(TNS_CHARSET_UTF16) + else: + buf.write_ub4(TNS_CHARSET_UTF8) + if self.data is not None: + buf.write_uint8(TNS_MSG_TYPE_LOB_DATA) + buf.write_bytes_with_length(self.data) + if self.send_amount: + buf.write_ub8(self.amount) # LOB amount diff --git a/src/oracledb/impl/thin/messages/logoff.pyx b/src/oracledb/impl/thin/messages/logoff.pyx new file mode 100644 index 00000000..a5fec38c --- /dev/null +++ 
b/src/oracledb/impl/thin/messages/logoff.pyx @@ -0,0 +1,40 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# logoff.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for logging off from the database +# (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class LogoffMessage(Message): + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_LOGOFF diff --git a/src/oracledb/impl/thin/messages/ping.pyx b/src/oracledb/impl/thin/messages/ping.pyx new file mode 100644 index 00000000..f419e57d --- /dev/null +++ b/src/oracledb/impl/thin/messages/ping.pyx @@ -0,0 +1,40 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# ping.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for pinging the database +# (embedded in thin_impl.pyx). 
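+# A ping is a lightweight request carrying only its function code; it is
+# used by Connection.ping() to verify that the connection is still usable.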
+#------------------------------------------------------------------------------ + +@cython.final +cdef class PingMessage(Message): + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_PING diff --git a/src/oracledb/impl/thin/messages/protocol.pyx b/src/oracledb/impl/thin/messages/protocol.pyx new file mode 100644 index 00000000..3047d305 --- /dev/null +++ b/src/oracledb/impl/thin/messages/protocol.pyx @@ -0,0 +1,89 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# protocol.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for establishing the protoocl to +# use during the connection (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class ProtocolMessage(Message): + cdef: + uint8_t server_version + uint8_t server_flags + bytes server_compile_caps + bytes server_runtime_caps + bytes server_banner + + cdef int _write_message(self, WriteBuffer buf) except -1: + buf.write_uint8(TNS_MSG_TYPE_PROTOCOL) + buf.write_uint8(6) # protocol version (8.1 and higher) + buf.write_uint8(0) # "array" terminator + buf.write_str(DRIVER_NAME) + buf.write_uint8(0) # NULL terminator + + cdef int _process_message(self, ReadBuffer buf, + uint8_t message_type) except -1: + if message_type == TNS_MSG_TYPE_PROTOCOL: + self._process_protocol_info(buf) + if not buf._caps.supports_end_of_response: + self.end_of_response = True + else: + Message._process_message(self, buf, message_type) + + cdef int _process_protocol_info(self, ReadBuffer buf) except -1: + """ + Processes the response to the protocol request. 
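+        The response contains the server version and banner, the character
+        set ids and the compile and runtime capability vectors that are used
+        to adjust the capabilities of this connection.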
+ """ + cdef: + uint16_t num_elem, fdo_length + Capabilities caps = buf._caps + const char_type *fdo + bytearray temp_array + ssize_t ix + buf.read_ub1(&self.server_version) + buf.skip_ub1() # skip zero byte + self.server_banner = buf.read_null_terminated_bytes() + buf.read_uint16le(&caps.charset_id) + buf.read_ub1(&self.server_flags) + buf.read_uint16le(&num_elem) + if num_elem > 0: # skip elements + buf.skip_raw_bytes(num_elem * 5) + buf.read_uint16be(&fdo_length) + fdo = buf.read_raw_bytes(fdo_length) + ix = 6 + fdo[5] + fdo[6] + caps.ncharset_id = (fdo[ix + 3] << 8) + fdo[ix + 4] + self.server_compile_caps = buf.read_bytes() + if self.server_compile_caps is not None: + temp_array = bytearray(self.server_compile_caps) + caps._adjust_for_server_compile_caps(temp_array) + if caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1: + self.conn_impl._oson_max_fname_size = 65535 + self.server_runtime_caps = buf.read_bytes() + if self.server_runtime_caps is not None: + temp_array = bytearray(self.server_runtime_caps) + caps._adjust_for_server_runtime_caps(temp_array) diff --git a/src/oracledb/impl/thin/messages/rollback.pyx b/src/oracledb/impl/thin/messages/rollback.pyx new file mode 100644 index 00000000..8b931779 --- /dev/null +++ b/src/oracledb/impl/thin/messages/rollback.pyx @@ -0,0 +1,40 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# rollback.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for rolling back a transaction +# (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class RollbackMessage(Message): + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_ROLLBACK diff --git a/src/oracledb/impl/thin/messages/session_release.pyx b/src/oracledb/impl/thin/messages/session_release.pyx new file mode 100644 index 00000000..21d5c424 --- /dev/null +++ b/src/oracledb/impl/thin/messages/session_release.pyx @@ -0,0 +1,53 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
+# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# session_release.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for releasing a session (embedded +# in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class SessionReleaseMessage(Message): + + cdef: + uint32_t release_mode + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.message_type = TNS_MSG_TYPE_ONEWAY_FN + self.function_code = TNS_FUNC_SESSION_RELEASE + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Write the message for a DRCP session release. + """ + self._write_function_code(buf) + buf.write_uint8(0) # pointer (tag name) + buf.write_uint8(0) # tag name length + buf.write_ub4(self.release_mode) # mode diff --git a/src/oracledb/impl/thin/messages/tpc_change_state.pyx b/src/oracledb/impl/thin/messages/tpc_change_state.pyx new file mode 100644 index 00000000..adbb5b9a --- /dev/null +++ b/src/oracledb/impl/thin/messages/tpc_change_state.pyx @@ -0,0 +1,103 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# tpc_change_state.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for changing the state of a two +# phase commit (TPC) transaction (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class TransactionChangeStateMessage(Message): + """ + Used for two-phase commit (TPC) transaction change state: commit, rollback, + forget, etc. + """ + cdef: + uint32_t operation, state, flags + bytes context + object xid + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_TPC_TXN_CHANGE_STATE + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the parameters returned by the database. + """ + buf.read_ub4(&self.state) + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Writes the message to the database. + """ + cdef: + bytes global_transaction_id, branch_qualifier, xid_bytes + uint32_t format_id = 0 + + # acquire data to send to the server + if self.xid is not None: + format_id = self.xid[0] + global_transaction_id = self.xid[1] \ + if isinstance(self.xid[1], bytes) \ + else self.xid[1].encode() + branch_qualifier = self.xid[2] \ + if isinstance(self.xid[2], bytes) \ + else self.xid[2].encode() + xid_bytes = global_transaction_id + branch_qualifier + xid_bytes += bytes(128 - len(xid_bytes)) + + self._write_function_code(buf) + buf.write_ub4(self.operation) + if self.context is not None: + buf.write_uint8(1) # pointer (context) + buf.write_ub4(len(self.context)) + else: + buf.write_uint8(0) # pointer (context) + buf.write_ub4(0) # context length + if self.xid is not None: + buf.write_ub4(format_id) + buf.write_ub4(len(global_transaction_id)) + buf.write_ub4(len(branch_qualifier)) + buf.write_uint8(1) # pointer (xid) + buf.write_ub4(len(xid_bytes)) + else: + buf.write_ub4(0) # format id + buf.write_ub4(0) # global transaction id length + buf.write_ub4(0) # branch qualifier length + buf.write_uint8(0) # pointer (xid) + buf.write_ub4(0) # XID length + buf.write_ub4(0) # timeout + buf.write_ub4(self.state) + buf.write_uint8(1) # pointer (out state) + buf.write_ub4(self.flags) + if self.context is not None: + buf.write_bytes(self.context) + if self.xid is not None: + buf.write_bytes(xid_bytes) diff --git a/src/oracledb/impl/thin/messages/tpc_switch.pyx b/src/oracledb/impl/thin/messages/tpc_switch.pyx new file mode 100644 index 00000000..7e2c1a01 --- /dev/null +++ b/src/oracledb/impl/thin/messages/tpc_switch.pyx @@ -0,0 +1,134 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# tpc_switch.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for switching two phase commit +# (TPC) transactions (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + + +@cython.final +cdef class TransactionSwitchMessage(Message): + """ + Used for two-phase commit (TPC) transaction start, attach and detach. + """ + cdef: + uint32_t operation, flags, timeout, application_value + bytes context + object xid + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_TPC_TXN_SWITCH + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the parameters returned by the database. + """ + cdef: + const char_type* ptr + uint16_t context_len + buf.read_ub4(&self.application_value) + buf.read_ub2(&context_len) + ptr = buf.read_raw_bytes(context_len) + self.context = ptr[:context_len] + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Writes the message to the database. + """ + cdef: + bytes global_transaction_id, branch_qualifier, xid_bytes + bytes internal_name = None, external_name = None + uint32_t format_id = 0 + + # acquire data to send to the server + if self.xid is not None: + format_id = self.xid[0] + global_transaction_id = self.xid[1] \ + if isinstance(self.xid[1], bytes) \ + else self.xid[1].encode() + branch_qualifier = self.xid[2] \ + if isinstance(self.xid[2], bytes) \ + else self.xid[2].encode() + xid_bytes = global_transaction_id + branch_qualifier + xid_bytes += bytes(128 - len(xid_bytes)) + if self.conn_impl._internal_name is not None: + internal_name = self.conn_impl._internal_name.encode() + if self.conn_impl._external_name is not None: + external_name = self.conn_impl._external_name.encode() + + # write message + self._write_function_code(buf) + buf.write_ub4(self.operation) + if self.context is not None: + buf.write_uint8(1) # pointer (transaction context) + buf.write_ub4(len(self.context)) + else: + buf.write_uint8(0) # pointer (transaction context) + buf.write_ub4(0) # transaction context length + if self.xid is not None: + buf.write_ub4(format_id) + buf.write_ub4(len(global_transaction_id)) + buf.write_ub4(len(branch_qualifier)) + buf.write_uint8(1) # pointer (XID) + buf.write_ub4(len(xid_bytes)) + else: + buf.write_ub4(0) # format id + buf.write_ub4(0) # global transaction id length + buf.write_ub4(0) # branch qualifier length + buf.write_uint8(0) # pointer (XID) + buf.write_ub4(0) # XID length + buf.write_ub4(self.flags) + buf.write_ub4(self.timeout) + buf.write_uint8(1) # pointer (application value) + buf.write_uint8(1) # pointer (return context) + buf.write_uint8(1) # pointer (return context length) + if internal_name is not None: + buf.write_uint8(1) # pointer (internal name) + buf.write_ub4(len(internal_name)) + else: + buf.write_uint8(0) # pointer 
(internal name) + buf.write_ub4(0) # length of internal name + if external_name is not None: + external_name = self.conn_impl._external_name.encode() + buf.write_uint8(1) # pointer (external name) + buf.write_ub4(len(external_name)) + else: + buf.write_uint8(0) # pointer (external name) + buf.write_ub4(0) # length of external name + if self.context is not None: + buf.write_bytes(self.context) + if self.xid is not None: + buf.write_bytes(xid_bytes) + buf.write_ub4(self.application_value) + if internal_name is not None: + buf.write_bytes(internal_name) + if external_name is not None: + buf.write_bytes(external_name) diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index 81cc703e..abbe85df 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -207,8 +207,25 @@ include "impl/thin/crypto.pyx" include "impl/thin/capabilities.pyx" include "impl/thin/transport.pyx" include "impl/thin/packet.pyx" -include "impl/thin/data_types.pyx" -include "impl/thin/messages.pyx" +include "impl/thin/messages/base.pyx" +include "impl/thin/messages/auth.pyx" +include "impl/thin/messages/commit.pyx" +include "impl/thin/messages/connect.pyx" +include "impl/thin/messages/data_types.pyx" +include "impl/thin/messages/deq.pyx" +include "impl/thin/messages/end_pipeline.pyx" +include "impl/thin/messages/enq.pyx" +include "impl/thin/messages/execute.pyx" +include "impl/thin/messages/fetch.pyx" +include "impl/thin/messages/lob_op.pyx" +include "impl/thin/messages/logoff.pyx" +include "impl/thin/messages/ping.pyx" +include "impl/thin/messages/protocol.pyx" +include "impl/thin/messages/fast_auth.pyx" +include "impl/thin/messages/rollback.pyx" +include "impl/thin/messages/session_release.pyx" +include "impl/thin/messages/tpc_change_state.pyx" +include "impl/thin/messages/tpc_switch.pyx" include "impl/thin/protocol.pyx" include "impl/thin/queue.pyx" include "impl/thin/connection.pyx" From dfc1a777dc07438741d8a04759ad72e2a516861a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:01:34 -0600 Subject: [PATCH 016/239] An error message that links to documention on setting up a protocol hook function is now returned for "ldap" and "ldaps" connection strings. --- doc/src/release_notes.rst | 2 ++ src/oracledb/__init__.py | 4 ++-- .../{config_providers.py => builtin_hooks.py} | 24 ++++++++++++++++++- 3 files changed, 27 insertions(+), 3 deletions(-) rename src/oracledb/{config_providers.py => builtin_hooks.py} (78%) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 774cd785..e53066af 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -48,6 +48,8 @@ Common Changes :attr:`defaults.fetch_decimals` is set to *True*. #) Fixed bug when binding a variable that was previously bound as an output variable in a DML RETURNING statement. +#) An error message that links to documention on setting up a protocol hook + function is now returned for "ldap" and "ldaps" connection strings. #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a message being enqueued is not supported by the queue. Previously, diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index 15f62a54..afcf9da0 100644 --- a/src/oracledb/__init__.py +++ b/src/oracledb/__init__.py @@ -320,7 +320,7 @@ OracleDataFrame as OracleDataFrame, ) -from . import config_providers +from . 
import builtin_hooks IntervalYM = collections.namedtuple("IntervalYM", ["years", "months"]) @@ -339,7 +339,7 @@ class JsonId(bytes): del ( aq, # noqa base_impl, # noqa - config_providers, # noqa + builtin_hooks, # noqa connect_params, # noqa connection, # noqa constants, # noqa diff --git a/src/oracledb/config_providers.py b/src/oracledb/builtin_hooks.py similarity index 78% rename from src/oracledb/config_providers.py rename to src/oracledb/builtin_hooks.py index fed8ea4a..00324b1e 100644 --- a/src/oracledb/config_providers.py +++ b/src/oracledb/builtin_hooks.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -68,6 +68,28 @@ def config_provider_file_hook(protocol, protocol_arg, connect_params): register_protocol("config-file", config_provider_file_hook) +def ldap_hook(protocol, arg, params): + """ + Default hook for LDAP which simply points the user to the documentation + which explains how they can write their own hook for LDAP. + This hook is needed for python-oracledb Thin mode,or when + defaults.thick_mode_dsn_passthrough is False in Thick mode. + """ + doc_url = ( + "https://python-oracledb.readthedocs.io/en/latest" + "/user_guide/connection_handling.html#ldap-directory-naming" + ) + message = ( + f"To use an LDAP URL in python-oracledb, " + f"register an LDAP resolution function as shown in {doc_url}" + ) + raise Exception(message) + + +register_protocol("ldap", ldap_hook) +register_protocol("ldaps", ldap_hook) + + def password_type_base64_hook(args): """ Hook for password type "base64". The key "value" in the supplied args is From 2fe90c8b4815295f60c8b58b0aaf1788c4b2ab9e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:02:36 -0600 Subject: [PATCH 017/239] Refactor to eliminate unnecessary parameter. --- src/oracledb/impl/thin/messages/base.pyx | 182 ++++++++++++----------- 1 file changed, 92 insertions(+), 90 deletions(-) diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 39449d8d..393484c0 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -45,6 +45,7 @@ cdef class _OracleErrorInfo: cdef class Message: cdef: BaseThinConnImpl conn_impl + BaseThinDbObjectTypeCache type_cache PipelineOpResultImpl pipeline_result_impl _OracleErrorInfo error_info uint8_t message_type @@ -247,6 +248,96 @@ cdef class Message: message_type=message_type, position=buf._pos - 1) + cdef OracleMetadata _process_metadata(self, ReadBuffer buf): + """ + Process metadata from the buffer and return it. 
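+        The metadata includes the data type, precision, scale, buffer size,
+        name, domain, annotations and, for object types, the type
+        information looked up in the type cache.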
+ """ + cdef: + uint32_t num_bytes, uds_flags, num_annotations, i + ThinDbObjectTypeImpl typ_impl + str schema, name, key, value + uint8_t ora_type_num, csfrm + OracleMetadata metadata + uint8_t nulls_allowed + int cache_num + bytes oid + buf.read_ub1(&ora_type_num) + metadata = OracleMetadata.__new__(OracleMetadata) + buf.skip_ub1() # flags + buf.read_sb1(&metadata.precision) + buf.read_sb1(&metadata.scale) + buf.read_ub4(&metadata.buffer_size) + buf.skip_ub4() # max number of array elements + buf.skip_ub8() # cont flags + buf.read_ub4(&num_bytes) # OID + if num_bytes > 0: + oid = buf.read_bytes() + buf.skip_ub2() # version + buf.skip_ub2() # character set id + buf.read_ub1(&csfrm) # character set form + metadata.dbtype = DbType._from_ora_type_and_csfrm(ora_type_num, csfrm) + buf.read_ub4(&metadata.max_size) + if ora_type_num == ORA_TYPE_NUM_RAW: + metadata.max_size = metadata.buffer_size + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: + buf.skip_ub4() # oaccolid + buf.read_ub1(&nulls_allowed) + metadata.nulls_allowed = nulls_allowed + buf.skip_ub1() # v7 length of name + buf.read_ub4(&num_bytes) + if num_bytes > 0: + metadata.name = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + schema = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + name = buf.read_str(CS_FORM_IMPLICIT) + buf.skip_ub2() # column position + buf.read_ub4(&uds_flags) + metadata.is_json = uds_flags & TNS_UDS_FLAGS_IS_JSON + metadata.is_oson = uds_flags & TNS_UDS_FLAGS_IS_OSON + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1: + buf.read_ub4(&num_bytes) + if num_bytes > 0: + metadata.domain_schema = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + metadata.domain_name = buf.read_str(CS_FORM_IMPLICIT) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_3: + buf.read_ub4(&num_annotations) + if num_annotations > 0: + buf.skip_ub1() + metadata.annotations = {} + buf.read_ub4(&num_annotations) + buf.skip_ub1() + for i in range(num_annotations): + buf.skip_ub4() # length of key + key = buf.read_str(CS_FORM_IMPLICIT) + buf.read_ub4(&num_bytes) + if num_bytes > 0: + value = buf.read_str(CS_FORM_IMPLICIT) + else: + value = "" + metadata.annotations[key] = value + buf.skip_ub4() # flags + buf.skip_ub4() # flags + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_4: + buf.read_ub4(&metadata.vector_dimensions) + buf.read_ub1(&metadata.vector_format) + buf.read_ub1(&metadata.vector_flags) + if ora_type_num == ORA_TYPE_NUM_OBJECT: + if self.type_cache is None: + cache_num = self.conn_impl._dbobject_type_cache_num + self.type_cache = get_dbobject_type_cache(cache_num) + typ_impl = self.type_cache.get_type_for_info(oid, schema, None, + name) + if typ_impl.is_xml_type: + metadata.dbtype = DB_TYPE_XMLTYPE + else: + metadata.objtype = typ_impl + return metadata + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: raise NotImplementedError() @@ -616,7 +707,6 @@ cdef class Message: cdef class MessageWithData(Message): cdef: - BaseThinDbObjectTypeCache type_cache BaseThinCursorImpl cursor_impl array.array bit_vector_buf const char_type *bit_vector @@ -868,94 +958,6 @@ cdef class MessageWithData(Message): buf.skip_ub4() # return code return column_value - cdef OracleMetadata _process_column_info(self, ReadBuffer buf, - BaseThinCursorImpl cursor_impl): - cdef: - uint32_t num_bytes, uds_flags, num_annotations, i - ThinDbObjectTypeImpl typ_impl - str schema, name, key, value - 
uint8_t ora_type_num, csfrm - OracleMetadata metadata - uint8_t nulls_allowed - int cache_num - bytes oid - buf.read_ub1(&ora_type_num) - metadata = OracleMetadata.__new__(OracleMetadata) - buf.skip_ub1() # flags - buf.read_sb1(&metadata.precision) - buf.read_sb1(&metadata.scale) - buf.read_ub4(&metadata.buffer_size) - buf.skip_ub4() # max number of array elements - buf.skip_ub8() # cont flags - buf.read_ub4(&num_bytes) # OID - if num_bytes > 0: - oid = buf.read_bytes() - buf.skip_ub2() # version - buf.skip_ub2() # character set id - buf.read_ub1(&csfrm) # character set form - metadata.dbtype = DbType._from_ora_type_and_csfrm(ora_type_num, csfrm) - buf.read_ub4(&metadata.max_size) - if ora_type_num == ORA_TYPE_NUM_RAW: - metadata.max_size = metadata.buffer_size - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_12_2: - buf.skip_ub4() # oaccolid - buf.read_ub1(&nulls_allowed) - metadata.nulls_allowed = nulls_allowed - buf.skip_ub1() # v7 length of name - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.name = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - schema = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - name = buf.read_str(CS_FORM_IMPLICIT) - buf.skip_ub2() # column position - buf.read_ub4(&uds_flags) - metadata.is_json = uds_flags & TNS_UDS_FLAGS_IS_JSON - metadata.is_oson = uds_flags & TNS_UDS_FLAGS_IS_OSON - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1: - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.domain_schema = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.domain_name = buf.read_str(CS_FORM_IMPLICIT) - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_3: - buf.read_ub4(&num_annotations) - if num_annotations > 0: - buf.skip_ub1() - metadata.annotations = {} - buf.read_ub4(&num_annotations) - buf.skip_ub1() - for i in range(num_annotations): - buf.skip_ub4() # length of key - key = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - value = buf.read_str(CS_FORM_IMPLICIT) - else: - value = "" - metadata.annotations[key] = value - buf.skip_ub4() # flags - buf.skip_ub4() # flags - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_4: - buf.read_ub4(&metadata.vector_dimensions) - buf.read_ub1(&metadata.vector_format) - buf.read_ub1(&metadata.vector_flags) - if ora_type_num == ORA_TYPE_NUM_OBJECT: - if self.type_cache is None: - cache_num = self.conn_impl._dbobject_type_cache_num - self.type_cache = get_dbobject_type_cache(cache_num) - typ_impl = self.type_cache.get_type_for_info(oid, schema, None, - name) - if typ_impl.is_xml_type: - metadata.dbtype = DB_TYPE_XMLTYPE - else: - metadata.objtype = typ_impl - return metadata - cdef int _process_describe_info(self, ReadBuffer buf, object cursor, BaseThinCursorImpl cursor_impl) except -1: @@ -976,7 +978,7 @@ cdef class MessageWithData(Message): type_handler = cursor_impl._get_output_type_handler(&uses_metadata) conn = self.cursor.connection for i in range(cursor_impl._num_columns): - metadata = self._process_column_info(buf, cursor_impl) + metadata = self._process_metadata(buf) if prev_fetch_var_impls is not None \ and i < len(prev_fetch_var_impls): self._adjust_metadata(prev_fetch_var_impls[i], metadata) From ac31eaf1c91c4b52329f8e87100936f55b297863 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:49:28 -0600 Subject: [PATCH 018/239] Improved error message when getting attribute Connection.max_open_cursors with 
Oracle Client libraries 11.2. --- doc/src/api_manual/connection.rst | 4 +++- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/thick/odpi | 2 +- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index b962d875..803b1f4d 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -850,7 +850,9 @@ Connection Attributes This read-only attribute specifies the maximum number of cursors that the database can have open concurrently. It is the same value returned by the - SQL ``SELECT VALUE FROM V$PARAMETER WHERE NAME = 'open_cursors'``. + SQL ``SELECT VALUE FROM V$PARAMETER WHERE NAME = 'open_cursors'``. When + using python-oracledb Thick mode, Oracle Client libraries 12.1 (or later) + are required. .. versionadded:: 2.0.0 diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e53066af..2273e0cc 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -29,6 +29,9 @@ Thin Mode Changes Thick Mode Changes ++++++++++++++++++ +#) Improved error message when getting :attr:`Connection.max_open_cursors` + when using Oracle Client libraries 11.2. + Common Changes ++++++++++++++ diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index 8c522b96..3a578197 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit 8c522b965cba883d08907aa0dca3aa02137d2da2 +Subproject commit 3a578197cae567028bfe9d39e7e05bfc5869c650 From fc86cbfc2580ad794fef86c74e08d5edf803491f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:51:53 -0600 Subject: [PATCH 019/239] Fixed wildcard matching of domains in Subject Alternative Names (#462). --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/crypto.pyx | 51 +++++++++++++++++++++++++++---- 2 files changed, 47 insertions(+), 6 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2273e0cc..c5baeb43 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -25,6 +25,8 @@ Thin Mode Changes (`issue 455 `__). #) Fixed decoding of nested PL/SQL records (`issue 456 `__). +#) Fixed wildcard matching of domains in Subject Alternative Names + (`issue 462 `__). Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/crypto.pyx b/src/oracledb/impl/thin/crypto.pyx index b9d7c9c6..8275dc02 100644 --- a/src/oracledb/impl/thin/crypto.pyx +++ b/src/oracledb/impl/thin/crypto.pyx @@ -42,6 +42,45 @@ except Exception as e: DN_REGEX = '(?:^|,\s?)(?:(?P[A-Z]+)=(?P"(?:[^"]|"")+"|[^,]+))+' PEM_WALLET_FILE_NAME = "ewallet.pem" +def _name_matches(name_to_check, cert_name): + """ + Returns a boolean indicating if the name to check matches with the + certificate name. The certificate name may contain a wildcard (*) + character. 
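+    For example, a certificate name of "*.example.com" matches
+    "db1.example.com" but not "example.com" or "a.b.example.com".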
+ """ + + # check for a full match (case insensitive) + cert_name = cert_name.lower() + name_to_check = name_to_check.lower() + if name_to_check == cert_name: + return True + + # ensure that more than one label exists in both the name to check and the + # certificate name + check_pos = name_to_check.find(".") + cert_pos = cert_name.find(".") + if check_pos <= 0 or cert_pos <= 0: + return False + + # ensure that the right hand labels all match + if name_to_check[check_pos:] != cert_name[cert_pos:]: + return False + + # match wildcards, if applicable + cert_label = cert_name[:cert_pos] + check_label = name_to_check[:check_pos] + if cert_label == "*": + return True + elif cert_label.startswith("*"): + return check_label.endswith(cert_label[1:]) + elif cert_label.endswith("*"): + return check_label.startswith(cert_label[:-1]) + wildcard_pos = cert_name.find("*") + if wildcard_pos < 0: + return False + return check_label.startswith(cert_label[:wildcard_pos]) \ + and check_label.endswith(cert_label[wildcard_pos + 1:]) + def check_server_dn(sock, expected_dn, expected_name): """ @@ -58,20 +97,20 @@ def check_server_dn(sock, expected_dn, expected_name): errors._raise_err(errors.ERR_INVALID_SERVER_CERT_DN, expected_dn=expected_dn) else: - for name in cert.subject.get_attributes_for_oid( - x509.oid.NameOID.COMMON_NAME - ): - if name.value == expected_name: - return try: ext = cert.extensions.get_extension_for_oid( x509.oid.ExtensionOID.SUBJECT_ALTERNATIVE_NAME ) for name in ext.value.get_values_for_type(x509.DNSName): - if name == expected_name: + if _name_matches(expected_name, name): return except x509.ExtensionNotFound: pass + for name in cert.subject.get_attributes_for_oid( + x509.oid.NameOID.COMMON_NAME + ): + if _name_matches(expected_name, name.value): + return errors._raise_err(errors.ERR_INVALID_SERVER_NAME, expected_name=expected_name) From 5999b610cbfd2abed689e0ed9b30a8b149bae33b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:53:43 -0600 Subject: [PATCH 020/239] Fixed bug resulting in a segfault when unable to load the Oracle Client libraries. --- doc/src/release_notes.rst | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index c5baeb43..9dd0db1b 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -31,8 +31,12 @@ Thin Mode Changes Thick Mode Changes ++++++++++++++++++ +#) Fixed bug resulting in a segfault when unable to load the Oracle Client + libraries + (`ODPI-C `__ dependency update). #) Improved error message when getting :attr:`Connection.max_open_cursors` - when using Oracle Client libraries 11.2. + when using Oracle Client libraries 11.2 + (`ODPI-C `__ dependency update). Common Changes ++++++++++++++ From 552bbf2ace1c8b7eb693cb7edcf041838f6c6bfa Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:54:12 -0600 Subject: [PATCH 021/239] Remove unnecessary cursors. --- samples/bulk_aq.py | 61 ++++++++++++++++++++++------------------------ 1 file changed, 29 insertions(+), 32 deletions(-) diff --git a/samples/bulk_aq.py b/samples/bulk_aq.py index 7c0a1e40..e1b3cc27 100644 --- a/samples/bulk_aq.py +++ b/samples/bulk_aq.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2019, 2023, Oracle and/or its affiliates. +# Copyright (c) 2019, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. 
# @@ -64,39 +64,36 @@ ) # create a queue -with connection.cursor() as cursor: - queue = connection.queue(QUEUE_NAME) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG +queue = connection.queue(QUEUE_NAME) +queue.deqoptions.wait = oracledb.DEQ_NO_WAIT +queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - # dequeue all existing messages to ensure the queue is empty, just so that - # the results are consistent - while queue.deqone(): - pass +# dequeue all existing messages to ensure the queue is empty, just so that +# the results are consistent +while queue.deqone(): + pass # enqueue a few messages -with connection.cursor() as cursor: - print("Enqueuing messages...") - batch_size = 6 - data_to_enqueue = PAYLOAD_DATA - while data_to_enqueue: - batch_data = data_to_enqueue[:batch_size] - data_to_enqueue = data_to_enqueue[batch_size:] - messages = [connection.msgproperties(payload=d) for d in batch_data] - for data in batch_data: - print(data) - queue.enqmany(messages) - connection.commit() +print("Enqueuing messages...") +batch_size = 6 +data_to_enqueue = PAYLOAD_DATA +while data_to_enqueue: + batch_data = data_to_enqueue[:batch_size] + data_to_enqueue = data_to_enqueue[batch_size:] + messages = [connection.msgproperties(payload=d) for d in batch_data] + for data in batch_data: + print(data) + queue.enqmany(messages) +connection.commit() # dequeue the messages -with connection.cursor() as cursor: - print("\nDequeuing messages...") - batch_size = 8 - while True: - messages = queue.deqmany(batch_size) - if not messages: - break - for props in messages: - print(props.payload.decode()) - connection.commit() - print("\nDone.") +print("\nDequeuing messages...") +batch_size = 8 +while True: + messages = queue.deqmany(batch_size) + if not messages: + break + for props in messages: + print(props.payload.decode()) +connection.commit() +print("\nDone.") From caf41ae9840219e9f8082e901c6c3ff2327785bb Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:54:50 -0600 Subject: [PATCH 022/239] Refactor: eliminate duplicate code. 
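The new Buffer.read_bytes_with_length() and read_str_with_length() helpers
collapse the pattern, repeated in several messages, of reading a length and
then conditionally reading the value. As a rough sketch of the change, code
of the form

    buf.read_ub4(&num_bytes)
    if num_bytes > 0:
        value = buf.read_str(CS_FORM_IMPLICIT)

can now be written as

    value = buf.read_str_with_length()

with the caller treating a returned value of None as an empty or absent
value.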
--- src/oracledb/base_impl.pxd | 2 + src/oracledb/impl/base/buffer.pyx | 20 ++++++++ src/oracledb/impl/thin/messages/auth.pyx | 10 ++-- src/oracledb/impl/thin/messages/base.pyx | 39 +++++----------- src/oracledb/impl/thin/messages/deq.pyx | 58 +++++++----------------- 5 files changed, 53 insertions(+), 76 deletions(-) diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 2811da5f..44daa751 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -283,6 +283,7 @@ cdef class Buffer: ssize_t num_bytes) except -1 cdef inline ssize_t bytes_left(self) cdef object read_bytes(self) + cdef object read_bytes_with_length(self) cdef int read_int32be(self, int32_t *value) except -1 cdef const char_type* read_raw_bytes(self, ssize_t num_bytes) except NULL cdef int read_raw_bytes_and_length(self, const char_type **ptr, @@ -295,6 +296,7 @@ cdef class Buffer: cdef int read_oracle_data(self, OracleMetadata metadata, OracleData* data, bint from_dbobject) except -1 cdef object read_str(self, int csfrm, const char* encoding_errors=*) + cdef object read_str_with_length(self) cdef int read_ub1(self, uint8_t *value) except -1 cdef int read_ub2(self, uint16_t *value) except -1 cdef int read_ub4(self, uint32_t *value) except -1 diff --git a/src/oracledb/impl/base/buffer.pyx b/src/oracledb/impl/base/buffer.pyx index 2208c794..12f08308 100644 --- a/src/oracledb/impl/base/buffer.pyx +++ b/src/oracledb/impl/base/buffer.pyx @@ -223,6 +223,16 @@ cdef class Buffer: if ptr != NULL: return ptr[:num_bytes] + cdef object read_bytes_with_length(self): + """ + Reads a length from the buffer and then, if the length is non-zero, + reads bytes from the buffer and returns it. + """ + cdef uint32_t num_bytes + self.read_ub4(&num_bytes) + if num_bytes > 0: + return self.read_bytes() + cdef int read_int32be(self, int32_t *value) except -1: """ Read a signed 32-bit integer in big endian order from the buffer. @@ -335,6 +345,16 @@ cdef class Buffer: return ptr[:num_bytes].decode(ENCODING_UTF8, encoding_errors) return ptr[:num_bytes].decode(ENCODING_UTF16, encoding_errors) + cdef object read_str_with_length(self): + """ + Reads a length from the buffer and then, if the length is non-zero, + reads string from the buffer and returns it. + """ + cdef uint32_t num_bytes + self.read_ub4(&num_bytes) + if num_bytes > 0: + return self.read_str(CS_FORM_IMPLICIT) + cdef int read_ub1(self, uint8_t *value) except -1: """ Reads an unsigned 8-bit integer from the buffer. 
diff --git a/src/oracledb/impl/thin/messages/auth.pyx b/src/oracledb/impl/thin/messages/auth.pyx index 4597fd7f..0c957c61 100644 --- a/src/oracledb/impl/thin/messages/auth.pyx +++ b/src/oracledb/impl/thin/messages/auth.pyx @@ -207,16 +207,12 @@ cdef class AuthMessage(Message): cdef int _process_return_parameters(self, ReadBuffer buf) except -1: cdef: uint16_t num_params, i - uint32_t num_bytes str key, value buf.read_ub2(&num_params) for i in range(num_params): - buf.skip_ub4() - key = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - value = buf.read_str(CS_FORM_IMPLICIT) - else: + key = buf.read_str_with_length() + value = buf.read_str_with_length() + if value is None: value = "" if key == "AUTH_VFR_DATA": buf.read_ub4(&self.verifier_type) diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 393484c0..6aea7828 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -253,7 +253,7 @@ cdef class Message: Process metadata from the buffer and return it. """ cdef: - uint32_t num_bytes, uds_flags, num_annotations, i + uint32_t uds_flags, num_annotations, i ThinDbObjectTypeImpl typ_impl str schema, name, key, value uint8_t ora_type_num, csfrm @@ -269,9 +269,7 @@ cdef class Message: buf.read_ub4(&metadata.buffer_size) buf.skip_ub4() # max number of array elements buf.skip_ub8() # cont flags - buf.read_ub4(&num_bytes) # OID - if num_bytes > 0: - oid = buf.read_bytes() + oid = buf.read_bytes_with_length() buf.skip_ub2() # version buf.skip_ub2() # character set id buf.read_ub1(&csfrm) # character set form @@ -284,26 +282,16 @@ cdef class Message: buf.read_ub1(&nulls_allowed) metadata.nulls_allowed = nulls_allowed buf.skip_ub1() # v7 length of name - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.name = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - schema = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - name = buf.read_str(CS_FORM_IMPLICIT) + metadata.name = buf.read_str_with_length() + schema = buf.read_str_with_length() + name = buf.read_str_with_length() buf.skip_ub2() # column position buf.read_ub4(&uds_flags) metadata.is_json = uds_flags & TNS_UDS_FLAGS_IS_JSON metadata.is_oson = uds_flags & TNS_UDS_FLAGS_IS_OSON if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1: - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.domain_schema = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - metadata.domain_name = buf.read_str(CS_FORM_IMPLICIT) + metadata.domain_schema = buf.read_str_with_length() + metadata.domain_name = buf.read_str_with_length() if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1_EXT_3: buf.read_ub4(&num_annotations) if num_annotations > 0: @@ -312,12 +300,9 @@ cdef class Message: buf.read_ub4(&num_annotations) buf.skip_ub1() for i in range(num_annotations): - buf.skip_ub4() # length of key - key = buf.read_str(CS_FORM_IMPLICIT) - buf.read_ub4(&num_bytes) - if num_bytes > 0: - value = buf.read_str(CS_FORM_IMPLICIT) - else: + key = buf.read_str_with_length() + value = buf.read_str_with_length() + if value is None: value = "" metadata.annotations[key] = value buf.skip_ub4() # flags @@ -348,9 +333,7 @@ cdef class Message: uint8_t opcode buf.read_ub1(&opcode) if opcode == TNS_SERVER_PIGGYBACK_LTXID: - buf.read_ub4(&num_bytes) - if num_bytes > 0: - self.conn_impl._ltxid = buf.read_bytes() + self.conn_impl._ltxid = 
buf.read_bytes_with_length() elif opcode == TNS_SERVER_PIGGYBACK_QUERY_CACHE_INVALIDATION \ or opcode == TNS_SERVER_PIGGYBACK_TRACE_EVENT: pass diff --git a/src/oracledb/impl/thin/messages/deq.pyx b/src/oracledb/impl/thin/messages/deq.pyx index 94d07307..334e31bd 100644 --- a/src/oracledb/impl/thin/messages/deq.pyx +++ b/src/oracledb/impl/thin/messages/deq.pyx @@ -62,10 +62,10 @@ cdef class DeqMessage(Message): """ cdef: uint32_t num_bytes, num_extensions, i + bytes text_value, binary_value, value ssize_t temp_num_bytes const char_type *ptr - uint16_t temp16, keyword - bytes temp + uint16_t keyword OracleData data uint32_t imageLength ThinDbObjectImpl obj_impl @@ -75,57 +75,33 @@ cdef class DeqMessage(Message): buf.read_sb4(&self.props_impl.priority) # priority buf.read_sb4(&self.props_impl.delay) # delay buf.read_sb4(&self.props_impl.expiration) # expiration - # correlation id - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - self.props_impl.correlation = ptr[:temp_num_bytes].decode() + self.props_impl.correlation = buf.read_str_with_length() buf.read_sb4(&self.props_impl.num_attempts) - # exception queue name - buf.read_ub4(&num_bytes) - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - self.props_impl.exceptionq = ptr[:temp_num_bytes].decode() + self.props_impl.exceptionq = buf.read_str_with_length() buf.read_sb4(&self.props_impl.state) buf.read_ub4(&num_bytes) # enqueue time if num_bytes > 0: buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) decode_date(ptr, temp_num_bytes, &data.buffer) self.props_impl.enq_time = convert_date_to_python(&data.buffer) - buf.read_ub4(&num_bytes) # transaction id - if num_bytes > 0: - ptr = buf._get_raw(num_bytes) - self.props_impl.enq_txn_id = ptr[:num_bytes] - else: - self.props_impl.enq_txn_id = None + self.props_impl.enq_txn_id = buf.read_bytes_with_length() buf.read_ub4(&num_extensions) # number of extensions if num_extensions > 0: buf.skip_ub1() for i in range(num_extensions): - temp = None - temp16 = 0 - buf.read_ub4(&num_bytes) # text value length - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - temp = ptr[:temp_num_bytes] - temp16 = temp_num_bytes - buf.read_ub4(&num_bytes) # binary value length - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - temp = ptr[:temp_num_bytes] + text_value = buf.read_bytes_with_length() + binary_value = buf.read_bytes_with_length() + value = text_value or binary_value buf.read_ub2(&keyword) # extension keyword - if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_NAME and - temp is not None and temp16 > 0): - self.props_impl.sender_agent_name = temp - if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS and - temp is not None and temp16 > 0): - self.props_impl.sender_agent_address = temp - if (keyword == TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL and - temp is not None): - self.props_impl.sender_agent_protocol = temp - if (keyword == TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID and - temp is not None): - self.props_impl.original_msg_id = temp + if value is not None: + if keyword == TNS_AQ_EXT_KEYWORD_AGENT_NAME: + self.props_impl.sender_agent_name = value + elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS: + self.props_impl.sender_agent_address = value + elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL: + self.props_impl.sender_agent_protocol = value + elif keyword == TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID: + self.props_impl.original_msg_id = value buf.read_ub4(&num_bytes) # user properties if num_bytes > 0: 
errors._raise_err(errors.ERR_NOT_IMPLEMENTED) From 49d9f3bea225f2bee80c1ace5b1b1ed392c0303b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:57:27 -0600 Subject: [PATCH 023/239] Refactor: simplify code and rename files for clarity. --- src/oracledb/aq.py | 3 + src/oracledb/impl/thin/constants.pxi | 1 + src/oracledb/impl/thin/messages/aq_base.pyx | 223 ++++++++++++++++++ .../thin/messages/{deq.pyx => aq_deq.pyx} | 103 +------- .../thin/messages/{enq.pyx => aq_enq.pyx} | 66 +----- src/oracledb/impl/thin/queue.pyx | 30 ++- src/oracledb/thin_impl.pyx | 5 +- 7 files changed, 261 insertions(+), 170 deletions(-) create mode 100644 src/oracledb/impl/thin/messages/aq_base.pyx rename src/oracledb/impl/thin/messages/{deq.pyx => aq_deq.pyx} (54%) rename src/oracledb/impl/thin/messages/{enq.pyx => aq_enq.pyx} (65%) diff --git a/src/oracledb/aq.py b/src/oracledb/aq.py index 7bc94257..a9668355 100644 --- a/src/oracledb/aq.py +++ b/src/oracledb/aq.py @@ -68,6 +68,9 @@ def _verify_message(self, message: "MessageProperties") -> None: else: if not isinstance(message.payload, (str, bytes)): errors._raise_err(errors.ERR_PAYLOAD_CANNOT_BE_ENQUEUED) + if self.connection.thin: + if message.recipients: + errors._raise_not_supported("specifying AQ message recipients") @property def connection(self) -> "connection_module.Connection": diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index 03d99683..c7f1e00d 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -551,6 +551,7 @@ cdef enum: TNS_JSON_MAX_LENGTH = 32 * 1024 * 1024 TNS_VECTOR_MAX_LENGTH = 1 * 1024 * 1024 TNS_AQ_MESSAGE_ID_LENGTH = 16 + TNS_AQ_MESSAGE_VERSION = 1 # base 64 encoding alphabet cdef bytes TNS_BASE64_ALPHABET = \ diff --git a/src/oracledb/impl/thin/messages/aq_base.pyx b/src/oracledb/impl/thin/messages/aq_base.pyx new file mode 100644 index 00000000..af22f8ba --- /dev/null +++ b/src/oracledb/impl/thin/messages/aq_base.pyx @@ -0,0 +1,223 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# aq_base.pyx +# +# Cython file defining the base class for messages that are sent to the +# database and the responses that are received by the client for enqueing and +# dequeuing AQ messages (embedded in thin_impl.pyx). 
+#------------------------------------------------------------------------------ + +cdef class AqBaseMessage(Message): + cdef: + BaseThinQueueImpl queue_impl + ThinDeqOptionsImpl deq_options_impl + ThinEnqOptionsImpl enq_options_impl + bint no_msg_found + + cdef object _process_date(self, ReadBuffer buf): + """ + Processes a date found in the buffer. + """ + cdef: + const char_type *ptr + uint32_t num_bytes + OracleData data + ssize_t length + buf.read_ub4(&num_bytes) + if num_bytes > 0: + buf.read_raw_bytes_and_length(&ptr, &length) + decode_date(ptr, length, &data.buffer) + return convert_date_to_python(&data.buffer) + + cdef int _process_error_info(self, ReadBuffer buf) except -1: + """ + Process error information from the buffer. If the error that indicates + that no messages were received is detected, the error is cleared and + the flag set so that the dequeue can handle that case. + """ + Message._process_error_info(self, buf) + if self.error_info.num == TNS_ERR_NO_MESSAGES_FOUND: + self.error_info.num = 0 + self.error_occurred = False + self.no_msg_found = True + + cdef int _process_extensions(self, ReadBuffer buf, + ThinMsgPropsImpl props_impl) except -1: + """ + Processes extensions to the message property object returned by the + database. + """ + cdef: + bytes text_value, binary_value, value + uint32_t i, num_extensions + uint16_t keyword + buf.read_ub4(&num_extensions) + if num_extensions > 0: + buf.skip_ub1() + for i in range(num_extensions): + text_value = buf.read_bytes_with_length() + binary_value = buf.read_bytes_with_length() + value = text_value or binary_value + buf.read_ub2(&keyword) + if value is not None: + if keyword == TNS_AQ_EXT_KEYWORD_AGENT_NAME: + props_impl.sender_agent_name = value + elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS: + props_impl.sender_agent_address = value + elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL: + props_impl.sender_agent_protocol = value + elif keyword == TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID: + props_impl.original_msg_id = value + + cdef bytes _process_msg_id(self, ReadBuffer buf): + """ + Reads a message id from the buffer and returns it. + """ + cdef const char_type *ptr + ptr = buf.read_raw_bytes(TNS_AQ_MESSAGE_ID_LENGTH) + return ptr[:TNS_AQ_MESSAGE_ID_LENGTH] + + cdef int _process_msg_props(self, ReadBuffer buf, + ThinMsgPropsImpl props_impl) except -1: + """ + Processes a message property object returned by the database. + """ + cdef uint32_t temp32 + buf.read_sb4(&props_impl.priority) + buf.read_sb4(&props_impl.delay) + buf.read_sb4(&props_impl.expiration) + props_impl.correlation = buf.read_str_with_length() + buf.read_sb4(&props_impl.num_attempts) + props_impl.exceptionq = buf.read_str_with_length() + buf.read_sb4(&props_impl.state) + props_impl.enq_time = self._process_date(buf) + props_impl.enq_txn_id = buf.read_bytes_with_length() + self._process_extensions(buf, props_impl) + buf.read_ub4(&temp32) # user properties + if temp32 > 0: + errors._raise_err(errors.ERR_NOT_IMPLEMENTED) + buf.skip_ub4() # csn + buf.skip_ub4() # dsn + buf.skip_ub4() # flags + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: + buf.skip_ub4() # shard number + + cdef object _process_payload(self, ReadBuffer buf): + """ + Processes the payload for an enqueued message returned by the database. 
+ """ + cdef: + ThinDbObjectImpl obj_impl + uint32_t image_length + bytes payload + if self.queue_impl.payload_type is not None: + obj_impl = buf.read_dbobject(self.queue_impl.payload_type) + if obj_impl is None: + obj_impl = self.queue_impl.payload_type.create_new_object() + return PY_TYPE_DB_OBJECT._from_impl(obj_impl) + else: + buf.read_bytes_with_length() # TOID + buf.read_bytes_with_length() # OID + buf.read_bytes_with_length() # snapshot + buf.skip_ub2() # version no + buf.read_ub4(&image_length) # image length + buf.skip_ub2() # flags + if image_length > 0: + payload = buf.read_bytes()[4:image_length] + if self.queue_impl.is_json: + return self.conn_impl.decode_oson(payload) + return payload + elif not self.queue_impl.is_json: + return b'' + + cdef object _process_recipients(self, ReadBuffer buf): + """ + Process recipients for a message. Currently this is unsupported. + """ + cdef uint32_t temp32 + buf.read_ub4(&temp32) + if temp32 > 0: + errors._raise_err(errors.ERR_NOT_IMPLEMENTED) + return [] + + cdef int _write_msg_props(self, WriteBuffer buf, + ThinMsgPropsImpl props_impl) except -1: + """ + Write a message property object to the buffer. + """ + buf.write_ub4(props_impl.priority) + buf.write_ub4(props_impl.delay) + buf.write_sb4(props_impl.expiration) + self._write_value_with_length(buf, props_impl.correlation) + buf.write_ub4(0) # number of attempts + self._write_value_with_length(buf, props_impl.exceptionq) + buf.write_ub4(props_impl.state) + buf.write_ub4(0) # enqueue time length + self._write_value_with_length(buf, props_impl.enq_txn_id) + buf.write_ub4(4) # number of extensions + buf.write_uint8(0x0e) # unknown extra byte + buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_NAME) + buf.write_extension_values(None, None, + TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS) + buf.write_extension_values(None, b'\x00', + TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL) + buf.write_extension_values(None, None, + TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID) + buf.write_ub4(0) # user property + buf.write_ub4(0) # cscn + buf.write_ub4(0) # dscn + buf.write_ub4(0) # flags + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: + buf.write_ub4(0xffffffffl) # shard id + + cdef int _write_payload(self, WriteBuffer buf, + ThinMsgPropsImpl props_impl) except -1: + """ + Writes the payload of the message property object to the buffer. + """ + if self.queue_impl.is_json: + buf.write_oson(props_impl.payload_obj, + self.conn_impl._oson_max_fname_size, False) + elif self.queue_impl.payload_type is not None: + buf.write_dbobject(props_impl.payload_obj) + else: + buf.write_bytes(props_impl.payload_obj) + + cdef int _write_value_with_length(self, WriteBuffer buf, + object value) except -1: + """ + Write a string to the buffer, prefixed by a length. 
+ """ + cdef bytes value_bytes + if value is None: + buf.write_ub4(0) + else: + if isinstance(value, str): + value_bytes = value.encode() + else: + value_bytes = value + buf.write_ub4(len(value_bytes)) + buf.write_bytes_with_length(value_bytes) diff --git a/src/oracledb/impl/thin/messages/deq.pyx b/src/oracledb/impl/thin/messages/aq_deq.pyx similarity index 54% rename from src/oracledb/impl/thin/messages/deq.pyx rename to src/oracledb/impl/thin/messages/aq_deq.pyx index 334e31bd..83428ffc 100644 --- a/src/oracledb/impl/thin/messages/deq.pyx +++ b/src/oracledb/impl/thin/messages/aq_deq.pyx @@ -23,7 +23,7 @@ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ -# deq.pyx +# aq_deq.pyx # # Cython file defining the messages that are sent to the database and the # responses that are received by the client for dequeuing an AQ message @@ -31,12 +31,9 @@ #------------------------------------------------------------------------------ @cython.final -cdef class DeqMessage(Message): +cdef class AqDeqMessage(AqBaseMessage): cdef: - BaseThinQueueImpl queue_impl - ThinDeqOptionsImpl deq_options_impl ThinMsgPropsImpl props_impl - bint no_msg_found cdef int _initialize_hook(self) except -1: """ @@ -44,104 +41,22 @@ cdef class DeqMessage(Message): """ self.function_code = TNS_FUNC_AQ_DEQ - cdef int _process_error_info(self, ReadBuffer buf) except -1: - """ - Process error information from the buffer. If the error that indicates - that no messages were received is detected, the error is cleared and - the flag set so that the dequeue can handle that case. - """ - Message._process_error_info(self, buf) - if self.error_info.num == TNS_ERR_NO_MESSAGES_FOUND: - self.error_info.num = 0 - self.error_occurred = False - self.no_msg_found = True - cdef int _process_return_parameters(self, ReadBuffer buf) except -1: """ Process the return parameters of the AQ Dequeue request. 
""" cdef: - uint32_t num_bytes, num_extensions, i - bytes text_value, binary_value, value - ssize_t temp_num_bytes - const char_type *ptr + uint32_t num_bytes uint16_t keyword - OracleData data uint32_t imageLength ThinDbObjectImpl obj_impl ThinDbObjectTypeImpl type_impl buf.read_ub4(&num_bytes) if num_bytes > 0: - buf.read_sb4(&self.props_impl.priority) # priority - buf.read_sb4(&self.props_impl.delay) # delay - buf.read_sb4(&self.props_impl.expiration) # expiration - self.props_impl.correlation = buf.read_str_with_length() - buf.read_sb4(&self.props_impl.num_attempts) - self.props_impl.exceptionq = buf.read_str_with_length() - buf.read_sb4(&self.props_impl.state) - buf.read_ub4(&num_bytes) # enqueue time - if num_bytes > 0: - buf.read_raw_bytes_and_length(&ptr, &temp_num_bytes) - decode_date(ptr, temp_num_bytes, &data.buffer) - self.props_impl.enq_time = convert_date_to_python(&data.buffer) - self.props_impl.enq_txn_id = buf.read_bytes_with_length() - buf.read_ub4(&num_extensions) # number of extensions - if num_extensions > 0: - buf.skip_ub1() - for i in range(num_extensions): - text_value = buf.read_bytes_with_length() - binary_value = buf.read_bytes_with_length() - value = text_value or binary_value - buf.read_ub2(&keyword) # extension keyword - if value is not None: - if keyword == TNS_AQ_EXT_KEYWORD_AGENT_NAME: - self.props_impl.sender_agent_name = value - elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS: - self.props_impl.sender_agent_address = value - elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL: - self.props_impl.sender_agent_protocol = value - elif keyword == TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID: - self.props_impl.original_msg_id = value - buf.read_ub4(&num_bytes) # user properties - if num_bytes > 0: - errors._raise_err(errors.ERR_NOT_IMPLEMENTED) - buf.skip_ub4() # csn - buf.skip_ub4() # dsn - buf.skip_ub4() # flags - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: - buf.skip_ub4() # shard number - buf.read_ub4(&num_bytes) # num recipients - if num_bytes > 0: - errors._raise_err(errors.ERR_NOT_IMPLEMENTED) - if self.queue_impl.payload_type is not None: - type_impl = self.queue_impl.payload_type - obj_impl = buf.read_dbobject(type_impl) - if obj_impl is None: - obj_impl = type_impl.create_new_object() - self.props_impl.payload = PY_TYPE_DB_OBJECT._from_impl(obj_impl) - else: - buf.read_ub4(&num_bytes) # TOID len - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.read_ub4(&num_bytes) # OID len - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.read_ub4(&num_bytes) # snapshot - if num_bytes > 0: - buf.skip_raw_bytes(num_bytes) - buf.skip_ub2() # version no - buf.read_ub4(&imageLength) # image len - buf.skip_ub2() # flags - if imageLength > 0: - self.props_impl.payload = buf.read_bytes()[4:imageLength] - if self.queue_impl.is_json: - self.props_impl.payload = \ - self.conn_impl.decode_oson(self.props_impl.payload) - else: - if not self.queue_impl.is_json: - self.props_impl.payload = b'' - ptr = buf._get_raw(TNS_AQ_MESSAGE_ID_LENGTH) - self.props_impl.msgid = ptr[:TNS_AQ_MESSAGE_ID_LENGTH] + self._process_msg_props(buf, self.props_impl) + self.props_impl.recipients = self._process_recipients(buf) + self.props_impl.payload = self._process_payload(buf) + self.props_impl.msgid = self._process_msg_id(buf) cdef int _write_message(self, WriteBuffer buf) except -1: """ @@ -190,10 +105,10 @@ cdef class DeqMessage(Message): buf.write_ub4(0) # correlation id len buf.write_uint8(1) # toid of payload buf.write_ub4(16) # toid length - 
buf.write_ub2(self.props_impl.version) # version of type + buf.write_ub2(TNS_AQ_MESSAGE_VERSION) buf.write_uint8(1) # payload buf.write_uint8(1) # return msg id - buf.write_ub4(16) # mesg id length + buf.write_ub4(TNS_AQ_MESSAGE_ID_LENGTH) deq_flags = 0 delivery_mode = self.deq_options_impl.delivery_mode if (delivery_mode == TNS_AQ_MSG_BUFFERED): diff --git a/src/oracledb/impl/thin/messages/enq.pyx b/src/oracledb/impl/thin/messages/aq_enq.pyx similarity index 65% rename from src/oracledb/impl/thin/messages/enq.pyx rename to src/oracledb/impl/thin/messages/aq_enq.pyx index c84ae8c1..ed226766 100644 --- a/src/oracledb/impl/thin/messages/enq.pyx +++ b/src/oracledb/impl/thin/messages/aq_enq.pyx @@ -23,7 +23,7 @@ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ -# enq.pyx +# aq_enq.pyx # # Cython file defining the messages that are sent to the database and the # responses that are received by the client for enqueuing an AQ message @@ -31,10 +31,8 @@ #------------------------------------------------------------------------------ @cython.final -cdef class EnqMessage(Message): +cdef class AqEnqMessage(AqBaseMessage): cdef: - BaseThinQueueImpl queue_impl - ThinEnqOptionsImpl enq_options_impl ThinMsgPropsImpl props_impl cdef int _initialize_hook(self) except -1: @@ -65,57 +63,16 @@ cdef class EnqMessage(Message): queue_name_bytes = self.queue_impl.name.encode() buf.write_uint8(1) # queue name (pointer) buf.write_ub4(len(queue_name_bytes)) # queue name length - buf.write_ub4(self.props_impl.priority) - buf.write_ub4(self.props_impl.delay) - buf.write_sb4(self.props_impl.expiration) - if self.props_impl.correlation is None: - buf.write_ub4(0) # correlation - else: - correlation_bytes = self.props_impl.correlation.encode() - buf.write_ub4(len(correlation_bytes)) - buf.write_bytes_with_length(correlation_bytes) - buf.write_ub4(0) # number of attempts - if self.props_impl.exceptionq is None: - buf.write_ub4(0) # exception queue - else: - exceptionq_bytes = self.props_impl.exceptionq.encode() - buf.write_ub4(len(exceptionq_bytes)) - buf.write_bytes_with_length(exceptionq_bytes) - buf.write_ub4(self.props_impl.state) # message state - buf.write_ub4(0) # enqueue time length - if self.props_impl.enq_txn_id is None: - buf.write_ub4(0) # enqueue txn id length - else: - buf.write_ub4(len(self.props_impl.enq_txn_id)) - buf.write_bytes_with_length(self.props_impl.enq_txn_id) - buf.write_ub4(4) # number of extensions - buf.write_uint8(0x0e) # unknown extra byte - buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_NAME) - buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS) - buf.write_extension_values(None, b'\x00', - TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL) - buf.write_extension_values(None, None, - TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID) - buf.write_ub4(0) # user property - buf.write_ub4(0) # cscn - buf.write_ub4(0) # dscn - buf.write_ub4(0) # flags - if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: - buf.write_ub4(0xffffffffl) # shard id - - if self.props_impl.recipients is None: - buf.write_uint8(0) # recipients (pointer) - buf.write_ub4(0) # number of key/value pairs - else: - buf.write_uint8(1) - buf.write_ub4(len(self.props_impl.recipients) * 3) + self._write_msg_props(buf, self.props_impl) + buf.write_uint8(0) # recipients (pointer) + buf.write_ub4(0) # number of key/value pairs buf.write_ub4(self.enq_options_impl.visibility) buf.write_uint8(0) # relative 
message id buf.write_ub4(0) # relative message length buf.write_ub4(0) # sequence deviation buf.write_uint8(1) # TOID of payload (pointer) buf.write_ub4(16) # TOID of payload length - buf.write_ub2(self.props_impl.version) + buf.write_ub2(TNS_AQ_MESSAGE_VERSION) if self.queue_impl.is_json: buf.write_uint8(0) # payload (pointer) buf.write_uint8(0) # RAW payload (pointer) @@ -127,7 +84,7 @@ cdef class EnqMessage(Message): else: buf.write_uint8(0) # payload (pointer) buf.write_uint8(1) # RAW payload (pointer) - buf.write_ub4(len(self.props_impl.payloadObject)) + buf.write_ub4(len(self.props_impl.payload_obj)) buf.write_uint8(1) # return message id (pointer) buf.write_ub4(TNS_AQ_MESSAGE_ID_LENGTH) # return message id length enq_flags = 0 @@ -159,11 +116,4 @@ cdef class EnqMessage(Message): buf.write_bytes_with_length(queue_name_bytes) buf.write_bytes(self.queue_impl.payload_toid) - if not self.queue_impl.is_json: - if self.queue_impl.payload_type is not None: - buf.write_dbobject(self.props_impl.payloadObject) - else: - buf.write_bytes(self.props_impl.payloadObject) - if self.queue_impl.is_json: - buf.write_oson(self.props_impl.payloadObject, - self.conn_impl._oson_max_fname_size, False) + self._write_payload(buf, self.props_impl) diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index fdf75ccc..41a36d4c 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -35,26 +35,26 @@ cdef class BaseThinQueueImpl(BaseQueueImpl): BaseThinConnImpl _conn_impl bytes payload_toid - cdef DeqMessage _create_deq_message(self): + cdef AqDeqMessage _create_deq_message(self): """ Create the message for dequeuing a payload. """ cdef: ThinMsgPropsImpl props_impl - DeqMessage message + AqDeqMessage message props_impl = ThinMsgPropsImpl() - message = self._conn_impl._create_message(DeqMessage) + message = self._conn_impl._create_message(AqDeqMessage) message.queue_impl = self message.deq_options_impl = self.deq_options_impl message.props_impl = props_impl return message - cdef EnqMessage _create_enq_message(self, ThinMsgPropsImpl props_impl): + cdef AqEnqMessage _create_enq_message(self, ThinMsgPropsImpl props_impl): """ Create the message for enqueuing the provided payload. 
""" - cdef EnqMessage message - message = self._conn_impl._create_message(EnqMessage) + cdef AqEnqMessage message + message = self._conn_impl._create_message(AqEnqMessage) message.queue_impl = self message.enq_options_impl = self.enq_options_impl message.props_impl = props_impl @@ -87,7 +87,7 @@ cdef class ThinQueueImpl(BaseThinQueueImpl): """ cdef: Protocol protocol = self._conn_impl._protocol - DeqMessage message + AqDeqMessage message message = self._create_deq_message() protocol._process_single_message(message) if not message.no_msg_found: @@ -99,7 +99,7 @@ cdef class ThinQueueImpl(BaseThinQueueImpl): """ cdef: Protocol protocol = self._conn_impl._protocol - EnqMessage message + AqEnqMessage message message = self._create_enq_message(props_impl) protocol._process_single_message(message) @@ -112,7 +112,7 @@ cdef class AsyncThinQueueImpl(BaseThinQueueImpl): """ cdef: BaseAsyncProtocol protocol - DeqMessage message + AqDeqMessage message protocol = self._conn_impl._protocol message = self._create_deq_message() await protocol._process_single_message(message) @@ -125,7 +125,7 @@ cdef class AsyncThinQueueImpl(BaseThinQueueImpl): """ cdef: BaseAsyncProtocol protocol - EnqMessage message + AqEnqMessage message protocol = self._conn_impl._protocol message = self._create_enq_message(props_impl) await protocol._process_single_message(message) @@ -321,8 +321,7 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): cydatetime.datetime enq_time bytes msgid int32_t state - object payloadObject - int32_t version + object payload_obj BaseThinConnImpl _conn_impl bytes enq_txn_id bytes sender_agent_name @@ -334,7 +333,6 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): self.delay = TNS_AQ_MSG_NO_DELAY self.expiration = TNS_AQ_MSG_NO_EXPIRATION self.recipients = [] - self.version = 1 self.sender_agent_protocol = 0 def get_num_attempts(self): @@ -425,7 +423,7 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): """ Internal method for setting the payload from bytes. 
""" - self.payloadObject = value + self.payload_obj = value def set_payload_object(self, ThinDbObjectImpl value): """ @@ -433,13 +431,13 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): """ if not isinstance(value, ThinDbObjectImpl): raise TypeError("Expected ThinDbObjectImpl instance.") - self.payloadObject = value + self.payload_obj = value def set_payload_json(self, object json_val): """ Internal method for setting the payload from a JSON object """ - self.payloadObject = json_val + self.payload_obj = json_val def set_priority(self, int32_t value): """ diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index abbe85df..1eedfd1b 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -208,13 +208,14 @@ include "impl/thin/capabilities.pyx" include "impl/thin/transport.pyx" include "impl/thin/packet.pyx" include "impl/thin/messages/base.pyx" +include "impl/thin/messages/aq_base.pyx" +include "impl/thin/messages/aq_deq.pyx" +include "impl/thin/messages/aq_enq.pyx" include "impl/thin/messages/auth.pyx" include "impl/thin/messages/commit.pyx" include "impl/thin/messages/connect.pyx" include "impl/thin/messages/data_types.pyx" -include "impl/thin/messages/deq.pyx" include "impl/thin/messages/end_pipeline.pyx" -include "impl/thin/messages/enq.pyx" include "impl/thin/messages/execute.pyx" include "impl/thin/messages/fetch.pyx" include "impl/thin/messages/lob_op.pyx" From 5dd313f3704c4f4a0bed3e17fd15ac66065931bb Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:58:04 -0600 Subject: [PATCH 024/239] Fixed bug when binding a temporary LOB IN/OUT to a PL/SQL procedure (#468). --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/messages/base.pyx | 5 ++++- src/oracledb/impl/thin/packet.pyx | 19 ++++++++++++------ tests/sql/create_schema.sql | 17 ++++++++++++++++ tests/test_1900_lob_var.py | 25 +++++++++++++++++++++++- 5 files changed, 60 insertions(+), 8 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 9dd0db1b..c8b192df 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -27,6 +27,8 @@ Thin Mode Changes (`issue 456 `__). #) Fixed wildcard matching of domains in Subject Alternative Names (`issue 462 `__). +#) Fixed bug when binding a temporary LOB IN/OUT to a PL/SQL procedure + (`issue 468 `__). Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 6aea7828..d82dc8d2 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -893,8 +893,11 @@ cdef class MessageWithData(Message): elif ora_type_num in (ORA_TYPE_NUM_CLOB, ORA_TYPE_NUM_BLOB, ORA_TYPE_NUM_BFILE): + if not self.in_fetch: + column_value = var_impl._values[pos] column_value = buf.read_lob_with_length(self.conn_impl, - metadata.dbtype) + metadata.dbtype, + column_value) elif ora_type_num == ORA_TYPE_NUM_JSON: column_value = buf.read_oson() elif ora_type_num == ORA_TYPE_NUM_VECTOR: diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index d68b51f9..ddbe5b66 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -475,7 +475,7 @@ cdef class ReadBuffer(Buffer): return decoder.decode(data) cdef object read_lob_with_length(self, BaseThinConnImpl conn_impl, - DbType dbtype): + DbType dbtype, object lob): """ Read a LOB locator from the buffer and return a LOB object containing it. 
@@ -484,6 +484,7 @@ cdef class ReadBuffer(Buffer): uint32_t chunk_size, num_bytes BaseThinLobImpl lob_impl uint64_t size + bytes locator type cls self.read_ub4(&num_bytes) if num_bytes > 0: @@ -492,15 +493,21 @@ cdef class ReadBuffer(Buffer): else: self.read_ub8(&size) self.read_ub4(&chunk_size) - lob_impl = conn_impl._create_lob_impl(dbtype, self.read_bytes()) + locator = self.read_bytes() + if lob is None: + lob_impl = conn_impl._create_lob_impl(dbtype, locator) + cls = PY_TYPE_ASYNC_LOB \ + if conn_impl._protocol._transport._is_async \ + else PY_TYPE_LOB + lob = cls._from_impl(lob_impl) + else: + lob_impl = lob._impl + lob_impl._locator = locator lob_impl._size = size lob_impl._chunk_size = chunk_size lob_impl._has_metadata = \ dbtype._ora_type_num != ORA_TYPE_NUM_BFILE - cls = PY_TYPE_ASYNC_LOB \ - if conn_impl._protocol._transport._is_async \ - else PY_TYPE_LOB - return cls._from_impl(lob_impl) + return lob cdef const char_type* read_raw_bytes(self, ssize_t num_bytes) except NULL: """ diff --git a/tests/sql/create_schema.sql b/tests/sql/create_schema.sql index 8885b34e..dd90ab74 100644 --- a/tests/sql/create_schema.sql +++ b/tests/sql/create_schema.sql @@ -1479,6 +1479,12 @@ create or replace package &main_user..pkg_TestLOBs as a_Size out number ); + procedure TestInOut ( + a_CLOB in out clob, + a_SearchValue varchar2, + a_ReplaceValue varchar2 + ); + end; / @@ -1500,5 +1506,16 @@ create or replace package body &main_user..pkg_TestLOBs as a_Size := dbms_lob.getlength(a_CLOB); end; + procedure TestInOut ( + a_CLOB in out clob, + a_SearchValue varchar2, + a_ReplaceValue varchar2 + ) is + begin + if a_SearchValue is not null then + a_CLOB := replace(a_CLOB, a_SearchValue, a_ReplaceValue); + end if; + end; + end; / diff --git a/tests/test_1900_lob_var.py b/tests/test_1900_lob_var.py index 7fa5d9f6..79f5df1b 100644 --- a/tests/test_1900_lob_var.py +++ b/tests/test_1900_lob_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -614,6 +614,29 @@ def test_1938(self): var.setvalue(0, lob) self.assertIs(var.getvalue(), lob) + def test_1939(self): + "1939 - temporary LOB in/out without modification" + value = "test - 1939" + var = self.cursor.var(oracledb.DB_TYPE_CLOB) + var.setvalue(0, value) + self.assertEqual(var.getvalue().read(), value) + self.cursor.callproc("pkg_TestLOBs.TestInOut", [var, None, None]) + self.assertEqual(var.getvalue().read(), value) + + def test_1940(self): + "1940 - temporary LOB in/out with modification" + search_value = "test" + replace_value = "replaced" + initial_value = f"{search_value} - 1939" + final_value = f"{replace_value} - 1939" + var = self.cursor.var(oracledb.DB_TYPE_CLOB) + var.setvalue(0, initial_value) + self.assertEqual(var.getvalue().read(), initial_value) + self.cursor.callproc( + "pkg_TestLOBs.TestInOut", [var, search_value, replace_value] + ) + self.assertEqual(var.getvalue().read(), final_value) + if __name__ == "__main__": test_env.run_test_cases() From 34280f010adca94ec0f0a29f95cb5af641f58f73 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:58:40 -0600 Subject: [PATCH 025/239] Fixed bug when an error is reported by the server in the middle of a response to a client request (#472). 
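
The failure mode addressed here is a server-side error raised partway through streaming a result set: the server interleaves a marker packet with the data packets of the response, which the client previously mishandled. A user-level sketch of the scenario (it mirrors the regression tests added below; the TestTempTable table and the open connection object are assumed from the test environment):

    with connection.cursor() as cursor:
        cursor.execute("truncate table TestTempTable")
        # 1499 rows divide cleanly; the 1500th row triggers ORA-01476.
        data = [(i + 1, 2 if i < 1499 else 0) for i in range(1500)]
        cursor.executemany(
            "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)",
            data,
        )
        connection.commit()
        cursor.arraysize = 1500
        cursor.execute("""
            select IntCol, 1 / NumberCol
            from TestTempTable
            where IntCol < 1500
            union all
            select IntCol, 1 / NumberCol
            from TestTempTable
            where IntCol = 1500
        """)
        # The division-by-zero error is reported by the server in the middle
        # of the response; python-oracledb surfaces it to the application
        # while the rows are being processed.
        cursor.fetchall()  # raises oracledb.DatabaseError (ORA-01476)
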
--- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/thin/packet.pyx | 15 +++++++++++---- src/oracledb/impl/thin/protocol.pyx | 12 +++++++++--- src/oracledb/impl/thin/utils.pyx | 3 +++ tests/test_1100_connection.py | 26 ++++++++++++++++++++++++++ tests/test_5300_connection_async.py | 26 ++++++++++++++++++++++++++ 6 files changed, 78 insertions(+), 7 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index c8b192df..0d6a8edf 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -29,6 +29,9 @@ Thin Mode Changes (`issue 462 `__). #) Fixed bug when binding a temporary LOB IN/OUT to a PL/SQL procedure (`issue 468 `__). +#) Fixed bug when an error is reported by the server in the middle of a + response to a client request + (`issue 472 `__). Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index ddbe5b66..21de9b62 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -271,7 +271,7 @@ cdef class ReadBuffer(Buffer): # if no bytes are left in the buffer, a new packet needs to be fetched # before anything else can take place if self._pos == self._size: - self.wait_for_packets_sync() + self.wait_for_packets_sync(check_marker=True) # if there is enough room in the buffer to satisfy the number of bytes # requested, return a pointer to the current location and advance the @@ -306,7 +306,7 @@ cdef class ReadBuffer(Buffer): while num_bytes > 0: # advance to next packet - self.wait_for_packets_sync() + self.wait_for_packets_sync(check_marker=True) # copy data into the chunked buffer or split buffer, as appropriate source_ptr = &self._data[self._pos] @@ -739,12 +739,16 @@ cdef class ReadBuffer(Buffer): await self._waiter self._start_packet() - cdef int wait_for_packets_sync(self) except -1: + cdef int wait_for_packets_sync(self, bint check_marker=False) except -1: """ Wait for packets to arrive in response to the request that was sent to the database (synchronously). If no packets are available and we are using asyncio, raise an exception so that processing can be restarted - once packets have arrived. + once packets have arrived. If the check_marker flag is set and a marker + is detected, throw an exception so that the protocol can process it + accordingly. This is required because the server can send a marker + packet in the middle of the data packets that form the response to the + client's request. 
""" cdef: bint notify_waiter @@ -758,6 +762,9 @@ cdef class ReadBuffer(Buffer): if notify_waiter: break self._start_packet() + if check_marker \ + and self._current_packet.packet_type == TNS_PACKET_TYPE_MARKER: + raise MarkerDetected() async def wait_for_response_async(self): """ diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index d9cb428f..63692de4 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -406,13 +406,16 @@ cdef class Protocol(BaseProtocol): "socket timed out while recovering from " \ "previous socket timeout") raise + except MarkerDetected: + self._reset() + message.process(self._read_buf) except Exception as e: if not self._in_connect \ and self._write_buf._packet_sent \ and self._read_buf._transport is not None \ and self._read_buf._transport._transport is not None: self._send_marker(self._write_buf, TNS_MARKER_TYPE_BREAK) - self._reset(message) + self._reset() raise if message.flush_out_binds: self._write_buf.start_request(TNS_PACKET_TYPE_DATA) @@ -465,7 +468,7 @@ cdef class Protocol(BaseProtocol): finally: buf._check_request_boundary = orig_check_request_boundary if buf._current_packet.packet_type == TNS_PACKET_TYPE_MARKER: - self._reset(message) + self._reset() elif buf._current_packet.packet_type == TNS_PACKET_TYPE_REFUSE: self._write_buf._packet_sent = False buf.skip_raw_bytes(2) @@ -476,7 +479,7 @@ cdef class Protocol(BaseProtocol): ptr = buf.read_raw_bytes(refuse_message_len) message.error_info.message = ptr[:refuse_message_len].decode() - cdef int _reset(self, Message message) except -1: + cdef int _reset(self) except -1: cdef uint8_t marker_type, packet_type # send reset marker @@ -766,6 +769,9 @@ cdef class BaseAsyncProtocol(BaseProtocol): "socket timed out while recovering from " \ "previous socket timeout") raise + except MarkerDetected: + await self._reset() + message.process(self._read_buf) except: if not self._in_connect \ and self._write_buf._packet_sent \ diff --git a/src/oracledb/impl/thin/utils.pyx b/src/oracledb/impl/thin/utils.pyx index e0984c92..0695d542 100644 --- a/src/oracledb/impl/thin/utils.pyx +++ b/src/oracledb/impl/thin/utils.pyx @@ -31,6 +31,9 @@ class OutOfPackets(Exception): pass +class MarkerDetected(Exception): + pass + class ConnectConstants: def __init__(self): diff --git a/tests/test_1100_connection.py b/tests/test_1100_connection.py index a86647fb..1fff849e 100644 --- a/tests/test_1100_connection.py +++ b/tests/test_1100_connection.py @@ -978,6 +978,32 @@ def hook3(params): oracledb.unregister_params_hook(hook2) oracledb.unregister_params_hook(hook3) + def test_1158(self): + "1158 - test error in the middle of a database response" + conn = test_env.get_connection() + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + data = [(i + 1, 2 if i < 1499 else 0) for i in range(1500)] + cursor.executemany( + "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", + data, + ) + conn.commit() + cursor.arraysize = 1500 + with self.assertRaisesFullCode("ORA-01476"): + cursor.execute( + """ + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol < 1500 + union all + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol = 1500 + """ + ) + cursor.fetchall() + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_5300_connection_async.py b/tests/test_5300_connection_async.py index 32e1a2d9..1233168f 100644 --- a/tests/test_5300_connection_async.py +++ b/tests/test_5300_connection_async.py @@ 
-718,6 +718,32 @@ async def test_5355(self): self.assertEqual(fetched_edition, edition.upper()) self.assertEqual(conn.edition, edition) + async def test_5356(self): + "5356 - test error in the middle of a database response" + conn = await test_env.get_connection_async() + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + data = [(i + 1, 2 if i < 1499 else 0) for i in range(1500)] + await cursor.executemany( + "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", + data, + ) + await conn.commit() + cursor.arraysize = 1500 + with self.assertRaisesFullCode("ORA-01476"): + await cursor.execute( + """ + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol < 1500 + union all + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol = 1500 + """ + ) + await cursor.fetchall() + if __name__ == "__main__": test_env.run_test_cases() From 2bd2131636ac8412f7bc4b1b859e1c8be3c6bd9d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 16:59:31 -0600 Subject: [PATCH 026/239] Added support for array enqueue and dequeue for AQ in thin mode. --- doc/src/api_manual/aq.rst | 16 +- doc/src/api_manual/async_aq.rst | 13 ++ doc/src/release_notes.rst | 10 +- doc/src/user_guide/aq.rst | 32 +-- samples/bulk_aq.py | 5 +- samples/bulk_aq_async.py | 107 +++++++++ src/oracledb/aq.py | 27 ++- src/oracledb/impl/thin/constants.pxi | 6 + src/oracledb/impl/thin/messages/aq_array.pyx | 202 +++++++++++++++++ src/oracledb/impl/thin/queue.pyx | 76 +++++++ src/oracledb/thin_impl.pyx | 1 + tests/test_2700_aq.py | 16 -- tests/test_2800_bulk_aq.py | 7 +- tests/test_8200_bulk_aq_async.py | 227 +++++++++++++++++++ tests/test_env.py | 3 +- 15 files changed, 701 insertions(+), 47 deletions(-) create mode 100644 samples/bulk_aq_async.py create mode 100644 src/oracledb/impl/thin/messages/aq_array.pyx create mode 100644 tests/test_8200_bulk_aq_async.py diff --git a/doc/src/api_manual/aq.rst b/doc/src/api_manual/aq.rst index e171668f..ba7501d5 100644 --- a/doc/src/api_manual/aq.rst +++ b/doc/src/api_manual/aq.rst @@ -21,7 +21,7 @@ used to enqueue and dequeue messages. Queue Methods ------------- -.. method:: Queue.deqmany(maxMessages) +.. method:: Queue.deqmany(max_num_messages) Dequeues up to the specified number of messages from the queue and returns a list of these messages. Each element of the returned list is a @@ -50,12 +50,14 @@ Queue Methods .. warning:: - Prior to Oracle Database 21c, calling this function in parallel on - different connections acquired from the same pool may fail due to - Oracle bug 29928074. Either ensure that this function is not run in - parallel, use standalone connections or connections from different - pools, or make multiple calls to :meth:`Queue.enqone()` instead. The - function :meth:`Queue.deqmany()` call is not affected. + In python-oracledb Thick mode using Oracle Client libraries prior to + 21c, calling :meth:`Queue.enqmany()` in parallel on different + connections acquired from the same connection pool may fail due to + Oracle bug 29928074. To avoid this, do one of: upgrade the client + libraries, ensure that :meth:`Queue.enqmany()` is not run in parallel, + use standalone connections or connections from different pools, or make + multiple calls to :meth:`Queue.enqone()`. The function + :meth:`Queue.deqmany()` call is not affected. For consistency and compliance with the PEP 8 naming style, the name of the method was changed from `enqMany()`. 
The old name will continue diff --git a/doc/src/api_manual/async_aq.rst b/doc/src/api_manual/async_aq.rst index bf9c7458..a6df9d6c 100644 --- a/doc/src/api_manual/async_aq.rst +++ b/doc/src/api_manual/async_aq.rst @@ -23,12 +23,25 @@ are used to enqueue and dequeue messages. AsyncQueue Methods ------------------ +.. method:: AsyncQueue.deqmany(max_num_messages) + + Dequeues up to the specified number of messages from the queue and returns + a list of these messages. Each element of the returned list is a + :ref:`message property ` object. + .. method:: AsyncQueue.deqone() Dequeues at most one message from the queue. If a message is dequeued, it will be a :ref:`message property ` object; otherwise, the value *None* will be returned. +.. method:: AsyncQueue.enqmany(messages) + + Enqueues multiple messages into the queue. The ``messages`` parameter must + be a sequence containing :ref:`message property ` objects + which have all had their payload attribute set to a value that the queue + supports. + .. method:: AsyncQueue.enqone(message) Enqueues a single message into the queue. The message must be a diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 0d6a8edf..d3c097b4 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,10 +17,12 @@ oracledb 3.1.0 (TBD) Thin Mode Changes +++++++++++++++++ -#) Added :ref:`Oracle Advanced Queuing ` support for single - enqueue and dequeue of JSON payloads. -#) Added Async :ref:`Oracle Advanced Queuing ` support for single - enqueue and dequeue of RAW and Oracle object payload types. +#) Improved support for :ref:`Oracle Advanced Queuing `: + + - added support for JSON payloads + - added support for bulk enqueuing and dequeuing + - added support for using AQ with asyncio + #) Improved error message when the cryptography package cannot be imported (`issue 455 `__). #) Fixed decoding of nested PL/SQL records diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index 3491640f..8dfedb27 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -26,9 +26,8 @@ types. - JSON payloads require Oracle Database 21c (or later). In python-oracle Thick mode, Oracle Client libraries 21c (or later) are also needed. -JMS payloads, array message queuing and dequeuing operations, and -:ref:`Recipient Lists ` are only supported in python-oracledb -:ref:`Thick mode `. +JMS payloads and :ref:`Recipient Lists ` are only supported in +python-oracledb :ref:`Thick mode `. There are examples of AQ Classic Queues in the `GitHub samples `__ directory. @@ -343,8 +342,9 @@ message will be dropped from the queue. Bulk Enqueue and Dequeue ======================== -The :meth:`~Queue.enqmany()` and :meth:`~Queue.deqmany()` methods can be used -for efficient bulk message handling. +The :meth:`Queue.enqmany()`, :meth:`Queue.deqmany()`, +:meth:`AsyncQueue.enqmany()`, and :meth:`AsyncQueue.deqmany()` methods can be +used for efficient bulk message handling. The :meth:`~Queue.enqmany()` method is similar to :meth:`~Queue.enqone()` but accepts an array of messages: @@ -362,16 +362,18 @@ accepts an array of messages: .. warning:: - Calling :meth:`~Queue.enqmany()` in parallel on different connections - acquired from the same pool may fail due to Oracle bug 29928074. To avoid - this, ensure that :meth:`~Queue.enqmany()` is not run in parallel, use - standalone connections or connections from different pools, or make - multiple calls to :meth:`~Queue.enqone()` instead. 
The function - :meth:`~Queue.deqmany()` call is not affected. - -To dequeue multiple messages at one time, use :meth:`~Queue.deqmany()`. This -takes an argument specifying the maximum number of messages to dequeue at one -time: + In python-oracledb Thick mode using Oracle Client libraries prior to 21c, + calling :meth:`Queue.enqmany()` in parallel on different connections + acquired from the same connection pool may fail due to Oracle + bug 29928074. To avoid this, do one of: upgrade the client libraries, + ensure that :meth:`Queue.enqmany()` is not run in parallel, use standalone + connections or connections from different pools, or make multiple calls to + :meth:`Queue.enqone()`. The function :meth:`Queue.deqmany()` call is not + affected. + +To dequeue multiple messages at one time, use :meth:`Queue.deqmany()` or +:meth:`AsyncQueue.deqmany()`. This takes an argument specifying the maximum +number of messages to dequeue at one time: .. code-block:: python diff --git a/samples/bulk_aq.py b/samples/bulk_aq.py index e1b3cc27..8ea983b1 100644 --- a/samples/bulk_aq.py +++ b/samples/bulk_aq.py @@ -53,8 +53,9 @@ "The twelfth and final message", ] -# this script is currently only supported in python-oracledb thick mode -oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) +# determine whether to use python-oracledb thin mode or thick mode +if not sample_env.get_is_thin(): + oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # connect to database connection = oracledb.connect( diff --git a/samples/bulk_aq_async.py b/samples/bulk_aq_async.py new file mode 100644 index 00000000..539a6206 --- /dev/null +++ b/samples/bulk_aq_async.py @@ -0,0 +1,107 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. +# +# Portions Copyright 2001-2007, Computronix (Canada) Ltd., Edmonton, Alberta, +# Canada. All rights reserved. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# bulk_aq_async.py +# +# Demonstrates how to use bulk enqueuing and dequeuing of messages with +# advanced queuing using asyncio. It makes use of a RAW queue created in the +# sample setup. 
+# ----------------------------------------------------------------------------- + +import asyncio + +import oracledb +import sample_env + +QUEUE_NAME = "DEMO_RAW_QUEUE" +PAYLOAD_DATA = [ + "The first message", + "The second message", + "The third message", + "The fourth message", + "The fifth message", + "The sixth message", + "The seventh message", + "The eighth message", + "The ninth message", + "The tenth message", + "The eleventh message", + "The twelfth and final message", +] + + +async def main(): + + # connect to database + async with oracledb.connect_async( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + ) as connection: + + # create a queue + queue = connection.queue(QUEUE_NAME) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + + # dequeue all existing messages to ensure the queue is empty, just so + # that the results are consistent + while await queue.deqone(): + pass + + # enqueue a few messages + print("Enqueuing messages...") + batch_size = 6 + data_to_enqueue = PAYLOAD_DATA + while data_to_enqueue: + batch_data = data_to_enqueue[:batch_size] + data_to_enqueue = data_to_enqueue[batch_size:] + messages = [ + connection.msgproperties(payload=d) for d in batch_data + ] + for data in batch_data: + print(data) + await queue.enqmany(messages) + await connection.commit() + + # dequeue the messages + print("\nDequeuing messages...") + batch_size = 8 + while True: + messages = await queue.deqmany(batch_size) + if not messages: + break + for props in messages: + print(props.payload.decode()) + await connection.commit() + print("\nDone.") + + +asyncio.run(main()) diff --git a/src/oracledb/aq.py b/src/oracledb/aq.py index a9668355..a1238c42 100644 --- a/src/oracledb/aq.py +++ b/src/oracledb/aq.py @@ -180,7 +180,7 @@ def enqmany(self, messages: list) -> None: acquired from the same pool may fail due to Oracle bug 29928074. Ensure that this function is not run in parallel, use standalone connections or connections from different pools, or make multiple calls to - enqOne() instead. The function Queue.deqMany() call is not affected. + enqone() instead. The function Queue.deqmany() call is not affected. """ for message in messages: self._verify_message(message) @@ -211,6 +211,14 @@ def enqOne(self, message: "MessageProperties") -> None: class AsyncQueue(BaseQueue): + async def deqmany(self, max_num_messages: int) -> list: + """ + Dequeues up to the specified number of messages from the queue and + returns a list of these messages. + """ + message_impls = await self._impl.deq_many(max_num_messages) + return [MessageProperties._from_impl(impl) for impl in message_impls] + async def deqone(self) -> Union["MessageProperties", None]: """ Dequeues at most one message from the queue and returns it. If no @@ -220,6 +228,23 @@ async def deqone(self) -> Union["MessageProperties", None]: if message_impl is not None: return MessageProperties._from_impl(message_impl) + async def enqmany(self, messages: list) -> None: + """ + Enqueues multiple messages into the queue. The messages parameter must + be a sequence containing message property objects which have all had + their payload attribute set to a value that the queue supports. + + Warning: calling this function in parallel on different connections + acquired from the same pool may fail due to Oracle bug 29928074. 
Ensure + that this function is not run in parallel, use standalone connections + or connections from different pools, or make multiple calls to + enqone() instead. The function Queue.deqmany() call is not affected. + """ + for message in messages: + self._verify_message(message) + message_impls = [m._impl for m in messages] + await self._impl.enq_many(message_impls) + async def enqone(self, message: "MessageProperties") -> None: """ Enqueues a single message into the queue. The message must be a message diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index c7f1e00d..d2f10dc4 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -103,6 +103,11 @@ cdef enum: cdef enum: TNS_AQ_MSG_NO_DELAY = 0 TNS_AQ_MSG_NO_EXPIRATION = -1 + TNS_AQ_ARRAY_ENQ = 0x01 + TNS_AQ_ARRAY_DEQ = 0x02 + TNS_AQ_ARRAY_FLAGS_RETURN_MESSAGE_ID = 0x01 + TNS_TTC_ENQ_STREAMING_ENABLED = 0x00000001 + TNS_TTC_ENQ_STREAMING_DISABLED = 0x00000000 # AQ flags cdef enum: @@ -340,6 +345,7 @@ cdef enum: TNS_FUNC_LOB_OP = 96 TNS_FUNC_AQ_ENQ = 121 TNS_FUNC_AQ_DEQ = 122 + TNS_FUNC_ARRAY_AQ = 145 TNS_FUNC_LOGOFF = 9 TNS_FUNC_PING = 147 TNS_FUNC_PIPELINE_BEGIN = 199 diff --git a/src/oracledb/impl/thin/messages/aq_array.pyx b/src/oracledb/impl/thin/messages/aq_array.pyx new file mode 100644 index 00000000..2dc511ad --- /dev/null +++ b/src/oracledb/impl/thin/messages/aq_array.pyx @@ -0,0 +1,202 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# aq_array.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for enqueuing and dequeuing +# an array of AQ messages (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class AqArrayMessage(AqBaseMessage): + cdef: + list props_impls + int operation + uint32_t num_iters + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization + """ + self.function_code = TNS_FUNC_ARRAY_AQ + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the return parameters of the AQ array enqueue/dequeue request. 
+ """ + cdef: + uint32_t i, j, num_iters, temp32 + ThinMsgPropsImpl props_impl + uint16_t temp16 + bytes msgid + buf.read_ub4(&num_iters) + for i in range(num_iters): + props_impl = self.props_impls[i] + buf.read_ub2(&temp16) + if temp16 > 0: + buf.skip_ub1() + self._process_msg_props(buf, props_impl) + self._process_recipients(buf) + buf.read_ub2(&temp16) + if temp16 > 0: + props_impl.payload = self._process_payload(buf) + msgid = buf.read_bytes_with_length() + if self.operation == TNS_AQ_ARRAY_ENQ: + for j, props_impl in enumerate(self.props_impls): + props_impl.msgid = msgid[j * 16:(j + 1) * 16] + else: + props_impl.msgid = msgid + buf.read_ub2(&temp16) # extensions len + if temp16 > 0: + errors._raise_err(errors.ERR_NOT_IMPLEMENTED) + buf.skip_ub2() # output ack + if self.operation == TNS_AQ_ARRAY_ENQ: + buf.read_ub4(&self.num_iters) + else: + self.num_iters = num_iters + + cdef int _write_array_deq(self, WriteBuffer buf) except -1: + """ + Writes to the buffer the fields specific to the array dqeueue of AQ + messages. + """ + cdef: + bytes consumer_name_bytes = None + bytes correlation_bytes = None + bytes condition_bytes = None + ThinMsgPropsImpl props_impl + bytes queue_name_bytes + uint16_t delivery_mode + uint32_t flags = 0 + + # setup + queue_name_bytes = self.queue_impl.name.encode() + delivery_mode = self.deq_options_impl.delivery_mode + if delivery_mode == TNS_AQ_MSG_BUFFERED: + flags |= TNS_KPD_AQ_BUFMSG + elif delivery_mode == TNS_AQ_MSG_PERSISTENT_OR_BUFFERED: + flags |= TNS_KPD_AQ_EITHER + if self.deq_options_impl.consumer_name: + consumer_name_bytes = self.deq_options_impl.consumer_name.encode() + if self.deq_options_impl.condition: + condition_bytes = self.deq_options_impl.condition.encode() + if self.deq_options_impl.correlation: + correlation_bytes = self.deq_options_impl.correlation.encode() + + # write message + for props_impl in self.props_impls: + buf.write_ub4(len(queue_name_bytes)) + buf.write_bytes_with_length(queue_name_bytes) + self._write_msg_props(buf, props_impl) + buf.write_ub4(0) # num recipients + self._write_value_with_length(buf, consumer_name_bytes) + buf.write_sb4(self.deq_options_impl.mode) + buf.write_sb4(self.deq_options_impl.navigation) + buf.write_sb4(self.deq_options_impl.visibility) + buf.write_sb4(self.deq_options_impl.wait) + self._write_value_with_length(buf, self.deq_options_impl.msgid) + self._write_value_with_length(buf, correlation_bytes) + self._write_value_with_length(buf, condition_bytes) + buf.write_ub4(0) # extensions + buf.write_ub4(0) # rel msg id + buf.write_sb4(0) # seq deviation + buf.write_ub4(16) # toid length + buf.write_bytes_with_length(self.queue_impl.payload_toid) + buf.write_ub2(TNS_AQ_MESSAGE_VERSION) + buf.write_ub4(0) # payload length + buf.write_ub4(0) # raw pay length + buf.write_ub4(0) + buf.write_ub4(flags) + buf.write_ub4(0) # extensions len + buf.write_ub4(0) # source seq len + + cdef int _write_array_enq(self, WriteBuffer buf) except -1: + """ + Writing input parameters incase of array enqueue + """ + cdef: + ThinMsgPropsImpl props_impl + bytes queue_name_bytes + uint32_t flags = 0 + + # setup + queue_name_bytes = self.queue_impl.name.encode() + if self.enq_options_impl.delivery_mode == TNS_AQ_MSG_BUFFERED: + flags |= TNS_KPD_AQ_BUFMSG + + # write message + buf.write_ub4(0) # rel msgid len + buf.write_uint8(TNS_MSG_TYPE_ROW_HEADER) + buf.write_ub4(len(queue_name_bytes)) + buf.write_bytes_with_length(queue_name_bytes) + buf.write_bytes(self.queue_impl.payload_toid) + buf.write_ub2(TNS_AQ_MESSAGE_VERSION) + 
buf.write_ub4(flags) + for props_impl in self.props_impls: + buf.write_uint8(TNS_MSG_TYPE_ROW_DATA) + buf.write_ub4(flags) # aqi flags + self._write_msg_props(buf, props_impl) + buf.write_ub4(0) # num recipients + buf.write_sb4(self.enq_options_impl.visibility) + buf.write_ub4(0) # relative msg id + buf.write_sb4(0) # seq deviation + if self.queue_impl.payload_type is None \ + and not self.queue_impl.is_json: + buf.write_ub4(len(props_impl.payload_obj)) + self._write_payload(buf, props_impl) + buf.write_uint8(TNS_MSG_TYPE_STATUS) + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Write message to the network buffers. + """ + self._write_function_code(buf) + if self.operation == TNS_AQ_ARRAY_ENQ: + buf.write_uint8(0) # input params + buf.write_ub4(0) # length + else: + buf.write_uint8(1) + buf.write_ub4(self.num_iters) + buf.write_ub4(TNS_AQ_ARRAY_FLAGS_RETURN_MESSAGE_ID) + if self.operation == TNS_AQ_ARRAY_ENQ: + buf.write_uint8(1) # output params + buf.write_uint8(0) # length + else: + buf.write_uint8(1) + buf.write_uint8(1) + buf.write_sb4(self.operation) + if self.operation == TNS_AQ_ARRAY_ENQ: + buf.write_uint8(1) # num iters (pointer) + else: + buf.write_uint8(0) + if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: + buf.write_ub4(0xffff) # shard id + if self.operation == TNS_AQ_ARRAY_ENQ: + buf.write_ub4(self.num_iters) + if self.operation == TNS_AQ_ARRAY_ENQ: + self._write_array_enq(buf) + else: + self._write_array_deq(buf) diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index 41a36d4c..0a24b4fb 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -35,6 +35,34 @@ cdef class BaseThinQueueImpl(BaseQueueImpl): BaseThinConnImpl _conn_impl bytes payload_toid + cdef AqArrayMessage _create_array_deq_message(self, uint32_t num_iters): + """ + Create the message used for dequeuing multiple AQ messages + """ + cdef: + AqArrayMessage message + uint32_t i + message = self._conn_impl._create_message(AqArrayMessage) + message.num_iters = num_iters + message.props_impls = [ThinMsgPropsImpl() for i in range(num_iters)] + message.queue_impl = self + message.deq_options_impl = self.deq_options_impl + message.operation = TNS_AQ_ARRAY_DEQ + return message + + cdef AqArrayMessage _create_array_enq_message(self, list props_impls): + """ + Create the message used for enqueuing multiple AQ messages + """ + cdef AqArrayMessage message + message = self._conn_impl._create_message(AqArrayMessage) + message.queue_impl = self + message.enq_options_impl = self.enq_options_impl + message.props_impls = props_impls + message.operation = TNS_AQ_ARRAY_ENQ + message.num_iters = len(props_impls) + return message + cdef AqDeqMessage _create_deq_message(self): """ Create the message for dequeuing a payload. @@ -81,6 +109,19 @@ cdef class BaseThinQueueImpl(BaseQueueImpl): cdef class ThinQueueImpl(BaseThinQueueImpl): + def deq_many(self, uint32_t max_num_messages): + """ + Internal method for dequeuing multiple messages from a queue. + """ + cdef: + Protocol protocol = self._conn_impl._protocol + AqArrayMessage message + message = self._create_array_deq_message(max_num_messages) + protocol._process_single_message(message) + if message.no_msg_found: + return [] + return message.props_impls[:message.num_iters] + def deq_one(self): """ Internal method for dequeuing a single message from a queue. 
@@ -93,6 +134,16 @@ cdef class ThinQueueImpl(BaseThinQueueImpl): if not message.no_msg_found: return message.props_impl + def enq_many(self, list props_impls): + """ + Internal method for enqueuing many messages into a queue. + """ + cdef : + Protocol protocol = self._conn_impl._protocol + AqArrayMessage message + message = self._create_array_enq_message(props_impls) + protocol._process_single_message(message) + def enq_one(self, ThinMsgPropsImpl props_impl): """ Internal method for enqueuing a single message into a queue. @@ -106,6 +157,20 @@ cdef class ThinQueueImpl(BaseThinQueueImpl): cdef class AsyncThinQueueImpl(BaseThinQueueImpl): + async def deq_many(self, uint32_t max_num_messages): + """ + Internal method for dequeuing multiple messages from a queue. + """ + cdef: + BaseAsyncProtocol protocol + AqArrayMessage message + protocol = self._conn_impl._protocol + message = self._create_array_deq_message(max_num_messages) + await protocol._process_single_message(message) + if message.no_msg_found: + return [] + return message.props_impls[:message.num_iters] + async def deq_one(self): """ Internal method for dequeuing a single message from a queue. @@ -119,6 +184,17 @@ cdef class AsyncThinQueueImpl(BaseThinQueueImpl): if not message.no_msg_found: return message.props_impl + async def enq_many(self, list props_impls): + """ + Internal method for enqueuing many messages into a queue. + """ + cdef : + BaseAsyncProtocol protocol + AqArrayMessage message + protocol = self._conn_impl._protocol + message = self._create_array_enq_message(props_impls) + await protocol._process_single_message(message) + async def enq_one(self, ThinMsgPropsImpl props_impl): """ Internal method for enqueuing a single message into a queue. diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index 1eedfd1b..bb03a29d 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -209,6 +209,7 @@ include "impl/thin/transport.pyx" include "impl/thin/packet.pyx" include "impl/thin/messages/base.pyx" include "impl/thin/messages/aq_base.pyx" +include "impl/thin/messages/aq_array.pyx" include "impl/thin/messages/aq_deq.pyx" include "impl/thin/messages/aq_enq.pyx" include "impl/thin/messages/auth.pyx" diff --git a/tests/test_2700_aq.py b/tests/test_2700_aq.py index e8139fa9..28f792b5 100644 --- a/tests/test_2700_aq.py +++ b/tests/test_2700_aq.py @@ -628,10 +628,6 @@ def test_2728(self): with self.assertRaises(AttributeError): queue.deqoptions.deliverymode - @unittest.skipIf( - test_env.get_is_thin(), - "Thin mode doesn't support enqmany and deqmany yet", - ) def test_2729(self): "2729 - test correlation deqoption" queue = self.get_and_clear_queue( @@ -659,10 +655,6 @@ def test_2729(self): correlated_messages = queue.deqmany(num_messages + 1) self.assertEqual(len(correlated_messages), num_messages) - @unittest.skipIf( - test_env.get_is_thin(), - "Thin mode doesn't support enqmany and deqmany yet", - ) def test_2730(self): "2730 - test correlation deqoption with pattern-matching characters" queue = self.get_and_clear_queue( @@ -680,10 +672,6 @@ def test_2730(self): messages = queue.deqmany(5) self.assertEqual(len(messages), 2) - @unittest.skipIf( - test_env.get_is_thin(), - "Thin mode doesn't support enqmany and deqmany yet", - ) def test_2731(self): "2731 - test condition deqoption with priority" queue = self.get_and_clear_queue( @@ -711,10 +699,6 @@ def test_2731(self): data = book.TITLE, book.AUTHORS, book.PRICE self.assertEqual(data, self.book_data[ix]) - @unittest.skipIf( - 
test_env.get_is_thin(), - "Thin mode doesn't support enqmany and deqmany yet", - ) def test_2732(self): "2732 - test mode deqoption with DEQ_REMOVE_NODATA" queue = self.get_and_clear_queue( diff --git a/tests/test_2800_bulk_aq.py b/tests/test_2800_bulk_aq.py index 45cf5f32..5eef9e90 100644 --- a/tests/test_2800_bulk_aq.py +++ b/tests/test_2800_bulk_aq.py @@ -63,7 +63,6 @@ ] -@unittest.skipIf(test_env.get_is_thin(), "thin mode doesn't support AQ yet") class TestCase(test_env.BaseTestCase): def __deq_in_thread(self, results): with test_env.get_connection() as conn: @@ -98,6 +97,9 @@ def test_2801(self): self.conn.commit() self.assertEqual(messages, []) + @unittest.skipIf( + test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" + ) def test_2802(self): "2802 - test bulk dequeue with wait" queue = self.get_and_clear_queue(RAW_QUEUE_NAME) @@ -131,6 +133,9 @@ def test_2803(self): self.conn.commit() self.assertEqual(all_data, RAW_PAYLOAD_DATA) + @unittest.skipIf( + test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" + ) def test_2804(self): "2804 - test visibility option for enqueue and dequeue" queue = self.get_and_clear_queue(RAW_QUEUE_NAME) diff --git a/tests/test_8200_bulk_aq_async.py b/tests/test_8200_bulk_aq_async.py new file mode 100644 index 00000000..3b47ff5f --- /dev/null +++ b/tests/test_8200_bulk_aq_async.py @@ -0,0 +1,227 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +""" +8200 - Module for testing AQ Bulk enqueue/dequeue with asyncio +""" + +import datetime +import threading +import unittest + +import oracledb +import test_env + +RAW_QUEUE_NAME = "TEST_RAW_QUEUE" +JSON_QUEUE_NAME = "TEST_JSON_QUEUE" +RAW_PAYLOAD_DATA = [ + "The first message", + "The second message", + "The third message", + "The fourth message", + "The fifth message", + "The sixth message", + "The seventh message", + "The eighth message", + "The ninth message", + "The tenth message", + "The eleventh message", + "The twelfth and final message", +] + +JSON_DATA_PAYLOAD = [ + [ + 2.75, + True, + "Ocean Beach", + b"Some bytes", + {"keyA": 1.0, "KeyB": "Melbourne"}, + datetime.datetime(2022, 8, 1, 0, 0), + ], + dict(name="John", age=30, city="New York"), +] + + +@unittest.skipUnless( + test_env.get_is_thin(), "asyncio not supported in thick mode" +) +class TestCase(test_env.BaseAsyncTestCase): + async def __deq_in_thread(self, results): + async with test_env.get_connection_async() as conn: + queue = conn.queue(RAW_QUEUE_NAME) + queue.deqoptions.wait = 10 + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + while len(results) < len(RAW_PAYLOAD_DATA): + messages = await queue.deqmany(5) + if not messages: + break + for message in messages: + results.append(message.payload.decode()) + await conn.commit() + + async def test_8200(self): + "8200 - test bulk enqueue and dequeue" + queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) + messages = [ + self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA + ] + await queue.enqmany(messages) + messages = await queue.deqmany(len(RAW_PAYLOAD_DATA)) + data = [message.payload.decode() for message in messages] + await self.conn.commit() + self.assertEqual(data, RAW_PAYLOAD_DATA) + + async def test_8201(self): + "8201 - test empty bulk dequeue" + queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + messages = await queue.deqmany(5) + await self.conn.commit() + self.assertEqual(messages, []) + + @unittest.skipIf( + test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" + ) + async def test_8202(self): + "8202 - test bulk dequeue with wait" + queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) + results = [] + thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) + thread.start() + messages = [ + self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA + ] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + await queue.enqmany(messages) + thread.join() + self.assertEqual(results, RAW_PAYLOAD_DATA) + + async def test_8203(self): + "8203 - test enqueue and dequeue multiple times" + queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) + data_to_enqueue = RAW_PAYLOAD_DATA + for num in (2, 6, 4): + messages = [ + self.conn.msgproperties(payload=data) + for data in data_to_enqueue[:num] + ] + data_to_enqueue = data_to_enqueue[num:] + await queue.enqmany(messages) + await self.conn.commit() + all_data = [] + for num in (3, 5, 10): + messages = await queue.deqmany(num) + all_data.extend(message.payload.decode() for message in messages) + await self.conn.commit() + self.assertEqual(all_data, RAW_PAYLOAD_DATA) + + @unittest.skipIf( + test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" + ) + async def test_8204(self): + "8204 - test visibility option for enqueue and dequeue" + queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) + + # first test 
with ENQ_ON_COMMIT (commit required) + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props1 = self.conn.msgproperties(payload="A first message") + props2 = self.conn.msgproperties(payload="A second message") + await queue.enqmany([props1, props2]) + async with test_env.get_connection_async() as other_conn: + other_queue = other_conn.queue(RAW_QUEUE_NAME) + other_queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + other_queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + messages = await other_queue.deqmany(5) + self.assertEqual(len(messages), 0) + await self.conn.commit() + messages = await other_queue.deqmany(5) + self.assertEqual(len(messages), 2) + await other_conn.rollback() + + # second test with ENQ_IMMEDIATE (no commit required) + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + other_queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.enqmany([props1, props2]) + messages = await other_queue.deqmany(5) + self.assertEqual(len(messages), 4) + await other_conn.rollback() + messages = await other_queue.deqmany(5) + self.assertEqual(len(messages), 0) + + async def test_8205(self): + "8205 - test error for messages with no payload" + queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) + messages = [self.conn.msgproperties() for _ in RAW_PAYLOAD_DATA] + with self.assertRaisesFullCode("DPY-2000"): + await queue.enqmany(messages) + + async def test_8206(self): + "8206 - verify that the msgid property is returned correctly" + queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) + messages = [ + self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA + ] + await queue.enqmany(messages) + await self.cursor.execute("select msgid from raw_queue_tab") + actual_msgids = set(m for m, in await self.cursor.fetchall()) + msgids = set(message.msgid for message in messages) + self.assertEqual(msgids, actual_msgids) + messages = await queue.deqmany(len(RAW_PAYLOAD_DATA)) + msgids = set(message.msgid for message in messages) + self.assertEqual(msgids, actual_msgids) + + async def test_8207(self): + "4800 - test enqueuing and dequeuing JSON message" + queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") + props = [ + self.conn.msgproperties(payload=data) for data in JSON_DATA_PAYLOAD + ] + await queue.enqmany(props) + await self.conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + messages = await queue.deqmany(5) + actual_data = [message.payload for message in messages] + self.assertEqual(actual_data, JSON_DATA_PAYLOAD) + + async def test_8208(self): + "8208 - test enqueuing to a JSON queue without a JSON payload" + queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") + props = self.conn.msgproperties(payload="string message") + with self.assertRaisesFullCode("DPY-2062"): + await queue.enqmany([props, props]) + + async def test_8209(self): + "8209 - test errors for invalid values for enqmany and deqmany" + queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") + props = self.conn.msgproperties(payload="string message") + with self.assertRaises(TypeError): + await queue.enqmany(props) + with self.assertRaises(TypeError): + await queue.enqmany(["Not", "msgproperties"]) + with self.assertRaises(TypeError): + await queue.deqmany("5") + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_env.py b/tests/test_env.py index 1e8bcd04..f89a1786 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -740,7 +740,8 @@ async def get_and_clear_queue( message="not supported with this client/server combination", 
): if payload_type == "JSON": - self.skipTest(message) + if get_server_version() < (21, 0): + self.skipTest(message) elif isinstance(payload_type, str): payload_type = await self.conn.gettype(payload_type) queue = self.conn.queue(queue_name, payload_type) From 4e9246eae40414b5822bfcad09e3bb15fa1f4f6f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 17 Mar 2025 17:11:00 -0600 Subject: [PATCH 027/239] Documentation updates. --- README.md | 6 +- doc/src/api_manual/connect_params.rst | 2 +- doc/src/api_manual/connection.rst | 14 ++- doc/src/api_manual/connection_pool.rst | 4 + doc/src/api_manual/module.rst | 66 +++++++------ doc/src/release_notes.rst | 16 +-- doc/src/user_guide/appendix_a.rst | 2 +- doc/src/user_guide/connection_handling.rst | 109 ++++++++++----------- doc/src/user_guide/sql_execution.rst | 64 +++++++----- 9 files changed, 155 insertions(+), 128 deletions(-) diff --git a/README.md b/README.md index b2b48dd7..096390d9 100644 --- a/README.md +++ b/README.md @@ -20,11 +20,12 @@ See [python-oracledb Installation][installation]. - Python versions 3.8 through 3.13. - Prebuilt packages are available for these Python versions on Windows, on - macOS and on Linux. + Pre-built packages are available on [PyPI][pypi] and other repositories. Source code is also available. + Previous versions of python-oracledb supported older Python versions. + - Oracle Client libraries are *optional*. **Thin mode**: By default python-oracledb runs in a 'Thin' mode which @@ -98,3 +99,4 @@ See [LICENSE][license], [THIRD_PARTY_LICENSES][tplicense], and [installation]: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html [features]: https://oracle.github.io/python-oracledb/#features [concurrent]: https://python-oracledb.readthedocs.io/en/latest/user_guide/asyncio.html +[pypi]: https://pypi.org/project/oracledb diff --git a/doc/src/api_manual/connect_params.rst b/doc/src/api_manual/connect_params.rst index 6e51f1a8..c89892de 100644 --- a/doc/src/api_manual/connect_params.rst +++ b/doc/src/api_manual/connect_params.rst @@ -410,7 +410,7 @@ ConnectParams Attributes throughput for large queries or bulk data loads, but at the cost of higher memory use. The SDU size that will actually be used is negotiated down to the lower of this value and the database network SDU configuration value. - See the `SQL*Net documentation + See the `Database Net Services documentation `__ for more details. diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 803b1f4d..a5597de5 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -823,14 +823,18 @@ Connection Attributes This read-only attribute returns the logical transaction id for the connection. It is used within Oracle Transaction Guard as a means of - ensuring that transactions are not duplicated. See the Oracle documentation - and the provided sample for more information. + ensuring that transactions are not duplicated. See :ref:`tg` for more + information. - .. note: + .. versionchanged:: 3.0.0 + + This attribute was added to python-oracledb Thin mode. + + .. note:: This attribute is an extension to the DB API definition. It is only - available with Oracle Database 12.1 or higher. In python-oracledb Thick - mode, it also requires Oracle Client libraries 12.1 or higer. + available with Oracle Database 12.1 or later. In python-oracledb Thick + mode, it also requires Oracle Client libraries 12.1 or later. .. 
attribute:: Connection.max_identifier_length diff --git a/doc/src/api_manual/connection_pool.rst b/doc/src/api_manual/connection_pool.rst index dc8c39d4..51662ff8 100644 --- a/doc/src/api_manual/connection_pool.rst +++ b/doc/src/api_manual/connection_pool.rst @@ -225,6 +225,10 @@ ConnectionPool Attributes or later must be used and, prior to Oracle Client 21, cleanup only occurs when the pool is accessed. + .. versionchanged:: 3.0.0 + + This attribute was added to python-oracledb Thin mode. + .. attribute:: ConnectionPool.max_sessions_per_shard This read-write attribute returns the number of sessions that can be diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 042f7e54..27107c5b 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -325,9 +325,9 @@ Oracledb Methods requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is - negotiated down to the lower of this value and the database network SDU - configuration value. See the `SQL*Net documentation `__ for more details. This value is used in both the python-oracledb Thin and Thick modes. The default value is *8192* bytes. @@ -370,8 +370,9 @@ Oracledb Methods whether to use the TLS Server Name Indication (SNI) extension to bypass the second TLS negotiation that would otherwise be required. This parameter is used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `SQL*Net - documentation `__ for more details. The ``program`` parameter is expected to be a string which specifies the @@ -685,9 +686,9 @@ Oracledb Methods requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is - negotiated down to the lower of this value and the database network SDU - configuration value. See the `SQL*Net documentation `__ for more details. The default value is *8192* bytes. @@ -729,8 +730,9 @@ Oracledb Methods whether to use the TLS Server Name Indication (SNI) extension to bypass the second TLS negotiation that would otherwise be required. This parameter is used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `SQL*Net - documentation `__ for more details. The ``program`` parameter is expected to be a string which specifies the @@ -1043,9 +1045,9 @@ Oracledb Methods requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is - negotiated down to the lower of this value and the database network SDU - configuration value. See the `SQL*Net documentation `__ for more details. This value is used in both the python-oracledb Thin and Thick modes. The default value is *8192* bytes. @@ -1088,8 +1090,9 @@ Oracledb Methods whether to use the TLS Server Name Indication (SNI) extension to bypass the second TLS negotiation that would otherwise be required. 
This parameter is used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `SQL*Net - documentation `__ for more details. The ``program`` parameter is expected to be a string which specifies the @@ -1546,9 +1549,9 @@ Oracledb Methods requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is - negotiated down to the lower of this value and the database network SDU - configuration value. See the `SQL*Net documentation `__ for more details. This value is used in both the python-oracledb Thin and Thick modes. The default value is *8192* bytes. @@ -1591,8 +1594,9 @@ Oracledb Methods whether to use the TLS Server Name Indication (SNI) extension to bypass the second TLS negotiation that would otherwise be required. This parameter is used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `SQL*Net - documentation `__ for more details. The ``program`` parameter is expected to be a string which specifies the @@ -1969,9 +1973,9 @@ Oracledb Methods requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is - negotiated down to the lower of this value and the database network SDU - configuration value. See the `SQL*Net documentation `__ for more details. The default value is *8192* bytes. @@ -2013,8 +2017,9 @@ Oracledb Methods whether to use the TLS Server Name Indication (SNI) extension to bypass the second TLS negotiation that would otherwise be required. This parameter is used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `SQL*Net - documentation `__ for more details. The ``program`` parameter is expected to be a string which specifies the @@ -2593,9 +2598,9 @@ Oracledb Methods requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is - negotiated down to the lower of this value and the database network SDU - configuration value. See the `SQL*Net documentation `__ for more details. This value is used in both the python-oracledb Thin and Thick modes. The default value is *8192* bytes. @@ -2638,8 +2643,9 @@ Oracledb Methods whether to use the TLS Server Name Indication (SNI) extension to bypass the second TLS negotiation that would otherwise be required. This parameter is used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `SQL*Net - documentation `__ for more details. The ``program`` parameter is expected to be a string which specifies the diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index d3c097b4..ac746ae1 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -64,8 +64,10 @@ Common Changes :attr:`defaults.fetch_decimals` is set to *True*. 
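A short illustration of the :attr:`defaults.fetch_decimals` setting referenced
above. This is a minimal sketch assuming a reachable database; the user,
password, and connect string are placeholders:

.. code:: python

    import decimal

    import oracledb

    # fetch NUMBER columns as decimal.Decimal instead of float
    oracledb.defaults.fetch_decimals = True

    # placeholder credentials and connect string
    with oracledb.connect(user="hr", password="hr_pwd", dsn="localhost/orclpdb1") as conn:
        with conn.cursor() as cursor:
            cursor.execute("select 123.45 from dual")
            (value,) = cursor.fetchone()
            assert isinstance(value, decimal.Decimal)
            print(value)  # 123.45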
#) Fixed bug when binding a variable that was previously bound as an output variable in a DML RETURNING statement. -#) An error message that links to documention on setting up a protocol hook - function is now returned for "ldap" and "ldaps" connection strings. +#) An error message that links to :ref:`documentation ` on + setting up a protocol hook function is now returned for LDAP and LDAPS URL + connection strings in python-oracledb thin mode, or when + :attr:`defaults.thick_mode_dsn_passthrough` is *False*. #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a message being enqueued is not supported by the queue. Previously, @@ -2743,9 +2745,9 @@ cx_Oracle 5.3 (March 2017) versions of the gcc compiler for Cygwin. #) Simplified test suite by combining Python 2 and 3 scripts into one script and separated out 12.1 features into a single script. -#) Updated samples to use code that works on both Python 2 and 3 +#) Updated samples to use code that works on both Python 2 and 3. #) Added support for pickling/unpickling error objects - (`Issue #23 `__) + (Bitbucket Issue #23). #) Dropped support for callbacks on OCI functions. #) Removed deprecated types UNICODE, FIXED_UNICODE and LONG_UNICODE (use NCHAR, FIXED_NCHAR and LONG_NCHAR instead). @@ -2821,10 +2823,10 @@ cx_Oracle 5.2 (June 2015) #) Removed remaining remnants of support Oracle 9i. #) Added __version__ attribute to conform with PEP 396. #) Ensure that sessions are released to the pool when calling - connection.close() - (`Issue #2 `__) + connection.close(). + (Bitbucket Issue #2). #) Fixed handling of datetime intervals - (`Issue #7 `__) + (Bitbucket Issue #7). cx_Oracle 5.1.3 (May 2014) diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 26209b22..91621696 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -252,7 +252,7 @@ see :ref:`driverdiff` and :ref:`compatibility`. - Yes - Yes * - Oracle Transactional Event Queues and Advanced Queuing (AQ) (see :ref:`aqusermanual`) - - Yes - only "Classic" queue is supported (RAW, named Oracle object, and JSON payloads) + - Yes - only "Classic" queues are supported (RAW, named Oracle object, and JSON payloads) - Yes - Yes * - Call timeouts (see :attr:`Connection.call_timeout`) diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 74308c2c..ef2dc278 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -488,7 +488,7 @@ Centralized Configuration Provider URL Connection Strings A :ref:`Centralized Configuration Provider ` URL connection string allows python-oracledb configuration information to be stored -centrally in OCI Object Storage, using Azure App Configuration, or in a local +centrally in OCI Object Storage, in Azure App Configuration, or in a local file. Given a provider URL, python-oracledb will access the information stored in the configuration provider and use it to connect to Oracle Database. @@ -600,7 +600,8 @@ Oracle Net Connect Descriptor and Easy Connect Keywords Easy Connect syntax is described in :ref:`easyconnect`. Connect Descriptor keywords are shown in the `Database Net Services Reference -`__. +`__. **Notes on specific keywords** @@ -702,10 +703,9 @@ be in the JSON file under the key "pyo". An example is: With :ref:`Azure App Configuration `, values are set using a key such as "/pyo/". 
This is similar to how `Oracle -Call Interface -`__ settings use -the key "/oci/" as shown in `Oracle Net Service -Administrator’s Guide `__ settings use the key "/oci/" as shown in +`Oracle Net Service Administrator’s Guide `__. .. _params_ez_config_provider: @@ -993,10 +993,10 @@ user name, password, a cache time, and :ref:`python-oracledb settings - The database :ref:`connection string `. - Required * - ``config_time_to_live`` - - How many seconds the configuration is cached for. Defaults to 86,400 seconds (24 hours). + - The number of seconds the configuration is cached for. Defaults to 86,400 seconds (24 hours). - Optional * - ``config_time_to_live_grace_period`` - - How many seconds an expired configuration can still be used if a new configuration cannot be obtained. Defaults to 1,800 seconds (30 minutes). + - The number of seconds an expired configuration can still be used if a new configuration cannot be obtained. Defaults to 1,800 seconds (30 minutes). - Optional * - ``pyo`` - See :ref:`pyoparams`. @@ -1156,10 +1156,10 @@ keys that can be in the JSON file are listed below. - The database :ref:`connection string `. - Required * - ``config_time_to_live`` - - How many seconds the configuration is cached for. Defaults to 86,400 seconds (24 hours). + - The number of seconds the configuration is cached for. Defaults to 86,400 seconds (24 hours). - Optional * - ``config_time_to_live_grace_period`` - - How many seconds an expired configuration can still be used if a new configuration cannot be obtained. Defaults to 1,800 seconds (30 minutes). + - The number of seconds an expired configuration can still be used if a new configuration cannot be obtained. Defaults to 1,800 seconds (30 minutes). - Optional * - ``pyo`` - See :ref:`pyoparams`. @@ -1184,7 +1184,7 @@ The elements of the connection string are detailed in the table below. :class: wy-table-responsive :widths: 15 25 15 :name: _connection_string_for_oci_object_storage - :summary: The first row displays the name of the connection string parameter. The second row displays whether the connection string parameter is required or optional. The third row displays the description of the connection string parameter. + :summary: The first row displays the name of the connection string parameter. The second row displays the description of the connection string parameter. The third row displays whether the connection string parameter is required or optional. * - Parameter - Description @@ -1242,11 +1242,12 @@ syntax is:: } Passwords can optionally be stored using the Azure Key Vault. To do this, -you must ``import oracledb.plugins.azure_config_provider`` in your application and you must -define the Azure Key Vault credentials in the ``password`` key. -In this, the ``azure_client_id`` and ``azure_tenant_id`` must be specified. -Also, either ``azure_client_secret`` or ``azure_client_certificate_path`` -should be specified. For example:: +you must import the :ref:`oracledb.plugins.azure_config_provider +` python-oracledb plugin in your application and you must +define the Azure Key Vault credentials in the ``password`` key. In this, the +``azure_client_id`` and ``azure_tenant_id`` must be specified. Also, either +``azure_client_secret`` or ``azure_client_certificate_path`` should be +specified. For example:: "password": { "type": "azurevault", @@ -1385,10 +1386,10 @@ The keys that can be added in Azure App Configuration are listed below: - The database :ref:`connection string `. 
- Required * - ``config_time_to_live`` - - How many seconds the configuration is cached for. Defaults to 86,400 seconds (24 hours). + - The number of seconds the configuration is cached for. Defaults to 86,400 seconds (24 hours). - Optional * - ``config_time_to_live_grace_period`` - - How many seconds an expired configuration can still be used if a new configuration cannot be obtained. Defaults to 1,800 seconds (30 minutes). + - The number of seconds an expired configuration can still be used if a new configuration cannot be obtained. Defaults to 1,800 seconds (30 minutes). - Optional * - ``pyo`` - See :ref:`pyoparams`. @@ -1413,7 +1414,7 @@ The elements of the connection string are detailed in the table below. :align: center :widths: 15 25 15 :name: _connection_string_for_azure_app - :summary: The first row displays the name of the connection string parameter. The second row displays whether the connection string parameter is required or optional. The third row displays the description of the connection string parameter. + :summary: The first row displays the name of the connection string parameter. The second row displays the description of the connection string parameter. The third row displays whether the connection string parameter is required or optional. * - Parameter - Description @@ -3684,14 +3685,15 @@ Connecting Using OAuth 2.0 Token-Based Authentication ----------------------------------------------------- Oracle Cloud Infrastructure (OCI) users can be centrally managed in a Microsoft -Azure Active Directory (Azure AD) service. Open Authorization (OAuth 2.0) -token-based authentication allows users to authenticate to Oracle Database -using Azure AD OAuth2 tokens. Ensure that you have a Microsoft Azure account -and your Oracle Database is registered with Azure AD. See `Configuring the -Oracle Autonomous Database for Microsoft Azure AD Integration `_ for more information. Both Thin and Thick modes of the -python-oracledb driver support OAuth 2.0 token-based authentication. +Entra ID (formerly Microsoft Azure Active Directory) service. Open +Authorization (OAuth 2.0) token-based authentication allows users to +authenticate to Oracle Database using Entra ID OAuth2 tokens. Ensure that you +have a Microsoft Azure account and your Oracle Database is registered with +Microsoft Entra ID. See `Configuring the Oracle Database for Microsoft Entra +ID Integration `_ for more information. Both Thin +and Thick modes of the python-oracledb driver support OAuth 2.0 token-based +authentication. When using python-oracledb in Thick mode, Oracle Client libraries 19.15 (or later), or 21.7 (or later) are needed. @@ -3708,20 +3710,20 @@ Token-Based Authentication Connection Strings `. OAuth2 Token Generation And Extraction ++++++++++++++++++++++++++++++++++++++ -There are different ways to retrieve Azure AD OAuth2 tokens. You can use +There are different ways to retrieve Entra ID OAuth2 tokens. You can use python-oracledb's :ref:`azure_tokens ` plugin to generate tokens. Some of the other ways to retrieve OAuth2 tokens are detailed in -`Examples of Retrieving Azure AD OAuth2 Tokens `_. You can -also retrieve Azure AD OAuth2 tokens by using `Azure Identity client library -for Python `_. You +can also retrieve Entra ID OAuth2 tokens by using `Azure Identity client +library for Python `_. .. 
_oauthhandler: **Example of Generating an OAuth2 Token** -An example of automating the process of generating and reading Azure AD OAuth2 +An example of automating the process of generating and reading Entra ID OAuth2 tokens is: .. code:: python @@ -3790,7 +3792,7 @@ Connection Creation with OAuth2 Access Tokens For OAuth 2.0 Token-Based Authentication using a class such as the sample :ref:`TokenHandlerOAuth class `, the ``access_token`` connection parameter must be specified. This parameter should be a string (or a callable -that returns a string) specifying an Azure AD OAuth2 token. In the examples +that returns a string) specifying an Entra ID OAuth2 token. In the examples used below, the ``access_token`` parameter is set to a callable. The examples used in the subsequent sections use the @@ -3837,7 +3839,7 @@ you need to explicitly set the ``access_token``, ``homogeneous``, Note that the ``access_token`` parameter should be set to a callable. This is useful when the connection pool needs to expand and create new connections but the current token has expired. In such a case, the callable should return a -string specifying the new, valid Azure AD OAuth2 token. +string specifying the new, valid Entra ID OAuth2 token. **Standalone Connections Thick Mode Using OAuth2 Tokens** @@ -3871,7 +3873,7 @@ you need to explicitly set the ``access_token``, ``externalauth``, and Note that the ``access_token`` parameter should be set to a callable. This is useful when the connection pool needs to expand and create new connections but the current token has expired. In such a case, the callable should return a -string specifying the new, valid Azure AD OAuth2 token. +string specifying the new, valid Entra ID OAuth2 token. .. _oauth2connstr: @@ -3889,10 +3891,10 @@ introduced in python-oracledb 1.1 instead. See OAuth 2.0 Token-Based Authentication Connection Strings is only supported in the python-oracledb Thick mode. See :ref:`enablingthick`. -There are different ways to retrieve Azure AD OAuth2 tokens. Some of the ways to -retrieve OAuth2 tokens are detailed in `Examples of Retrieving Azure AD OAuth2 -Tokens `_. You can also retrieve Azure AD OAuth2 +There are different ways to retrieve Entra ID OAuth2 tokens. Some of the ways to +retrieve OAuth2 tokens are detailed in `Examples of Retrieving Entra ID OAuth2 +Tokens `_. You can also retrieve Entra ID OAuth2 tokens by using `Azure Identity client library for Python `_. @@ -3903,7 +3905,7 @@ azure-python>`_. Here, as an example, we are using Curl with a Resource Owner Password Credential (ROPC) Flow, that is, a ``curl`` command is used against -the Azure AD API to get the Azure AD OAuth2 token:: +the Entra ID API to get the Entra ID OAuth2 token:: curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' https://login.microsoftonline.com/your_tenant_id/oauth2/v2.0/token @@ -4266,8 +4268,8 @@ to Oracle Autonomous Database with mutual TLS (mTLS). See :ref:`autonomousdb`. **Standalone Connections in Thin Mode Using OCI IAM Tokens** When using a class such as the :ref:`TokenHandlerIAM class ` to -generate OCI IAM tokens to connect to Oracle Cloud Database in Thin mode, you -need to explicitly set the ``access_token``, ``config_dir``, +generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, +you need to explicitly set the ``access_token``, ``config_dir``, ``wallet_location``, and ``wallet_password`` parameters of :func:`~oracledb.connect`. 
For example: @@ -4283,9 +4285,9 @@ need to explicitly set the ``access_token``, ``config_dir``, **Connection Pools in Thin Mode Using OCI IAM Tokens** When using a class such as :ref:`TokenHandlerIAM class ` to -generate OCI IAM tokens to connect to Oracle Cloud Database in Thin mode, you -need to explicitly set the ``access_token``, ``homogeneous``, ``config_dir``, -``wallet_location``, and ``wallet_password`` parameters of +generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, +you need to explicitly set the ``access_token``, ``homogeneous``, +``config_dir``, ``wallet_location``, and ``wallet_password`` parameters of :func:`~oracledb.create_pool`. For example: .. code:: python @@ -4321,8 +4323,8 @@ of :func:`~oracledb.connect`. For example: **Connection Pools in Thick Mode Using OCI IAM Tokens** When using a class such as :ref:`TokenHandlerIAM class ` to -generate OCI IAM tokens to connect to Oracle Cloud Database in Thick mode, you -need to explicitly set the ``access_token``, ``externalauth``, and +generate OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, +you need to explicitly set the ``access_token``, ``externalauth``, and ``homogeneous`` parameters of :func:`oracledb.create_pool`. For example: .. code:: python @@ -4438,7 +4440,7 @@ The plugin has a Python package dependency which needs to be installed separately before the plugin can be used, see :ref:`ocitokenmodules`. The ``oci_tokens`` plugin defines and registers a :ref:`parameter hook -` function which uses the connetion parameter +` function which uses the connection parameter ``extra_auth_params`` passed to :meth:`oracledb.connect()`, :meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, or :meth:`oracledb.create_pool_async()`. Using this parameter's values, the hook @@ -4447,11 +4449,6 @@ function sets the ``access_token`` parameter of a :ref:`ConnectParams object then acquires and uses a token to transparently complete connection or pool creation calls. -The ``extra_auth_params`` connection parameter should be a dictionary -containing the configuration parameters necessary to retrieve a token for -Oracle Database authentication. The OCI specific configuration parameters that -can be specified in ``extra_auth_params`` are: - For OCI Cloud Native Authentication connection and pool creation, the ``extra_auth_params`` parameter should be a dictionary with keys as shown in the following table. @@ -4551,8 +4548,8 @@ and ``extra_auth_params`` parameters of :func:`~oracledb.connect`. For example: **Connection Pools in Thin Mode Using OCI IAM Tokens** When using the :ref:`oci_tokens ` plugin to generate -OCI IAM tokens to connect to Oracle Cloud Database in Thin mode, you need to -explicitly set the ``config_dir``, ``homogeneous``, ``wallet_location``, +OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, you need +to explicitly set the ``config_dir``, ``homogeneous``, ``wallet_location``, ``wallet_password``, and ``extra_auth_params`` parameters of :func:`~oracledb.create_pool`. For example: diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 2b4cf2c6..ca58b608 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -824,13 +824,14 @@ Oracle Database will result in an exception. :header-rows: 1 :class: wy-table-responsive :widths: 1 1 + :width: 100% :align: left :summary: The first column is the Oracle Database type. 
The second column is the Arrow data type used in the OracleDataFrame object. * - Oracle Database Type - Arrow Data Type * - DB_TYPE_NUMBER - - DECIMAL128, INT64, or DOUBLE. See notes below + - DECIMAL128, INT64, or DOUBLE * - DB_TYPE_CHAR - STRING * - DB_TYPE_VARCHAR @@ -850,33 +851,44 @@ Oracle Database will result in an exception. * - DB_TYPE_TIMESTAMP_TZ - TIMESTAMP -When converting Oracle Database NUMBERs, if :attr:`defaults.fetch_decimals` is -*True*, the Arrow data type is DECIMAL128. Note Arrow's DECIMAL128 format only -supports precision of up to 38 decimal digits. Else, if the Oracle number data -type has scale of 0, and precision less than or equal to 18, then the Arrow -data type is INT64. In all other cases, the Arrow data type is DOUBLE. +When converting Oracle Database NUMBERs: -The Arrow TIMESTAMP for Oracle Database DATEs will have a time unit of -"seconds". For Oracle Database TIMESTAMP types, the time unit depends on the -Oracle type's fractional precision: +- If the column has been created without a precision and scale, then the Arrow + data type will be DOUBLE. -.. list-table-with-summary:: - :header-rows: 1 - :class: wy-table-responsive - :widths: 1 1 - :align: left - :summary: The first column is the Oracle Database TIMESTAMP-type fractional second precision. The second column is the resulting Arrow TIMESTAMP time unit. - - * - Oracle Database TIMESTAMP fractional second precision range - - Arrow TIMESTAMP time unit - * - 0 - - seconds - * - 1 - 3 - - milliseconds - * - 4 - 6 - - microconds - * - 7 - 9 - - nanoseconds +- If :attr:`defaults.fetch_decimals` is set to *True*, then the Arrow data + type is DECIMAL128. + +- If the column has been created with a scale of *0*, and a precision value + that is less than or equal to *18*, then the Arrow data type is INT64. + +- In all other cases, the Arrow data type is DOUBLE. + +When converting Oracle Database DATEs and TIMESTAMPs: + +- For Oracle Database DATE types, the Arrow TIMESTAMP will have a time unit of + "seconds". + +- For Oracle Database TIMESTAMP types, the Arrow TIMESTAMP time unit depends on + the Oracle type's fractional precision as shown in the table below: + + .. list-table-with-summary:: + :header-rows: 1 + :class: wy-table-responsive + :widths: 1 1 + :align: left + :summary: The first column is the Oracle Database TIMESTAMP-type fractional second precision. The second column is the resulting Arrow TIMESTAMP time unit. + + * - Oracle Database TIMESTAMP fractional second precision range + - Arrow TIMESTAMP time unit + * - 0 + - seconds + * - 1 - 3 + - milliseconds + * - 4 - 6 + - microconds + * - 7 - 9 + - nanoseconds Arrow TIMESTAMPs will not have timezone data. From 2aa735eba1cfe1ecd713f182d9b16b23e9f1e25d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 19 Mar 2025 10:38:27 -0600 Subject: [PATCH 028/239] Indicate that behavior can be changed. --- doc/src/release_notes.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ac746ae1..a43297a7 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -65,8 +65,8 @@ Common Changes #) Fixed bug when binding a variable that was previously bound as an output variable in a DML RETURNING statement. 
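The mapping above can be checked from Python by converting the fetched data
frame to a PyArrow table and printing its schema. This is a minimal sketch: it
assumes PyArrow is installed and that the sample HR schema's EMPLOYEES table is
available; the credentials and connect string are placeholders:

.. code:: python

    import pyarrow.interchange

    import oracledb

    # placeholder credentials and connect string
    connection = oracledb.connect(user="hr", password="hr_pwd", dsn="localhost/orclpdb1")

    # in the sample HR schema, EMPLOYEE_ID is NUMBER(6,0), SALARY is
    # NUMBER(8,2), and HIRE_DATE is DATE, so the printed schema is expected to
    # show int64, double, and timestamp[s] columns respectively
    odf = connection.fetch_df_all(
        "select employee_id, salary, hire_date from employees"
    )
    table = pyarrow.interchange.from_dataframe(odf)
    print(table.schema)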
#) An error message that links to :ref:`documentation ` on - setting up a protocol hook function is now returned for LDAP and LDAPS URL - connection strings in python-oracledb thin mode, or when + setting up a protocol hook function is now returned by default for LDAP and + LDAPS URL connection strings in python-oracledb thin mode, or when :attr:`defaults.thick_mode_dsn_passthrough` is *False*. #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a From b65e0098a115f097985ff2fc0f6677a66b2759c1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 19 Mar 2025 10:38:44 -0600 Subject: [PATCH 029/239] Eliminated small memory leak with production of all data frames. --- doc/src/release_notes.rst | 1 + src/oracledb/interchange/nanoarrow_bridge.pyx | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index a43297a7..e1ab02ff 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -57,6 +57,7 @@ Common Changes - avoid memory allocation/free cycles for decimal data - eliminated memory leak if OracleDataFrame is not converted to an external data frame + - eliminated small memory leak with production of all data frames #) Fixed bug when NUMBER data is fetched with :meth:`Connection.fetch_df_all()` or :meth:`Connection.fetch_df_batches()` diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index 4fd9ff56..24272843 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -336,11 +336,10 @@ cdef class OracleArrowArray: cdef: int64_t n_buffers = self.arrow_array.n_buffers ArrowBufferView *buffer - ArrowArrayView *view - view = cpython.PyMem_Malloc(sizeof(ArrowArrayView)) - _check_nanoarrow(ArrowArrayViewInitFromSchema(view, self.arrow_schema, + ArrowArrayView view + _check_nanoarrow(ArrowArrayViewInitFromSchema(&view, self.arrow_schema, NULL)) - _check_nanoarrow(ArrowArrayViewSetArray(view, self.arrow_array, NULL)) + _check_nanoarrow(ArrowArrayViewSetArray(&view, self.arrow_array, NULL)) # initialize all buffers to None to begin with buffers = { From e5b3a79d62ccbea5d8759b9f0746acb7ce637d78 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 19 Mar 2025 10:39:06 -0600 Subject: [PATCH 030/239] Removed dead code. --- src/oracledb/impl/base/converters.pyx | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 9e9416f8..341469c0 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -141,21 +141,6 @@ cdef int convert_number_to_arrow_int64(OracleArrowArray arrow_array, arrow_array.append_int64(atoi(value.chars[:value.num_chars])) -cdef int convert_number_to_arrow_string(OracleArrowArray arrow_array, - OracleDataBuffer *buffer) except -1: - """ - Converts a NUMBER value stored in the buffer to Arrow string. - """ - cdef: - OracleNumber *value = &buffer.as_number - char* ptr - if value.is_max_negative_value: - ptr = "-1e126" - arrow_array.append_bytes(ptr, 6) - else: - arrow_array.append_bytes(value.chars, value.num_chars) - - cdef object convert_number_to_python_decimal(OracleDataBuffer *buffer): """ Converts a NUMBER value stored in the buffer to Python decimal.Decimal(). 
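For reference, the asynchronous bulk queuing methods ``AsyncQueue.enqmany()``
and ``AsyncQueue.deqmany()`` introduced earlier in this patch series can be
used as sketched below. This is a minimal sketch: the credentials and connect
string are placeholders, and DEMO_RAW_QUEUE is assumed to be an existing RAW
"classic" queue:

.. code:: python

    import asyncio

    import oracledb

    async def main():
        # placeholder credentials and connect string
        conn = await oracledb.connect_async(
            user="hr", password="hr_pwd", dsn="localhost/orclpdb1"
        )

        # DEMO_RAW_QUEUE is assumed to be an existing RAW queue
        queue = conn.queue("DEMO_RAW_QUEUE")

        # enqueue several messages in a single operation
        messages = [conn.msgproperties(payload=f"Message {i}") for i in range(4)]
        await queue.enqmany(messages)
        await conn.commit()

        # dequeue up to 10 messages without waiting if the queue is empty
        queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
        for message in await queue.deqmany(10):
            print(message.payload.decode())
        await conn.commit()

        await conn.close()

    asyncio.run(main())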
From 9c3f8dd72d78ad1cf00e3b2b710e32e729c02986 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 19 Mar 2025 10:39:30 -0600 Subject: [PATCH 031/239] Improved error message when attempting to work with sparse vectors using Oracle Client libraries 23.6 or earlier. --- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/thick/odpi | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e1ab02ff..ffff4fb2 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -44,6 +44,9 @@ Thick Mode Changes #) Improved error message when getting :attr:`Connection.max_open_cursors` when using Oracle Client libraries 11.2 (`ODPI-C `__ dependency update). +#) Improved error message when attempting to work with sparse vectors using + Oracle Client libraries 23.6 or earlier + (`ODPI-C `__ dependency update). Common Changes ++++++++++++++ diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index 3a578197..a9251089 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit 3a578197cae567028bfe9d39e7e05bfc5869c650 +Subproject commit a925108924c3d048e77ca232134b283be5ec197d From a40023882845101fa6f95b858eb265ce3df31b2f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:23:13 -0600 Subject: [PATCH 032/239] Update to released version of ODPI-C. --- src/oracledb/impl/thick/odpi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index a9251089..18fa0ef0 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit a925108924c3d048e77ca232134b283be5ec197d +Subproject commit 18fa0ef0815b0256a8f5a1540ed4b2efc535298d From 802468ca970933aaabca9d06467291e61fe74639 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:24:39 -0600 Subject: [PATCH 033/239] Rename constants and variables for increased accuracy in their use. --- src/oracledb/impl/thin/constants.pxi | 7 +++++-- src/oracledb/impl/thin/messages/execute.pyx | 18 +++++++++--------- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index d2f10dc4..f68b5e80 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -230,13 +230,16 @@ cdef enum: TNS_EXEC_OPTION_COMMIT = 0x100 TNS_EXEC_OPTION_COMMIT_REEXECUTE = 0x1 TNS_EXEC_OPTION_PLSQL_BIND = 0x400 - TNS_EXEC_OPTION_DML_ROWCOUNTS = 0x4000 TNS_EXEC_OPTION_NOT_PLSQL = 0x8000 - TNS_EXEC_OPTION_IMPLICIT_RESULTSET = 0x8000 TNS_EXEC_OPTION_DESCRIBE = 0x20000 TNS_EXEC_OPTION_NO_COMPRESSED_FETCH = 0x40000 TNS_EXEC_OPTION_BATCH_ERRORS = 0x80000 +# execute flags +cdef enum: + TNS_EXEC_FLAGS_DML_ROWCOUNTS = 0x4000 + TNS_EXEC_FLAGS_IMPLICIT_RESULTSET = 0x8000 + # server side piggyback op codes cdef enum: TNS_SERVER_PIGGYBACK_QUERY_CACHE_INVALIDATION = 1 diff --git a/src/oracledb/impl/thin/messages/execute.pyx b/src/oracledb/impl/thin/messages/execute.pyx index 540d4c10..2333b0cc 100644 --- a/src/oracledb/impl/thin/messages/execute.pyx +++ b/src/oracledb/impl/thin/messages/execute.pyx @@ -38,7 +38,7 @@ cdef class ExecuteMessage(MessageWithData): Write the message for a full execute. 
""" cdef: - uint32_t options, dml_options = 0, num_params = 0, num_iters = 1 + uint32_t options, exec_flags = 0, num_params = 0, num_iters = 1 Statement stmt = self.cursor_impl._statement BaseThinCursorImpl cursor_impl = self.cursor_impl list params = stmt._bind_info_list @@ -51,7 +51,7 @@ cdef class ExecuteMessage(MessageWithData): if stmt._requires_define: options |= TNS_EXEC_OPTION_DEFINE elif not self.parse_only and stmt._sql is not None: - dml_options = TNS_EXEC_OPTION_IMPLICIT_RESULTSET + exec_flags = TNS_EXEC_FLAGS_IMPLICIT_RESULTSET options |= TNS_EXEC_OPTION_EXECUTE if stmt._cursor_id == 0 or stmt._is_ddl: options |= TNS_EXEC_OPTION_PARSE @@ -75,7 +75,7 @@ cdef class ExecuteMessage(MessageWithData): if self.batcherrors: options |= TNS_EXEC_OPTION_BATCH_ERRORS if self.arraydmlrowcounts: - dml_options = TNS_EXEC_OPTION_DML_ROWCOUNTS + exec_flags = TNS_EXEC_FLAGS_DML_ROWCOUNTS if self.conn_impl.autocommit and not self.parse_only: options |= TNS_EXEC_OPTION_COMMIT @@ -160,7 +160,7 @@ cdef class ExecuteMessage(MessageWithData): buf.write_ub4(0) # al8i4[6] SCN (part 2) buf.write_ub4(stmt._is_query) # al8i4[7] is query buf.write_ub4(0) # al8i4[8] - buf.write_ub4(dml_options) # al8i4[9] DML row counts/implicit + buf.write_ub4(exec_flags) # al8i4[9] execute flags buf.write_ub4(0) # al8i4[10] buf.write_ub4(0) # al8i4[11] buf.write_ub4(0) # al8i4[12] @@ -174,7 +174,7 @@ cdef class ExecuteMessage(MessageWithData): Write the message for a re-execute. """ cdef: - uint32_t i, exec_flags_1 = 0, exec_flags_2 = 0, num_iters + uint32_t i, options_1 = 0, options_2 = 0, num_iters Statement stmt = self.cursor_impl._statement list params = stmt._bind_info_list BindInfo info @@ -188,19 +188,19 @@ cdef class ExecuteMessage(MessageWithData): if info.bind_dir != TNS_BIND_DIR_OUTPUT \ and not info._is_return_bind] if self.function_code == TNS_FUNC_REEXECUTE_AND_FETCH: - exec_flags_1 |= TNS_EXEC_OPTION_EXECUTE + options_1 |= TNS_EXEC_OPTION_EXECUTE num_iters = self.cursor_impl.prefetchrows self.cursor_impl._set_fetch_array_size(num_iters) else: if self.conn_impl.autocommit: - exec_flags_2 |= TNS_EXEC_OPTION_COMMIT_REEXECUTE + options_2 |= TNS_EXEC_OPTION_COMMIT_REEXECUTE num_iters = self.num_execs self._write_function_code(buf) buf.write_ub4(stmt._cursor_id) buf.write_ub4(num_iters) - buf.write_ub4(exec_flags_1) - buf.write_ub4(exec_flags_2) + buf.write_ub4(options_1) + buf.write_ub4(options_2) if params: for i in range(self.num_execs): buf.write_uint8(TNS_MSG_TYPE_ROW_DATA) From 92ac5a6f8212ca6b3de9346d98a54f6977f05b31 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:25:10 -0600 Subject: [PATCH 034/239] Use operator |= for safety in all cases. 
--- src/oracledb/impl/thin/messages/execute.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oracledb/impl/thin/messages/execute.pyx b/src/oracledb/impl/thin/messages/execute.pyx index 2333b0cc..cc74d2bb 100644 --- a/src/oracledb/impl/thin/messages/execute.pyx +++ b/src/oracledb/impl/thin/messages/execute.pyx @@ -51,7 +51,7 @@ cdef class ExecuteMessage(MessageWithData): if stmt._requires_define: options |= TNS_EXEC_OPTION_DEFINE elif not self.parse_only and stmt._sql is not None: - exec_flags = TNS_EXEC_FLAGS_IMPLICIT_RESULTSET + exec_flags |= TNS_EXEC_FLAGS_IMPLICIT_RESULTSET options |= TNS_EXEC_OPTION_EXECUTE if stmt._cursor_id == 0 or stmt._is_ddl: options |= TNS_EXEC_OPTION_PARSE @@ -75,7 +75,7 @@ cdef class ExecuteMessage(MessageWithData): if self.batcherrors: options |= TNS_EXEC_OPTION_BATCH_ERRORS if self.arraydmlrowcounts: - exec_flags = TNS_EXEC_FLAGS_DML_ROWCOUNTS + exec_flags |= TNS_EXEC_FLAGS_DML_ROWCOUNTS if self.conn_impl.autocommit and not self.parse_only: options |= TNS_EXEC_OPTION_COMMIT From 253d569488bcd465ae51ccbcb42e43aae864d1e7 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:26:56 -0600 Subject: [PATCH 035/239] Support LOB types in the fetch data frame APIs. --- doc/src/release_notes.rst | 2 + doc/src/user_guide/sql_execution.rst | 10 ++++ src/oracledb/base_impl.pyx | 3 ++ src/oracledb/impl/base/converters.pyx | 7 ++- src/oracledb/impl/base/cursor.pyx | 2 +- src/oracledb/impl/base/metadata.pyx | 6 +++ src/oracledb/interchange/column.py | 17 ++++++- src/oracledb/interchange/nanoarrow_bridge.pxd | 3 ++ src/oracledb/interchange/nanoarrow_bridge.pyx | 22 ++++++++- tests/test_8000_dataframe.py | 46 ++++++++++++++++++- tests/test_8100_dataframe_async.py | 4 +- 11 files changed, 114 insertions(+), 8 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ffff4fb2..997a3403 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -62,6 +62,8 @@ Common Changes data frame - eliminated small memory leak with production of all data frames +#) Added support for CLOB, BLOB and RAW data types in + :meth:`Connection.fetch_df_all()` and :meth:`Connection.fetch_df_batches()` #) Fixed bug when NUMBER data is fetched with :meth:`Connection.fetch_df_all()` or :meth:`Connection.fetch_df_batches()` that does not have precision or scale specified and diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index ca58b608..b0835355 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -850,6 +850,12 @@ Oracle Database will result in an exception. - TIMESTAMP * - DB_TYPE_TIMESTAMP_TZ - TIMESTAMP + * - DB_TYPE_CLOB + - LARGE_STRING + * - DB_TYPE_BLOB + - LARGE_BINARY + * - DB_TYPE_RAW + - BINARY When converting Oracle Database NUMBERs: @@ -864,6 +870,10 @@ When converting Oracle Database NUMBERs: - In all other cases, the Arrow data type is DOUBLE. +When converting Oracle Database CLOBs and BLOBs: + +- The LOBs must be no more than 1 GB in length. 
+ When converting Oracle Database DATEs and TIMESTAMPs: - For Oracle Database DATE types, the Arrow TIMESTAMP will have a time unit of diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index 71e6390b..cc5a9a84 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -48,10 +48,13 @@ from .interchange.nanoarrow_bridge cimport ( NANOARROW_TIME_UNIT_MICRO, NANOARROW_TIME_UNIT_NANO, NANOARROW_TYPE_BOOL, + NANOARROW_TYPE_BINARY, NANOARROW_TYPE_DECIMAL128, NANOARROW_TYPE_DOUBLE, NANOARROW_TYPE_FLOAT, NANOARROW_TYPE_INT64, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING, NANOARROW_TYPE_STRING, NANOARROW_TYPE_TIMESTAMP, ) diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 341469c0..7999f227 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -232,7 +232,12 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, arrow_array.append_double(data.buffer.as_double) elif arrow_type == NANOARROW_TYPE_FLOAT: arrow_array.append_float(data.buffer.as_float) - elif arrow_type == NANOARROW_TYPE_STRING: + elif arrow_type in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_STRING, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING + ): rb = &data.buffer.as_raw_bytes arrow_array.append_bytes( rb.ptr, rb.num_bytes) elif arrow_type == NANOARROW_TYPE_TIMESTAMP: diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index ed82659b..cc885bca 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -221,7 +221,7 @@ cdef class BaseCursorImpl: var_impl.outconverter = conn_impl.decode_oson elif metadata.is_json and db_type_num != DB_TYPE_NUM_JSON: var_impl.outconverter = self._build_json_converter_fn() - elif not C_DEFAULTS.fetch_lobs: + elif not C_DEFAULTS.fetch_lobs or self.fetching_arrow: if db_type_num == DB_TYPE_NUM_BLOB: var_impl.metadata.dbtype = DB_TYPE_LONG_RAW var_impl._fetch_metadata.dbtype = DB_TYPE_LONG_RAW diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index a4b3c4c0..a253c91a 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -84,6 +84,12 @@ cdef class OracleMetadata: DB_TYPE_NUM_TIMESTAMP_LTZ, DB_TYPE_NUM_TIMESTAMP_TZ): self._arrow_type = NANOARROW_TYPE_TIMESTAMP + elif db_type_num == DB_TYPE_NUM_LONG_RAW: + self._arrow_type = NANOARROW_TYPE_LARGE_BINARY + elif db_type_num == DB_TYPE_NUM_LONG_VARCHAR: + self._arrow_type = NANOARROW_TYPE_LARGE_STRING + elif db_type_num == DB_TYPE_NUM_RAW: + self._arrow_type = NANOARROW_TYPE_BINARY else: errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_TYPE, db_type_name=self.dbtype.name) diff --git a/src/oracledb/interchange/column.py b/src/oracledb/interchange/column.py index 3cf2d967..8701b7b4 100644 --- a/src/oracledb/interchange/column.py +++ b/src/oracledb/interchange/column.py @@ -45,9 +45,12 @@ NANOARROW_TIME_UNIT_MILLI, NANOARROW_TIME_UNIT_MICRO, NANOARROW_TIME_UNIT_NANO, + NANOARROW_TYPE_BINARY, NANOARROW_TYPE_DOUBLE, NANOARROW_TYPE_FLOAT, NANOARROW_TYPE_INT64, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING, NANOARROW_TYPE_STRING, NANOARROW_TYPE_TIMESTAMP, NANOARROW_TYPE_DECIMAL128, @@ -88,7 +91,13 @@ def _offsets_buffer(self): offsets_buffer = OracleColumnBuffer( size_in_bytes=size_bytes, address=address, buffer_type="offsets" ) - dtype = (DtypeKind.INT, 32, "i", "=") + if self.ora_arrow_array.arrow_type in ( + NANOARROW_TYPE_LARGE_STRING, + 
NANOARROW_TYPE_LARGE_BINARY, + ): + dtype = (DtypeKind.INT, 64, "l", "=") + else: + dtype = (DtypeKind.INT, 32, "i", "=") return offsets_buffer, dtype def _validity_buffer(self): @@ -149,6 +158,12 @@ def dtype(self) -> Dtype: f"d:{array.precision}.{array.scale}", "=", ) + elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_BINARY: + return (DtypeKind.STRING, 8, "z", "=") + elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_LARGE_BINARY: + return (DtypeKind.STRING, 8, "Z", "=") + elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_LARGE_STRING: + return (DtypeKind.STRING, 8, "U", "=") def get_buffers(self) -> ColumnBuffers: """ diff --git a/src/oracledb/interchange/nanoarrow_bridge.pxd b/src/oracledb/interchange/nanoarrow_bridge.pxd index cc6fd842..479fa7d0 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pxd +++ b/src/oracledb/interchange/nanoarrow_bridge.pxd @@ -48,10 +48,13 @@ cdef extern from "nanoarrow.h": cpdef enum ArrowType: NANOARROW_TYPE_BOOL + NANOARROW_TYPE_BINARY NANOARROW_TYPE_DECIMAL128 NANOARROW_TYPE_DOUBLE NANOARROW_TYPE_FLOAT NANOARROW_TYPE_INT64 + NANOARROW_TYPE_LARGE_BINARY + NANOARROW_TYPE_LARGE_STRING NANOARROW_TYPE_STRING NANOARROW_TYPE_TIMESTAMP diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index 24272843..b0eabafd 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -302,7 +302,10 @@ cdef class OracleArrowArray: ArrowDecimalSetBytes(&decimal, ptr) _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, &decimal)) - elif array.arrow_type == NANOARROW_TYPE_STRING: + elif array.arrow_type in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_STRING + ): offsets_buffer = ArrowArrayBuffer(array.arrow_array, 1) data_buffer = ArrowArrayBuffer(array.arrow_array, 2) as_int32 = offsets_buffer.data @@ -316,6 +319,23 @@ cdef class OracleArrowArray: finally: cpython.PyMem_Free(temp) + elif array.arrow_type in ( + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING + ): + offsets_buffer = ArrowArrayBuffer(array.arrow_array, 1) + data_buffer = ArrowArrayBuffer(array.arrow_array, 2) + as_int64 = offsets_buffer.data + start_offset = as_int64[index] + end_offset = as_int64[index + 1] + temp = cpython.PyMem_Malloc(end_offset - start_offset) + memcpy(temp, &data_buffer.data[start_offset], + end_offset - start_offset) + try: + self.append_bytes(temp, end_offset - start_offset) + finally: + cpython.PyMem_Free(temp) + cdef int append_null(self) except -1: """ Append a null value to the array. 
diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 8c399e29..06c91e3e 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -466,8 +466,8 @@ def test_8015(self): self.assertEqual(col.null_count, 1) def test_8016(self): - "8016 - check unsupported error for LOBs" - statement = "select to_clob('test_8016') from dual" + "8016 - check unsupported error" + statement = "select cursor(select user from dual) from dual" with self.assertRaisesFullCode("DPY-3030"): self.conn.fetch_df_all(statement) @@ -538,6 +538,48 @@ def test_8022(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + def test_8023(self): + "8023 - fetch clob" + data = [("test_8023",)] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + "select to_clob('test_8023') from dual" + ) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + + def test_8024(self): + "8024 - fetch blob" + data = [(b"test_8024",)] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" + ) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + + def test_8025(self): + "8025 - fetch raw" + data = [(b"test_8025",)] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + "select utl_raw.cast_to_raw('test_8025') from dual" + ) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 76ea43b7..f10fb85a 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -478,8 +478,8 @@ async def test_8115(self): self.assertEqual(col.null_count, 1) async def test_8116(self): - "8116 - check unsupported error for LOBs" - statement = "select to_clob('test_8116') from dual" + "8116 - check unsupported error" + statement = "select cursor(select user from dual) from dual" with self.assertRaisesFullCode("DPY-3030"): await self.conn.fetch_df_all(statement) From e0efd6f286fb4ecf07f0ccbe7857d995bd09a684 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:29:35 -0600 Subject: [PATCH 036/239] Documentation improvements. --- doc/src/api_manual/async_connection.rst | 8 +++---- doc/src/api_manual/connection.rst | 20 +++++++++++++---- doc/src/api_manual/dataframe.rst | 4 ++-- doc/src/release_notes.rst | 30 +++++++++++-------------- doc/src/user_guide/sql_execution.rst | 4 ++-- 5 files changed, 37 insertions(+), 29 deletions(-) diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 913aa2a1..664087c4 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -146,8 +146,8 @@ AsyncConnection Methods .. note:: - The data frame support in python-oracledb 3.0.0 is a pre-release and - may change in the next version. 
+ The data frame support in python-oracledb 3.1 is a pre-release and may + change in a future version. .. versionadded:: 3.0.0 @@ -174,8 +174,8 @@ AsyncConnection Methods .. note:: - The data frame support in python-oracledb 3.0.0 is a pre-release and - may change in the next version. + The data frame support in python-oracledb 3.1 is a pre-release and may + change in a future version. .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index a5597de5..81c39416 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -146,12 +146,18 @@ Connection Methods :attr:`Cursor.prefetchrows` size is always set to the value of the explicit or default ``arraysize`` parameter value. + Any LOB fetched must be less than 1 GB. + See :ref:`dataframeformat` for the supported data types and examples. .. note:: - The data frame support in python-oracledb 3.0.0 is a pre-release and - may change in the next version. + The data frame support in python-oracledb 3.1 is a pre-release and may + change in a future version. + + .. note:: + + This method is an extension to the DB API definition. .. versionadded:: 3.0.0 @@ -174,12 +180,18 @@ Connection Methods :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. + Any LOB fetched must be less than 1 GB. + See :ref:`dataframeformat` for the supported data types and examples. .. note:: - The data frame support in python-oracledb 3.0.0 is a pre-release and - may change in the next version. + The data frame support in python-oracledb 3.1 is a pre-release and may + change in a future version. + + .. note:: + + This method is an extension to the DB API definition. .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index 4ba8b647..aa356edd 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -13,8 +13,8 @@ from Oracle Database types to Arrow data types. .. note:: - The data frame support in python-oracledb 3.0.0 is a pre-release and may - change in the next version. + The data frame support in python-oracledb 3.1 is a pre-release and may + change in a future version. .. _oracledataframeobj: diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 997a3403..e26719c9 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -19,9 +19,9 @@ Thin Mode Changes #) Improved support for :ref:`Oracle Advanced Queuing `: - - added support for JSON payloads - - added support for bulk enqueuing and dequeuing - - added support for using AQ with asyncio + - Added support for JSON payloads + - Added support for bulk enqueuing and dequeuing + - Added support for using AQ with asyncio #) Improved error message when the cryptography package cannot be imported (`issue 455 `__). @@ -51,23 +51,19 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Improved the performance and memory management of - :meth:`Connection.fetch_df_all()` and - :meth:`Connection.fetch_df_batches()`: +#) Improvements to data frame fetching with :meth:`Connection.fetch_df_all()` + and :meth:`Connection.fetch_df_batches()`: - - more efficient processing when a significant amount of data is duplicated + - Added support for CLOB, BLOB and RAW data types + - Fixed bug when NUMBER data is fetched that does not have a precision or + scale specified and :attr:`defaults.fetch_decimals` is set to *True*. 
+ - More efficient processing when a significant amount of data is duplicated from one row to the next - - avoid memory allocation/free cycles for decimal data - - eliminated memory leak if OracleDataFrame is not converted to an external + - Avoid memory allocation/free cycles for decimal data + - Eliminated memory leak if OracleDataFrame is not converted to an external data frame - - eliminated small memory leak with production of all data frames - -#) Added support for CLOB, BLOB and RAW data types in - :meth:`Connection.fetch_df_all()` and :meth:`Connection.fetch_df_batches()` -#) Fixed bug when NUMBER data is fetched with - :meth:`Connection.fetch_df_all()` or :meth:`Connection.fetch_df_batches()` - that does not have precision or scale specified and - :attr:`defaults.fetch_decimals` is set to *True*. + - Eliminated small memory leak with production of each data frame + #) Fixed bug when binding a variable that was previously bound as an output variable in a DML RETURNING statement. #) An error message that links to :ref:`documentation ` on diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index b0835355..1b810e42 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -752,8 +752,8 @@ in `Apache Parquet `__ format. .. note:: - The data frame support in python-oracledb 3.0.0 is a pre-release and may - change in the next version. + The data frame support in python-oracledb 3.1 is a pre-release and may + change in a future version. The method :meth:`Connection.fetch_df_all()` fetches all rows from a query. The method :meth:`Connection.fetch_df_batches()` implements an iterator for From 627441e40733857d493c37f67427cdc6dc3477c7 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:29:52 -0600 Subject: [PATCH 037/239] Added support for scrollable cursors in thin mode. --- doc/src/api_manual/async_cursor.rst | 19 ++ doc/src/release_notes.rst | 1 + doc/src/user_guide/appendix_a.rst | 2 +- doc/src/user_guide/sql_execution.rst | 9 +- samples/scrollable_cursors.py | 7 +- src/oracledb/cursor.py | 31 ++- src/oracledb/errors.py | 5 + src/oracledb/impl/base/cursor.pyx | 2 +- src/oracledb/impl/thick/cursor.pyx | 2 +- src/oracledb/impl/thin/constants.pxi | 11 + src/oracledb/impl/thin/cursor.pyx | 110 +++++++- src/oracledb/impl/thin/messages/execute.pyx | 16 +- tests/test_4200_cursor_scrollable.py | 31 ++- tests/test_7300_unsupported_features_thin.py | 7 +- tests/test_8600_cursor_scrollable_async.py | 257 +++++++++++++++++++ 15 files changed, 468 insertions(+), 42 deletions(-) create mode 100644 tests/test_8600_cursor_scrollable_async.py diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 6733bcd6..07c85b8f 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -326,6 +326,25 @@ AsyncCursor Methods :meth:`AsyncCursor.callfunc()`, the first parameter in the list refers to the return value of the PL/SQL function. +.. method:: AsyncCursor.scroll(value=0, mode="relative") + + Scrolls the cursor in the result set to a new position according to the + mode. + + If mode is *relative* (the default value), the value is taken as an offset + to the current position in the result set. If set to *absolute*, value + states an absolute target position. If set to *first*, the cursor is + positioned at the first row and if set to *last*, the cursor is set to the + last row in the result set. 
+ + An error is raised if the mode is *relative* or *absolute* and the scroll + operation would position the cursor outside of the result set. + + .. note:: + + This method is an extension to the DB API definition but it is + mentioned in PEP 249 as an optional extension. + .. method:: AsyncCursor.setoutputsize(size, [column]) This method does nothing and is retained solely for compatibility with the diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e26719c9..49785ca0 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,6 +17,7 @@ oracledb 3.1.0 (TBD) Thin Mode Changes +++++++++++++++++ +#) Added support for :ref:`scrollable cursors `. #) Improved support for :ref:`Oracle Advanced Queuing `: - Added support for JSON payloads diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 91621696..6de5bb7f 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -260,7 +260,7 @@ see :ref:`driverdiff` and :ref:`compatibility`. - Yes - Yes * - Scrollable cursors (see :ref:`scrollablecursors`) - - No + - Yes - Yes - Yes * - Oracle Database startup and shutdown (see :ref:`startup`) diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 1b810e42..b09b5ee9 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -625,11 +625,6 @@ rows, and to move to a particular row in a query result set. The result set is cached on the database server until the cursor is closed. In contrast, regular cursors are restricted to moving forward. -.. note:: - - Scrollable cursors are only supported in the python-oracledb Thick mode. See - :ref:`enablingthick`. - A scrollable cursor is created by setting the parameter ``scrollable=True`` when creating the cursor. The method :meth:`Cursor.scroll()` is used to move to different locations in the result set. @@ -656,6 +651,10 @@ Examples are: cursor.scroll(-4) print("SKIP BACK 4 ROWS:", cursor.fetchone()) +See `samples/scrollable_cursors.py `__ for a runnable example. + + .. _fetchobjects: Fetching Oracle Database Objects and Collections diff --git a/samples/scrollable_cursors.py b/samples/scrollable_cursors.py index 90699842..a1341dd3 100644 --- a/samples/scrollable_cursors.py +++ b/samples/scrollable_cursors.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2023, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. # @@ -38,8 +38,9 @@ import oracledb import sample_env -# this script is currently only supported in python-oracledb thick mode -oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) +# determine whether to use python-oracledb thin mode or thick mode +if not sample_env.get_is_thin(): + oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( user=sample_env.get_main_user(), diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index a57f84c1..08fa1a51 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -846,17 +846,17 @@ def scroll(self, value: int = 0, mode: str = "relative") -> None: Scroll the cursor in the result set to a new position according to the mode. - If mode is “relative” (the default value), the value is taken as an - offset to the current position in the result set. If set to “absolute”, - value states an absolute target position. If set to “first”, the cursor - is positioned at the first row and if set to “last”, the cursor is set + If mode is "relative" (the default value), the value is taken as an + offset to the current position in the result set. If set to "absolute", + value states an absolute target position. If set to "first", the cursor + is positioned at the first row and if set to "last", the cursor is set to the last row in the result set. - An error is raised if the mode is “relative” or “absolute” and the + An error is raised if the mode is "relative" or "absolute" and the scroll operation would position the cursor outside of the result set. """ self._verify_open() - self._impl.scroll(self.connection, value, mode) + self._impl.scroll(self, value, mode) class AsyncCursor(BaseCursor): @@ -1081,3 +1081,20 @@ async def parse(self, statement: str) -> None: self._verify_open() self._prepare(statement) await self._impl.parse(self) + + async def scroll(self, value: int = 0, mode: str = "relative") -> None: + """ + Scroll the cursor in the result set to a new position according to the + mode. + + If mode is "relative" (the default value), the value is taken as an + offset to the current position in the result set. If set to "absolute", + value states an absolute target position. If set to "first", the cursor + is positioned at the first row and if set to "last", the cursor is set + to the last row in the result set. + + An error is raised if the mode is "relative" or "absolute" and the + scroll operation would position the cursor outside of the result set. 
+ """ + self._verify_open() + await self._impl.scroll(self, value, mode) diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 82c95be4..c3eec753 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -282,6 +282,7 @@ def _raise_not_supported(feature: str) -> None: ERR_ARROW_C_API_ERROR = 2060 ERR_PARAMS_HOOK_HANDLER_FAILED = 2061 ERR_PAYLOAD_CANNOT_BE_ENQUEUED = 2062 +ERR_SCROLL_OUT_OF_RESULT_SET = 2063 # error numbers that result in NotSupportedError ERR_TIME_NOT_SUPPORTED = 3000 @@ -428,6 +429,7 @@ def _raise_not_supported(feature: str) -> None: ERR_DPI_ERROR_XREF = { 1010: ERR_NOT_CONNECTED, 1024: (ERR_INVALID_COLL_INDEX_GET, r"at index (?P\d+) does"), + 1027: ERR_SCROLL_OUT_OF_RESULT_SET, 1043: ERR_INVALID_NUMBER, 1044: ERR_ORACLE_NUMBER_NO_REPR, 1063: ERR_EXECUTE_MODE_ONLY_FOR_DML, @@ -772,6 +774,9 @@ def _raise_not_supported(feature: str) -> None: ERR_PYTHON_VALUE_NOT_SUPPORTED: ( 'Python value of type "{type_name}" is not supported' ), + ERR_SCROLL_OUT_OF_RESULT_SET: ( + "scroll operation would go out of the result set" + ), ERR_SELF_BIND_NOT_SUPPORTED: "binding to self is not supported", ERR_CONNECTION_CLOSED: "the database or network closed the connection", ERR_SERVER_VERSION_NOT_SUPPORTED: ( diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index cc885bca..3a78fa40 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -664,7 +664,7 @@ cdef class BaseCursorImpl: """ self._prepare(statement, tag, cache_statement) - def scroll(self, conn, value, mode): + def scroll(self, cursor, value, mode): """ Scrolls a scrollable cursor. """ diff --git a/src/oracledb/impl/thick/cursor.pyx b/src/oracledb/impl/thick/cursor.pyx index 38a0e1a3..2373dace 100644 --- a/src/oracledb/impl/thick/cursor.pyx +++ b/src/oracledb/impl/thick/cursor.pyx @@ -510,7 +510,7 @@ cdef class ThickCursorImpl(BaseCursorImpl): if num_query_cols > 0: self._perform_define(cursor, num_query_cols) - def scroll(self, object conn, int32_t offset, object mode): + def scroll(self, object cursor, int32_t offset, object mode): cdef: uint32_t temp_buffer_row_index = 0, num_rows_in_buffer = 0 bint more_rows_to_fetch = False diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index f68b5e80..8c85b148 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -239,6 +239,17 @@ cdef enum: cdef enum: TNS_EXEC_FLAGS_DML_ROWCOUNTS = 0x4000 TNS_EXEC_FLAGS_IMPLICIT_RESULTSET = 0x8000 + TNS_EXEC_FLAGS_SCROLLABLE = 0x02 + +# fetch orientations +cdef enum: + TNS_FETCH_ORIENTATION_ABSOLUTE = 0x20 + TNS_FETCH_ORIENTATION_CURRENT = 0x01 + TNS_FETCH_ORIENTATION_FIRST = 0x04 + TNS_FETCH_ORIENTATION_LAST = 0x08 + TNS_FETCH_ORIENTATION_NEXT = 0x02 + TNS_FETCH_ORIENTATION_PRIOR = 0x10 + TNS_FETCH_ORIENTATION_RELATIVE = 0x40 # server side piggyback op codes cdef enum: diff --git a/src/oracledb/impl/thin/cursor.pyx b/src/oracledb/impl/thin/cursor.pyx index 8c37b1ab..cd9eef07 100644 --- a/src/oracledb/impl/thin/cursor.pyx +++ b/src/oracledb/impl/thin/cursor.pyx @@ -37,6 +37,8 @@ cdef class BaseThinCursorImpl(BaseCursorImpl): list _batcherrors list _dmlrowcounts list _implicit_resultsets + uint64_t _buffer_min_row + uint64_t _buffer_max_row uint32_t _num_columns uint32_t _last_row_index Rowid _lastrowid @@ -61,6 +63,64 @@ cdef class BaseThinCursorImpl(BaseCursorImpl): message.cursor_impl = self return message + cdef ExecuteMessage _create_execute_message(self, object cursor): + """ + 
Creates and returns the message used to execute a statement once. + """ + cdef ExecuteMessage message + message = self._create_message(ExecuteMessage, cursor) + message.num_execs = 1 + if self.scrollable: + message.fetch_orientation = TNS_FETCH_ORIENTATION_CURRENT + message.fetch_pos = 1 + return message + + cdef ExecuteMessage _create_scroll_message(self, object cursor, + object mode, int32_t offset): + """ + Creates a message object that is used to send a scroll request to the + database and receive back its response. + """ + cdef: + ExecuteMessage message + uint32_t orientation + uint64_t desired_row + + # check mode and calculate desired row + if mode == "relative": + if (self.rowcount + offset) < 1: + errors._raise_err(errors.ERR_SCROLL_OUT_OF_RESULT_SET) + orientation = TNS_FETCH_ORIENTATION_RELATIVE + desired_row = self.rowcount + offset + elif mode == "absolute": + orientation = TNS_FETCH_ORIENTATION_ABSOLUTE + desired_row = offset + elif mode == "first": + orientation = TNS_FETCH_ORIENTATION_FIRST + desired_row = 1 + elif mode == "last": + orientation = TNS_FETCH_ORIENTATION_LAST + else: + errors._raise_err(errors.ERR_WRONG_SCROLL_MODE) + + # determine if the server needs to be contacted at all + # for "last", the server is always contacted + if orientation != TNS_FETCH_ORIENTATION_LAST \ + and desired_row >= self._buffer_min_row \ + and desired_row < self._buffer_max_row: + self._buffer_index = \ + (desired_row - self._buffer_min_row) + self._buffer_rowcount = self._buffer_max_row - desired_row + self.rowcount = desired_row - 1 + return None + + # build message + message = self._create_message(ExecuteMessage, cursor) + message.scroll_operation = self._more_rows_to_fetch + message.fetch_orientation = orientation + message.fetch_pos = desired_row + return message + cdef BaseVarImpl _create_var_impl(self, object conn): cdef ThinVarImpl var_impl var_impl = ThinVarImpl.__new__(ThinVarImpl) @@ -125,6 +185,28 @@ cdef class BaseThinCursorImpl(BaseCursorImpl): errors._raise_err(errors.ERR_MISSING_BIND_VALUE, name=bind_info._bind_name) + cdef int _post_process_scroll(self, ExecuteMessage message) except -1: + """ + Called after a scroll operation has completed successfully. The row + count and buffer row counts and indices are updated as required. + """ + if self._buffer_rowcount == 0: + if message.fetch_orientation not in ( + TNS_FETCH_ORIENTATION_FIRST, + TNS_FETCH_ORIENTATION_LAST + ): + errors._raise_err(errors.ERR_SCROLL_OUT_OF_RESULT_SET) + self.rowcount = 0 + self._more_rows_to_fetch = False + self._buffer_index = 0 + self._buffer_min_row = 0 + self._buffer_max_row = 0 + else: + self.rowcount = message.error_info.rowcount - self._buffer_rowcount + self._buffer_min_row = self.rowcount + 1 + self._buffer_max_row = self._buffer_min_row + self._buffer_rowcount + self._buffer_index = 0 + cdef int _set_fetch_array_size(self, uint32_t value): """ Internal method for setting the fetch array size. 
This also ensures @@ -182,6 +264,8 @@ cdef class ThinCursorImpl(BaseThinCursorImpl): else: message = self._create_message(FetchMessage, cursor) protocol._process_single_message(message) + self._buffer_min_row = self.rowcount + 1 + self._buffer_max_row = self._buffer_min_row + self._buffer_rowcount def execute(self, cursor): cdef: @@ -189,8 +273,7 @@ cdef class ThinCursorImpl(BaseThinCursorImpl): object conn = cursor.connection MessageWithData message self._preprocess_execute(conn) - message = self._create_message(ExecuteMessage, cursor) - message.num_execs = 1 + message = self._create_execute_message(cursor) protocol._process_single_message(message) self.warning = message.warning if self._statement._is_query: @@ -242,6 +325,15 @@ cdef class ThinCursorImpl(BaseThinCursorImpl): message.parse_only = True protocol._process_single_message(message) + def scroll(self, object cursor, int32_t offset, object mode): + cdef: + Protocol protocol = self._conn_impl._protocol + ExecuteMessage message + message = self._create_scroll_message(cursor, mode, offset) + if message is not None: + protocol._process_single_message(message) + self._post_process_scroll(message) + cdef class AsyncThinCursorImpl(BaseThinCursorImpl): @@ -268,6 +360,7 @@ cdef class AsyncThinCursorImpl(BaseThinCursorImpl): else: message = self._create_message(FetchMessage, cursor) await self._conn_impl._protocol._process_single_message(message) + self._buffer_min_row = self.rowcount + 1 async def _preprocess_execute_async(self, object conn): """ @@ -293,8 +386,7 @@ cdef class AsyncThinCursorImpl(BaseThinCursorImpl): MessageWithData message protocol = self._conn_impl._protocol await self._preprocess_execute_async(conn) - message = self._create_message(ExecuteMessage, cursor) - message.num_execs = 1 + message = self._create_execute_message(cursor) await protocol._process_single_message(message) self.warning = message.warning if self._statement._is_query: @@ -378,3 +470,13 @@ cdef class AsyncThinCursorImpl(BaseThinCursorImpl): message = self._create_message(ExecuteMessage, cursor) message.parse_only = True await protocol._process_single_message(message) + + async def scroll(self, object cursor, int32_t offset, object mode): + cdef: + BaseAsyncProtocol protocol + MessageWithData message + protocol = self._conn_impl._protocol + message = self._create_scroll_message(cursor, mode, offset) + if message is not None: + await protocol._process_single_message(message) + self._post_process_scroll(message) diff --git a/src/oracledb/impl/thin/messages/execute.pyx b/src/oracledb/impl/thin/messages/execute.pyx index cc74d2bb..57447a22 100644 --- a/src/oracledb/impl/thin/messages/execute.pyx +++ b/src/oracledb/impl/thin/messages/execute.pyx @@ -32,6 +32,10 @@ @cython.final cdef class ExecuteMessage(MessageWithData): + cdef: + uint32_t fetch_orientation + uint32_t fetch_pos + bint scroll_operation cdef int _write_execute_message(self, WriteBuffer buf) except -1: """ @@ -52,7 +56,10 @@ cdef class ExecuteMessage(MessageWithData): options |= TNS_EXEC_OPTION_DEFINE elif not self.parse_only and stmt._sql is not None: exec_flags |= TNS_EXEC_FLAGS_IMPLICIT_RESULTSET - options |= TNS_EXEC_OPTION_EXECUTE + if not self.scroll_operation: + options |= TNS_EXEC_OPTION_EXECUTE + if cursor_impl.scrollable: + exec_flags |= TNS_EXEC_FLAGS_SCROLLABLE if stmt._cursor_id == 0 or stmt._is_ddl: options |= TNS_EXEC_OPTION_PARSE if stmt._is_query: @@ -161,8 +168,8 @@ cdef class ExecuteMessage(MessageWithData): buf.write_ub4(stmt._is_query) # al8i4[7] is query buf.write_ub4(0) # 
al8i4[8] buf.write_ub4(exec_flags) # al8i4[9] execute flags - buf.write_ub4(0) # al8i4[10] - buf.write_ub4(0) # al8i4[11] + buf.write_ub4(self.fetch_orientation) # al8i4[10] fetch orientation + buf.write_ub4(self.fetch_pos) # al8i4[11] fetch pos buf.write_ub4(0) # al8i4[12] if stmt._requires_define: self._write_column_metadata(buf, self.cursor_impl.fetch_var_impls) @@ -230,7 +237,8 @@ cdef class ExecuteMessage(MessageWithData): or self.parse_only \ or stmt._requires_define \ or stmt._is_ddl \ - or self.batcherrors: + or self.batcherrors \ + or self.cursor_impl.scrollable: self.function_code = TNS_FUNC_EXECUTE self._write_execute_message(buf) elif stmt._is_query and self.cursor_impl.prefetchrows > 0: diff --git a/tests/test_4200_cursor_scrollable.py b/tests/test_4200_cursor_scrollable.py index 686a0f51..897fb7b5 100644 --- a/tests/test_4200_cursor_scrollable.py +++ b/tests/test_4200_cursor_scrollable.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,14 +26,9 @@ 4200 - Module for testing scrollable cursors """ -import unittest - import test_env -@unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support scrollable cursors yet" -) class TestCase(test_env.BaseTestCase): def test_4200(self): "4200 - test creating a scrollable cursor" @@ -51,12 +46,13 @@ def test_4201(self): cursor = self.conn.cursor(scrollable=True) cursor.arraysize = self.cursor.arraysize cursor.execute("select NumberCol from TestNumbers order by IntCol") - with self.assertRaisesFullCode("DPI-1027"): + with self.assertRaisesFullCode("DPY-2063"): cursor.scroll(12, "absolute") def test_4202(self): "4202 - test scrolling absolute (when in buffers)" cursor = self.conn.cursor(scrollable=True) + cursor.prefetchrows = 0 cursor.arraysize = self.cursor.arraysize cursor.execute("select NumberCol from TestNumbers order by IntCol") cursor.fetchmany() @@ -83,6 +79,7 @@ def test_4204(self): "4204 - test scrolling to first row in result set (in buffers)" cursor = self.conn.cursor(scrollable=True) cursor.arraysize = self.cursor.arraysize + cursor.prefetchrows = 0 cursor.execute("select NumberCol from TestNumbers order by IntCol") cursor.fetchmany() cursor.scroll(mode="first") @@ -94,6 +91,7 @@ def test_4205(self): "4205 - test scrolling to first row in result set (not in buffers)" cursor = self.conn.cursor(scrollable=True) cursor.arraysize = self.cursor.arraysize + cursor.prefetchrows = 0 cursor.execute("select NumberCol from TestNumbers order by IntCol") cursor.fetchmany() cursor.fetchmany() @@ -117,7 +115,7 @@ def test_4207(self): cursor = self.conn.cursor(scrollable=True) cursor.arraysize = self.cursor.arraysize cursor.execute("select NumberCol from TestNumbers order by IntCol") - with self.assertRaisesFullCode("DPI-1027"): + with self.assertRaisesFullCode("DPY-2063"): cursor.scroll(15) def test_4208(self): @@ -125,13 +123,14 @@ def test_4208(self): cursor = self.conn.cursor(scrollable=True) cursor.arraysize = self.cursor.arraysize cursor.execute("select NumberCol from TestNumbers order by IntCol") - with self.assertRaisesFullCode("DPI-1027"): + with self.assertRaisesFullCode("DPY-2063"): cursor.scroll(-5) def test_4209(self): "4209 - test scrolling relative (when in buffers)" cursor = 
self.conn.cursor(scrollable=True) cursor.arraysize = self.cursor.arraysize + cursor.prefetchrows = 0 cursor.execute("select NumberCol from TestNumbers order by IntCol") cursor.fetchmany() message = "array size must exceed 1 for this test to work correctly" @@ -164,7 +163,7 @@ def test_4211(self): self.assertEqual(cursor.fetchall(), []) cursor.scroll(mode="first") self.assertEqual(cursor.fetchall(), []) - with self.assertRaisesFullCode("DPI-1027"): + with self.assertRaisesFullCode("DPY-2063"): cursor.scroll(1, mode="absolute") def test_4212(self): @@ -210,6 +209,18 @@ def test_4213(self): with self.assertRaisesFullCode("DPY-2009"): cursor.scroll(mode="middle") + def test_4214(self): + "4214 - test scroll after fetching all rows" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = 5 + cursor.prefetchrows = 0 + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchall() + cursor.scroll(5, mode="absolute") + (value,) = cursor.fetchone() + self.assertEqual(value, 6.25) + self.assertEqual(cursor.rowcount, 5) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_7300_unsupported_features_thin.py b/tests/test_7300_unsupported_features_thin.py index 164bea29..34c31083 100644 --- a/tests/test_7300_unsupported_features_thin.py +++ b/tests/test_7300_unsupported_features_thin.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -47,11 +47,6 @@ def test_7300(self): with self.assertRaisesFullCode("DPY-3001"): pool.max_sessions_per_shard = 2 - def test_7301(self): - "7301 - test using a scrollable cursor" - with self.assertRaisesFullCode("DPY-3001"): - self.cursor.scroll(mode="last") - def test_7302(self): "7302 - test connection with sharding and supersharding keys" with self.assertRaisesFullCode("DPY-3001"): diff --git a/tests/test_8600_cursor_scrollable_async.py b/tests/test_8600_cursor_scrollable_async.py new file mode 100644 index 00000000..f99d8249 --- /dev/null +++ b/tests/test_8600_cursor_scrollable_async.py @@ -0,0 +1,257 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +""" +8600 - Module for testing scrollable cursors with asyncio +""" + +import unittest + +import test_env + + +@unittest.skipUnless( + test_env.get_is_thin(), "asyncio not supported in thick mode" +) +class TestCase(test_env.BaseAsyncTestCase): + async def test_8600(self): + "8600 - test creating a scrollable cursor" + cursor = self.conn.cursor() + self.assertEqual(cursor.scrollable, False) + cursor = self.conn.cursor(True) + self.assertEqual(cursor.scrollable, True) + cursor = self.conn.cursor(scrollable=True) + self.assertEqual(cursor.scrollable, True) + cursor.scrollable = False + self.assertEqual(cursor.scrollable, False) + + async def test_8601(self): + "8601 - test scrolling absolute yields an exception (after result set)" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + with self.assertRaisesFullCode("DPY-2063"): + await cursor.scroll(12, "absolute") + + async def test_8602(self): + "8602 - test scrolling absolute (when in buffers)" + cursor = self.conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.fetchmany() + self.assertTrue( + cursor.arraysize > 1, + "array size must exceed 1 for this test to work correctly", + ) + await cursor.scroll(1, mode="absolute") + (value,) = await cursor.fetchone() + self.assertEqual(value, 1.25) + self.assertEqual(cursor.rowcount, 1) + + async def test_8603(self): + "8603 - test scrolling absolute (when not in buffers)" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.scroll(6, mode="absolute") + (value,) = await cursor.fetchone() + self.assertEqual(value, 7.5) + self.assertEqual(cursor.rowcount, 6) + + async def test_8604(self): + "8604 - test scrolling to first row in result set (in buffers)" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + cursor.prefetchrows = 0 + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.fetchmany() + await cursor.scroll(mode="first") + (value,) = await cursor.fetchone() + self.assertEqual(value, 1.25) + self.assertEqual(cursor.rowcount, 1) + + async def test_8605(self): + "8605 - test scrolling to first row in result set (not in buffers)" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + cursor.prefetchrows = 0 + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.fetchmany() + await cursor.fetchmany() + await cursor.scroll(mode="first") + (value,) = await cursor.fetchone() + self.assertEqual(value, 1.25) + self.assertEqual(cursor.rowcount, 1) + + async def test_8606(self): + "8606 - test scrolling to last row in result set" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.scroll(mode="last") + (value,) = await cursor.fetchone() + self.assertEqual(value, 12.5) + self.assertEqual(cursor.rowcount, 10) + + async def test_8607(self): + "8607 - test scrolling relative yields an exception (after result set)" + cursor = 
self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + with self.assertRaisesFullCode("DPY-2063"): + await cursor.scroll(15) + + async def test_8608(self): + "8608 - test scrolling relative yields exception (before result set)" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + with self.assertRaisesFullCode("DPY-2063"): + await cursor.scroll(-5) + + async def test_8609(self): + "8609 - test scrolling relative (when in buffers)" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + cursor.prefetchrows = 0 + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.fetchmany() + message = "array size must exceed 1 for this test to work correctly" + self.assertTrue(cursor.arraysize > 1, message) + await cursor.scroll(2 - cursor.rowcount) + (value,) = await cursor.fetchone() + self.assertEqual(value, 2.5) + self.assertEqual(cursor.rowcount, 2) + + async def test_8610(self): + "8610 - test scrolling relative (when not in buffers)" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.fetchmany() + await cursor.fetchmany() + message = "array size must exceed 1 for this test to work correctly" + self.assertTrue(cursor.arraysize > 1, message) + await cursor.scroll(3 - cursor.rowcount) + (value,) = await cursor.fetchone() + self.assertEqual(value, 3.75) + self.assertEqual(cursor.rowcount, 3) + + async def test_8611(self): + "8611 - test scrolling when there are no rows" + await self.cursor.execute("truncate table TestTempTable") + cursor = self.conn.cursor(scrollable=True) + await cursor.execute("select * from TestTempTable") + await cursor.scroll(mode="last") + self.assertEqual(await cursor.fetchall(), []) + await cursor.scroll(mode="first") + self.assertEqual(await cursor.fetchall(), []) + with self.assertRaisesFullCode("DPY-2063"): + await cursor.scroll(1, mode="absolute") + + async def test_8612(self): + "8612 - test scrolling with differing array and fetch array sizes" + await self.cursor.execute("truncate table TestTempTable") + for i in range(30): + await self.cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, null) + """, + [i + 1], + ) + for arraysize in range(1, 6): + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = arraysize + await cursor.execute( + "select IntCol from TestTempTable order by IntCol" + ) + for num_rows in range(1, arraysize + 1): + await cursor.scroll(15, "absolute") + rows = await cursor.fetchmany(num_rows) + self.assertEqual(rows[0][0], 15) + self.assertEqual(cursor.rowcount, 15 + num_rows - 1) + await cursor.scroll(9) + rows = await cursor.fetchmany(num_rows) + num_rows_fetched = len(rows) + self.assertEqual(rows[0][0], 15 + num_rows + 8) + self.assertEqual( + cursor.rowcount, 15 + num_rows + num_rows_fetched + 7 + ) + await cursor.scroll(-12) + rows = await cursor.fetchmany(num_rows) + count = 15 + num_rows + num_rows_fetched - 5 + self.assertEqual(rows[0][0], count) + count = 15 + num_rows + num_rows_fetched + num_rows - 6 + self.assertEqual(cursor.rowcount, count) + + async def test_8613(self): + "8613 - test calling scroll() with invalid mode" + cursor = self.conn.cursor(scrollable=True) + 
cursor.arraysize = self.cursor.arraysize + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.fetchmany() + with self.assertRaisesFullCode("DPY-2009"): + await cursor.scroll(mode="middle") + + async def test_8614(self): + "8614 - test scroll after fetching all rows" + cursor = self.conn.cursor(scrollable=True) + cursor.arraysize = 5 + cursor.prefetchrows = 0 + await cursor.execute( + "select NumberCol from TestNumbers order by IntCol" + ) + await cursor.fetchall() + await cursor.scroll(5, mode="absolute") + (value,) = await cursor.fetchone() + self.assertEqual(value, 6.25) + self.assertEqual(cursor.rowcount, 5) + + +if __name__ == "__main__": + test_env.run_test_cases() From 911cbc38d711a44c7cd00624665d66a28c63f919 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:32:37 -0600 Subject: [PATCH 038/239] Drop support for Python 3.8; make use of license declarations for PEP 639. --- README.md | 2 +- doc/src/release_notes.rst | 5 +++-- doc/src/user_guide/installation.rst | 10 ++++++---- doc/src/user_guide/introduction.rst | 6 +++--- doc/src/user_guide/troubleshooting.rst | 15 ++++++++------- pyproject.toml | 6 +++--- setup.cfg | 7 ++----- src/oracledb/__init__.py | 2 +- tox.ini | 6 +++--- 9 files changed, 30 insertions(+), 29 deletions(-) diff --git a/README.md b/README.md index 096390d9..2441e5f2 100644 --- a/README.md +++ b/README.md @@ -18,7 +18,7 @@ See [python-oracledb Installation][installation]. ## Dependencies and Interoperability -- Python versions 3.8 through 3.13. +- Python versions 3.9 through 3.13. Pre-built packages are available on [PyPI][pypi] and other repositories. diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 49785ca0..197144ce 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -52,8 +52,9 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Improvements to data frame fetching with :meth:`Connection.fetch_df_all()` - and :meth:`Connection.fetch_df_batches()`: +#) Dropped support for Python 3.8. +#) Improvements to data frame fetching with :meth:`Connection.fetch_df_all()` + and :meth:`Connection.fetch_df_batches()`: - Added support for CLOB, BLOB and RAW data types - Fixed bug when NUMBER data is fetched that does not have a precision or diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 4f410020..d776c37f 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -36,7 +36,9 @@ Python-oracledb is typically installed from Python's package repository 1. Install `Python 3 `__ if it is not already available. - Use any version from Python 3.8 through 3.13. + Use any version from Python 3.9 through 3.13. + + Previous versions of python-oracledb supported older Python versions. 2. Install python-oracledb, for example: @@ -154,7 +156,7 @@ Installation Requirements To use python-oracledb, you need: -- Python 3.8, 3.9, 3.10, 3.11, 3.12 or 3.13 +- Python 3.9, 3.10, 3.11, 3.12 or 3.13 - The Python cryptography package. This package is automatically installed as a dependency of python-oracledb. It is strongly recommended that you keep the @@ -705,8 +707,8 @@ Database 23ai. Installing python-oracledb on macOS =================================== -Python-oracledb is available as a Universal binary for Python 3.8, or later, on -Apple macOS Intel x86-64 and Apple macOS ARM64 (M1, M2, M3) architectures. 
+Python-oracledb is available as a Universal binary for Python 3.9, or later, on
+Apple macOS Intel x86-64 and Apple macOS ARM64 (M1, M2, M3, M4) architectures.
 
 Install python-oracledb
 -----------------------
diff --git a/doc/src/user_guide/introduction.rst b/doc/src/user_guide/introduction.rst
index bced3cc6..a7f5236b 100644
--- a/doc/src/user_guide/introduction.rst
+++ b/doc/src/user_guide/introduction.rst
@@ -18,9 +18,9 @@ The module is available from standard package repositories including `PyPI
 hosted at `github.com/oracle/python-oracledb
 `__.
 
-This module is currently tested with Python 3.8, 3.9, 3.10, 3.11, 3.12 and 3.13
-against Oracle Database 23ai, 21c, 19c, 18c, 12c, and 11gR2. Older versions of
-python-oracledb may support older versions of Python.
+This module is currently tested with Python 3.9, 3.10, 3.11, 3.12, and 3.13
+against Oracle Database 23ai, 21c, 19c, 18c, 12c, and 11gR2. Previous versions
+of python-oracledb supported older Python versions.
 
 Changes in python-oracledb releases can be found in the :ref:`release notes
 `.
diff --git a/doc/src/user_guide/troubleshooting.rst b/doc/src/user_guide/troubleshooting.rst
index 73992c34..028d6fd1 100644
--- a/doc/src/user_guide/troubleshooting.rst
+++ b/doc/src/user_guide/troubleshooting.rst
@@ -115,27 +115,28 @@ Some warnings may appear while using python-oracledb in Thick or Thin mode.
 
 .. _pythwarning:
 
-Deprecated Python Versions 3.6 and 3.7 Warning
----------------------------------------------- 
+Deprecated Python Version Warning
+---------------------------------
 
 **Warning:** ``Python 3.6 is no longer supported by the Python core team.
 Therefore, support for it is deprecated in python-oracledb and will be removed
 in a future release.`` (A similar warning will also be displayed for Python
-version 3.7.)
+versions 3.7 and 3.8.)
 
-**Cause:** ``import oracledb`` gives this warning because you are using Python
-version 3.6 or 3.7. These versions are no longer maintained by the Python core
-team.
+**Cause:** ``import oracledb`` gives this warning because you are using a
+version of Python that is no longer maintained by the Python core team.
 
 **Action:** You can either:
 
-- Upgrade your Python version to 3.8 or later.
+- Upgrade your Python version to 3.9 or later.
 
 - Or you can temporarily suppress the warning by importing the `warnings
   `__ module and adding a call like
   ``warnings.filterwarnings(action='ignore', module="oracledb")`` *before*
   importing ``oracledb``.
 
+- Install an older version of python-oracledb
+
 ..
_runtimetroubleshooting: Error Messages diff --git a/pyproject.toml b/pyproject.toml index 4a0e0f50..6c46d797 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,13 @@ [build-system] -requires = ["setuptools >= 40.6.0", "wheel", "cython"] +requires = ["setuptools >= 77.0.0", "wheel", "cython"] build-backend = "setuptools.build_meta" [tool.black] line-length = 79 -target-version = ["py37", "py38", "py39", "py310", "py311", "py312"] +target-version = ["py39", "py310", "py311", "py312", "py313"] required-version = 24 [tool.ruff] line-length = 79 -target-version = "py38" +target-version = "py39" exclude = ["templates"] diff --git a/setup.cfg b/setup.cfg index b79870ac..feddd7f1 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,7 +15,7 @@ project_urls = Release Notes = https://python-oracledb.readthedocs.io/en/latest/release_notes.html Issues = https://github.com/oracle/python-oracledb/issues Source = https://github.com/oracle/python-oracledb -license = Apache and/or UPL +license = UPL-1.0 OR Apache-2.0 license_files = LICENSE.txt THIRD_PARTY_LICENSES.txt @@ -23,12 +23,9 @@ license_files = classifiers = Development Status :: 5 - Production/Stable Intended Audience :: Developers - License :: OSI Approved :: Universal Permissive License (UPL) - License :: OSI Approved :: Apache Software License Natural Language :: English Operating System :: OS Independent Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3.8 Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.10 Programming Language :: Python :: 3.11 @@ -40,7 +37,7 @@ classifiers = [options] zip_safe = false -python_requires = >=3.8 +python_requires = >=3.9 setup_requires = cython>=3.0.10 install_requires = cryptography>=3.2.1 test_suite = tests diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index afcf9da0..f15a5a79 100644 --- a/src/oracledb/__init__.py +++ b/src/oracledb/__init__.py @@ -32,7 +32,7 @@ import sys import warnings -if sys.version_info[:2] < (3, 8): +if sys.version_info[:2] < (3, 9): message = ( f"Python {sys.version_info[0]}.{sys.version_info[1]} is no longer " "supported by the Python core team. Therefore, support for it is " diff --git a/tox.ini b/tox.ini index 463ba42f..efe3aa71 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{38,39,310,311,312,313}-{thin,thick} +envlist = py{39,310,311,312,313}-{thin,thick} [testenv] commands = {envpython} -m unittest discover -v -s tests @@ -18,10 +18,10 @@ passenv = DPI_DEBUG_LEVEL ORACLE_HOME -[testenv:py{38,39,310,311,312,313}-thick] +[testenv:py{39,310,311,312,313}-thick] setenv = PYO_TEST_DRIVER_MODE=thick -[testenv:py{38,39,310,311,312,313}-thin] +[testenv:py{39,310,311,312,313}-thin] setenv = PYO_TEST_DRIVER_MODE=thin From e6a288682c83362395bc122e89282e7c4347feb1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:33:30 -0600 Subject: [PATCH 039/239] Fixed bug when multiple rows containing LOBs and DbObjects are returned in a DML RETURNING statement. 
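
The regression showed up for statements where a single execution returns
several rows and each row carries a LOB or database object value. The snippet
below is only a sketch of the affected pattern, modelled on the new test_1625;
the TestCLOBs table comes from the test schema and the connection details are
placeholders, not part of this change:

    import oracledb

    conn = oracledb.connect(user=user, password=password, dsn=dsn)
    cursor = conn.cursor()
    ret_val = cursor.var(oracledb.DB_TYPE_CLOB)
    cursor.execute(
        """
        update TestCLOBs set ExtraNumCol1 = 1
        where ExtraNumCol1 is null
        returning ClobCol into :ret_val
        """,
        [ret_val],
    )
    # ret_val.getvalue() holds one LOB per affected row; the new tests assert
    # that each entry now carries that row's own value
    values = sorted(lob.read() for lob in ret_val.getvalue())
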
--- doc/src/release_notes.rst | 2 + src/oracledb/impl/thick/var.pyx | 28 +++++++---- src/oracledb/impl/thin/messages/base.pyx | 4 +- tests/test_1600_dml_returning.py | 62 ++++++++++++++++++++++++ 4 files changed, 84 insertions(+), 12 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 197144ce..2e75bb84 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -68,6 +68,8 @@ Common Changes #) Fixed bug when binding a variable that was previously bound as an output variable in a DML RETURNING statement. +#) Fixed bug when multiple rows containing LOBs and DbObjects are returned in + a DML RETURNING statement. #) An error message that links to :ref:`documentation ` on setting up a protocol hook function is now returned by default for LDAP and LDAPS URL connection strings in python-oracledb thin mode, or when diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 74c7481c..407fe2f9 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -121,7 +121,7 @@ cdef class ThickVarImpl(BaseVarImpl): object cursor cursor = self._values[pos] if cursor is None: - cursor = self._values[pos] = self._conn.cursor() + cursor = self._conn.cursor() cursor_impl = cursor._impl if dpiStmt_addRef(dbvalue.asStmt) < 0: _raise_from_odpi() @@ -138,8 +138,9 @@ cdef class ThickVarImpl(BaseVarImpl): """ cdef: ThickDbObjectImpl obj_impl - object obj - obj = self._values[pos] + object obj = None + if not self._has_returned_data: + obj = self._values[pos] if obj is not None: obj_impl = obj._impl if obj_impl._handle == dbvalue.asObject: @@ -149,8 +150,7 @@ cdef class ThickVarImpl(BaseVarImpl): if dpiObject_addRef(dbvalue.asObject) < 0: _raise_from_odpi() obj_impl._handle = dbvalue.asObject - obj = self._values[pos] = PY_TYPE_DB_OBJECT._from_impl(obj_impl) - return obj + return PY_TYPE_DB_OBJECT._from_impl(obj_impl) cdef object _get_lob_value(self, dpiDataBuffer *dbvalue, uint32_t pos): """ @@ -160,16 +160,16 @@ cdef class ThickVarImpl(BaseVarImpl): """ cdef: ThickLobImpl lob_impl - object lob - lob = self._values[pos] + object lob = None + if not self._has_returned_data: + lob = self._values[pos] if lob is not None: lob_impl = lob._impl if lob_impl._handle == dbvalue.asLOB: return lob lob_impl = ThickLobImpl._create(self._conn_impl, self.metadata.dbtype, dbvalue.asLOB) - lob = self._values[pos] = PY_TYPE_LOB._from_impl(lob_impl) - return lob + return PY_TYPE_LOB._from_impl(lob_impl) cdef object _get_scalar_value(self, uint32_t pos): """ @@ -179,13 +179,21 @@ cdef class ThickVarImpl(BaseVarImpl): cdef: uint32_t num_returned_rows dpiData *returned_data + object value if self._has_returned_data: if dpiVar_getReturnedData(self._handle, pos, &num_returned_rows, &returned_data) < 0: _raise_from_odpi() return self._transform_array_to_python(num_returned_rows, returned_data) - return self._transform_element_to_python(pos, self._data) + value = self._transform_element_to_python(pos, self._data) + if self.metadata.dbtype._native_num in ( + DPI_NATIVE_TYPE_LOB, + DPI_NATIVE_TYPE_OBJECT, + DPI_NATIVE_TYPE_STMT, + ): + self._values[pos] = value + return value cdef int _on_reset_bind(self, uint32_t num_rows) except -1: """ diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index d82dc8d2..debc0da8 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -893,7 +893,7 @@ cdef class MessageWithData(Message): elif ora_type_num in 
(ORA_TYPE_NUM_CLOB, ORA_TYPE_NUM_BLOB, ORA_TYPE_NUM_BFILE): - if not self.in_fetch: + if self.cursor_impl._statement._is_plsql: column_value = var_impl._values[pos] column_value = buf.read_lob_with_length(self.conn_impl, metadata.dbtype, @@ -909,7 +909,7 @@ cdef class MessageWithData(Message): else: obj_impl = buf.read_dbobject(typ_impl) if obj_impl is not None: - if not self.in_fetch: + if self.cursor_impl._statement._is_plsql: column_value = var_impl._values[pos] if column_value is not None: column_value._impl = obj_impl diff --git a/tests/test_1600_dml_returning.py b/tests/test_1600_dml_returning.py index d19f20b0..fd7787a2 100644 --- a/tests/test_1600_dml_returning.py +++ b/tests/test_1600_dml_returning.py @@ -554,6 +554,68 @@ def test_1624(self): self.cursor.execute(sql, in_val=35, out_val=out_val) self.assertEqual(out_val.getvalue(), 70) + def test_1625(self): + "1625 - test DML returning with multiple LOBs returned" + lob_data = [ + "Short CLOB - 1625a", + "Short CLOB - 1625b", + "Short CLOB - 1625c", + "Short CLOB - 1625d", + ] + all_data = [(i + 1, d) for i, d in enumerate(lob_data)] + self.cursor.execute("delete from TestCLOBs") + self.cursor.executemany( + "insert into TestCLOBs (IntCol, ClobCol) values (:1, :2)", all_data + ) + ret_val = self.cursor.var(oracledb.DB_TYPE_CLOB) + self.cursor.execute( + """ + update TestCLOBs set + ExtraNumCol1 = 1 + where ExtraNumCol1 is null + returning ClobCol into :ret_val + """, + [ret_val], + ) + self.conn.commit() + ret_lob_data = [v.read() for v in ret_val.getvalue()] + ret_lob_data.sort() + self.assertEqual(ret_lob_data, lob_data) + + @unittest.skipUnless(test_env.get_is_thin(), "blocked by bug 37741324") + def test_1626(self): + "1626 - test DML returning with multiple DbObjects returned" + arrays = [ + (1626, 1627, 1628), + (1629, 1630, 1631), + (1632, 1633, 1634), + (1635, 1636, 1637), + ] + all_data = [(i + 4, v[0], v[1], v[2]) for i, v in enumerate(arrays)] + self.cursor.execute("delete from TestObjects where IntCol > 3") + self.cursor.executemany( + """ + insert into TestObjects (IntCol, ArrayCol) + values (:1, udt_Array(:1, :2, :3)) + """, + all_data, + ) + typ = self.conn.gettype("UDT_ARRAY") + ret_val = self.cursor.var(typ) + self.cursor.execute( + """ + update TestObjects set + ObjectCol = null + where IntCol > 3 + returning ArrayCol into :ret_val + """, + [ret_val], + ) + self.conn.commit() + ret_obj_data = [tuple(v) for v in ret_val.getvalue()] + ret_obj_data.sort() + self.assertEqual(ret_obj_data, arrays) + if __name__ == "__main__": test_env.run_test_cases() From ce08518a4973a5d851b30d9fe209e86451863db0 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:34:03 -0600 Subject: [PATCH 040/239] Bump max client version. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 2441e5f2..e1d35ccc 100644 --- a/README.md +++ b/README.md @@ -35,7 +35,7 @@ See [python-oracledb Installation][installation]. available when optional Oracle Client libraries are loaded by python-oracledb. Libraries are available in the free [Oracle Instant Client][instantclient] packages. Python-oracledb can use Oracle Client - libraries 11.2 through 21c. + libraries 11.2 through 23ai. - Oracle Database From 41b60b7b33857cba3b36a6c89bf96efb56f1a33f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:35:24 -0600 Subject: [PATCH 041/239] Make the DB API Extension doc a simple italic line. 
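
For background, the new directives only take effect once Sphinx can import the
extension module. The snippet below is a hedged sketch of the conventional
conf.py wiring; the actual doc/src/conf.py change is part of this patch and may
differ in detail, so the path handling here is an assumption:

    # doc/src/conf.py -- illustrative sketch only, not the actual change
    import os
    import sys

    # make doc/src/_ext importable so Sphinx can find dbapi_extension.py
    sys.path.insert(0, os.path.abspath("_ext"))

    # the real conf.py keeps its existing extensions in this list as well
    extensions = ["dbapi_extension"]

Once registered, an API page can replace the repeated three-line note blocks
with a single directive such as ".. dbapimethodextension::", optionally adding
extra wording after the "::", as the documentation diffs in this patch show.
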
--- doc/src/_ext/dbapi_extension.py | 64 +++++ doc/src/api_manual/aq.rst | 35 +-- doc/src/api_manual/async_connection.rst | 42 ++- doc/src/api_manual/async_connection_pool.rst | 5 +- doc/src/api_manual/async_cursor.rst | 9 +- doc/src/api_manual/async_lob.rst | 4 +- doc/src/api_manual/connect_params.rst | 8 +- doc/src/api_manual/connection.rst | 255 ++++++------------- doc/src/api_manual/connection_pool.rst | 6 +- doc/src/api_manual/cursor.rst | 107 +++----- doc/src/api_manual/dataframe.rst | 2 +- doc/src/api_manual/dbobject_type.rst | 23 +- doc/src/api_manual/fetch_info.rst | 6 +- doc/src/api_manual/lob.rst | 6 +- doc/src/api_manual/module.rst | 237 ++++++++--------- doc/src/api_manual/soda.rst | 37 +-- doc/src/api_manual/subscription.rst | 34 +-- doc/src/api_manual/variable.rst | 4 +- doc/src/conf.py | 7 +- 19 files changed, 402 insertions(+), 489 deletions(-) create mode 100644 doc/src/_ext/dbapi_extension.py diff --git a/doc/src/_ext/dbapi_extension.py b/doc/src/_ext/dbapi_extension.py new file mode 100644 index 00000000..43440c9d --- /dev/null +++ b/doc/src/_ext/dbapi_extension.py @@ -0,0 +1,64 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# dbapi_extension.py +# +# Used to document functionality that is an extension to the DB API definition. +# ----------------------------------------------------------------------------- + +from docutils import nodes +from docutils.parsers.rst import Directive + + +class DbApiExtension(Directive): + has_content = True + + def run(self): + text = f"{self.prefix} {' '.join(self.content)}" + result = [nodes.emphasis(text=text), nodes.paragraph()] + return result + + +class DbApiMethodExtension(DbApiExtension): + prefix = "This method is an extension to the DB API definition." + + +class DbApiAttributeExtension(DbApiExtension): + prefix = "This attribute is an extension to the DB API definition." + + +class DbApiConstantExtension(DbApiExtension): + prefix = "These constants are extensions to the DB API definition." + + +class DbApiObjectExtension(DbApiExtension): + prefix = "This object is an extension to the DB API definition." 
+ + +def setup(app): + app.add_directive("dbapimethodextension", DbApiMethodExtension) + app.add_directive("dbapiattributeextension", DbApiAttributeExtension) + app.add_directive("dbapiconstantextension", DbApiConstantExtension) + app.add_directive("dbapiobjectextension", DbApiObjectExtension) diff --git a/doc/src/api_manual/aq.rst b/doc/src/api_manual/aq.rst index ba7501d5..6235f866 100644 --- a/doc/src/api_manual/aq.rst +++ b/doc/src/api_manual/aq.rst @@ -6,10 +6,6 @@ API: Advanced Queuing (AQ) See :ref:`aqusermanual` for more information about using AQ in python-oracledb. -.. note:: - - All of these objects are extensions to the DB API. - .. _queue: Queue Objects @@ -18,6 +14,8 @@ Queue Objects These objects are created using the :meth:`Connection.queue()` method and are used to enqueue and dequeue messages. +.. dbapiobjectextension:: + Queue Methods ------------- @@ -119,12 +117,10 @@ Queue Attributes Dequeue Options =============== -.. note:: - - These objects are used to configure how messages are dequeued from queues. - An instance of this object is found in the attribute - :attr:`Queue.deqOptions`. +These objects are used to configure how messages are dequeued from queues. +An instance of this object is found in the attribute :attr:`Queue.deqOptions`. +.. dbapiobjectextension:: .. attribute:: DeqOptions.condition @@ -214,12 +210,10 @@ Dequeue Options Enqueue Options =============== -.. note:: - - These objects are used to configure how messages are enqueued into queues. - An instance of this object is found in the attribute - :attr:`Queue.enqOptions`. +These objects are used to configure how messages are enqueued into queues. An +instance of this object is found in the attribute :attr:`Queue.enqOptions`. +.. dbapiobjectextension:: .. attribute:: EnqOptions.deliverymode @@ -249,14 +243,13 @@ Enqueue Options Message Properties ================== -.. note:: - - These objects are used to identify the properties of messages that are - enqueued and dequeued in queues. They are created by the method - :meth:`Connection.msgproperties()`. They are used by the methods - :meth:`Queue.enqone()` and :meth:`Queue.enqmany()` and - returned by the methods :meth:`Queue.deqone()` and :meth:`Queue.deqmany()`. +These objects are used to identify the properties of messages that are enqueued +and dequeued in queues. They are created by the method +:meth:`Connection.msgproperties()`. They are used by the methods +:meth:`Queue.enqone()` and :meth:`Queue.enqmany()` and returned by the methods +:meth:`Queue.deqone()` and :meth:`Queue.deqmany()`. +.. dbapiobjectextension:: .. attribute:: MessageProperties.attempts diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 664087c4..96354c25 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -8,8 +8,9 @@ An AsyncConnection object can be created with :meth:`oracledb.connect_async()` or with :meth:`AsyncConnectionPool.acquire()`. AsyncConnections support use of concurrent programming with `asyncio `__. Unless explicitly noted as synchronous, the AsyncConnection -methods should be used with ``await``. This object is an extension to the DB -API. +methods should be used with ``await``. + +.. dbapiobjectextension:: .. versionadded:: 2.0.0 @@ -647,20 +648,18 @@ AsyncConnection Attributes This read-only attribute specifies the session serial number associated with the connection. It is the same value returned by the SQL - ``SELECT SERIAL# FROM V$SESSION``. 
It is available only in python-oracledb - Thin mode. + ``SELECT SERIAL# FROM V$SESSION``. - .. versionadded:: 2.5.0 + It is available only in python-oracledb Thin mode. - .. note:: + For applications using :ref:`drcp`, the ``serial_num`` attribute may not + contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a round-trip. - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: - For applications using :ref:`drcp`, the ``serial_num`` attribute may - not contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a - round-trip. + .. versionadded:: 2.5.0 .. attribute:: AsyncConnection.service_name @@ -672,20 +671,19 @@ AsyncConnection Attributes This read-only attribute specifies the session identifier associated with the connection. It is the same value returned by the SQL - ``SELECT SID FROM V$SESSION``. It is available only in python-oracledb - Thin mode. + ``SELECT SID FROM V$SESSION``. - .. versionadded:: 2.5.0 + It is available only in python-oracledb Thin mode. - .. note:: + For applications using :ref:`drcp`, the ``session_id`` attribute may + not contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a + round-trip. - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: - For applications using :ref:`drcp`, the ``session_id`` attribute may - not contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a - round-trip. + .. versionadded:: 2.5.0 .. attribute:: AsyncConnection.stmtcachesize diff --git a/doc/src/api_manual/async_connection_pool.rst b/doc/src/api_manual/async_connection_pool.rst index db819d1f..c1e74d77 100644 --- a/doc/src/api_manual/async_connection_pool.rst +++ b/doc/src/api_manual/async_connection_pool.rst @@ -5,8 +5,9 @@ API: AsyncConnectionPool Objects ******************************** An AsyncConnectionPool object can be created with -:meth:`oracledb.create_pool_async()`. This object is an extension to the DB -API. +:meth:`oracledb.create_pool_async()`. + +.. dbapiobjectextension:: .. versionadded:: 2.0.0 diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 07c85b8f..35283468 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -6,7 +6,9 @@ API: AsyncCursor Objects An AsyncCursor object can be created with :meth:`AsyncConnection.cursor()`. Unless explicitly noted as synchronous, the AsyncCursor methods should be used -with ``await``. This object is an extension to the DB API. +with ``await``. + +.. dbapiobjectextension:: .. versionadded:: 2.0.0 @@ -340,11 +342,6 @@ AsyncCursor Methods An error is raised if the mode is *relative* or *absolute* and the scroll operation would position the cursor outside of the result set. - .. note:: - - This method is an extension to the DB API definition but it is - mentioned in PEP 249 as an optional extension. - .. 
method:: AsyncCursor.setoutputsize(size, [column]) This method does nothing and is retained solely for compatibility with the diff --git a/doc/src/api_manual/async_lob.rst b/doc/src/api_manual/async_lob.rst index 05b7b736..1194f8ba 100644 --- a/doc/src/api_manual/async_lob.rst +++ b/doc/src/api_manual/async_lob.rst @@ -6,7 +6,9 @@ API: AsyncLOB Objects An AsyncLOB object can be created with :meth:`AsyncConnection.createlob()`. Also, this object is returned whenever Oracle :data:`CLOB`, :data:`BLOB` and -:data:`BFILE` columns are fetched. This object is an extension to the DB API. +:data:`BFILE` columns are fetched. + +.. dbapiobjectextension:: See :ref:`lobdata` for more information about using LOBs. diff --git a/doc/src/api_manual/connect_params.rst b/doc/src/api_manual/connect_params.rst index c89892de..5e5237bd 100644 --- a/doc/src/api_manual/connect_params.rst +++ b/doc/src/api_manual/connect_params.rst @@ -4,12 +4,10 @@ API: ConnectParams Objects ************************** -.. note:: +The ConnectParams objects are created by :meth:`oracledb.ConnectParams()`. +See :ref:`usingconnparams` for more information. - This object is an extension to the DB API. - -These objects are created by :meth:`oracledb.ConnectParams()`. See -:ref:`usingconnparams` for more information. +.. dbapiobjectextension:: .. _connparamsmeth: diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 81c39416..4e20dbe4 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -20,18 +20,14 @@ Connection Methods The entry point for the connection as a context manager. It returns itself. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.__exit__() The exit point for the connection as a context manager. This will close the connection and roll back any uncommitted transaction. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.begin([formatId, transactionId, branchId]) @@ -48,26 +44,20 @@ Connection Methods Use the method :meth:`Connection.tpc_begin()` instead. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.cancel() Breaks a long-running statement. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.changepassword(oldpassword, newpassword) Changes the password for the user to which the connection is connected. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.close() @@ -102,9 +92,7 @@ Connection Methods The parameter ``data`` was added. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.cursor(scrollable=False) @@ -155,9 +143,7 @@ Connection Methods The data frame support in python-oracledb 3.1 is a pre-release and may change in a future version. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. versionadded:: 3.0.0 @@ -189,9 +175,7 @@ Connection Methods The data frame support in python-oracledb 3.1 is a pre-release and may change in a future version. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. 
versionadded:: 3.0.0 @@ -204,9 +188,7 @@ Connection Methods ctx=dblatest&id=GUID-BE42F8D3-B86B-43B4-B2A3-5760A4DF79FB>`__ for additional information on SODA. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.gettype(name) @@ -214,9 +196,7 @@ Connection Methods be used to create objects which can be bound to cursors created by this connection. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.is_healthy() @@ -247,9 +227,7 @@ Connection Methods Each of the parameters are optional. If specified, they act as a shortcut for setting each of the equivalently named properties. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.ping() @@ -261,9 +239,7 @@ Connection Methods Note connection pools will perform the same health check automatically, based on configuration settings. See :ref:`poolhealth`. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.prepare() @@ -275,9 +251,7 @@ Connection Methods Use the method :meth:`Connection.tpc_prepare()` instead. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.queue(name, payload_type=None) @@ -296,9 +270,7 @@ Connection Methods parameter ``payloadType`` was renamed to ``payload_type``. The old name will continue to work as a keyword parameter for a period of time. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.rollback() @@ -323,9 +295,7 @@ Connection Methods cursor.execute("alter database dismount") connection.shutdown(mode = oracledb.DBSHUTDOWN_FINAL) - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.startup(force=False, restrict=False, pfile=None) @@ -353,9 +323,7 @@ Connection Methods cursor.execute("alter database mount") cursor.execute("alter database open") - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Connection.subscribe(namespace=oracledb.SUBSCR_NAMESPACE_DBCHANGE, \ protocol=oracledb.SUBSCR_PROTO_OCI, callback=None, timeout=0, \ @@ -436,9 +404,7 @@ Connection Methods ``clientInitiated`` was renamed to ``client_initiated``. The old names will continue to work as keyword parameters for a period of time. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. note:: @@ -651,9 +617,7 @@ Connection Attributes is a string attribute but the value *None* is accepted and treated as an empty string. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.autocommit @@ -661,9 +625,7 @@ Connection Attributes When autocommit mode is on, all statements are committed as soon as they have completed executing. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.call_timeout @@ -671,6 +633,9 @@ Connection Attributes that a single round-trip to the database may take before a timeout will occur. A value of *0* means that no timeout will take place. + In python-oracledb Thick mode, this attribute is only available in Oracle + Client 18c or later. 
+ If a timeout occurs, the error ``DPI-1067`` will be returned if the connection is still usable. Alternatively the error ``DPI-1080`` will be returned if the connection has become invalid and can no longer be used. @@ -680,28 +645,21 @@ Connection Attributes will continue to work for a period of time. The error ``DPI-1080`` was also introduced in this release. - .. note:: - - This attribute is an extension to the DB API definition and is only - available in Oracle Client 18c and higher. + .. dbapiattributeextension:: .. attribute:: Connection.client_identifier This write-only attribute sets the CLIENT_IDENTIFIER column in the V$SESSION view. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.clientinfo This write-only attribute sets the CLIENT_INFO column in the V$SESSION view. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.current_schema @@ -711,9 +669,7 @@ Connection Attributes the next call that does a round trip to the server. The value is placed before unqualified database objects in SQL statements you then execute. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.db_domain @@ -721,11 +677,9 @@ Connection Attributes associated with the connection. It is the same value returned by the SQL ``SELECT value FROM V$PARAMETER WHERE NAME = 'db_domain'``. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 .. attribute:: Connection.db_name @@ -733,11 +687,9 @@ Connection Attributes the connection. It is the same value returned by the SQL ``SELECT NAME FROM V$DATABASE``. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 .. attribute:: Connection.dbop @@ -745,18 +697,14 @@ Connection Attributes monitored. This can be viewed in the DBOP_NAME column of the V$SQL_MONITOR view. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.dsn This read-only attribute returns the TNS entry of the database to which a connection has been established. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.econtext_id @@ -769,18 +717,14 @@ Connection Attributes This read-only attribute gets the session edition and is only available with Oracle Database 11.2, or later. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.external_name This read-write attribute specifies the external name that is used by the connection when logging distributed transactions. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.handle @@ -790,9 +734,7 @@ Connection Attributes This property is only relevant in the python-oracledb Thick mode. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.inputtypehandler @@ -806,9 +748,7 @@ Connection Attributes See :ref:`inputtypehandlers`. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. 
attribute:: Connection.instance_name @@ -816,20 +756,16 @@ Connection Attributes associated with the connection. It is the same value as the SQL expression ``sys_context('userenv', 'instance_name')``. - .. versionadded:: 1.4.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 1.4.0 .. attribute:: Connection.internal_name This read-write attribute specifies the internal name that is used by the connection when logging distributed transactions. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.ltxid @@ -838,15 +774,15 @@ Connection Attributes ensuring that transactions are not duplicated. See :ref:`tg` for more information. - .. versionchanged:: 3.0.0 + This is only available with Oracle Database 12.1 or later. In + python-oracledb Thick mode, it also requires Oracle Client libraries 12.1 + or later. - This attribute was added to python-oracledb Thin mode. + .. dbapiattributeextension:: - .. note:: + .. versionchanged:: 3.0.0 - This attribute is an extension to the DB API definition. It is only - available with Oracle Database 12.1 or later. In python-oracledb Thick - mode, it also requires Oracle Client libraries 12.1 or later. + This attribute was added to python-oracledb Thin mode. .. attribute:: Connection.max_identifier_length @@ -870,11 +806,9 @@ Connection Attributes using python-oracledb Thick mode, Oracle Client libraries 12.1 (or later) are required. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 .. attribute:: Connection.module @@ -882,10 +816,7 @@ Connection Attributes The maximum length for this string is 48 and if you exceed this length you will get ``ORA-24960``. - .. note: - - This attribute is an extension to the DB API definition. - + .. dbapiattributeextension:: .. attribute:: Connection.outputtypehandler @@ -904,20 +835,16 @@ Connection Attributes ``handler(cursor, name, default_type, length, precision, scale)`` will still work but is deprecated and will be removed in a future version. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.proxy_user This read-only attribute returns the name of the user which was used as a proxy when creating the connection to the database. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 .. attribute:: Connection.sdu @@ -927,11 +854,9 @@ Connection Attributes network configuration. It is available only in the python-oracledb Thin mode. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 .. attribute:: Connection.serial_num @@ -940,17 +865,15 @@ Connection Attributes ``SELECT SERIAL# FROM V$SESSION WHERE SID=SYS_CONTEXT('USERENV', 'SID')``. It is available only in python-oracledb Thin mode. - .. versionadded:: 2.5.0 - .. note:: + For applications using :ref:`drcp`, the ``serial_num`` attribute may not + contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a round-trip. - This attribute is an extension to the DB API definition. + .. 
dbapiattributeextension:: - For applications using :ref:`drcp`, the ``serial_num`` attribute may - not contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a - round-trip. + .. versionadded:: 2.5.0 .. attribute:: Connection.service_name @@ -958,11 +881,9 @@ Connection Attributes associated with the connection. This is the same value returned by the SQL ``SELECT SYS_CONTEXT('USERENV', 'SERVICE_NAME') FROM DUAL``. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 .. attribute:: Connection.session_id @@ -971,17 +892,14 @@ Connection Attributes ``SELECT SYS_CONTEXT('USERENV', 'SID') FROM DUAL``. It is available only in python-oracledb Thin mode. - .. versionadded:: 2.5.0 - - .. note:: + For applications using :ref:`drcp`, the ``session_id`` attribute may not + contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a round-trip. - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: - For applications using :ref:`drcp`, the ``session_id`` attribute may - not contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a - round-trip. + .. versionadded:: 2.5.0 .. attribute:: Connection.stmtcachesize @@ -993,9 +911,7 @@ Connection Attributes See :ref:`Statement Caching ` for more information. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.tag @@ -1010,9 +926,7 @@ Connection Attributes pool it will be used to retag the session. This value can be overridden in the call to :meth:`ConnectionPool.release()`. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.thin @@ -1020,44 +934,37 @@ Connection Attributes established with the python-oracledb Thin mode (*True*) or python-oracledb Thick mode (*False*). - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.transaction_in_progress This read-only attribute specifies whether a transaction is currently in progress on the database associated with the connection. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 .. attribute:: Connection.username This read-only attribute returns the name of the user which established the connection to the database. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Connection.version This read-only attribute returns the version of the database to which a connection has been established. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. note:: - If you connect to Oracle Database 18 or higher using Oracle Client - libraries 12.2 or lower you will only receive the base version (such as - 18.0.0.0.0) instead of the full version (such as 18.3.0.0.0). 
+ If you connect to Oracle Database 18 (or higher) in python-oracledb + Thick mode using Oracle Client libraries 12.2 (or lower) you will only + receive the base version (such as 18.0.0.0.0) instead of the full + version (such as 18.3.0.0.0). .. attribute:: Connection.warning @@ -1084,8 +991,6 @@ Connection Attributes If no warning was generated the value *None* is returned. - .. versionadded:: 2.0.0 + .. dbapiattributeextension:: - .. note:: - - This attribute is an extension to the DB API definition. + .. versionadded:: 2.0.0 diff --git a/doc/src/api_manual/connection_pool.rst b/doc/src/api_manual/connection_pool.rst index 51662ff8..83bcdd4c 100644 --- a/doc/src/api_manual/connection_pool.rst +++ b/doc/src/api_manual/connection_pool.rst @@ -4,10 +4,6 @@ API: ConnectionPool Objects *************************** -.. note:: - - This object is an extension to the DB API. - The new ConnectionPool class is synonymous with SessionPool. The SessionPool class is deprecated in python-oracledb. The preferred function to create pools is now :meth:`oracledb.create_pool()`. (The name SessionPool came from the @@ -16,6 +12,8 @@ lookup?ctx=dblatest&id=GUID-F9662FFB-EAEF-495C-96FC-49C6D1D9625C>`__. This implementation is only used in the python-oracledb Thick mode and is not available in the Thin mode). +.. dbapiobjectextension:: + In python-oracledb, the type `pool` will show the class `oracledb.ConnectionPool`. This only affects the name. diff --git a/doc/src/api_manual/cursor.rst b/doc/src/api_manual/cursor.rst index a2b4d14d..3ebf2782 100644 --- a/doc/src/api_manual/cursor.rst +++ b/doc/src/api_manual/cursor.rst @@ -13,27 +13,19 @@ Cursor Methods The entry point for the cursor as a context manager. It returns itself. - .. note:: - - This method is an extension to the DB API definition. - + .. dbapimethodextension:: .. method:: Cursor.__exit__() The exit point for the cursor as a context manager. It closes the cursor. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. method:: Cursor.__iter__() Returns the cursor itself to be used as an iterator. - .. note:: - - This method is an extension to the DB API definition but it is - mentioned in PEP 249 as an optional extension. + .. dbapimethodextension:: It is mentioned in PEP 249 as an optional extension. .. method:: Cursor.arrayvar(typ, value, [size]) @@ -53,18 +45,14 @@ Cursor Methods `example `__ needs to be used. - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: .. method:: Cursor.bindnames() Returns the list of bind variable names bound to the statement. Note that a statement must have been prepared first. - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: .. method:: Cursor.callfunc(name, return_type, parameters=[], \ keyword_parameters={}) @@ -88,9 +76,7 @@ Cursor Methods ``keywordParameters`` was renamed to ``keyword_parameters``. The old name will continue to work for a period of time. - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: .. note:: @@ -262,10 +248,9 @@ Cursor Methods corresponding to the number of rows affected by the DML statement for each element of the array passed to :meth:`Cursor.executemany()`. - .. note:: + This method is only available for Oracle Database 12.1 and later. - The DB API definition does not define this method and it is only - available for Oracle 12.1 and later. + .. dbapimethodextension:: .. 
method:: Cursor.getbatcherrors() @@ -274,9 +259,7 @@ Cursor Methods list of Error objects, one error for each iteration that failed. The offset can be determined by looking at the offset attribute of the error object. - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: .. method:: Cursor.getimplicitresults() @@ -292,12 +275,12 @@ Cursor Methods python-oracledb :ref:`Thick ` mode, Oracle Client 12.1 (or later) is additionally required. - .. note:: + .. dbapimethodextension:: - The DB API definition does not define this method. It is most like the - DB API method nextset(), but unlike that method (which requires that - the next result set overwrite the current result set), this method - returns cursors which can be fetched independently of each other. + It is most like the DB API method nextset(), but unlike that method + (which requires that the next result set overwrite the current result + set), this method returns cursors which can be fetched independently of + each other. .. method:: Cursor.parse(statement) @@ -305,9 +288,7 @@ Cursor Methods (parsing step is done automatically by Oracle when a statement is :meth:`executed `). - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: .. note:: @@ -333,9 +314,7 @@ Cursor Methods See :ref:`Statement Caching ` for more information. - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: .. method:: Cursor.scroll(value=0, mode="relative") @@ -351,10 +330,7 @@ Cursor Methods An error is raised if the mode is *relative* or *absolute* and the scroll operation would position the cursor outside of the result set. - .. note:: - - This method is an extension to the DB API definition but it is - mentioned in PEP 249 as an optional extension. + .. dbapimethodextension:: It is mentioned in PEP 249 as an optional extension. .. method:: Cursor.setinputsizes(*args, **keywordArgs) @@ -485,9 +461,7 @@ Cursor Methods The ``convert_nulls`` parameter was added. - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: Cursor Attributes ================= @@ -519,19 +493,14 @@ Cursor Attributes name. Care should be taken when referencing this attribute. In particular, elements should not be removed or replaced. - .. note:: - - The DB API definition does not define this attribute. + .. dbapiattributeextension:: .. attribute:: Cursor.connection This read-only attribute returns a reference to the connection object on which the cursor was created. - .. note:: - - This attribute is an extension to the DB API definition but it is - mentioned in PEP 249 as an optional extension. + .. dbapimethodextension:: It is mentioned in PEP 249 as an optional extension. .. attribute:: Cursor.description @@ -554,9 +523,7 @@ Cursor Attributes referencing this attribute. In particular, elements should not be removed or replaced. - .. note:: - - The DB API definition does not define this attribute. + .. dbapiattributeextension:: .. attribute:: Cursor.inputtypehandler @@ -570,9 +537,7 @@ Cursor Attributes See :ref:`inputtypehandlers`. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: .. attribute:: Cursor.lastrowid @@ -591,16 +556,14 @@ Cursor Attributes See :ref:`outputtypehandlers`. + .. dbapiattributeextension:: + .. versionchanged:: 1.4.0 The method signature was changed. 
The previous signature handler(cursor, name, default_type, length, precision, scale) will still work but is deprecated and will be removed in a future version. - .. note:: - - This attribute is an extension to the DB API definition. - .. attribute:: Cursor.prefetchrows This read-write attribute can be used to tune the number of rows that the @@ -618,9 +581,7 @@ Cursor Attributes See :ref:`Tuning Fetch Performance ` for more information. - .. note:: - - The DB API definition does not define this method. + .. dbapimethodextension:: .. attribute:: Cursor.rowcount @@ -640,9 +601,7 @@ Cursor Attributes See :ref:`rowfactories`. - .. note:: - - The DB API definition does not define this attribute. + .. dbapiattributeextension:: .. attribute:: Cursor.scrollable @@ -652,9 +611,7 @@ Cursor Attributes attribute is checked and the corresponding mode set in Oracle when calling the method :meth:`Cursor.execute()`. - .. note:: - - The DB API definition does not define this attribute. + .. dbapiattributeextension:: .. attribute:: Cursor.statement @@ -662,9 +619,7 @@ Cursor Attributes prepared with :meth:`Cursor.prepare()` or executed with :meth:`Cursor.execute()`. - .. note:: - - The DB API definition does not define this attribute. + .. dbapiattributeextension:: .. attribute:: Cursor.warning @@ -678,8 +633,6 @@ Cursor Attributes See :ref:`plsqlwarning` for more information. - .. versionadded:: 2.0.0 - - .. note:: + .. dbapiattributeextension:: - The DB API definition does not define this attribute. + .. versionadded:: 2.0.0 diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index aa356edd..ca45f7d1 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -28,7 +28,7 @@ Each column in OracleDataFrame exposes an `Apache Arrow PyCapsule `__ interface, giving access to the underlying Arrow array. -The OracleDataFrame object is an extension to the DB API. +.. dbapiobjectextension:: .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/dbobject_type.rst b/doc/src/api_manual/dbobject_type.rst index 1591a657..5743e25c 100644 --- a/doc/src/api_manual/dbobject_type.rst +++ b/doc/src/api_manual/dbobject_type.rst @@ -4,11 +4,11 @@ API: DbObjectType Objects ************************* -.. note:: +The DbObjectType object is returned by the :meth:`Connection.gettype()` call +and is available as the :data:`Variable.type` for variables containing Oracle +Database objects. - This object is an extension to the DB API. It is returned by the - :meth:`Connection.gettype()` call and is available as the - :data:`Variable.type` for variables containing Oracle Database objects. +.. dbapiobjectextension:: DbObjectType Methods ==================== @@ -70,12 +70,11 @@ DbObjectType Attributes DbObject Objects ================ -.. note:: +The DbObject object is returned by the :meth:`DbObjectType.newobject()` call +and can be bound to variables of type :data:`~oracledb.OBJECT`. Attributes can +be retrieved and set directly. - This object is an extension to the DB API. It is returned by the - :meth:`DbObjectType.newobject()` call and can be bound to variables of - type :data:`~oracledb.OBJECT`. Attributes can be retrieved and set - directly. +.. dbapiobjectextension:: DbObject Methods ++++++++++++++++ @@ -186,11 +185,9 @@ DbObject Attributes DbObjectAttribute Objects ========================= -.. note:: - - This object is an extension to the DB API. The elements of - :attr:`DbObjectType.attributes` are instances of this type. 
+The elements of :attr:`DbObjectType.attributes` are instances of this type. +.. dbapiobjectextension:: .. attribute:: DbObjectAttribute.max_size diff --git a/doc/src/api_manual/fetch_info.rst b/doc/src/api_manual/fetch_info.rst index 69b45c3b..9522560a 100644 --- a/doc/src/api_manual/fetch_info.rst +++ b/doc/src/api_manual/fetch_info.rst @@ -12,11 +12,9 @@ behaves as a 7-tuple containing the values for the attributes ``name``, and ``null_ok`` in that order. For example, if ``fetch_info`` is of type FetchInfo, then ``fetch_info[2]`` is the same as ``fetch_info.display_size``. -.. versionadded:: 1.4.0 - -.. note:: +.. dbapiobjectextension:: - This object is an extension the DB API. +.. versionadded:: 1.4.0 FetchInfo Attributes ==================== diff --git a/doc/src/api_manual/lob.rst b/doc/src/api_manual/lob.rst index 21b5ddf5..5fcdceb9 100644 --- a/doc/src/api_manual/lob.rst +++ b/doc/src/api_manual/lob.rst @@ -7,10 +7,10 @@ API: LOB Objects A LOB object can be created with :meth:`Connection.createlob()`. See :ref:`lobdata` for more information about using LOBs. -.. note:: +This object is returned by default whenever Oracle :data:`CLOB`, :data:`BLOB` +and :data:`BFILE` columns are fetched. - This object is an extension the DB API. It is returned whenever Oracle - :data:`CLOB`, :data:`BLOB` and :data:`BFILE` columns are fetched. +.. dbapiobjectextension:: LOB Methods =========== diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 27107c5b..e3d9ddf7 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -31,9 +31,7 @@ Oracledb Methods is, if :func:`oracledb.init_oracle_client()` is not called first, then an exception will be thrown. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. function:: connect(dsn=None, pool=None, pool_alias=None, conn_class=None, \ params=None, user=None, proxy_user=None, password=None, \ @@ -2093,10 +2091,7 @@ Oracledb Methods Constructor for creating a cursor. Returns a new :ref:`cursor object ` using the connection. - .. note:: - - This method is an extension to the DB API definition. - + .. dbapimethodextension:: .. function:: Date(year, month, day) @@ -2228,9 +2223,7 @@ Oracledb Methods - otherwise the value *None* is used. (Leaving :attr:`defaults.config_dir` unchanged). - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. versionchanged:: 3.0.0 @@ -2263,9 +2256,7 @@ Oracledb Methods mode. The attribute :attr:`ConnectionPool.thin` can be used to check a pool's mode. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. versionadded:: 1.1.0 @@ -2282,9 +2273,7 @@ Oracledb Methods Use the :meth:`oracledb.ConnectParams()` method instead. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. function:: PoolParams(min=1, max=2, increment=1, connectiontype=None, \ getmode=oracledb.POOL_GETMODE_WAIT, homogeneous=True, timeout=0, \ @@ -2756,9 +2745,7 @@ Oracledb Methods See :ref:`registerparamshook`. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. versionadded:: 3.0.0 @@ -2781,9 +2768,7 @@ Oracledb Methods See :ref:`registerpasswordtype`. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. versionadded:: 3.0.0 @@ -2852,9 +2837,7 @@ Oracledb Methods See :ref:`registerprotocolhook` for more information. - .. 
note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. versionadded:: 2.5.0 @@ -2894,9 +2877,7 @@ Oracledb Methods Unregisters a user parameter function that was earlier registered with a call to :meth:`oracledb.register_params_hook()`. - .. note:: - - This method is an extension to the DB API definition. + .. dbapimethodextension:: .. versionadded:: 3.0.0 @@ -2936,9 +2917,7 @@ Oracledb __future__ Object Special object that contains attributes which control the behavior of python-oracledb, allowing for opting in for new features. -.. note:: - - This method is an extension to the DB API definition. +.. dbapimethodextension:: .. _constants: @@ -2972,27 +2951,30 @@ General String constant stating the version of the module. Currently '|release|'. - .. note:: - - This attribute is an extension to the DB API definition. + .. dbapiattributeextension:: Advanced Queuing: Delivery Modes -------------------------------- -These constants are extensions to the DB API definition. They are possible -values for the :attr:`~DeqOptions.deliverymode` attribute of the +The AQ Delivery mode constants are possible values for the +:attr:`~DeqOptions.deliverymode` attribute of the :ref:`dequeue options object ` passed as the ``options`` parameter -to the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods as well as the -:attr:`~EnqOptions.deliverymode` attribute of the +to the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, +:meth:`AsyncQueue.deqone()`, and :meth:`AsyncQueue.deqmany()` methods as well +as the :attr:`~EnqOptions.deliverymode` attribute of the :ref:`enqueue options object ` passed as the ``options`` parameter -to the :meth:`Queue.enqone()` or :meth:`Queue.enqmany()` methods. They are also -possible values for the :attr:`~MessageProperties.deliverymode` attribute of -the :ref:`message properties object ` passed as the -``msgproperties`` parameter to the :meth:`Queue.deqone()` or -:meth:`Queue.deqmany()` and :meth:`Queue.enqone()` or :meth:`Queue.enqmany()` -methods. - +to the :meth:`Queue.enqone()`, :meth:`Queue.enqmany()`, +:meth:`AsyncQueue.enqone()`, and :meth:`AsyncQueue.enqmany()` methods. They are +also possible values for the :attr:`~MessageProperties.deliverymode` attribute +of the :ref:`message properties object ` passed as the +``msgproperties`` parameter to the :meth:`Queue.deqone()`, +:meth:`Queue.deqmany()`, :meth:`AsyncQueue.deqone()`, or +:meth:`AsyncQueue.deqmany()`, and :meth:`Queue.enqone()`, +:meth:`Queue.enqmany()`, :meth:`AsyncQueue.enqone()`, or +:meth:`AsyncQueue.enqmany()` methods. + +.. dbapiconstantextension:: .. data:: MSG_BUFFERED @@ -3015,11 +2997,13 @@ methods. Advanced Queuing: Dequeue Modes ------------------------------- -These constants are extensions to the DB API definition. They are possible -values for the :attr:`~DeqOptions.mode` attribute of the +The AQ Dequeue mode constants are possible values for the +:attr:`~DeqOptions.mode` attribute of the :ref:`dequeue options object `. This object is the ``options`` -parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. +parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, +:meth:`AsyncQueue.deqone()`, or :meth:`AsyncQueue.deqmany()` methods. +.. dbapiconstantextension:: .. data:: DEQ_BROWSE @@ -3050,11 +3034,13 @@ parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. Advanced Queuing: Dequeue Navigation Modes ------------------------------------------ -These constants are extensions to the DB API definition. 
They are possible -values for the :attr:`~DeqOptions.navigation` attribute of the +The AQ Dequeue Navigation mode constants are possible values for the +:attr:`~DeqOptions.navigation` attribute of the :ref:`dequeue options object `. This object is the ``options`` -parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. +parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, +:meth:`AsyncQueue.deqone()`, or :meth:`AsyncQueue.deqmany()` methods. +.. dbapiconstantextension:: .. data:: DEQ_FIRST_MSG @@ -3083,11 +3069,13 @@ parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. Advanced Queuing: Dequeue Visibility Modes ------------------------------------------ -These constants are extensions to the DB API definition. They are possible -values for the :attr:`~DeqOptions.visibility` attribute of the +The AQ Dequeue Visibility mode constants are possible values for the +:attr:`~DeqOptions.visibility` attribute of the :ref:`dequeue options object `. This object is the ``options`` -parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. +parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, +:meth:`AsyncQueue.deqone()`, or :meth:`AsyncQueue.deqmany()` methods. +.. dbapiconstantextension:: .. data:: DEQ_IMMEDIATE @@ -3104,11 +3092,13 @@ parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. Advanced Queuing: Dequeue Wait Modes ------------------------------------ -These constants are extensions to the DB API definition. They are possible -values for the :attr:`~DeqOptions.wait` attribute of the +The AQ Dequeue Wait mode constants are possible values for the +:attr:`~DeqOptions.wait` attribute of the :ref:`dequeue options object `. This object is the ``options`` -parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. +parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, +:meth:`AsyncQueue.deqone()`, or :meth:`AsyncQueue.deqmany()` methods. +.. dbapiconstantextension:: .. data:: DEQ_NO_WAIT @@ -3125,11 +3115,13 @@ parameter for the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` methods. Advanced Queuing: Enqueue Visibility Modes ------------------------------------------ -These constants are extensions to the DB API definition. They are possible -values for the :attr:`~EnqOptions.visibility` attribute of the +The AQ Enqueue Visibility mode constants are possible values for the +:attr:`~EnqOptions.visibility` attribute of the :ref:`enqueue options object `. This object is the ``options`` -parameter for the :meth:`Queue.enqone()` or :meth:`Queue.enqmany()` methods. +parameter for the :meth:`Queue.enqone()`, :meth:`Queue.enqmany()`, +:meth:`AsyncQueue.enqone()`, or :meth:`AsyncQueue.enqmany()` methods. +.. dbapiconstantextension:: .. data:: ENQ_IMMEDIATE @@ -3146,13 +3138,16 @@ parameter for the :meth:`Queue.enqone()` or :meth:`Queue.enqmany()` methods. Advanced Queuing: Message States -------------------------------- -These constants are extensions to the DB API definition. They are possible -values for the :attr:`~MessageProperties.state` attribute of the +The AQ Message state constants are possible values for the +:attr:`~MessageProperties.state` attribute of the :ref:`message properties object `. This object is the -``msgproperties`` parameter for the :meth:`Queue.deqone()` or -:meth:`Queue.deqmany()`, and :meth:`Queue.enqone()` or :meth:`Queue.enqmany()` -methods. 
+``msgproperties`` parameter for the :meth:`Queue.deqone()`, +:meth:`Queue.deqmany()`, :meth:`AsyncQueue.deqone()` or +:meth:`AsyncQueue.deqmany()` and :meth:`Queue.enqone()`, +:meth:`Queue.enqmany()`, :meth:`AsyncQueue.enqone()`, or +:meth:`AsyncQueue.enqmany()` methods. +.. dbapiconstantextension:: .. data:: MSG_EXPIRED @@ -3177,12 +3172,12 @@ methods. reached. -Advanced Queuing: Other ------------------------ +Advanced Queuing: Other Constants +--------------------------------- -These constants are extensions to the DB API definition. They are special -constants used in advanced queuing. +This section contains other constants that are used for Advanced Queueing. +.. dbapiconstantextension:: .. data:: MSG_NO_DELAY @@ -3211,12 +3206,13 @@ constants used in advanced queuing. Connection Authorization Modes ------------------------------ -These constants belong to the enumeration called ``AuthMode``. They are -possible values for the ``mode`` parameters of :meth:`oracledb.connect()`, -:meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, and -:meth:`oracledb.create_pool_async()`. They have deprecated the authorization -modes used in cx_Oracle 8.3. The constants are extensions to the DB API -definition. +The Connection Authorization mode constants belong to the enumeration called +``AuthMode``. They are possible values for the ``mode`` parameters of +:meth:`oracledb.connect()`, :meth:`oracledb.create_pool()`, +:meth:`oracledb.connect_async()`, and :meth:`oracledb.create_pool_async()`. +These constants have deprecated the authorization modes used in cx_Oracle 8.3. + +.. dbapiconstantextension:: .. versionchanged:: 2.3.0 @@ -3373,9 +3369,10 @@ definition. Pipeline Operation Types ------------------------ -These constants belong to the enumeration called ``PipelineOpType``. The -pipelining constants listed below are used to identify the type of operation -added. They are possible values for the :attr:`PipelineOp.op_type` attribute. +The Pipeline Operation type constants belong to the enumeration called +``PipelineOpType``. The pipelining constants listed below are used to identify +the type of operation added. They are possible values for the +:attr:`PipelineOp.op_type` attribute. .. versionadded:: 2.4.0 @@ -3447,9 +3444,10 @@ added. They are possible values for the :attr:`PipelineOp.op_type` attribute. Database Shutdown Modes ----------------------- -These constants are extensions to the DB API definition. They are possible -values for the ``mode`` parameter of the :meth:`Connection.shutdown()` method. +The Database Shutdown mode constants are possible values for the ``mode`` +parameter of the :meth:`Connection.shutdown()` method. +.. dbapiconstantextension:: .. data:: DBSHUTDOWN_ABORT @@ -3490,10 +3488,11 @@ values for the ``mode`` parameter of the :meth:`Connection.shutdown()` method. Event Types ----------- -These constants are extensions to the DB API definition. They are possible -values for the :attr:`Message.type` attribute of the messages that are sent -for subscriptions created by the :meth:`Connection.subscribe()` method. +The Event type constants are possible values for the :attr:`Message.type` +attribute of the messages that are sent for subscriptions created by the +:meth:`Connection.subscribe()` method. +.. dbapiconstantextension:: .. data:: EVENT_AQ @@ -3548,12 +3547,13 @@ for subscriptions created by the :meth:`Connection.subscribe()` method. Operation Codes --------------- -These constants are extensions to the DB API definition. 
They are possible -values for the ``operations`` parameter for the :meth:`Connection.subscribe()` -method. One or more of these values can be OR'ed together. These values are -also used by the :attr:`MessageTable.operation` or -:attr:`MessageQuery.operation` attributes of the messages that are sent. +The Operation code constants are possible values for the ``operations`` +parameter for the :meth:`Connection.subscribe()` method. One or more of these +values can be OR'ed together. These values are also used by the +:attr:`MessageTable.operation` or :attr:`MessageQuery.operation` attributes of +the messages that are sent. +.. dbapiconstantextension:: .. data:: OPCODE_ALLOPS @@ -3603,12 +3603,13 @@ also used by the :attr:`MessageTable.operation` or Connection Pool Get Modes ------------------------- -These constants belong to the enumeration called ``PoolGetMode``. They are -possible values for the ``getmode`` parameters of +The Connection Pool Get mode constants belong to the enumeration called +``PoolGetMode``. They are possible values for the ``getmode`` parameters of :meth:`oracledb.create_pool()`, :meth:`oracledb.create_pool_async()`, -:meth:`PoolParams.set()`, and for related attributes. They have deprecated the -Session Pool mode constants that were used in cx_Oracle 8.3. The constants are -extensions to the DB API definition +:meth:`PoolParams.set()`, and for related attributes. These constants have +deprecated the Session Pool mode constants that were used in cx_Oracle 8.3. + +.. dbapiconstantextension:: .. versionchanged:: 2.3.0 @@ -3680,12 +3681,14 @@ extensions to the DB API definition Connection Pool Purity Constants -------------------------------- -These constants belong to the enumeration called ``Purity``. They are possible -values for the :ref:`drcp` ``purity`` parameter of -:meth:`oracledb.create_pool()`, :meth:`ConnectionPool.acquire()`, and -:meth:`oracledb.connect()`. They have deprecated the Session Pool purity -constants that were used in cx_Oracle 8.3. The constants are extensions to the -DB API definition. +The Connection Pool Purity constants belong to the enumeration called +``Purity``. They are possible values for the :ref:`drcp` ``purity`` parameter +of :meth:`oracledb.create_pool()`, :meth:`ConnectionPool.acquire()`, +:meth:`oracledb.connect()`, :meth:`oracledb.create_pool_async()`, and +:meth:`oracledb.connect_async()`. These constants have deprecated the Session +Pool purity constants that were used in cx_Oracle 8.3. + +.. dbapiconstantextension:: .. versionchanged:: 2.3.0 @@ -3728,9 +3731,10 @@ DB API definition. Subscription Grouping Classes ----------------------------- -These constants are extensions to the DB API definition. They are possible -values for the ``groupingClass`` parameter of the :meth:`Connection.subscribe()` -method. +The Subscription Grouping Class constants are possible values for the +``groupingClass`` parameter of the :meth:`Connection.subscribe()` method. + +.. dbapiconstantextension:: .. data:: SUBSCR_GROUPING_CLASS_TIME @@ -3741,9 +3745,10 @@ method. Subscription Grouping Types --------------------------- -These constants are extensions to the DB API definition. They are possible -values for the ``groupingType`` parameter of the :meth:`Connection.subscribe()` -method. +The Subscription Grouping Type constants are possible values for the +``groupingType`` parameter of the :meth:`Connection.subscribe()` method. + +.. dbapiconstantextension:: .. data:: SUBSCR_GROUPING_TYPE_SUMMARY @@ -3763,9 +3768,10 @@ method. 
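To show how several of the subscription-related constants in this area fit
together, here is a minimal sketch. It assumes python-oracledb Thick mode (which
is required for :meth:`Connection.subscribe()`); the table name and the callback
body are placeholders only:

.. code-block:: python

    def cqn_callback(message):
        # placeholder callback; a real handler would inspect message.tables
        print("Notification received, type:", message.type)

    subscription = connection.subscribe(
        namespace=oracledb.SUBSCR_NAMESPACE_DBCHANGE,
        operations=oracledb.OPCODE_INSERT | oracledb.OPCODE_UPDATE,
        qos=oracledb.SUBSCR_QOS_ROWIDS,
        callback=cqn_callback,
    )

    # register interest in changes to an assumed table named "regions"
    subscription.registerquery("select * from regions")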
Subscription Namespaces ----------------------- -These constants are extensions to the DB API definition. They are possible -values for the ``namespace`` parameter of the :meth:`Connection.subscribe()` -method. +The Subscription Namespace constants are possible values for the ``namespace`` +parameter of the :meth:`Connection.subscribe()` method. + +.. dbapiconstantextension:: .. data:: SUBSCR_NAMESPACE_AQ @@ -3783,9 +3789,10 @@ method. Subscription Protocols ---------------------- -These constants are extensions to the DB API definition. They are possible -values for the ``protocol`` parameter of the :meth:`Connection.subscribe()` -method. +The Subscription Protocol constants are possible values for the ``protocol`` +parameter of the :meth:`Connection.subscribe()` method. + +.. dbapiconstantextension:: .. data:: SUBSCR_PROTO_CALLBACK @@ -3831,9 +3838,11 @@ method. Subscription Quality of Service ------------------------------- -These constants are extensions to the DB API definition. They are possible -values for the ``qos`` parameter of the :meth:`Connection.subscribe()` method. -One or more of these values can be OR'ed together. +The Subscription Quality of Service constants are possible values for the +``qos`` parameter of the :meth:`Connection.subscribe()` method. One or more of +these values can be OR'ed together. + +.. dbapiconstantextension:: .. data:: SUBSCR_QOS_BEST_EFFORT diff --git a/doc/src/api_manual/soda.rst b/doc/src/api_manual/soda.rst index 719e8c4e..8a7cb4cb 100644 --- a/doc/src/api_manual/soda.rst +++ b/doc/src/api_manual/soda.rst @@ -80,9 +80,10 @@ the specification in the collection metadata``. SodaDatabase Objects ==================== -The SODA Database object is an extension to the DB API. It is returned by the -method :meth:`Connection.getSodaDatabase()`. +SODA Database objects are returned by the method +:meth:`Connection.getSodaDatabase()`. +.. dbapiobjectextension:: SodaDatabase Methods -------------------- @@ -161,11 +162,12 @@ SodaDatabase Methods SodaCollection Objects ====================== -The SODA Collection object is an extension to the DB API. It is used to -represent SODA collections and is created by methods -:meth:`SodaDatabase.createCollection()` and +SODA Collection objects are used to represent SODA collections and is created +by methods :meth:`SodaDatabase.createCollection()` and :meth:`SodaDatabase.openCollection()`. +.. dbapiobjectextension:: + SodaCollection Methods ---------------------- @@ -370,11 +372,13 @@ SodaCollection Attributes SodaDoc Objects =============== -The SODA Document object is an extension to the DB API. It is returned by the -methods :meth:`SodaDatabase.createDocument()`, -:meth:`SodaOperation.getDocuments()` and :meth:`SodaOperation.getOne()` as +SODA Document objects are returned by the methods +:meth:`SodaDatabase.createDocument()`, :meth:`SodaOperation.getDocuments()` and +:meth:`SodaOperation.getOne()` as well as by iterating over :ref:`SODA document cursors `. +.. dbapiobjectextension:: + SodaDoc Methods --------------- @@ -447,10 +451,11 @@ SodaDoc Attributes SodaDocCursor Objects ===================== -The SODA Document Cursor object is an extension to the DB API. It is returned -by the method :meth:`SodaOperation.getCursor()` and implements the iterator -protocol. Each iteration will return a :ref:`SODA document object -`. +SODA Document Cursor objects are returned by the method +:meth:`SodaOperation.getCursor()` and implements the iterator protocol. Each +iteration will return a :ref:`SODA document object `. + +.. 
dbapiobjectextension:: SodaDocCursor Methods --------------------- @@ -467,9 +472,11 @@ SodaDocCursor Methods SodaOperation Objects ===================== -The SODA Operation Object is an extension to the DB API. It represents an -operation that will be performed on all or some of the documents in a SODA -collection. It is created by the method :meth:`SodaCollection.find()`. +A SODA Operation object represents an operation that will be performed on all +or some of the documents in a SODA collection. This object is created by the +method :meth:`SodaCollection.find()`. + +.. dbapiobjectextension:: SodaOperation Methods --------------------- diff --git a/doc/src/api_manual/subscription.rst b/doc/src/api_manual/subscription.rst index de9b5b9c..f4e639bf 100644 --- a/doc/src/api_manual/subscription.rst +++ b/doc/src/api_manual/subscription.rst @@ -4,9 +4,7 @@ API: Subscription Objects ************************* -.. note:: - - This object is an extension the DB API. +.. dbapiobjectextension:: Subscription Methods ==================== @@ -104,10 +102,8 @@ Subscription Attributes Message Objects --------------- -.. note:: - - This object is created internally when notification is received and passed - to the callback procedure specified when a subscription is created. +Message objects are created when a notification is received. They are passed to +the callback procedure specified when a subscription is created. .. attribute:: Message.consumer_name @@ -190,11 +186,9 @@ Message Objects MessageTable Objects -------------------- -.. note:: - - This object is created internally for each table changed when notification - is received and is found in the tables attribute of message objects, and - the tables attribute of message query objects. +MessageTable objects are created when a notification is received for each table +change. They are accessed in the tables attribute of message objects, and the +tables attribute of message query objects. .. attribute:: MessageTable.name @@ -219,11 +213,9 @@ MessageTable Objects MessageRow Objects ------------------ -.. note:: - - This object is created internally for each row changed on a table when - notification is received and is found in the rows attribute of message - table objects. +MessageRow objects are created when a notification is received for each row +changed in a table. They are found in the rows attribute of message table +objects. .. attribute:: MessageRow.operation @@ -240,11 +232,9 @@ MessageRow Objects MessageQuery Objects -------------------- -.. note:: - - This object is created internally for each query result set changed when - notification is received and is found in the queries attribute of message - objects. +A MessageQuery object is created when a notification is received for a query +result set change. This object is found in the queries attribute of message +objects. .. attribute:: MessageQuery.id diff --git a/doc/src/api_manual/variable.rst b/doc/src/api_manual/variable.rst index d6453ac5..0a14e611 100644 --- a/doc/src/api_manual/variable.rst +++ b/doc/src/api_manual/variable.rst @@ -7,9 +7,7 @@ API: Variable Objects Variable objects are created with :meth:`Cursor.var()` or :func:`Cursor.arrayvar()`. -.. note:: - - The DB API definition does not define this object. +.. dbapiobjectextension:: Variable Methods ================= diff --git a/doc/src/conf.py b/doc/src/conf.py index 4b36b77b..534ccf3a 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -22,7 +22,12 @@ # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. -extensions = ["table_with_summary", "oracle_deprecated", "sphinx_rtd_theme"] +extensions = [ + "table_with_summary", + "oracle_deprecated", + "dbapi_extension", + "sphinx_rtd_theme", +] # Add any paths that contain templates here, relative to this directory. templates_path = [".templates"] From ddb25729a0aeec0f26cf27deae49ee80b6515319 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:36:45 -0600 Subject: [PATCH 042/239] Documentation improvements. --- doc/src/api_manual/module.rst | 3 + doc/src/release_notes.rst | 9 ++- doc/src/user_guide/aq.rst | 6 +- doc/src/user_guide/lob_data.rst | 126 ++++++++++++++++++++++++++++---- 4 files changed, 125 insertions(+), 19 deletions(-) diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index e3d9ddf7..687cc2d1 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -3128,6 +3128,9 @@ parameter for the :meth:`Queue.enqone()`, :meth:`Queue.enqmany()`, This constant is used to specify that enqueue should perform its work as part of an independent transaction. + The use of this constant with :ref:`bulk enqueuing ` is only + supported in python-oracledb :ref:`Thick mode `. + .. data:: ENQ_ON_COMMIT diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2e75bb84..26e02ecf 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -42,11 +42,15 @@ Thick Mode Changes #) Fixed bug resulting in a segfault when unable to load the Oracle Client libraries (`ODPI-C `__ dependency update). +#) Fixed bug which resulted in error ``ORA-24328: illegal attribute value`` + when calling :meth:`Connection.gettype()` with Oracle Client 11.2 + libraries + (`ODPI-C `__ dependency update). #) Improved error message when getting :attr:`Connection.max_open_cursors` - when using Oracle Client libraries 11.2 + when using Oracle Client 11.2 libraries (`ODPI-C `__ dependency update). #) Improved error message when attempting to work with sparse vectors using - Oracle Client libraries 23.6 or earlier + Oracle Client 23.6 (or earlier) libraries (`ODPI-C `__ dependency update). Common Changes @@ -80,6 +84,7 @@ Common Changes python-oracledb Thick mode raised the error ``DPI-1071: payload type in message properties must match the payload type of the queue`` and thin mode raised an internal error. +#) Improved the test suite and documentation. oracledb 3.0.0 (March 2025) diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index 8dfedb27..6c8fe91b 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -26,8 +26,9 @@ types. - JSON payloads require Oracle Database 21c (or later). In python-oracle Thick mode, Oracle Client libraries 21c (or later) are also needed. -JMS payloads and :ref:`Recipient Lists ` are only supported in -python-oracledb :ref:`Thick mode `. +The use of :data:`~oracledb.ENQ_IMMEDIATE` with bulk enqueuing, JMS payloads, +and :ref:`Recipient Lists ` are only supported in python-oracledb +:ref:`Thick mode `. There are examples of AQ Classic Queues in the `GitHub samples `__ directory. @@ -338,6 +339,7 @@ expiration of 60 seconds on a message: This means that if no dequeue operation occurs within 60 seconds then the message will be dropped from the queue. +.. 
_bulkenqdeq: Bulk Enqueue and Dequeue ======================== diff --git a/doc/src/user_guide/lob_data.rst b/doc/src/user_guide/lob_data.rst index d5432203..93ff141c 100644 --- a/doc/src/user_guide/lob_data.rst +++ b/doc/src/user_guide/lob_data.rst @@ -1,8 +1,8 @@ .. _lobdata: -************************ -Using CLOB and BLOB Data -************************ +*************************************** +Using CLOB, BLOB, NCLOB, and BFILE Data +*************************************** Oracle Database uses :ref:`LOB objects ` to store large data such as text, images, videos, and other multimedia formats. The maximum size of a LOB @@ -11,21 +11,20 @@ text, images, videos, and other multimedia formats. The maximum size of a LOB There are `four types of LOBs `__: - * BLOB - Binary Large Object, used for storing binary data. python-oracledb - uses the type :attr:`oracledb.DB_TYPE_BLOB`. * CLOB - Character Large Object, used for storing strings in the database character set format. python-oracledb uses the type :attr:`oracledb.DB_TYPE_CLOB`. + * BLOB - Binary Large Object, used for storing binary data. python-oracledb + uses the type :attr:`oracledb.DB_TYPE_BLOB`. * NCLOB - National Character Large Object, used for storing strings in the national character set format. python-oracledb uses the type :attr:`oracledb.DB_TYPE_NCLOB`. * BFILE - External Binary File, used for referencing a file stored on the host operating system outside of the database. python-oracledb uses the - type :attr:`oracledb.DB_TYPE_BFILE`. See `BFILEs `__ for more information. + type :attr:`oracledb.DB_TYPE_BFILE`. -LOBs can be streamed to, and from, Oracle Database. +LOBs can be permanent or temporary. They can be inserted into, and fetched +from, Oracle Database in chunks, as mecessary. LOBs up to 1 GB in length can be also be handled directly as strings or bytes in python-oracledb. This makes LOBs easy to work with, and has significant @@ -36,7 +35,7 @@ See `GitHub `__ for LOB examples. Simple Insertion of LOBs ------------------------- +======================== Consider a table with CLOB and BLOB columns: @@ -69,7 +68,7 @@ Note that with this approach, LOB data is limited to 1 GB in size. .. _directlobs: Fetching LOBs as Strings and Bytes ----------------------------------- +================================== CLOBs and BLOBs smaller than 1 GB can queried from the database directly as strings and bytes. This can be much faster than streaming a :ref:`LOB Object @@ -121,7 +120,7 @@ handler: connection.outputtypehandler = output_type_handler Streaming LOBs (Read) ---------------------- +===================== Without setting ``oracledb.defaults.fetch_lobs`` to False, or without using an output type handler, the CLOB and BLOB values are fetched as :ref:`LOB @@ -168,7 +167,7 @@ repeatedly until all of the data has been read, as shown below: Streaming LOBs (Write) ----------------------- +====================== If a row containing a LOB is being inserted or updated, and the quantity of data that is to be inserted or updated cannot fit in a single block of data, @@ -196,9 +195,8 @@ in the following code: offset += len(data) connection.commit() - Temporary LOBs --------------- +============== All of the examples shown thus far have made use of permanent LOBs. These are LOBs that are stored in the database. Oracle also supports temporary LOBs that @@ -214,3 +212,101 @@ procedure exceeds that which can fit in a single block of data, however, you can use the method :meth:`Connection.createlob()` to create a temporary LOB. 
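For example, a temporary CLOB might be created, populated in pieces, and passed
to a PL/SQL procedure as sketched below. The connection and cursor are assumed to
exist as in the earlier examples, and ``my_clob_proc`` is a hypothetical procedure
name used only for illustration:

.. code-block:: python

    # create an empty temporary CLOB and write data to it in pieces
    temp_clob = connection.createlob(oracledb.DB_TYPE_CLOB)
    offset = 1
    for piece in ("Some very long text, ", "written in pieces"):
        temp_clob.write(piece, offset)
        offset += len(piece)

    # my_clob_proc is a hypothetical PL/SQL procedure taking a CLOB argument
    cursor.callproc("my_clob_proc", [temp_clob])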
This LOB can then be read and written just like in the examples shown above for persistent LOBs. + +.. _bfiles: + +Using BFILEs +============ + +`BFILEs `__ are objects stored in a directory in the +Oracle Database server file system, not in the database. The database column of +type BFILE stores a reference to this external binary file. Each BFILE column +can reference a single external file. BFILEs are read-only data types and +hence you cannot modify the file from within your application. + +Before using the BFILE data type, you must: + +- Create a `DIRECTORY `__ object which is an alias for the + full path to the directory containing BFILE data in the database server file + system. For example, you can create a DIRECTORY object by using: + + .. code-block:: sql + + create or replace directory my_bfile_dir as '/demo/bfiles' + + In the above example, "my_bfile_dir" is the directory alias. + "/demo/bfiles" is the physical directory in the database server file + system that contains the files. It is a string containing the full path name + of the directory and follows the operating system rules. + + To allow non-privileged users to access this directory, you can grant access + using: + + .. code-block:: sql + + grant read on directory my_bfile_dir to hr; + + Ensure that the Oracle Server processes have read access to the directory. + +- Store the physical binary file in the directory in the database server file + system. For example, the binary file "my_bfile.txt" is stored in the + directory "/demo/bfiles". + +Consider the file, "/demo/bfiles/my_bfile.txt", exists on the server and +contains the text, "This is my BFILE data". You can access the "my_bfile.txt" +file as detailed below. + +The following table will be used in the subsequent examples. + +.. code-block:: sql + + create table bfile_tbl( + id number, + bfile_data bfile + ); + +**Inserting BFILEs** + +You must use the `BFILENAME `__ function in an INSERT +statement to associate a file and a BFILE column. The ``BFILENAME`` function +takes two arguments, the directory alias and the file name. To insert a BFILE +reference, for example: + +.. code-block:: python + + cursor.execute(""" + insert into bfile_tbl (id, bfile_data) values + (:id, bfilename(:bfiledir, :bfilename))""", + id=102, bfiledir="my_bfile_dir", bfilename="my_bfile.txt") + + connection.commit() + +This inserts a reference to the file "my_bfile.txt" located in the directory +referenced by the alias "my_bfile_dir" into the bfile_tbl table. + +**Fetching BFILEs** + +To query the bfile_tbl table and fetch the BFILE LOB locator, you can use +the `BFILENAME `__ function as shown below: + +.. code-block:: python + + cursor.execute("select bfilename(:bfiledir, :bfilename) from bfile_tbl where id = :id", + id=102, bfiledir="my_bfile_dir", bfilename="my_bfile.txt") + bfile, = cursor.fetchone() + print(bfile.read()) + +This will display:: + + This is my BFILE data + +This fetched LOB can use :meth:`LOB.fileexists()` to check if the file +referenced by the BFILE type LOB exists. + +You can get the directory alias and file name of this fetched LOB by using +:meth:`LOB.getfilename()`. Also, you can set the directory alias and file name +for this fetched LOB by using :meth:`LOB.setfilename()`. From cc8e8e4813cdfba44476f4812d06929368626811 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:37:08 -0600 Subject: [PATCH 043/239] Satisfy static analysis tools. 
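As a brief supplement to the BFILE documentation above, the fetched BFILE locator
also exposes its directory alias and file name, and can report whether the
referenced operating system file exists. A sketch, reusing the assumed
"my_bfile_dir" alias and "my_bfile.txt" file from the examples above:

.. code-block:: python

    cursor.execute(
        "select bfilename(:bfiledir, :bfilename) from dual",
        bfiledir="my_bfile_dir", bfilename="my_bfile.txt",
    )
    bfile, = cursor.fetchone()

    # check that the referenced operating system file is present
    print(bfile.fileexists())

    # get, and optionally change, the directory alias and file name;
    # "another_file.txt" is a placeholder file name
    directory_alias, file_name = bfile.getfilename()
    bfile.setfilename(directory_alias, "another_file.txt")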
--- src/oracledb/impl/thin/cursor.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oracledb/impl/thin/cursor.pyx b/src/oracledb/impl/thin/cursor.pyx index cd9eef07..5133e030 100644 --- a/src/oracledb/impl/thin/cursor.pyx +++ b/src/oracledb/impl/thin/cursor.pyx @@ -82,9 +82,9 @@ cdef class BaseThinCursorImpl(BaseCursorImpl): database and receive back its response. """ cdef: + uint64_t desired_row = 0 + uint32_t orientation = 0 ExecuteMessage message - uint32_t orientation - uint64_t desired_row # check mode and calculate desired row if mode == "relative": From c8d6f5c34ee2fb6f6e0ed509aebfe089f7fb964b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:37:40 -0600 Subject: [PATCH 044/239] Test suite improvements. --- ...st_2700_aq.py => test_2700_aq_dbobject.py} | 164 ++---- ...t_2800_bulk_aq.py => test_2800_aq_bulk.py} | 2 +- tests/test_7800_aq_raw.py | 21 +- ...aq_async.py => test_8200_aq_bulk_async.py} | 0 tests/test_8300_aq_json.py | 418 +++++++++++++++ tests/test_8400_aq_dbobject_async.py | 484 ++++++++++++++++++ tests/test_8500_aq_json_async.py | 412 +++++++++++++++ 7 files changed, 1372 insertions(+), 129 deletions(-) rename tests/{test_2700_aq.py => test_2700_aq_dbobject.py} (86%) rename tests/{test_2800_bulk_aq.py => test_2800_aq_bulk.py} (99%) rename tests/{test_8200_bulk_aq_async.py => test_8200_aq_bulk_async.py} (100%) create mode 100644 tests/test_8300_aq_json.py create mode 100644 tests/test_8400_aq_dbobject_async.py create mode 100644 tests/test_8500_aq_json_async.py diff --git a/tests/test_2700_aq.py b/tests/test_2700_aq_dbobject.py similarity index 86% rename from tests/test_2700_aq.py rename to tests/test_2700_aq_dbobject.py index 28f792b5..de47fdd5 100644 --- a/tests/test_2700_aq.py +++ b/tests/test_2700_aq_dbobject.py @@ -23,10 +23,9 @@ # ----------------------------------------------------------------------------- """ -2700 - Module for testing AQ +2700 - Module for testing AQ with DbObject payloads. """ -import datetime import decimal import threading import unittest @@ -43,64 +42,6 @@ class TestCase(test_env.BaseTestCase): ("The Story of My Life", "Hellen Keller", decimal.Decimal("10.50")), ("The Chronicles of Narnia", "C.S. 
Lewis", decimal.Decimal("25.25")), ] - json_queue_name = "TEST_JSON_QUEUE" - json_data = [ - [ - 2.75, - True, - "Ocean Beach", - b"Some bytes", - {"keyA": 1.0, "KeyB": "Melbourne"}, - datetime.datetime(2022, 8, 1, 0, 0), - ], - [ - True, - False, - "String", - b"Some Bytes", - {}, - {"name": None}, - {"name": "John"}, - {"age": 30}, - {"Permanent": True}, - { - "employee": { - "name": "John", - "age": 30, - "city": "Delhi", - "Parmanent": True, - } - }, - {"employees": ["John", "Matthew", "James"]}, - { - "employees": [ - {"employee1": {"name": "John", "city": "Delhi"}}, - {"employee2": {"name": "Matthew", "city": "Mumbai"}}, - {"employee3": {"name": "James", "city": "Bangalore"}}, - ] - }, - ], - [ - datetime.datetime.today(), - datetime.datetime(2004, 2, 1, 3, 4, 5), - datetime.datetime(2020, 12, 2, 13, 29, 14), - datetime.timedelta(8.5), - datetime.datetime(2002, 12, 13, 9, 36, 0), - oracledb.Timestamp(2002, 12, 13, 9, 36, 0), - datetime.datetime(2002, 12, 13), - ], - dict(name="John", age=30, city="New York"), - [ - 0, - 1, - 25.25, - 6088343244, - -9999999999999999999, - decimal.Decimal("0.25"), - decimal.Decimal("10.25"), - decimal.Decimal("319438950232418390.273596"), - ], - ] def __deq_in_thread(self, results): with test_env.get_connection() as conn: @@ -537,33 +478,7 @@ def notification_callback(message): conn.unsubscribe(sub) def test_2721(self): - "2721 - test enqueuing and dequeuing JSON payloads" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - self.assertEqual(queue.payload_type, "JSON") - for data in self.json_data: - props = self.conn.msgproperties(payload=data) - queue.enqone(props) - self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - results = [] - while True: - props = queue.deqone() - if props is None: - break - results.append(props.payload) - self.conn.commit() - self.assertEqual(results, self.json_data) - - def test_2722(self): - "2722 - test enqueuing to a JSON queue without a JSON payload" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - string_message = "This is a string message" - props = self.conn.msgproperties(payload=string_message) - with self.assertRaisesFullCode("DPY-2062"): - queue.enqone(props) - - def test_2723(self): - "2723 - test message props enqtime" + "2721 - test message props enqtime" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) @@ -579,8 +494,8 @@ def test_2723(self): end_date = end_date.replace(microsecond=0) self.assertTrue(start_date <= props.enqtime <= end_date) - def test_2724(self): - "2724 - test message props declared attributes" + def test_2722(self): + "2722 - test message props declared attributes" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) @@ -597,29 +512,29 @@ def test_2724(self): for attr_name in values: self.assertEqual(getattr(props, attr_name), values[attr_name]) - def test_2725(self): - "2725 - test error for invalid type for payload_type" + def test_2723(self): + "2723 - test error for invalid type for payload_type" self.assertRaises( TypeError, self.conn.queue, "THE QUEUE", payload_type=4 ) - def test_2726(self): - "2726 - test setting bytes to payload" + def test_2724(self): + "2724 - test setting bytes to payload" props = self.conn.msgproperties() bytes_val = b"Hello there" props.payload = bytes_val self.assertEqual(props.payload, bytes_val) - def test_2727(self): - "2727 - test getting queue attributes" + def test_2725(self): + "2725 - test getting queue attributes" queue = self.get_and_clear_queue( 
self.book_queue_name, self.book_type_name ) self.assertEqual(queue.name, self.book_queue_name) self.assertEqual(queue.connection, self.conn) - def test_2728(self): - "2728 - test getting write-only attributes" + def test_2726(self): + "2726 - test getting write-only attributes" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) @@ -628,8 +543,8 @@ def test_2728(self): with self.assertRaises(AttributeError): queue.deqoptions.deliverymode - def test_2729(self): - "2729 - test correlation deqoption" + def test_2727(self): + "2727 - test correlation deqoption" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) @@ -655,8 +570,8 @@ def test_2729(self): correlated_messages = queue.deqmany(num_messages + 1) self.assertEqual(len(correlated_messages), num_messages) - def test_2730(self): - "2730 - test correlation deqoption with pattern-matching characters" + def test_2728(self): + "2728 - test correlation deqoption with pattern-matching characters" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) @@ -672,8 +587,8 @@ def test_2730(self): messages = queue.deqmany(5) self.assertEqual(len(messages), 2) - def test_2731(self): - "2731 - test condition deqoption with priority" + def test_2729(self): + "2729 - test condition deqoption with priority" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) @@ -699,8 +614,8 @@ def test_2731(self): data = book.TITLE, book.AUTHORS, book.PRICE self.assertEqual(data, self.book_data[ix]) - def test_2732(self): - "2732 - test mode deqoption with DEQ_REMOVE_NODATA" + def test_2730(self): + "2730 - test mode deqoption with DEQ_REMOVE_NODATA" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) @@ -720,42 +635,39 @@ def test_2732(self): self.assertIsNone(message.payload.AUTHORS) self.assertIsNone(message.payload.PRICE) - def test_2733(self): - "2733 - test payload_type returns the correct value" + def test_2731(self): + "2731 - test payload_type returns the correct value" books_type = self.conn.gettype(self.book_type_name) queue = self.conn.queue(self.book_queue_name, books_type) self.assertEqual(queue.payload_type, books_type) - queue = self.conn.queue("TEST_RAW_QUEUE") - self.assertIsNone(queue.payload_type) - - def test_2734(self): - "2734 - test deprecated attributes (enqOptions, deqOptions)" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") + def test_2732(self): + "2732 - test deprecated attributes (enqOptions, deqOptions)" + books_type = self.conn.gettype(self.book_type_name) + queue = self.conn.queue(self.book_queue_name, books_type) self.assertEqual(queue.enqOptions, queue.enqoptions) self.assertEqual(queue.deqOptions, queue.deqoptions) - def test_2735(self): - "2735 - test deprecated AQ methods (enqOne, deqOne)" - value = b"Test 2734" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.enqOne(self.conn.msgproperties(value)) + def test_2733(self): + "2733 - test deprecated AQ methods (enqOne, deqOne)" + books_type = self.conn.gettype(self.book_type_name) + queue = self.conn.queue(self.book_queue_name, books_type) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + queue.enqOne(self.conn.msgproperties(book)) props = queue.deqOne() - self.assertEqual(props.payload, value) + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + self.assertEqual(results, self.book_data[0]) - def test_2736(self): - "2736 - test enqueuing to an object queue with the wrong 
payload" + def test_2734(self): + "2734 - test enqueuing to an object queue with the wrong payload" queue = self.get_and_clear_queue( self.book_queue_name, self.book_type_name ) props = self.conn.msgproperties(payload="A string") with self.assertRaisesFullCode("DPY-2062"): queue.enqone(props) - typ = self.conn.gettype("UDT_SUBOBJECT") - obj = typ.newobject() - props = self.conn.msgproperties(payload=obj) - with self.assertRaisesFullCode("DPY-2062"): - queue.enqone(props) if __name__ == "__main__": diff --git a/tests/test_2800_bulk_aq.py b/tests/test_2800_aq_bulk.py similarity index 99% rename from tests/test_2800_bulk_aq.py rename to tests/test_2800_aq_bulk.py index 5eef9e90..a5bdaced 100644 --- a/tests/test_2800_bulk_aq.py +++ b/tests/test_2800_aq_bulk.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index 44adbdfe..04bbc572 100644 --- a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -28,6 +28,7 @@ import oracledb import test_env +import threading class TestCase(test_env.BaseTestCase): @@ -40,6 +41,15 @@ class TestCase(test_env.BaseTestCase): b"sample raw data 6", ] + def __deq_in_thread(self, results): + with test_env.get_connection() as conn: + queue = conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.wait = 10 + props = queue.deqone() + if props is not None: + results.append(props.payload) + conn.commit() + def __verify_attr(self, obj, attrName, value): setattr(obj, attrName, value) self.assertEqual(getattr(obj, attrName), value) @@ -107,10 +117,17 @@ def test_7804(self): self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) def test_7805(self): - "7805 - test errors for invalid values for enqueue" + "7805 - test waiting for dequeue" queue = self.get_and_clear_queue("TEST_RAW_QUEUE") + results = [] + thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) + thread.start() value = self.raw_data[0] - self.assertRaises(TypeError, queue.enqone, value) + props = self.conn.msgproperties(payload=value) + queue.enqone(props) + self.conn.commit() + thread.join() + self.assertEqual(results, [value]) def test_7806(self): "7806 - test getting/setting message properties attributes" diff --git a/tests/test_8200_bulk_aq_async.py b/tests/test_8200_aq_bulk_async.py similarity index 100% rename from tests/test_8200_bulk_aq_async.py rename to tests/test_8200_aq_bulk_async.py diff --git a/tests/test_8300_aq_json.py b/tests/test_8300_aq_json.py new file mode 100644 index 00000000..de725bf5 --- /dev/null +++ b/tests/test_8300_aq_json.py @@ -0,0 +1,418 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +8300 - Module for testing AQ with JSON queues +""" + +import datetime +import decimal +import threading +import unittest + +import oracledb +import test_env + + +class TestCase(test_env.BaseTestCase): + json_queue_name = "TEST_JSON_QUEUE" + json_data = [ + [ + 2.75, + True, + "Ocean Beach", + b"Some bytes", + {"keyA": 1.0, "KeyB": "Melbourne"}, + datetime.datetime(2022, 8, 1, 0, 0), + ], + [ + True, + False, + "String", + b"Some Bytes", + {}, + {"name": None}, + {"name": "John"}, + {"age": 30}, + {"Permanent": True}, + { + "employee": { + "name": "John", + "age": 30, + "city": "Delhi", + "Parmanent": True, + } + }, + {"employees": ["John", "Matthew", "James"]}, + { + "employees": [ + {"employee1": {"name": "John", "city": "Delhi"}}, + {"employee2": {"name": "Matthew", "city": "Mumbai"}}, + {"employee3": {"name": "James", "city": "Bangalore"}}, + ] + }, + ], + [ + datetime.datetime.today(), + datetime.datetime(2004, 2, 1, 3, 4, 5), + datetime.datetime(2020, 12, 2, 13, 29, 14), + datetime.timedelta(8.5), + datetime.datetime(2002, 12, 13, 9, 36, 0), + oracledb.Timestamp(2002, 12, 13, 9, 36, 0), + datetime.datetime(2002, 12, 13), + ], + dict(name="John", age=30, city="New York"), + [ + 0, + 1, + 25.25, + 6088343244, + -9999999999999999999, + decimal.Decimal("0.25"), + decimal.Decimal("10.25"), + decimal.Decimal("319438950232418390.273596"), + ], + ] + + def __deq_in_thread(self, results): + with test_env.get_connection() as conn: + queue = conn.queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = 10 + props = queue.deqone() + if props is not None: + results.append(props.payload) + conn.commit() + + def __verify_attr(self, obj, attrName, value): + setattr(obj, attrName, value) + self.assertEqual(getattr(obj, attrName), value) + + def test_8300(self): + "8300 - test dequeuing an empty JSON queue" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + self.assertIsNone(props) + + def test_8301(self): + "8301 - test enqueuing and dequeuing multiple JSON messages" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + props = self.conn.msgproperties() + for data in self.json_data: + props.payload = data + queue.enqone(props) + self.conn.commit() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: + props = queue.deqone() + if props is None: + break + results.append(props.payload) + self.conn.commit() + self.assertEqual(results, self.json_data) + + @unittest.skip("awaiting fix for bug 37746852") + def test_8302(self): + "8302 - test dequeuing with DEQ_REMOVE_NODATA option" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[1] + props = self.conn.msgproperties(payload=data) + queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = queue.deqone() + 
self.assertIsNotNone(props) + self.assertIsNone(props.payload) + + def test_8303(self): + "8303 - test getting/setting dequeue options attributes" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + options = queue.deqoptions + self.__verify_attr(options, "condition", "TEST_CONDITION") + self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") + self.__verify_attr(options, "correlation", "TEST_CORRELATION") + self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) + self.__verify_attr( + options, "navigation", oracledb.DEQ_NEXT_TRANSACTION + ) + self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") + self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + self.__verify_attr(options, "wait", 1287) + self.__verify_attr(options, "msgid", b"mID") + + def test_8304(self): + "8304 - test waiting for dequeue" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + results = [] + thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) + thread.start() + data = self.json_data[0] + props = self.conn.msgproperties(payload=data) + queue.enqone(props) + self.conn.commit() + thread.join() + self.assertEqual(results, [data]) + + def test_8305(self): + "8305 - test getting/setting enqueue options attributes" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + options = queue.enqoptions + self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + def test_8306(self): + "8306 - test getting/setting message properties attributes" + props = self.conn.msgproperties() + self.__verify_attr(props, "correlation", "TEST_CORRELATION") + self.__verify_attr(props, "delay", 60) + self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + self.__verify_attr(props, "expiration", 30) + self.assertEqual(props.attempts, 0) + self.__verify_attr(props, "priority", 1) + self.assertEqual(props.state, oracledb.MSG_READY) + self.assertEqual(props.deliverymode, 0) + + def test_8307(self): + "8307 - test enqueue visibility options - ENQ_ON_COMMIT" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = self.conn.msgproperties(payload=data) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue(self.json_queue_name, "JSON") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + self.assertIsNone(props) + self.conn.commit() + props = queue.deqone() + self.assertIsNotNone(props) + + def test_8308(self): + "8308 - test enqueue visibility option - ENQ_IMMEDIATE" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=data) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue(self.json_queue_name, "JSON") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + data = props.payload + results = data + other_conn.commit() + self.assertEqual(results, self.json_data[0]) + + def test_8309(self): + "8309 - test enqueue/dequeue delivery modes identical - persistent" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + 
queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=data) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue(self.json_queue_name, "JSON") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + data = props.payload + results = data + other_conn.commit() + self.assertEqual(results, self.json_data[0]) + + def test_8310(self): + "8310 - test error for message with no payload" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + props = self.conn.msgproperties() + with self.assertRaisesFullCode("DPY-2000"): + queue.enqone(props) + + def test_8311(self): + "8311 - verify that the msgid property is returned correctly" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + props = self.conn.msgproperties(payload=data) + self.assertIsNone(props.msgid) + queue.enqone(props) + self.cursor.execute("select msgid from JSON_QUEUE_TAB") + (actual_msgid,) = self.cursor.fetchone() + self.assertEqual(props.msgid, actual_msgid) + props = queue.deqone() + self.assertEqual(props.msgid, actual_msgid) + + def test_8312(self): + "8312 - test message props enqtime" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + self.cursor.execute("select current_timestamp from dual") + (start_date,) = self.cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = self.conn.msgproperties(payload=data) + queue.enqone(props) + props = queue.deqone() + self.cursor.execute("select current_timestamp from dual") + (end_date,) = self.cursor.fetchone() + end_date = end_date.replace(microsecond=0) + self.assertTrue(start_date <= props.enqtime <= end_date) + + def test_8313(self): + "8313 - test message props declared attributes" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + values = dict( + payload=data, + correlation="TEST_CORRELATION", + delay=0, + exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", + expiration=15, + priority=1, + ) + props = self.conn.msgproperties(**values) + for attr_name in values: + self.assertEqual(getattr(props, attr_name), values[attr_name]) + queue.enqone(props) + self.conn.commit() + prop = queue.deqone() + for attr_name in values: + self.assertEqual(getattr(prop, attr_name), values[attr_name]) + + def test_8314(self): + "8314 - test getting queue attributes" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + self.assertEqual(queue.name, "TEST_JSON_QUEUE") + self.assertEqual(queue.connection, self.conn) + + def test_8315(self): + "8315 - test getting write-only attributes" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + for options in (queue.enqoptions, queue.deqoptions): + with self.assertRaises(AttributeError): + options.deliverymode + + def test_8316(self): + "8316 - test deqoption condition with priority" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] + for priority in priorities: + data = self.json_data[0] + props = self.conn.msgproperties(payload=data, priority=priority) + queue.enqone(props) + + queue.deqoptions.condition = "priority = 9" + results = [] + while True: + props = queue.deqone() + if props is None: + break + 
results.append(props.payload) + self.conn.commit() + self.assertEqual(len(results), 3) + + def test_8317(self): + "8317 - test deqoption correlation" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + correlations = [ + "sample", + "sample correlation", + "sample", + "sample", + "sample correlation", + ] + for correlation in correlations: + data = self.json_data[0] + props = self.conn.msgproperties( + payload=data, correlation=correlation + ) + queue.enqone(props) + self.conn.commit() + queue.deqoptions.correlation = "sample correlation" + results = [] + while True: + props = queue.deqone() + if props is None: + break + results.append(props.payload) + self.conn.commit() + self.assertEqual(len(results), 2) + + def test_8318(self): + "8318 - test deqoption msgid" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + props = self.conn.msgproperties(payload=data) + queue.enqone(props) + queue.enqone(props) + self.conn.commit() + msgid = props.msgid + queue.enqone(props) + self.conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.msgid = msgid + prop = queue.deqone() + self.conn.commit() + self.assertEqual(prop.msgid, msgid) + + def test_8319(self): + "8319 - test payload_type returns the correct value" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + self.assertEqual(queue.payload_type, "JSON") + + def test_8320(self): + "8320 - test deprecated attributes (enqOptions, deqOptions)" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + self.assertEqual(queue.enqOptions, queue.enqoptions) + self.assertEqual(queue.deqOptions, queue.deqoptions) + + def test_8321(self): + "8321 - test deprecated AQ methods (enqOne, deqOne)" + data = self.json_data[0] + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + queue.enqOne(self.conn.msgproperties(payload=data)) + props = queue.deqOne() + self.assertEqual(props.payload, data) + + def test_8322(self): + "8322 - test wrong payload type" + queue = self.get_and_clear_queue(self.json_queue_name, "JSON") + props = self.conn.msgproperties(payload="A string") + with self.assertRaisesFullCode("DPY-2062"): + queue.enqone(props) + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_8400_aq_dbobject_async.py b/tests/test_8400_aq_dbobject_async.py new file mode 100644 index 00000000..018c58d8 --- /dev/null +++ b/tests/test_8400_aq_dbobject_async.py @@ -0,0 +1,484 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +8400 - Module for testing AQ with DbObject payloads with asyncio. +""" + +import decimal +import unittest + +import oracledb +import test_env + + +@unittest.skipUnless( + test_env.get_is_thin(), "asyncio not supported in thick mode" +) +class TestCase(test_env.BaseAsyncTestCase): + book_type_name = "UDT_BOOK" + book_queue_name = "TEST_BOOK_QUEUE" + book_data = [ + ("Wings of Fire", "A.P.J. Abdul Kalam", decimal.Decimal("15.75")), + ("The Story of My Life", "Hellen Keller", decimal.Decimal("10.50")), + ("The Chronicles of Narnia", "C.S. Lewis", decimal.Decimal("25.25")), + ] + + def __verify_attr(self, obj, attrName, value): + setattr(obj, attrName, value) + self.assertEqual(getattr(obj, attrName), value) + + async def test_8400(self): + "8400 - test dequeuing an empty queue" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + + async def test_8401(self): + "8401 - test enqueuing and dequeuing multiple messages" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + props = self.conn.msgproperties() + for title, authors, price in self.book_data: + props.payload = book = queue.payload_type.newobject() + book.TITLE = title + book.AUTHORS = authors + book.PRICE = price + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: + props = await queue.deqone() + if props is None: + break + book = props.payload + row = (book.TITLE, book.AUTHORS, book.PRICE) + results.append(row) + await self.conn.commit() + self.assertEqual(results, self.book_data) + + async def test_8402(self): + "8402 - test dequeuing with DEQ_REMOVE_NODATA option" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[1] + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = await queue.deqone() + self.assertIsNotNone(props) + self.assertIsNone(props.payload.TITLE) + + async def test_8403(self): + "8403 - test getting/setting dequeue options attributes" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + options = queue.deqoptions + self.__verify_attr(options, "condition", "TEST_CONDITION") + self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") + self.__verify_attr(options, "correlation", "TEST_CORRELATION") + self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) + self.__verify_attr( + options, "navigation", oracledb.DEQ_NEXT_TRANSACTION + ) + self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") + self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + self.__verify_attr(options, "wait", 1287) + self.__verify_attr(options, "msgid", b"mID") + + async def test_8404(self): + "8404 - test getting/setting enqueue options attributes" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + options = queue.enqoptions + 
self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + async def test_8405(self): + "8405 - test errors for invalid values for enqueue" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + with self.assertRaises(TypeError): + await queue.enqone(book) + + async def test_8406(self): + "8406 - test getting/setting message properties attributes" + props = self.conn.msgproperties() + self.__verify_attr(props, "correlation", "TEST_CORRELATION") + self.__verify_attr(props, "delay", 60) + self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + self.__verify_attr(props, "expiration", 30) + self.assertEqual(props.attempts, 0) + self.__verify_attr(props, "priority", 1) + self.assertEqual(props.state, oracledb.MSG_READY) + self.assertEqual(props.deliverymode, 0) + + async def test_8407(self): + "8407 - test enqueue visibility option - ENQ_ON_COMMIT" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(self.book_type_name) + queue = other_conn.queue(self.book_queue_name, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + await self.conn.commit() + props = await queue.deqone() + await other_conn.commit() + self.assertIsNotNone(props) + + async def test_8408(self): + "8408 - test enqueue visibility option - ENQ_IMMEDIATE" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(self.book_type_name) + queue = other_conn.queue(self.book_queue_name, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + self.assertEqual(results, self.book_data[0]) + + async def test_8409(self): + "8409 - test enqueue/dequeue delivery modes identical - buffered" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(self.book_type_name) + queue = other_conn.queue(self.book_queue_name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await 
queue.deqone() + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + self.assertEqual(results, self.book_data[0]) + + async def test_8410(self): + "8410 - test enqueue/dequeue delivery modes identical - persistent" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(self.book_type_name) + queue = other_conn.queue(self.book_queue_name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + self.assertEqual(results, self.book_data[0]) + + async def test_8411(self): + "8411 - test enqueue/dequeue delivery modes the same" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(self.book_type_name) + queue = other_conn.queue(self.book_queue_name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + self.assertEqual(results, self.book_data[0]) + + async def test_8412(self): + "8412 - test enqueue/dequeue delivery modes different" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(self.book_type_name) + queue = other_conn.queue(self.book_queue_name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + + async def test_8413(self): + "8413 - test error for message with no payload" + books_type = await self.conn.gettype(self.book_type_name) + queue = self.conn.queue(self.book_queue_name, books_type) + props = self.conn.msgproperties() + with self.assertRaisesFullCode("DPY-2000"): + await queue.enqone(props) + + async def test_8414(self): + 
"8414 - verify that the msgid property is returned correctly" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + props = self.conn.msgproperties(payload=book) + self.assertIsNone(props.msgid) + await queue.enqone(props) + await self.cursor.execute("select msgid from book_queue_tab") + (actual_msgid,) = await self.cursor.fetchone() + self.assertEqual(props.msgid, actual_msgid) + props = await queue.deqone() + self.assertEqual(props.msgid, actual_msgid) + + async def test_8415(self): + "8415 - test message props enqtime" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + await self.cursor.execute("select current_timestamp from dual") + (start_date,) = await self.cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + props = await queue.deqone() + await self.cursor.execute("select current_timestamp from dual") + (end_date,) = await self.cursor.fetchone() + end_date = end_date.replace(microsecond=0) + self.assertTrue(start_date <= props.enqtime <= end_date) + + async def test_8416(self): + "8416 - test message props declared attributes" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + values = dict( + payload=book, + correlation="TEST_CORRELATION", + delay=7, + exceptionq="TEST_EXCEPTIONQ", + expiration=10, + priority=1, + ) + props = self.conn.msgproperties(**values) + for attr_name in values: + self.assertEqual(getattr(props, attr_name), values[attr_name]) + + async def test_8417(self): + "8417 - test error for invalid type for payload_type" + self.assertRaises( + TypeError, self.conn.queue, "THE QUEUE", payload_type=4 + ) + + async def test_8418(self): + "8418 - test getting queue attributes" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + self.assertEqual(queue.name, self.book_queue_name) + self.assertEqual(queue.connection, self.conn) + + async def test_8419(self): + "8419 - test getting write-only attributes" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + with self.assertRaises(AttributeError): + queue.enqoptions.deliverymode + with self.assertRaises(AttributeError): + queue.deqoptions.deliverymode + + async def test_8420(self): + "8420 - test correlation deqoption" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + correlations = ["Math", "Programming"] + num_messages = 3 + messages = [ + self.conn.msgproperties(payload=book, correlation=c) + for c in correlations + for i in range(num_messages) + ] + await queue.enqmany(messages) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = correlations[0] + correlated_messages = await queue.deqmany(num_messages + 1) + self.assertEqual(len(correlated_messages), num_messages) + + queue.deqoptions.correlation = correlations[1] + with self.assertRaisesFullCode("ORA-25241"): + await queue.deqone() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + correlated_messages = await queue.deqmany(num_messages + 1) + self.assertEqual(len(correlated_messages), num_messages) + + async def test_8421(self): + "8421 - 
test correlation deqoption with pattern-matching characters" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] + for correlation in ("PreCalculus-math1", "Calculus-Math2"): + props = self.conn.msgproperties( + payload=book, correlation=correlation + ) + await queue.enqone(props) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = "%Calculus-%ath_" + messages = await queue.deqmany(5) + self.assertEqual(len(messages), 2) + + async def test_8422(self): + "8422 - test condition deqoption with priority" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + + priorities = [5, 10] + indexes = [0, 1] + for priority, ix in zip(priorities, indexes): + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = self.book_data[ix] + props = self.conn.msgproperties(payload=book, priority=priority) + await queue.enqone(props) + + queue.deqoptions.condition = "priority = 9" + messages = await queue.deqmany(3) + self.assertEqual(len(messages), 0) + + for priority, ix in zip(priorities, indexes): + queue.deqoptions.condition = f"priority = {priority}" + messages = await queue.deqmany(3) + self.assertEqual(len(messages), 1) + book = messages[0].payload + data = book.TITLE, book.AUTHORS, book.PRICE + self.assertEqual(data, self.book_data[ix]) + + async def test_8423(self): + "8423 - test mode deqoption with DEQ_REMOVE_NODATA" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + + book = queue.payload_type.newobject() + for data in self.book_data: + book.TITLE, book.AUTHORS, book.PRICE = data + props = self.conn.msgproperties(payload=book) + await queue.enqone(props) + + messages = await queue.deqmany(5) + self.assertEqual(len(messages), 3) + for message in messages: + self.assertIsNone(message.payload.TITLE) + self.assertIsNone(message.payload.AUTHORS) + self.assertIsNone(message.payload.PRICE) + + async def test_8424(self): + "8424 - test payload_type returns the correct value" + books_type = await self.conn.gettype(self.book_type_name) + queue = self.conn.queue(self.book_queue_name, books_type) + self.assertEqual(queue.payload_type, books_type) + + async def test_8425(self): + "8425 - test enqueuing to an object queue with the wrong payload" + queue = await self.get_and_clear_queue( + self.book_queue_name, self.book_type_name + ) + props = self.conn.msgproperties(payload="A string") + with self.assertRaisesFullCode("DPY-2062"): + await queue.enqone(props) + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_8500_aq_json_async.py b/tests/test_8500_aq_json_async.py new file mode 100644 index 00000000..9375d4bb --- /dev/null +++ b/tests/test_8500_aq_json_async.py @@ -0,0 +1,412 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. 
+# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +8500 - Module for testing AQ with JSON queues with asyncio +""" + +import asyncio +import datetime +import decimal +import unittest + +import oracledb +import test_env + + +@unittest.skipUnless( + test_env.get_is_thin(), "asyncio not supported in thick mode" +) +class TestCase(test_env.BaseAsyncTestCase): + json_queue_name = "TEST_JSON_QUEUE" + json_data = [ + [ + 2.75, + True, + "Ocean Beach", + b"Some bytes", + {"keyA": 1.0, "KeyB": "Melbourne"}, + datetime.datetime(2022, 8, 1, 0, 0), + ], + [ + True, + False, + "String", + b"Some Bytes", + {}, + {"name": None}, + {"name": "John"}, + {"age": 30}, + {"Permanent": True}, + { + "employee": { + "name": "John", + "age": 30, + "city": "Delhi", + "Parmanent": True, + } + }, + {"employees": ["John", "Matthew", "James"]}, + { + "employees": [ + {"employee1": {"name": "John", "city": "Delhi"}}, + {"employee2": {"name": "Matthew", "city": "Mumbai"}}, + {"employee3": {"name": "James", "city": "Bangalore"}}, + ] + }, + ], + [ + datetime.datetime.today(), + datetime.datetime(2004, 2, 1, 3, 4, 5), + datetime.datetime(2020, 12, 2, 13, 29, 14), + datetime.timedelta(8.5), + datetime.datetime(2002, 12, 13, 9, 36, 0), + oracledb.Timestamp(2002, 12, 13, 9, 36, 0), + datetime.datetime(2002, 12, 13), + ], + dict(name="John", age=30, city="New York"), + [ + 0, + 1, + 25.25, + 6088343244, + -9999999999999999999, + decimal.Decimal("0.25"), + decimal.Decimal("10.25"), + decimal.Decimal("319438950232418390.273596"), + ], + ] + + async def __deq_in_task(self, results): + async with test_env.get_connection_async() as conn: + queue = conn.queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = 10 + props = await queue.deqone() + if props is not None: + results.append(props.payload) + await conn.commit() + + def __verify_attr(self, obj, attrName, value): + setattr(obj, attrName, value) + self.assertEqual(getattr(obj, attrName), value) + + async def test_8500(self): + "8500 - test dequeuing an empty JSON queue" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + + async def test_8501(self): + "8501 - test enqueuing and dequeuing multiple JSON messages" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + props = self.conn.msgproperties() + for data in self.json_data: + props.payload = data + await queue.enqone(props) + await self.conn.commit() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: + props = await queue.deqone() + if props is None: + break + results.append(props.payload) + await self.conn.commit() + self.assertEqual(results, self.json_data) + + @unittest.skip("awaiting fix for bug 37746852") + 
async def test_8502(self): + "8502 - test dequeuing with DEQ_REMOVE_NODATA option" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[1] + props = self.conn.msgproperties(payload=data) + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = await queue.deqone() + self.assertIsNotNone(props) + self.assertIsNone(props.payload) + + async def test_8503(self): + "8503 - test getting/setting dequeue options attributes" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + options = queue.deqoptions + self.__verify_attr(options, "condition", "TEST_CONDITION") + self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") + self.__verify_attr(options, "correlation", "TEST_CORRELATION") + self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) + self.__verify_attr( + options, "navigation", oracledb.DEQ_NEXT_TRANSACTION + ) + self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") + self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + self.__verify_attr(options, "wait", 1287) + self.__verify_attr(options, "msgid", b"mID") + + async def test_8504(self): + "8504 - test waiting for dequeue" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + results = [] + task = asyncio.create_task(self.__deq_in_task(results)) + data = self.json_data[0] + props = self.conn.msgproperties(payload=data) + await queue.enqone(props) + await self.conn.commit() + await task + self.assertEqual(results, [data]) + + async def test_8505(self): + "8505 - test getting/setting enqueue options attributes" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + options = queue.enqoptions + self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + async def test_8506(self): + "8506 - test getting/setting message properties attributes" + props = self.conn.msgproperties() + self.__verify_attr(props, "correlation", "TEST_CORRELATION") + self.__verify_attr(props, "delay", 60) + self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + self.__verify_attr(props, "expiration", 30) + self.assertEqual(props.attempts, 0) + self.__verify_attr(props, "priority", 1) + self.assertEqual(props.state, oracledb.MSG_READY) + self.assertEqual(props.deliverymode, 0) + + async def test_8507(self): + "8507 - test enqueue visibility options - ENQ_ON_COMMIT" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = self.conn.msgproperties(payload=data) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue(self.json_queue_name, "JSON") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + self.assertIsNone(props) + await self.conn.commit() + props = await queue.deqone() + self.assertIsNotNone(props) + + async def test_8508(self): + "8508 - test enqueue visibility option - ENQ_IMMEDIATE" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=data) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue(self.json_queue_name, "JSON") + 
queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + data = props.payload + results = data + await other_conn.commit() + self.assertEqual(results, self.json_data[0]) + + async def test_8509(self): + "8509 - test enqueue/dequeue delivery modes identical - persistent" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = self.conn.msgproperties(payload=data) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue(self.json_queue_name, "JSON") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + data = props.payload + results = data + await other_conn.commit() + self.assertEqual(results, self.json_data[0]) + + async def test_8510(self): + "8510 - test error for message with no payload" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + props = self.conn.msgproperties() + with self.assertRaisesFullCode("DPY-2000"): + await queue.enqone(props) + + async def test_8511(self): + "8511 - verify that the msgid property is returned correctly" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + props = self.conn.msgproperties(payload=data) + self.assertIsNone(props.msgid) + await queue.enqone(props) + await self.cursor.execute("select msgid from JSON_QUEUE_TAB") + (actual_msgid,) = await self.cursor.fetchone() + self.assertEqual(props.msgid, actual_msgid) + props = await queue.deqone() + self.assertEqual(props.msgid, actual_msgid) + + async def test_8512(self): + "8512 - test message props enqtime" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + await self.cursor.execute("select current_timestamp from dual") + (start_date,) = await self.cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = self.conn.msgproperties(payload=data) + await queue.enqone(props) + props = await queue.deqone() + await self.cursor.execute("select current_timestamp from dual") + (end_date,) = await self.cursor.fetchone() + end_date = end_date.replace(microsecond=0) + self.assertTrue(start_date <= props.enqtime <= end_date) + + async def test_8513(self): + "8513 - test message props declared attributes" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + values = dict( + payload=data, + correlation="TEST_CORRELATION", + delay=0, + exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", + expiration=15, + priority=1, + ) + props = self.conn.msgproperties(**values) + for attr_name in values: + self.assertEqual(getattr(props, attr_name), values[attr_name]) + await queue.enqone(props) + await self.conn.commit() + prop = await queue.deqone() + for attr_name in values: + self.assertEqual(getattr(prop, attr_name), values[attr_name]) + + async def test_8514(self): + "8514 - test getting queue attributes" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + self.assertEqual(queue.name, "TEST_JSON_QUEUE") + self.assertEqual(queue.connection, self.conn) + + async def test_8515(self): + "8515 - test 
getting write-only attributes" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + for options in (queue.enqoptions, queue.deqoptions): + with self.assertRaises(AttributeError): + options.deliverymode + + async def test_8516(self): + "8516 - test deqoption condition with priority" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] + for priority in priorities: + data = self.json_data[0] + props = self.conn.msgproperties(payload=data, priority=priority) + await queue.enqone(props) + + queue.deqoptions.condition = "priority = 9" + results = [] + while True: + props = await queue.deqone() + if props is None: + break + results.append(props.payload) + await self.conn.commit() + self.assertEqual(len(results), 3) + + async def test_8517(self): + "8517 - test deqoption correlation" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + correlations = [ + "sample", + "sample correlation", + "sample", + "sample", + "sample correlation", + ] + for correlation in correlations: + data = self.json_data[0] + props = self.conn.msgproperties( + payload=data, correlation=correlation + ) + await queue.enqone(props) + await self.conn.commit() + queue.deqoptions.correlation = "sample correlation" + results = [] + while True: + props = await queue.deqone() + if props is None: + break + results.append(props.payload) + await self.conn.commit() + self.assertEqual(len(results), 2) + + async def test_8518(self): + "8518 - test deqoption msgid" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + data = self.json_data[0] + props = self.conn.msgproperties(payload=data) + await queue.enqone(props) + await queue.enqone(props) + await self.conn.commit() + msgid = props.msgid + await queue.enqone(props) + await self.conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.msgid = msgid + prop = await queue.deqone() + await self.conn.commit() + self.assertEqual(prop.msgid, msgid) + + async def test_8519(self): + "8519 - test payload_type returns the correct value" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + self.assertEqual(queue.payload_type, "JSON") + + async def test_8520(self): + "8520 - test deprecated attributes (enqOptions, deqOptions)" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + self.assertEqual(queue.enqOptions, queue.enqoptions) + self.assertEqual(queue.deqOptions, queue.deqoptions) + + async def test_8521(self): + "8521 - test wrong payload type" + queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + props = self.conn.msgproperties(payload="A string") + with self.assertRaisesFullCode("DPY-2062"): + await queue.enqone(props) + + +if __name__ == "__main__": + test_env.run_test_cases() From 43358dd493acad34df1a8e774239ce555317af7a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 25 Mar 2025 11:40:32 -0600 Subject: [PATCH 045/239] Remove Python 3.8 from builds. 
--- .github/workflows/build.yaml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 07ee2888..3628a3ca 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -33,7 +33,6 @@ jobs: - name: Generate script run: | echo export PYO_COMPILE_ARGS=-g0 > build.sh - echo /opt/python/cp38-cp38/bin/python3.8 -m build >> build.sh echo /opt/python/cp39-cp39/bin/python3.9 -m build >> build.sh echo /opt/python/cp310-cp310/bin/python3.10 -m build >> build.sh echo /opt/python/cp311-cp311/bin/python3.11 -m build >> build.sh @@ -66,7 +65,7 @@ jobs: strategy: matrix: os: [windows-latest, macos-latest] - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] arch: ['x86', ''] exclude: - os: macos-latest From 2303b9dda787a5729131578d46ab6868c7f3b92f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:07:52 -0600 Subject: [PATCH 046/239] Test suite improvements. --- tests/test_1100_connection.py | 5 ++++- tests/test_2200_number_var.py | 4 ++++ tests/test_2400_pool.py | 14 +++++++++++++- tests/test_2500_string_var.py | 3 +++ tests/test_8100_dataframe_async.py | 28 ++++++++++++++++++++++++++++ 5 files changed, 52 insertions(+), 2 deletions(-) diff --git a/tests/test_1100_connection.py b/tests/test_1100_connection.py index 1fff849e..89bec886 100644 --- a/tests/test_1100_connection.py +++ b/tests/test_1100_connection.py @@ -699,6 +699,9 @@ def test_1135(self): (instance_name,) = cursor.fetchone() self.assertEqual(conn.instance_name.upper(), instance_name) + @unittest.skipIf( + test_env.get_client_version() < (18, 1), "not supported on this client" + ) def test_1136(self): "1136 - test deprecated attributes" conn = test_env.get_connection() @@ -757,7 +760,7 @@ def test_1140(self): "select sys_context('userenv', 'service_name') from dual" ) (service_name,) = cursor.fetchone() - self.assertEqual(conn.service_name, service_name) + self.assertEqual(conn.service_name.upper(), service_name.upper()) def test_1141(self): "1141 - test transaction_in_progress" diff --git a/tests/test_2200_number_var.py b/tests/test_2200_number_var.py index 4a077713..173ca90d 100644 --- a/tests/test_2200_number_var.py +++ b/tests/test_2200_number_var.py @@ -27,6 +27,7 @@ """ import decimal +import unittest import oracledb import test_env @@ -73,6 +74,9 @@ def setUp(self): self.raw_data.append(data_tuple) self.data_by_key[i] = data_tuple + @unittest.skipIf( + test_env.get_client_version() < (12, 1), "not supported on this client" + ) def test_2200(self): "2200 - test binding in a boolean" result = self.cursor.callfunc( diff --git a/tests/test_2400_pool.py b/tests/test_2400_pool.py index 952ed446..08813d7b 100644 --- a/tests/test_2400_pool.py +++ b/tests/test_2400_pool.py @@ -167,6 +167,9 @@ def __verify_create_arg(self, arg_name, arg_value, sql): self.assertEqual(fetched_value, arg_value) pool.close() + @unittest.skipIf( + test_env.get_client_version() < (19, 1), "not supported on this client" + ) def test_2400(self): "2400 - test getting default pool parameters" pool = test_env.get_pool() @@ -231,6 +234,9 @@ def test_2401(self): self.assertEqual(user, test_env.get_proxy_user().upper()) conn.close() + @unittest.skipIf( + test_env.get_client_version() < (19, 1), "not supported on this client" + ) def test_2402(self): "2402 - test setting pool attributes" pool = test_env.get_pool() @@ -670,6 +676,9 @@ def test_2421(self): with 
self.assertRaisesFullCode("DPY-1002"): pool.acquire() + @unittest.skipIf( + test_env.get_client_version() < (19, 1), "not supported on this client" + ) def test_2422(self): "2422 - using the pool beyond max limit raises an error" pool = test_env.get_pool( @@ -857,7 +866,7 @@ def test_2437(self): test_env.get_server_version() < (12, 2), "not supported on this server" ) @unittest.skipIf( - test_env.get_client_version() < (12, 2), "not supported on this client" + test_env.get_client_version() < (19, 1), "not supported on this client" ) def test_2438(self): "2438 - ensure that timed wait times out with appropriate exception" @@ -867,6 +876,9 @@ def test_2438(self): with self.assertRaisesFullCode("DPY-4005"): pool.acquire() + @unittest.skipIf( + test_env.get_client_version() < (18, 1), "not supported on this client" + ) def test_2439(self): "2439 - ensure call timeout is reset on connections returned by pool" pool = test_env.get_pool(ping_timeout=1000, ping_interval=0) diff --git a/tests/test_2500_string_var.py b/tests/test_2500_string_var.py index cd4e7212..5b7cce4c 100644 --- a/tests/test_2500_string_var.py +++ b/tests/test_2500_string_var.py @@ -424,6 +424,9 @@ def test_2527(self): self.cursor.fetchall(), [(1, short_string), (2, long_string)] ) + @unittest.skipIf( + test_env.get_server_version() < (12, 2), "not supported on this server" + ) def test_2528(self): "2528 - test issue 50 - avoid error ORA-24816" cursor = self.conn.cursor() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index f10fb85a..8d29bc72 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -508,6 +508,34 @@ async def test_8119(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + async def test_8120(self): + "8120 - fetch clob" + data = [("test_8023",)] + self.__check_interop() + ora_df = await self.conn.fetch_df_all( + "select to_clob('test_8023') from dual" + ) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + + async def test_8121(self): + "8121 - fetch blob" + data = [(b"test_8024",)] + self.__check_interop() + ora_df = await self.conn.fetch_df_all( + "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" + ) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() From 6dcf2b4055fda69e1666454d360a41e83eabe5d5 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:08:15 -0600 Subject: [PATCH 047/239] Get the logical transaction id before the connection is closed. --- samples/transaction_guard.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/samples/transaction_guard.py b/samples/transaction_guard.py index a9f8fdf5..5a9c286a 100644 --- a/samples/transaction_guard.py +++ b/samples/transaction_guard.py @@ -93,6 +93,9 @@ input("Press ENTER when complete.") +ltxid = connection.ltxid +if not ltxid: + sys.exit("Logical transaction not available. Terminating.") try: connection.commit() # this should fail sys.exit("Session was not killed. 
Sample cannot continue.") @@ -101,9 +104,6 @@ print("Session is recoverable:", error_obj.isrecoverable) if not error_obj.isrecoverable: sys.exit("Session is not recoverable. Terminating.") -ltxid = connection.ltxid -if not ltxid: - sys.exit("Logical transaction not available. Terminating.") pool.drop(connection) # check if previous transaction completed From 1920c4220f56b33c761c499c046dbaaf93f253be Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:08:47 -0600 Subject: [PATCH 048/239] Correct support for booleans in data frames. --- doc/src/release_notes.rst | 1 + src/oracledb/impl/base/converters.pyx | 2 ++ src/oracledb/interchange/nanoarrow_bridge.pyx | 6 ++++ tests/test_8000_dataframe.py | 31 +++++++++++++++++++ tests/test_8100_dataframe_async.py | 27 ++++++++++++++++ 5 files changed, 67 insertions(+) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 26e02ecf..b413fe12 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -61,6 +61,7 @@ Common Changes and :meth:`Connection.fetch_df_batches()`: - Added support for CLOB, BLOB and RAW data types + - Fixed support for BOOLEAN data type - Fixed bug when NUMBER data is fetched that does not have a precision or scale specified and :attr:`defaults.fetch_decimals` is set to *True*. - More efficient processing when a significant amount of data is duplicated diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 7999f227..3614a2b1 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -232,6 +232,8 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, arrow_array.append_double(data.buffer.as_double) elif arrow_type == NANOARROW_TYPE_FLOAT: arrow_array.append_float(data.buffer.as_float) + elif arrow_type == NANOARROW_TYPE_BOOL: + arrow_array.append_int64(data.buffer.as_bool) elif arrow_type in ( NANOARROW_TYPE_BINARY, NANOARROW_TYPE_STRING, diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index b0eabafd..dd931d8c 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -89,6 +89,7 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowErrorCode ArrowArrayViewSetArray(ArrowArrayView* array_view, const ArrowArray* array, ArrowError* error) + int8_t ArrowBitGet(const uint8_t* bits, int64_t i) void ArrowSchemaInit(ArrowSchema* schema) ArrowErrorCode ArrowSchemaInitFromType(ArrowSchema* schema, ArrowType type) ArrowErrorCode ArrowSchemaSetTypeDateTime(ArrowSchema* schema, @@ -277,6 +278,7 @@ cdef class OracleArrowArray: int32_t *as_int32 double *as_double float *as_float + int8_t as_bool int64_t index uint8_t *ptr void* temp @@ -295,6 +297,10 @@ cdef class OracleArrowArray: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_float = data_buffer.data self.append_double(as_float[index]) + elif array.arrow_type == NANOARROW_TYPE_BOOL: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_bool = ArrowBitGet(data_buffer.data, index) + self.append_int64(as_bool) elif array.arrow_type == NANOARROW_TYPE_DECIMAL128: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) ArrowDecimalInit(&decimal, 128, self.precision, self.scale) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 06c91e3e..d287b249 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -27,6 +27,7 @@ """ import datetime import decimal +import unittest import 
oracledb @@ -580,6 +581,36 @@ def test_8025(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + @unittest.skipUnless( + test_env.get_client_version() >= (23, 1), "unsupported client" + ) + @unittest.skipUnless( + test_env.get_server_version() >= (23, 1), "unsupported server" + ) + def test_8026(self): + "8026 - fetch boolean" + data = [(True,), (False,), (False,), (True,), (True,)] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + select true + union all + select false + union all + select false + union all + select true + union all + select true + """ + ) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 8d29bc72..5cebcbd0 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -536,6 +536,33 @@ async def test_8121(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + @unittest.skipUnless( + test_env.get_server_version() >= (23, 1), "unsupported server" + ) + async def test_8122(self): + "8122 - fetch boolean" + data = [(True,), (False,), (False,), (True,), (True,)] + self.__check_interop() + ora_df = await self.conn.fetch_df_all( + """ + select true + union all + select false + union all + select false + union all + select true + union all + select true + """ + ) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() From 74a4edf36684ecd57e912fdce4ea3d5d39e8e904 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:09:44 -0600 Subject: [PATCH 049/239] Doc improvements. --- doc/src/api_manual/async_connection_pool.rst | 2 - doc/src/api_manual/connection.rst | 8 +- doc/src/api_manual/connection_pool.rst | 2 +- doc/src/api_manual/defaults.rst | 23 ++- doc/src/api_manual/deprecations.rst | 8 +- doc/src/api_manual/module.rst | 178 ++++++++----------- doc/src/user_guide/appendix_a.rst | 6 +- doc/src/user_guide/appendix_b.rst | 26 +-- doc/src/user_guide/appendix_c.rst | 13 +- doc/src/user_guide/bind.rst | 6 +- doc/src/user_guide/initialization.rst | 47 +++-- doc/src/user_guide/installation.rst | 22 +-- 12 files changed, 156 insertions(+), 185 deletions(-) diff --git a/doc/src/api_manual/async_connection_pool.rst b/doc/src/api_manual/async_connection_pool.rst index c1e74d77..ad3effad 100644 --- a/doc/src/api_manual/async_connection_pool.rst +++ b/doc/src/api_manual/async_connection_pool.rst @@ -163,8 +163,6 @@ AsyncConnectionPool Attributes forces a ping for every :meth:`AsyncConnectionPool.acquire()` and is not recommended. - Prior to cx_Oracle 8.2, the ping interval was fixed at *60* seconds. - .. 
attribute:: AsyncConnectionPool.soda_metadata_cache This read-write boolean attribute returns whether the SODA metadata cache diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 4e20dbe4..057e7755 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -219,6 +219,8 @@ Connection Methods This function performs a local check. To fully check a connection's health, use :meth:`Connection.ping()` which performs a round-trip to the database. + .. dbapimethodextension:: + .. method:: Connection.msgproperties(payload, correlation, delay, exceptionq, expiration, priority) Returns an object specifying the properties of messages used in advanced @@ -231,7 +233,9 @@ Connection Methods .. method:: Connection.ping() - Pings the database to verify if the connection is valid. + Pings the database to verify if the connection is valid. An exception is + thrown if it is not, in which case the connection should not be used by the + application and a new connection should be established instead. This function performs a :ref:`round-trip ` to the database, so it should not be used unnecessarily. @@ -239,6 +243,8 @@ Connection Methods Note connection pools will perform the same health check automatically, based on configuration settings. See :ref:`poolhealth`. + Also, see :meth:`Connection.is_healthy()` for a lightweight alternative. + .. dbapimethodextension:: .. method:: Connection.prepare() diff --git a/doc/src/api_manual/connection_pool.rst b/doc/src/api_manual/connection_pool.rst index 83bcdd4c..2de1ad9b 100644 --- a/doc/src/api_manual/connection_pool.rst +++ b/doc/src/api_manual/connection_pool.rst @@ -18,7 +18,7 @@ In python-oracledb, the type `pool` will show the class `oracledb.ConnectionPool This only affects the name. The following code will continue to work providing backward compatibility with -cx_Oracle: +the obsolete cx_Oracle driver: .. code-block:: python diff --git a/doc/src/api_manual/defaults.rst b/doc/src/api_manual/defaults.rst index 03464fb5..4174e881 100644 --- a/doc/src/api_manual/defaults.rst +++ b/doc/src/api_manual/defaults.rst @@ -83,12 +83,11 @@ Defaults Attributes This can help avoid issues with converting numbers from Oracle Database's decimal format to Python's binary format. - An output type handler such as previously required in cx_Oracle (see - `return_numbers_as_decimals.py `__) can alternatively be - used to adjust the returned type. If a type handler exists and returns a - variable (that is, ``cursor.var(...)``), then that return variable is used. - If the type handler returns *None*, then the value of + An output type handler such as previously required in the obsolete + cx_Oracle driver can alternatively be used to adjust the returned type. If + a type handler exists and returns a variable (that is, + ``cursor.var(...)``), then that return variable is used. If the type + handler returns *None*, then the value of ``oracledb.defaults.fetch_decimals`` is used to determine whether to return ``decimal.Decimal`` values. @@ -102,13 +101,11 @@ Defaults Attributes LOBs are larger than 1 GB, then this attribute should be set to *True* and the LOBs should be streamed. See :ref:`lobdata`. - An output type handler such as the one previously required in cx_Oracle - (see `return_lobs_as_strings.py `__) can - alternatively be used to adjust the returned type. If a type handler - exists and returns a variable (that is, `cursor.var(...)`), then that - return variable is used. 
If the type handler returns *None*, then the value - of ``oracledb.defaults.fetch_lobs`` is used. + An output type handler such as the one previously required in the obsolete + cx_Oracle driver can alternatively be used to adjust the returned type. If + a type handler exists and returns a variable (that is, `cursor.var(...)`), + then that return variable is used. If the type handler returns *None*, then + the value of ``oracledb.defaults.fetch_lobs`` is used. The value of ``oracledb.defaults.fetch_lobs`` does not affect LOBs returned as OUT binds. diff --git a/doc/src/api_manual/deprecations.rst b/doc/src/api_manual/deprecations.rst index bdaa44d0..92bb3dc3 100644 --- a/doc/src/api_manual/deprecations.rst +++ b/doc/src/api_manual/deprecations.rst @@ -11,8 +11,9 @@ and is no longer available in python-oracledb. The most recent deprecated and desupported features are listed first. The previous cx_Oracle deprecation announcements remain in force for -python-oracledb. The relevant functionality may be removed in a future version -of python-oracledb. +python-oracledb. The relevant functionality may be removed in a future version +of python-oracledb. The cx_Oracle driver itself is obsolete and should not be +used for new development. .. list-table-with-summary:: Deprecated in python-oracledb 3.0 :header-rows: 1 @@ -23,7 +24,8 @@ of python-oracledb. * - Name - Comments * - Parameter ``pool`` of :meth:`oracledb.connect()` and :meth:`oracledb.connect_async()` - - Use :meth:`ConnectionPool.acquire()` instead + - Use :meth:`ConnectionPool.acquire()`, or make use of the + :ref:`connection pool cache ` instead .. list-table-with-summary:: Desupported in python-oracledb 2.0 :header-rows: 1 diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 687cc2d1..8fb8dbda 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -22,14 +22,9 @@ Oracledb Methods values are the major version, minor version, update number, patch number, and port update number. - .. note:: - - This function can only be called when python-oracledb is in Thick - mode. See :ref:`enablingthick`. - - If ``clientversion()`` is called when in python-oracledb Thin mode, that - is, if :func:`oracledb.init_oracle_client()` is not called first, then an - exception will be thrown. + This function can only be called when python-oracledb is in Thick + mode. Using it in Thin mode will throw an exception. See + :ref:`enablingthick`. .. dbapimethodextension:: @@ -2847,8 +2842,8 @@ Oracledb Methods .. note:: - The time only data type is not supported by Oracle. Calling this - function will raise a NotSupportedError exception. + A time-only data type is not supported by Oracle Database. Calling this + function raises a NotSupportedError exception. .. function:: TimeFromTicks(ticks) @@ -2859,8 +2854,8 @@ Oracledb Methods .. note:: - The time only data type is not supported by Oracle. Calling this - function will raise a NotSupportedError exception. + A time-only data type is not supported by Oracle Database. Calling this + function raises a NotSupportedError exception. .. function:: Timestamp(year, month, day, hour, minute, second) @@ -3213,7 +3208,8 @@ The Connection Authorization mode constants belong to the enumeration called ``AuthMode``. They are possible values for the ``mode`` parameters of :meth:`oracledb.connect()`, :meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, and :meth:`oracledb.create_pool_async()`. 
-These constants have deprecated the authorization modes used in cx_Oracle 8.3. +These constants have deprecated the authorization modes used in the obsolete +cx_Oracle driver. .. dbapiconstantextension:: @@ -3227,145 +3223,119 @@ These constants have deprecated the authorization modes used in cx_Oracle 8.3. This constant is used to specify that default authentication is to take place. This is the default value if no mode is passed at all. - This enumerated value can also be identified by - ``oracledb.AuthMode.DEFAULT``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.DEFAULT``. - This constant deprecates the ``DEFAULT_AUTH`` constant that was used in - cx_Oracle 8.3, and was the default ``mode`` value. + This constant deprecates the ``DEFAULT_AUTH`` constant that was used in the + obsolete cx_Oracle driver, and was the default ``mode`` value. .. data:: AUTH_MODE_PRELIM This constant is used to specify that preliminary authentication is to be used. This is needed for performing database startup and shutdown. - This enumerated value can also be identified by - ``oracledb.AuthMode.PRELIM``. - - .. note:: + It can only be used in python-oracledb Thick mode for standalone + connections. - This constant can only be used in the python-oracledb Thick mode for - standalone connections. + Its enumerated value can also be identified by + ``oracledb.AuthMode.PRELIM``. - This constant deprecates the ``PRELIM_AUTH`` constant that was used in - cx_Oracle 8.3. + This constant deprecates the ``PRELIM_AUTH`` constant that was used in the + obsolete cx_Oracle driver. .. data:: AUTH_MODE_SYSASM This constant is used to specify that SYSASM access is to be acquired. - This enumerated value can also be identified by - ``oracledb.AuthMode.SYSASM``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.SYSASM``. - This constant deprecates the ``SYSASM`` constant that was used in - cx_Oracle 8.3. + This constant deprecates the ``SYSASM`` constant that was used in the + obsolete cx_Oracle driver. .. data:: AUTH_MODE_SYSBKP This constant is used to specify that SYSBACKUP access is to be acquired. - This enumerated value can also be identified by - ``oracledb.AuthMode.SYSBKP``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.SYSBKP``. - This constant deprecates the ``SYSBKP`` constant that was used in - cx_Oracle 8.3. + This constant deprecates the ``SYSBKP`` constant that was used in the + obsolete cx_Oracle driver. .. data:: AUTH_MODE_SYSDBA This constant is used to specify that SYSDBA access is to be acquired. 
- This enumerated value can also be identified by - ``oracledb.AuthMode.SYSDBA``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.SYSDBA``. - This constant deprecates the ``SYSDBA`` constant that was used in - cx_Oracle 8.3. + This constant deprecates the ``SYSDBA`` constant that was used in the + obsolete cx_Oracle driver. .. data:: AUTH_MODE_SYSDGD This constant is used to specify that SYSDG access is to be acquired. - This enumerated value can also be identified by - ``oracledb.AuthMode.SYSDGD``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.SYSDGD``. - This constant deprecates the ``SYSDGD`` constant that was used in - cx_Oracle 8.3. + This constant deprecates the ``SYSDGD`` constant that was used in the + obsolete cx_Oracle driver. .. data:: AUTH_MODE_SYSKMT This constant is used to specify that SYSKM access is to be acquired. - This enumerated value can also be identified by - ``oracledb.AuthMode.SYSKMT``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.SYSKMT``. - This constant deprecates the ``SYSKMT`` constant that was used in - cx_Oracle 8.3. + This constant deprecates the ``SYSKMT`` constant that was used in the + obsolete cx_Oracle driver. .. data:: AUTH_MODE_SYSOPER This constant is used to specify that SYSOPER access is to be acquired. - This enumerated value can also be identified by - ``oracledb.AuthMode.SYSOPER``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.SYSOPER``. - This constant deprecates the ``SYSOPER`` constant that was used in - cx_Oracle 8.3. + This constant deprecates the ``SYSOPER`` constant that was used in the + obsolete cx_Oracle driver. .. data:: AUTH_MODE_SYSRAC This constant is used to specify that SYSRAC access is to be acquired. - This enumerated value can also be identified by - ``oracledb.AuthMode.SYSRAC``. - - .. note:: + It can be used for standalone and pooled connections in python-oracledb + Thin mode, and for standalone connections in Thick mode. - This constant can be used for standalone and pooled connections in the - python-oracledb Thin mode, and for standalone connections in the Thick - mode. + Its enumerated value can also be identified by + ``oracledb.AuthMode.SYSRAC``. - This constant deprecates the ``SYSRAC`` constant that was used in - cx_Oracle 8.3. 
+ This constant deprecates the ``SYSRAC`` constant that was used in the + obsolete cx_Oracle driver. .. _pipeline-operation-types: @@ -3610,7 +3580,8 @@ The Connection Pool Get mode constants belong to the enumeration called ``PoolGetMode``. They are possible values for the ``getmode`` parameters of :meth:`oracledb.create_pool()`, :meth:`oracledb.create_pool_async()`, :meth:`PoolParams.set()`, and for related attributes. These constants have -deprecated the Session Pool mode constants that were used in cx_Oracle 8.3. +deprecated the Session Pool mode constants that were used in the obsolete +cx_Oracle driver. .. dbapiconstantextension:: @@ -3635,7 +3606,7 @@ deprecated the Session Pool mode constants that were used in cx_Oracle 8.3. ``oracledb.PoolGetMode.FORCEGET``. This constant deprecates the ``SPOOL_ATTRVAL_FORCEGET`` constant that was - used in cx_Oracle 8.3. + used in the obsolete cx_Oracle driver. .. data:: POOL_GETMODE_NOWAIT @@ -3650,7 +3621,8 @@ deprecated the Session Pool mode constants that were used in cx_Oracle 8.3. ``oracledb.PoolGetMode.NOWAIT``. This constant deprecates the ``SPOOL_ATTRVAL_NOWAIT`` constant that was - used in cx_Oracle 8.3, and was the default ``getmode`` value. + used in the obsolete cx_Oracle driver, and was the default ``getmode`` + value. .. data:: POOL_GETMODE_WAIT @@ -3662,8 +3634,8 @@ deprecated the Session Pool mode constants that were used in cx_Oracle 8.3. This enumerated value can also be identified by ``oracledb.PoolGetMode.WAIT``. - This constant deprecates the ``SPOOL_ATTRVAL_WAIT`` constant that was - used in cx_Oracle 8.3. + This constant deprecates the ``SPOOL_ATTRVAL_WAIT`` constant that was used + in the obsolete cx_Oracle driver. .. data:: POOL_GETMODE_TIMEDWAIT @@ -3677,7 +3649,7 @@ deprecated the Session Pool mode constants that were used in cx_Oracle 8.3. ``oracledb.PoolGetMode.TIMEDWAIT``. This constant deprecates the ``SPOOL_ATTRVAL_TIMEDWAIT`` constant that was - used in cx_Oracle 8.3. + used in the obsolete cx_Oracle driver. .. _drcppurityconsts: @@ -3689,7 +3661,7 @@ The Connection Pool Purity constants belong to the enumeration called of :meth:`oracledb.create_pool()`, :meth:`ConnectionPool.acquire()`, :meth:`oracledb.connect()`, :meth:`oracledb.create_pool_async()`, and :meth:`oracledb.connect_async()`. These constants have deprecated the Session -Pool purity constants that were used in cx_Oracle 8.3. +Pool purity constants that were used in the obsolete cx_Oracle driver. .. dbapiconstantextension:: @@ -3708,7 +3680,7 @@ Pool purity constants that were used in cx_Oracle 8.3. ``oracledb.Purity.DEFAULT``. This constant deprecates the ``ATTR_PURITY_DEFAULT`` constant that was used - in cx_Oracle 8.3, and was the default ``purity`` value. + in the obsolete cx_Oracle driver, and was the default ``purity`` value. .. data:: PURITY_NEW @@ -3718,7 +3690,7 @@ Pool purity constants that were used in cx_Oracle 8.3. This enumerated value can also be identified by ``oracledb.Purity.NEW``. This constant deprecates the ``ATTR_PURITY_NEW`` constant that was used in - cx_Oracle 8.3. + the obsolete cx_Oracle driver. .. data:: PURITY_SELF @@ -3729,7 +3701,7 @@ Pool purity constants that were used in cx_Oracle 8.3. This enumerated value can also be identified by ``oracledb.Purity.SELF``. This constant deprecates the ``ATTR_PURITY_SELF`` constant that was used in - cx_Oracle 8.3. + the obsolete cx_Oracle driver. 
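As a minimal sketch of how these enumeration-style constants are passed to the
connection and pooling APIs, assuming placeholder credentials, a placeholder
DSN, and a DRCP-enabled service for the ``purity`` example:

.. code-block:: python

    import oracledb

    # Standalone connection with SYSDBA access (credentials and DSN are
    # placeholders).
    conn = oracledb.connect(
        user="sys",
        password="example",
        dsn="dbhost.example.com/orclpdb1",
        mode=oracledb.AUTH_MODE_SYSDBA,
    )

    # Pool whose acquire() calls wait for a free connection instead of
    # raising an error when the pool is fully in use.
    pool = oracledb.create_pool(
        user="app_user",
        password="example",
        dsn="dbhost.example.com/orclpdb1",
        min=1,
        max=4,
        increment=1,
        getmode=oracledb.POOL_GETMODE_WAIT,
    )

    # DRCP connection that may reuse a previously used pooled server session
    # (assumes a DRCP-enabled service).
    drcp_conn = oracledb.connect(
        user="app_user",
        password="example",
        dsn="dbhost.example.com:1521/orclpdb1:pooled",
        cclass="MYAPP",
        purity=oracledb.PURITY_SELF,
    )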
Subscription Grouping Classes ----------------------------- diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 6de5bb7f..5560ff54 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -14,14 +14,14 @@ Database API v2.0 Specification `PEP 249 enable Thick mode. The following table summarizes the Oracle Database features supported by -python-oracledb Thin and Thick modes, and by cx_Oracle 8.3. For more details -see :ref:`driverdiff` and :ref:`compatibility`. +python-oracledb Thin and Thick modes, and by the obsolete cx_Oracle driver. +For more details see :ref:`driverdiff` and :ref:`compatibility`. .. list-table-with-summary:: Features Supported by python-oracledb and cx_Oracle 8.3 :header-rows: 1 :class: wy-table-responsive :align: center - :summary: The first column displays the Oracle feature. The second column indicates whether the feature is supported in the python-oracledb Thin mode. The third column indicates whether the feature is supported in the python-oracledb Thick mode. The fourth column indicates if the feature is supported in cx_Oracle 8.3. + :summary: The first column displays the Oracle feature. The second column indicates whether the feature is supported in python-oracledb Thin mode. The third column indicates whether the feature is supported in python-oracledb Thick mode. The fourth column indicates if the feature is supported in the obsolete cx_Oracle driver. * - Oracle Feature - python-oracledb Thin Mode diff --git a/doc/src/user_guide/appendix_b.rst b/doc/src/user_guide/appendix_b.rst index c1e2ae51..98748205 100644 --- a/doc/src/user_guide/appendix_b.rst +++ b/doc/src/user_guide/appendix_b.rst @@ -42,7 +42,8 @@ mode. However, only one of these modes can be used in each Python process: :func:`oracledb.connect()` and :func:`oracledb.create_pool()` are keyword and not positional. This makes the python-oracledb driver compliant with the Python Database API specification PEP 249. The old positional usage - possible in cx_Oracle will cause an error, see :ref:`connerrors`. + possible in the obsolete cx_Oracle driver will cause an error, see + :ref:`connerrors`. Connections to a Local Database ------------------------------- @@ -159,13 +160,13 @@ differs from the python-oracledb Thick mode in the following ways: established. Note that this behavior may also be true of recent versions of the Oracle Call Interface (OCI) Session Pool used in the Thin mode. - If the new ``getmode`` default value of :data:`~oracledb.POOL_GETMODE_WAIT` is - used, then this behavior will not be an issue. With this new default value, any - immediate :meth:`ConnectionPool.acquire()` calls will wait for the connections - to be created by the daemon thread. This improves the application start up time - when compared to the python-oracledb Thick mode and cx_Oracle 8.3 driver, where - :func:`oracledb.create_pool()` will not return control to the application until - all ``pool.min`` connections have been created. + If the new ``getmode`` default value of :data:`~oracledb.POOL_GETMODE_WAIT` + is used, then this behavior will not be an issue. With this new default + value, any immediate :meth:`ConnectionPool.acquire()` calls will wait for the + connections to be created by the daemon thread. This improves application + start up time when compared to the python-oracledb Thick mode, where + :func:`oracledb.create_pool()` will not return control to the application + until all ``pool.min`` connections have been created. 
If the old default value ``POOL_GETMODE_NOWAIT`` is required, then the application could check if :attr:`ConnectionPool.opened` has reached :attr:`ConnectionPool.min` @@ -176,11 +177,10 @@ differs from the python-oracledb Thick mode in the following ways: current SQL executions to complete before closing the connections. All of the connections are then dropped from the pool and the pool is closed. Closing the pool in python-oracledb Thick mode could stop responding indefinitely, - depending on the network and Oracle Net Services timeout parameters. This is - also applicable to the cx_Oracle 8.3 driver. In python-oracledb Thin mode, - the parameter ``force=True`` disconnects each connection's socket using a - background thread, leaving the database to clean up its end of the - connections. + depending on the network and Oracle Net Services timeout parameters. In + python-oracledb Thin mode, the parameter ``force=True`` disconnects each + connection's socket using a background thread, leaving the database to clean + up its end of the connections. * In python-oracledb Thin mode, the ``cclass`` parameter value is not used to tag connections in the application connection pool. It is only used for :ref:`drcp`. diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index 79fa7925..64deb980 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -5,16 +5,17 @@ Appendix C: The python-oracledb and cx_Oracle Drivers ***************************************************** The python-oracledb driver is the renamed, major version successor to cx_Oracle -8.3. As a major release, the python-oracledb driver has :ref:`new features -` and some :ref:`deprecations`. Also see :ref:`upgrading83`. -The cx_Oracle driver is obsolete and should not be used for new development. +8.3. The python-oracledb driver has many :ref:`new features ` and +some :ref:`deprecations` compared with cx_Oracle. Also, see +:ref:`upgrading83`. The cx_Oracle driver is obsolete and should not be used +for new development. .. _compatibility: Differences between the python-oracledb and cx_Oracle Drivers ============================================================= -The differences between the cx_Oracle 8.3 and python-oracledb drivers are +The differences between python-oracledb and the obsolete cx_Oracle driver are listed here. Mode differences from cx_Oracle @@ -372,8 +373,8 @@ Example error messages are: Upgrading from cx_Oracle 8.3 to python-oracledb =============================================== -This section provides the detailed steps needed to upgrade from cx_Oracle 8.3 -to python-oracledb. +This section provides the detailed steps needed to upgrade from the obsolete +cx_Oracle driver to python-oracledb. Things to Know Before the Upgrade --------------------------------- diff --git a/doc/src/user_guide/bind.rst b/doc/src/user_guide/bind.rst index cc757f0e..0abc88a5 100644 --- a/doc/src/user_guide/bind.rst +++ b/doc/src/user_guide/bind.rst @@ -639,9 +639,9 @@ This produces the output:: Note the use of :meth:`Object.aslist()` which returns the collection element values in index order as a simple Python list. The indices themselves are lost -in this approach. Starting from cx_Oracle 7.0, the associative array can be -turned into a Python dictionary using :meth:`Object.asdict()`. If that value -was printed in the previous example instead, the output would be:: +in this approach. The associative array can be turned into a Python dictionary +using :meth:`Object.asdict()`. 
If that value was printed in the previous +example instead, the output would be:: {-1048576: 'First element', -576: 'Second element', 284: 'Third element', 8388608: 'Fourth element'} diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index 72c1d459..f057f8a9 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -14,8 +14,8 @@ supporting the Python Database API v2.0 Specification. All connections in an application use the same mode. See :ref:`vsessconinfo` to verify which mode is in use. -If you are upgrading a cx_Oracle application to python-oracledb, then refer to -:ref:`upgrading83` for changes that may be needed. +If you are upgrading from the obsolete cx_Oracle driver to python-oracledb, +then refer to :ref:`upgrading83` for changes that may be needed. .. _enablingthick: @@ -606,24 +606,19 @@ The common environment variables listed below are supported in python-oracledb. - Purpose - Python-oracledb Mode * - LD_LIBRARY_PATH - - The library search path for platforms like Linux should include the - Oracle libraries, for example ``$ORACLE_HOME/lib`` or - ``/opt/instantclient_23_5``. This variable is not needed if the - libraries are located by an alternative method, such as with - ``ldconfig``. On other UNIX platforms, you may need to set an OS - specific equivalent such as ``LIBPATH`` or ``SHLIB_PATH``. + - The library search path for platforms like Linux should include the Oracle libraries, for example ``$ORACLE_HOME/lib`` or ``/opt/instantclient_23_5``. + + This variable is not needed if the libraries are located by an alternative method, such as with ``ldconfig``. On other UNIX platforms, you may need to set an OS specific equivalent such as ``LIBPATH`` or ``SHLIB_PATH``. - Thick * - NLS_DATE_FORMAT, NLS_TIMESTAMP_FORMAT - - Often set in Python applications to force a consistent date format - independent of the locale. The variables are ignored if the environment - variable ``NLS_LANG`` is not set. + - Often set in Python applications to force a consistent date format independent of the locale. + + These variables are ignored if the environment variable ``NLS_LANG`` is not set. - Thick * - NLS_LANG - - Determines the 'national language support' globalization options for - python-oracledb. Note that from cx_Oracle 8, the character set component is - ignored and only the language and territory components of ``NLS_LANG`` - are used. The character set can instead be specified during connection - or connection pool creation. See :ref:`globalization`. + - Determines the 'national language support' globalization options for python-oracledb. + + Note that from cx_Oracle 8, the character set component is ignored and only the language and territory components of ``NLS_LANG`` are used. The character set can instead be specified during connection or connection pool creation. See :ref:`globalization`. - Thick * - ORA_SDTZ - The default session time zone. @@ -632,21 +627,19 @@ The common environment variables listed below are supported in python-oracledb. - The name of the Oracle time zone file to use. See :ref:`timezonefiles`. - Thick * - ORACLE_HOME - - The directory containing the Oracle Database software. The directory - and various configuration files must be readable by the Python process. - This variable should not be set if you are using Oracle Instant Client. + - The directory containing the Oracle Database software. + + The directory and various configuration files must be readable by the Python process. 
This variable should not be set if you are using Oracle Instant Client. - Thick * - PATH - - The library search path for Windows should include the location where - ``OCI.DLL`` is found. Not needed if you set ``lib_dir`` in a call to - :meth:`oracledb.init_oracle_client()`. + - The library search path for Windows should include the location where ``OCI.DLL`` is found. + + This variable is not needed if you set ``lib_dir`` in a call to :meth:`oracledb.init_oracle_client()`. - Thick * - TNS_ADMIN - - The directory of optional Oracle Client configuration files such as - ``tnsnames.ora`` and ``sqlnet.ora``. Generally not needed if the - configuration files are in a default location, or if ``config_dir`` was - not used in :meth:`oracledb.init_oracle_client()`. See - :ref:`optnetfiles`. + - The directory of optional Oracle Client configuration files such as ``tnsnames.ora`` and ``sqlnet.ora``. + + Generally not needed if the configuration files are in a default location, or if ``config_dir`` was not used in :meth:`oracledb.init_oracle_client()`. See :ref:`optnetfiles`. - Both .. _otherinit: diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index d776c37f..000c9de4 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -980,17 +980,17 @@ Building a python-oracledb package locally 3. With the source code available, build a python-oracledb package by running:: cd python-oracledb # the name may vary depending on the download - python -m pip install build + python -m pip install build --upgrade # export PYO_COMPILE_ARGS='-g0' # optionally set any compilation arguments python -m build A python-oracledb wheel package is created in the ``dist`` subdirectory. For example when using Python 3.12 on macOS you might have the file - ``dist/oracledb-2.5.0-cp312-cp312-macosx_10_9_universal2.whl``. + ``dist/oracledb-3.1.0-cp312-cp312-macosx_14_0_arm64.whl``. 4. Install this package:: - python -m pip install dist/oracledb-2.5.0-cp312-cp312-macosx_10_9_universal2.whl + python -m pip install dist/oracledb-3.1.0-cp312-cp312-macosx_14_0_arm64.whl The package can also be installed on any computer which has the same architecture and Python version as the build machine. @@ -1009,22 +1009,24 @@ Python versions. `ODPI-C repository `__, keeping the default name. -2. In your python-oracledb fork, go to the Actions tab +2. Optionally edit ``.github/workflows/build.yaml`` and remove platforms and + versions that you are not interested in. Building all packages can take some + time. + +3. In your python-oracledb fork, go to the Actions tab ``https://github.com//python-oracledb/actions/``. If this is your first time using Actions, confirm enabling them. -3. In the "All workflows" list on the left-hand side, select the "build" entry. +4. In the "All workflows" list on the left-hand side, select the "build" entry. -4. Navigate to the "Run workflow" drop-down, select the branch to build from +5. Navigate to the "Run workflow" drop-down, select the branch to build from (for example, "main"), and run the workflow. - This builds packages for all supported architectures and Python versions. - -5. When the build has completed, download the "python-oracledb-wheels" +6. When the build has completed, download the "python-oracledb-wheels" artifact, unzip it, and install the one for your architecture and Python version. 
For example, when using Python 3.12 on macOS, install:: - python -m pip install oracledb-2.5.0-cp312-cp312-macosx_10_13_universal2.whl + python -m pip install oracledb-3.1.0-cp312-cp312-macosx_10_13_universal2.whl .. _configprovidermodules: From 88881277b260fa87258d0be8861ac1190949dc15 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:10:02 -0600 Subject: [PATCH 050/239] Fixed bug when connecting to an AC-enabled service (#476). --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/capabilities.pyx | 2 ++ src/oracledb/impl/thin/constants.pxi | 2 ++ src/oracledb/impl/thin/messages/base.pyx | 6 ++++++ src/oracledb/impl/thin/messages/data_types.pyx | 4 +++- 5 files changed, 15 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index b413fe12..8f1b24b5 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -35,6 +35,8 @@ Thin Mode Changes #) Fixed bug when an error is reported by the server in the middle of a response to a client request (`issue 472 `__). +#) Fixed bug when connecting to an AC-enabled service + (`issue 476 `__). Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/capabilities.pyx b/src/oracledb/impl/thin/capabilities.pyx index 3d287ac6..3cf319c4 100644 --- a/src/oracledb/impl/thin/capabilities.pyx +++ b/src/oracledb/impl/thin/capabilities.pyx @@ -132,6 +132,8 @@ cdef class Capabilities: self.compile_caps[TNS_CCAP_TTC2] = TNS_CCAP_ZLNP self.compile_caps[TNS_CCAP_OCI2] = TNS_CCAP_DRCP self.compile_caps[TNS_CCAP_CLIENT_FN] = TNS_CCAP_CLIENT_FN_MAX + self.compile_caps[TNS_CCAP_SESS_SIGNATURE_VERSION] = \ + TNS_CCAP_FIELD_VERSION_12_2 self.compile_caps[TNS_CCAP_TTC4] = TNS_CCAP_INBAND_NOTIFICATION | \ TNS_CCAP_EXPLICIT_BOUNDARY self.compile_caps[TNS_CCAP_TTC5] = TNS_CCAP_VECTOR_SUPPORT | \ diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index 8c85b148..c08f4f39 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -261,6 +261,7 @@ cdef enum: TNS_SERVER_PIGGYBACK_LTXID = 7 TNS_SERVER_PIGGYBACK_AC_REPLAY_CONTEXT = 8 TNS_SERVER_PIGGYBACK_EXT_SYNC = 9 + TNS_SERVER_PIGGYBACK_SESS_SIGNATURE = 10 # session return constants cdef enum: @@ -416,6 +417,7 @@ cdef enum: TNS_CCAP_OCI2 = 31 TNS_CCAP_CLIENT_FN = 34 TNS_CCAP_TTC3 = 37 + TNS_CCAP_SESS_SIGNATURE_VERSION = 39 TNS_CCAP_TTC4 = 40 TNS_CCAP_LOB2 = 42 TNS_CCAP_TTC5 = 44 diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index debc0da8..9a9d5aef 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -387,6 +387,12 @@ cdef class Message: self.conn_impl._drcp_establish_session = False buf.read_ub4(&self.conn_impl._session_id) buf.read_ub2(&self.conn_impl._serial_num) + elif opcode == TNS_SERVER_PIGGYBACK_SESS_SIGNATURE: + buf.skip_ub2() # number of dtys + buf.skip_ub1() # length of dty + buf.skip_ub8() # signature flags + buf.skip_ub8() # client signature + buf.skip_ub8() # server signature else: errors._raise_err(errors.ERR_UNKNOWN_SERVER_PIGGYBACK, opcode=opcode) diff --git a/src/oracledb/impl/thin/messages/data_types.pyx b/src/oracledb/impl/thin/messages/data_types.pyx index c0b9addd..ba764247 100644 --- a/src/oracledb/impl/thin/messages/data_types.pyx +++ b/src/oracledb/impl/thin/messages/data_types.pyx @@ -326,6 +326,7 @@ cdef enum: TNS_DATA_TYPE_UD21 = 639 TNS_DATA_TYPE_TNP = 640 TNS_DATA_TYPE_OAC = 646 + TNS_DATA_TYPE_SESSSIGN = 647 
TNS_DATA_TYPE_OER = 652 TNS_DATA_TYPE_PLEND = 660 TNS_DATA_TYPE_PLBGN = 661 @@ -340,7 +341,7 @@ cdef enum: TNS_TYPE_REP_ORACLE = 10 -cdef DataType[319] DATA_TYPES = [ +cdef DataType[320] DATA_TYPES = [ [ORA_TYPE_NUM_VARCHAR, ORA_TYPE_NUM_VARCHAR, TNS_TYPE_REP_UNIVERSAL], [ORA_TYPE_NUM_NUMBER, ORA_TYPE_NUM_NUMBER, TNS_TYPE_REP_ORACLE], [ORA_TYPE_NUM_LONG, ORA_TYPE_NUM_LONG, TNS_TYPE_REP_UNIVERSAL], @@ -661,6 +662,7 @@ cdef DataType[319] DATA_TYPES = [ [TNS_DATA_TYPE_TNP, TNS_DATA_TYPE_TNP, TNS_TYPE_REP_UNIVERSAL], [TNS_DATA_TYPE_OER, TNS_DATA_TYPE_OER, TNS_TYPE_REP_UNIVERSAL], [TNS_DATA_TYPE_OAC, TNS_DATA_TYPE_OAC, TNS_TYPE_REP_UNIVERSAL], + [TNS_DATA_TYPE_SESSSIGN, TNS_DATA_TYPE_SESSSIGN, TNS_TYPE_REP_UNIVERSAL], [ORA_TYPE_NUM_VECTOR, ORA_TYPE_NUM_VECTOR, TNS_TYPE_REP_UNIVERSAL], [TNS_DATA_TYPE_PLEND, TNS_DATA_TYPE_PLEND, TNS_TYPE_REP_UNIVERSAL], [TNS_DATA_TYPE_PLBGN, TNS_DATA_TYPE_PLBGN, TNS_TYPE_REP_UNIVERSAL], From a622afa83cbb12d128682e367bee43c4a7ed450d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:11:15 -0600 Subject: [PATCH 051/239] Fixed bug when using temporary LOBs with implicit pooling. --- doc/src/release_notes.rst | 1 + src/oracledb/impl/thin/lob.pyx | 5 ++--- src/oracledb/impl/thin/messages/lob_op.pyx | 18 ++++-------------- 3 files changed, 7 insertions(+), 17 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 8f1b24b5..9c5977de 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -37,6 +37,7 @@ Thin Mode Changes (`issue 472 `__). #) Fixed bug when connecting to an AC-enabled service (`issue 476 `__). +#) Fixed bug when using temporary LOBs with implicit pooling. Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/lob.pyx b/src/oracledb/impl/thin/lob.pyx index b9ecc1dc..14323e69 100644 --- a/src/oracledb/impl/thin/lob.pyx +++ b/src/oracledb/impl/thin/lob.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -56,8 +56,7 @@ cdef class BaseThinLobImpl(BaseLobImpl): self._locator = bytes(40) message = self._conn_impl._create_message(LobOpMessage) message.operation = TNS_LOB_OP_CREATE_TEMP - message.amount = TNS_DURATION_SESSION - message.send_amount = True + message.dest_length = TNS_DURATION_SESSION message.source_lob_impl = self message.source_offset = self.dbtype._csfrm message.dest_offset = self.dbtype._ora_type_num diff --git a/src/oracledb/impl/thin/messages/lob_op.pyx b/src/oracledb/impl/thin/messages/lob_op.pyx index 99dcfd73..4caea324 100644 --- a/src/oracledb/impl/thin/messages/lob_op.pyx +++ b/src/oracledb/impl/thin/messages/lob_op.pyx @@ -35,9 +35,9 @@ cdef class LobOpMessage(Message): cdef: uint32_t operation BaseThinLobImpl source_lob_impl - BaseThinLobImpl dest_lob_impl uint64_t source_offset uint64_t dest_offset + uint32_t dest_length int64_t amount bint send_amount bint bool_flag @@ -75,13 +75,9 @@ cdef class LobOpMessage(Message): num_bytes = len(self.source_lob_impl._locator) ptr = buf.read_raw_bytes(num_bytes) self.source_lob_impl._locator = ptr[:num_bytes] - if self.dest_lob_impl is not None: - num_bytes = len(self.dest_lob_impl._locator) - ptr = buf.read_raw_bytes(num_bytes) - self.dest_lob_impl._locator = ptr[:num_bytes] if self.operation == TNS_LOB_OP_CREATE_TEMP: buf.skip_ub2() # skip character set - buf.skip_raw_bytes(3) # skip trailing flags, amount + buf.skip_ub1() # skip trailing flags elif self.send_amount: buf.read_sb8(&self.amount) if self.operation in (TNS_LOB_OP_IS_OPEN, @@ -99,12 +95,8 @@ cdef class LobOpMessage(Message): else: buf.write_uint8(1) # source pointer buf.write_ub4(len(self.source_lob_impl._locator)) - if self.dest_lob_impl is None: - buf.write_uint8(0) # dest pointer - buf.write_ub4(0) # dest length - else: - buf.write_uint8(1) # dest pointer - buf.write_ub4(len(self.dest_lob_impl._locator)) + buf.write_uint8(0) # dest pointer + buf.write_ub4(self.dest_length) buf.write_ub4(0) # short source offset buf.write_ub4(0) # short dest offset if self.operation == TNS_LOB_OP_CREATE_TEMP: @@ -132,8 +124,6 @@ cdef class LobOpMessage(Message): buf.write_uint16be(0) if self.source_lob_impl is not None: buf.write_bytes(self.source_lob_impl._locator) - if self.dest_lob_impl is not None: - buf.write_bytes(self.dest_lob_impl._locator) if self.operation == TNS_LOB_OP_CREATE_TEMP: if self.source_lob_impl.dbtype._csfrm == CS_FORM_NCHAR: buf._caps._check_ncharset_id() From 37144e03415caa737f688016db1204555e2e021c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:11:40 -0600 Subject: [PATCH 052/239] Fixed bug when fetching nested cursors. --- doc/src/release_notes.rst | 1 + src/oracledb/impl/thin/messages/base.pyx | 2 ++ src/oracledb/impl/thin/statement.pyx | 3 +- src/oracledb/impl/thin/statement_cache.pyx | 4 +-- tests/test_1300_cursor_var.py | 41 ++++++++++++++++++++++ tests/test_6300_cursor_other_async.py | 41 ++++++++++++++++++++++ 6 files changed, 89 insertions(+), 3 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 9c5977de..5fafd586 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -38,6 +38,7 @@ Thin Mode Changes #) Fixed bug when connecting to an AC-enabled service (`issue 476 `__). #) Fixed bug when using temporary LOBs with implicit pooling. +#) Fixed bug when fetching nested cursors. 
Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 9a9d5aef..9e1efb9c 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -896,6 +896,8 @@ cdef class MessageWithData(Message): column_value = self._create_cursor_from_describe(buf, column_value) cursor_impl = column_value._impl buf.read_ub2(&cursor_impl._statement._cursor_id) + if self.in_fetch: + cursor_impl._statement._is_nested = True elif ora_type_num in (ORA_TYPE_NUM_CLOB, ORA_TYPE_NUM_BLOB, ORA_TYPE_NUM_BFILE): diff --git a/src/oracledb/impl/thin/statement.pyx b/src/oracledb/impl/thin/statement.pyx index 680b297a..3e926dfa 100644 --- a/src/oracledb/impl/thin/statement.pyx +++ b/src/oracledb/impl/thin/statement.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -305,6 +305,7 @@ cdef class Statement: bint _no_prefetch bint _requires_define bint _return_to_cache + bint _is_nested bint _in_use cdef Statement copy(self): diff --git a/src/oracledb/impl/thin/statement_cache.pyx b/src/oracledb/impl/thin/statement_cache.pyx index 2134ba7f..bfe35937 100644 --- a/src/oracledb/impl/thin/statement_cache.pyx +++ b/src/oracledb/impl/thin/statement_cache.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -45,7 +45,7 @@ cdef class StatementCache: Add the statement's cursor to the list of cursors that need to be closed. 
""" - if stmt._cursor_id != 0: + if stmt._cursor_id != 0 and not stmt._is_nested: self._cursors_to_close[self._num_cursors_to_close] = \ stmt._cursor_id self._num_cursors_to_close += 1 diff --git a/tests/test_1300_cursor_var.py b/tests/test_1300_cursor_var.py index fe785b9c..3304cc3a 100644 --- a/tests/test_1300_cursor_var.py +++ b/tests/test_1300_cursor_var.py @@ -459,6 +459,47 @@ def test_1319(self): with self.assertRaisesFullCode("DPY-3027"): self.cursor.execute(sql, [ref_cursor]) + def test_1320(self): + "1320 - test fetching nested cursors repeatedly" + sql = """ + select + s.Description, + cursor(select 'Nested String for ' || s.Description from dual) + from + ( + select 'Top Level String 1' as Description + from dual + union all + select 'Top Level String 2' + from dual + union all + select 'Top Level String 3' + from dual + union all + select 'Top Level String 4' + from dual + union all + select 'Top Level String 5' + from dual + ) s""" + + for i in range(3): + with self.conn.cursor() as cursor: + cursor.arraysize = 10 + cursor.execute(sql) + desc, nested1 = cursor.fetchone() + self.assertEqual(desc, "Top Level String 1") + nested_rows = nested1.fetchall() + self.assertEqual( + nested_rows, [("Nested String for Top Level String 1",)] + ) + desc, nested2 = cursor.fetchone() + self.assertEqual(desc, "Top Level String 2") + nested_rows = nested2.fetchall() + self.assertEqual( + nested_rows, [("Nested String for Top Level String 2",)] + ) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_6300_cursor_other_async.py b/tests/test_6300_cursor_other_async.py index 7946d4d0..dee39758 100644 --- a/tests/test_6300_cursor_other_async.py +++ b/tests/test_6300_cursor_other_async.py @@ -872,6 +872,47 @@ async def test_6351(self): await self.cursor.execute("select * from TestJsonCols order by IntCol") self.assertEqual(await self.cursor.fetchall(), expected_data) + async def test_6352(self): + "6352 - test fetching nested cursors repeatedly" + sql = """ + select + s.Description, + cursor(select 'Nested String for ' || s.Description from dual) + from + ( + select 'Top Level String 1' as Description + from dual + union all + select 'Top Level String 2' + from dual + union all + select 'Top Level String 3' + from dual + union all + select 'Top Level String 4' + from dual + union all + select 'Top Level String 5' + from dual + ) s""" + + for i in range(3): + with self.conn.cursor() as cursor: + cursor.arraysize = 10 + await cursor.execute(sql) + desc, nested1 = await cursor.fetchone() + self.assertEqual(desc, "Top Level String 1") + nested_rows = await nested1.fetchall() + self.assertEqual( + nested_rows, [("Nested String for Top Level String 1",)] + ) + desc, nested2 = await cursor.fetchone() + self.assertEqual(desc, "Top Level String 2") + nested_rows = await nested2.fetchall() + self.assertEqual( + nested_rows, [("Nested String for Top Level String 2",)] + ) + if __name__ == "__main__": test_env.run_test_cases() From d7aa5fe5d9f6cdc9722f188d40d6ce8252d23912 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 28 Mar 2025 20:12:07 -0600 Subject: [PATCH 053/239] Made the Azure App Centralized Configuration Provider connection string suffix ".azconfig.io" optional. 
--- doc/src/release_notes.rst | 3 +++ doc/src/user_guide/connection_handling.rst | 2 +- src/oracledb/plugins/azure_config_provider.py | 4 +++- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 5fafd586..e6977346 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -75,6 +75,9 @@ Common Changes data frame - Eliminated small memory leak with production of each data frame +#) Made the :ref:`Azure App Centralized Configuration Provider + ` connection string suffix ".azconfig.io" + optional. #) Fixed bug when binding a variable that was previously bound as an output variable in a DML RETURNING statement. #) Fixed bug when multiple rows containing LOBs and DbObjects are returned in diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index ef2dc278..a87fd0c6 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -1423,7 +1423,7 @@ The elements of the connection string are detailed in the table below. - Indicates that the configuration provider is Azure App Configuration. - Required * - - - The URL of the Azure App Configuration endpoint. + - The URL of the Azure App Configuration endpoint. The suffix ".azconfig.io" in the name is optional. - Required * - key= - A key prefix to identify the connection. You can organize configuration information under a prefix as per application requirements. diff --git a/src/oracledb/plugins/azure_config_provider.py b/src/oracledb/plugins/azure_config_provider.py index 69ac35f0..c7cb7ca2 100644 --- a/src/oracledb/plugins/azure_config_provider.py +++ b/src/oracledb/plugins/azure_config_provider.py @@ -188,7 +188,9 @@ def _parse_parameters(protocol_arg: str) -> dict: parameters = { key.lower(): value[0] for key, value in parsed_values.items() } - parameters["appconfigname"] = protocol_arg[:pos] + parameters["appconfigname"] = ( + protocol_arg[:pos].rstrip("/").rstrip(".azconfig.io") + ".azconfig.io" + ) return parameters From 9d04d8bc62b0085b7435c41f0268186253572a4a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 31 Mar 2025 21:16:44 -0600 Subject: [PATCH 054/239] Documentation improvements. --- doc/src/api_manual/module.rst | 6 +- doc/src/user_guide/aq.rst | 90 +++++++++++++--------- doc/src/user_guide/connection_handling.rst | 10 +-- 3 files changed, 61 insertions(+), 45 deletions(-) diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 8fb8dbda..59c8d2b5 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -2973,9 +2973,11 @@ of the :ref:`message properties object ` passed as the .. data:: MSG_BUFFERED - This constant is used to specify that enqueue/dequeue operations should - enqueue or dequeue buffered messages. + This constant is used to specify that enqueue or dequeue operations should + enqueue or dequeue buffered messages, respectively. + This mode is not supported for bulk array operations in python-oracledb + Thick mode. .. data:: MSG_PERSISTENT diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index 6c8fe91b..c99fa7d2 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -15,43 +15,54 @@ receiving of various payloads, such as RAW values, JSON, JMS, and objects. Transactional Event Queues use a highly optimized implementation of Advanced Queuing. They were previously called AQ Sharded Queues. 
-Python-oracledb API calls are the same for Transactional Event Queues and -Classic Queues, however there are differences in support for some payload -types. +.. note:: -**Classic Queue Support** + Transactional Event Queues are only supported in python-oracledb + :ref:`Thick mode `. -- RAW, named Oracle objects, JSON, and JMS payloads are supported. +Python-oracledb API calls are the same for Transactional Event Queues and +Classic Queues, however there are differences in support for some payload +types which are detailed below. + +.. list-table-with-summary:: Payload Differences Between Classic Queues and Transactional Event Queues + :header-rows: 1 + :class: wy-table-responsive + :widths: 10 20 20 + :summary: The first column displays the payload type. The second column displays whether the payload type is supported in Classic Queues. The third column displays whether the payload type is supported in Transactional Event Queues. + + * - Payload Type + - Classic Queues + - Transactional Event Queues + * - RAW + - Supported + - Supported for single and array message enqueuing and dequeuing when using Oracle Client 19c (or later) and connected to Oracle Database 19c (or later). + * - Named Oracle Objects + - Supported + - Supported for single and array message enqueuing and dequeuing when using Oracle Client 19c (or later) and connected to Oracle Database 19c (or later). + * - JSON + - Supported when using Oracle Database 21c (or later). In python-oracle Thick mode, Oracle Client libraries 21c (or later) are also needed. + - Supported for single message enqueuing and dequeuing when using Oracle Client libraries 21c (or later) and Oracle Database 21c (or later). + + Array enqueuing and dequeuing is not supported for JSON payloads. + * - JMS + - Supported + - Supported for single and array message enqueuing and dequeuing when using Oracle Client 19c (or later) and Oracle Database 23ai. + +**Usage Notes** + +For classic queues, the use of :data:`oracledb.ENQ_IMMEDIATE` with bulk +enqueuing, JMS payloads, and :ref:`Recipient Lists ` are only +supported in python-oracledb :ref:`Thick mode `. -- JSON payloads require Oracle Database 21c (or later). In python-oracle Thick - mode, Oracle Client libraries 21c (or later) are also needed. +Transactional Event Queues do not support :attr:`EnqOptions.transformation`, +:attr:`DeqOptions.transformation`, or :ref:`Recipient Lists `. -The use of :data:`~oracledb.ENQ_IMMEDIATE` with bulk enqueuing, JMS payloads, -and :ref:`Recipient Lists ` are only supported in python-oracledb -:ref:`Thick mode `. +The delivery mode :data:`oracledb.MSG_BUFFERED` is not supported for bulk array +operations in python-oracledb Thick mode. There are examples of AQ Classic Queues in the `GitHub samples `__ directory. -**Transactional Event Queue Support** - -Transactional Event Queues are only supported in python-oracledb :ref:`Thick -mode `. - -- RAW and named Oracle object payloads are supported for single and array - message enqueuing and dequeuing when using Oracle Client 19c (or later) and - connected to Oracle Database 19c (or later). - -- JMS payloads are supported for single and array message enqueuing and - dequeuing when using Oracle Client 19c (or later) and Oracle Database 23ai. - -- JSON payloads are supported for single message enqueuing and dequeuing when - using Oracle Client libraries 21c (or later) and Oracle Database 21c (or - later). Array enqueuing and dequeuing is not supported for JSON payloads. 
- -Transactional Event Queues do not support :attr:`EnqOptions.transformation`, -:attr:`DeqOptions.transformation`, or :ref:`Recipient Lists `. - Creating a Queue ================ @@ -120,7 +131,7 @@ payload type by using :meth:`Connection.queue()` or queue = connection.queue("DEMO_RAW_QUEUE") Now messages can be queued using :meth:`Queue.enqone()` or -:meth:`AsyncQueue.enqone()`. To send three messages: +:meth:`AsyncQueue.enqone()`. For example, to send three messages: .. code-block:: python @@ -144,14 +155,16 @@ is committed. This default behavior can be altered, see :ref:`aqoptions`. **Enqueuing JSON Payloads** You can connect to the database and get the queue that was created with JSON -payload type by using: +payload type by using :meth:`Connection.queue()` or +:meth:`AsyncConnection.queue()`. For example: .. code-block:: python # The argument "JSON" indicates the queue is of JSON payload type queue = connection.queue("DEMO_JSON_QUEUE", "JSON") -Now the message can be enqueued using :meth:`~Queue.enqone()`. +Now the message can be enqueued using :meth:`Queue.enqone()` or +:meth:`AsyncQueue.enqone()`, for example: .. code-block:: python @@ -206,7 +219,8 @@ to show the :attr:`~MessageProperties.msgid` of a dequeued message: **Dequeuing JSON Payloads** -To dequeue a message, call the method :meth:`Queue.deqone()`, for example: +To dequeue a message, call the method :meth:`Queue.deqone()` or +:meth:`AsyncQueue.deqone()`, for example: .. code-block:: python @@ -257,7 +271,7 @@ You can enqueue messages using :meth:`Queue.enqone()` or connection.commit() Dequeuing can be done with :meth:`Queue.deqone()` or -:meth:`AsyncQueue.deqone()` like this: +:meth:`AsyncQueue.deqone()`, for example: .. code-block:: python @@ -348,8 +362,8 @@ The :meth:`Queue.enqmany()`, :meth:`Queue.deqmany()`, :meth:`AsyncQueue.enqmany()`, and :meth:`AsyncQueue.deqmany()` methods can be used for efficient bulk message handling. -The :meth:`~Queue.enqmany()` method is similar to :meth:`~Queue.enqone()` but -accepts an array of messages: +The bulk enqmany methods are similar to single message enqueue methods but +accept an array of messages, for example: .. code-block:: python @@ -374,8 +388,8 @@ accepts an array of messages: affected. To dequeue multiple messages at one time, use :meth:`Queue.deqmany()` or -:meth:`AsyncQueue.deqmany()`. This takes an argument specifying the maximum -number of messages to dequeue at one time: +:meth:`AsyncQueue.deqmany()`. These take an argument specifying the maximum +number of messages to dequeue at one time, for example: .. code-block:: python diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index a87fd0c6..95d0d788 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -414,11 +414,11 @@ TNS Aliases for Connection Strings :ref:`Connect Descriptors ` are commonly stored in a :ref:`tnsnames.ora ` file and associated with a TNS Alias. This -:ref:alias can be used directly for the data source name parameter ``dsn`` of -:ref::meth:`oracledb.connect()`, :meth:`oracledb.create_pool()`, -:ref::meth:`oracledb.connect_async()`, and -:ref::meth:`oracledb.create_pool_async()`. For example, given a file -:ref:``/opt/oracle/config/tnsnames.ora`` with the following contents:: +alias can be used directly for the data source name parameter ``dsn`` of +:meth:`oracledb.connect()`, :meth:`oracledb.create_pool()`, +:meth:`oracledb.connect_async()`, and :meth:`oracledb.create_pool_async()`. 
+For example, given a file ``/opt/oracle/config/tnsnames.ora`` with the +following contents:: ORCLPDB = (DESCRIPTION = From 5311008c919d33c4f359c00900ed3811428e1851 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 2 Apr 2025 13:22:33 -0600 Subject: [PATCH 055/239] Tweak release notes for consistency. --- doc/src/release_notes.rst | 90 +++++++++++++++++++-------------------- 1 file changed, 45 insertions(+), 45 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e6977346..5c3920d8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -64,7 +64,7 @@ Common Changes #) Improvements to data frame fetching with :meth:`Connection.fetch_df_all()` and :meth:`Connection.fetch_df_batches()`: - - Added support for CLOB, BLOB and RAW data types + - Added support for CLOB, BLOB, and RAW data types - Fixed support for BOOLEAN data type - Fixed bug when NUMBER data is fetched that does not have a precision or scale specified and :attr:`defaults.fetch_decimals` is set to *True*. @@ -84,13 +84,13 @@ Common Changes a DML RETURNING statement. #) An error message that links to :ref:`documentation ` on setting up a protocol hook function is now returned by default for LDAP and - LDAPS URL connection strings in python-oracledb thin mode, or when + LDAPS URL connection strings in python-oracledb Thin mode, or when :attr:`defaults.thick_mode_dsn_passthrough` is *False*. #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a message being enqueued is not supported by the queue. Previously, python-oracledb Thick mode raised the error ``DPI-1071: payload type in - message properties must match the payload type of the queue`` and thin mode + message properties must match the payload type of the queue`` and Thin mode raised an internal error. #) Improved the test suite and documentation. @@ -243,7 +243,7 @@ Common Changes (`issue 458 `__). #) Error ``DPY-2053: python-oracledb thin mode cannot be used because thick mode has already been enabled`` is now raised when attempting to use - asyncio in thick mode + asyncio in Thick mode (`issue 448 `__). #) Error ``DPY-2056: registered handler for protocol "{protocol}" failed for arg "{arg}"`` is now raised when an exception occurs when calling the @@ -458,8 +458,8 @@ Common Changes different connection. Previously, the attempt may have succeeded or may have failed with a number of different unexpected exceptions. #) Error ``DPY-1006: cursor is not open`` is now raised consistently when - attempting to bind a closed cursor. Previously, thin mode would result in a - segfault and thick mode would result in unusual errors. + attempting to bind a closed cursor. Previously, Thin mode would result in a + segfault and Thick mode would result in unusual errors. oracledb 2.3.0 (July 2024) @@ -546,7 +546,7 @@ Common Changes :data:`oracledb.POOL_GETMODE_TIMEDWAIT` and the timeout expires. Previously ``asyncio.TimeoutError`` was being raised when using :ref:`asyncio ` and ``ORA-24457: OCISessionGet() could not find a - free session in the specified timeout period`` was being raised in thick + free session in the specified timeout period`` was being raised in Thick mode. 
#) If both the ``sid`` and ``service_name`` parameters are specified to :meth:`oracledb.makedsn()`, now only the ``service_name`` parameter is @@ -912,7 +912,7 @@ Common Changes #) Error ``DPY-4029: errors in array DML exceed 65535`` is now raised when the number of batch errors exceeds 65535 when calling :meth:`Cursor.executemany()` with the parameter ``batcherrors`` set to the - value ``True``. Note that in thick mode this error is not raised unless the + value *True*. Note that in Thick mode this error is not raised unless the number of batch errors is a multiple of 65536; instead, the number of batch errors returned is modulo 65536 (`issue 262 `__). @@ -963,8 +963,8 @@ Common Changes (`issue 217 `__). #) SQL statement parsing now raises ``DPY-2041: missing ending quote (') in string`` or ``DPY-2042: missing ending quote (") in identifier`` for - statements with the noted invalid syntax. Previously, thick mode gave - ``ORA-1756`` or ``ORA-1740``, respectively, while thin mode did not throw + statements with the noted invalid syntax. Previously, Thick mode gave + ``ORA-1756`` or ``ORA-1740``, respectively, while Thin mode did not throw an error. #) Added missing ">" to ``repr()`` of :ref:`sodadb`. @@ -1059,7 +1059,7 @@ Common Changes #) Added support for fetching VARCHAR2 and LOB columns which contain JSON (and have the "IS JSON" check constraint enabled) in the same way as columns of type JSON (which requires Oracle Database 21c or higher) are fetched. In - thick mode this requires Oracle Client 19c or higher. The attribute + Thick mode this requires Oracle Client 19c or higher. The attribute ``oracledb.__future__.old_json_col_as_obj`` must be set to the value ``True`` for this behavior to occur. In version 2.0 this will become the normal behavior and setting this attribute will no longer be needed. @@ -1162,7 +1162,7 @@ Thin Mode Changes #) Added support for connecting to databases that accept passwords longer than 30 UTF-8 encoded bytes. #) Detect the time zone on the OS and set the session timezone using this - value to be consistent with thick mode + value to be consistent with Thick mode (`issue 144 `__). #) Improved BOOLEAN handling. #) Error ``DPY-6005: cannot connect to database`` is now raised for all @@ -1204,7 +1204,7 @@ Thin Mode Changes of times any session callback must be invoked, and allow connections to be timed out. - Removed packet for negotiating network services which are not supported - in thin mode. + in Thin mode. - Removed unneeded packet for changing the password of the connected user. @@ -1229,22 +1229,22 @@ Common Changes #) Added method :meth:`ConnectParams.parse_dsn_with_credentials()` for parsing a DSN that contains credentials. #) Error ``DPY-2038: element at index {index} does not exist`` is now raised - whenever an element in a database collection is missing. Previously, thick - mode raised ``DPI-1024: element at index {index} does not exist`` and thin + whenever an element in a database collection is missing. Previously, Thick + mode raised ``DPI-1024: element at index {index} does not exist`` and Thin mode raised ``KeyError`` or ``IndexError``. #) Error ``DPY-2039: given index {index} must be in the range of {min_index} to {max_index}`` is now raised whenever an element in a database collection - is set outside the bounds of the collection. Previously, thick mode raised + is set outside the bounds of the collection. 
Previously, Thick mode raised ``OCI-22165: given index [{index}] must be in the range of [{min_index}] to - [{max_index}]`` and thin mode raised ``IndexError``. + [{max_index}]`` and Thin mode raised ``IndexError``. #) Error ``DPY-2040: parameters "batcherrors" and "arraydmlrowcounts" may only be true when used with insert, update, delete and merge statements`` is now - raised when either of the parameters `batcherrors` and `arraydmlrowcounts` - is set to the value `True` when calling :meth:`Cursor.executemany()`. - Previously, thick mode raised ``DPI-1063: modes DPI_MODE_EXEC_BATCH_ERRORS - and DPI_MODE_EXEC_ARRAY_DML_ROWCOUNTS can only be used with insert, update, - delete and merge statements`` and thin mode raised - ``ORA-03137: malformed TTC packet from client rejected`` + raised when either of the parameters ``batcherrors`` and + ``arraydmlrowcounts`` is set to the value `True` when calling + :meth:`Cursor.executemany()`. Previously, Thick mode raised ``DPI-1063: + modes DPI_MODE_EXEC_BATCH_ERRORS and DPI_MODE_EXEC_ARRAY_DML_ROWCOUNTS can + only be used with insert, update, delete and merge statements`` and Thin + mode raised ``ORA-03137: malformed TTC packet from client rejected`` (`issue 128 `__). #) Internal changes to ensure that errors taking place while raising exceptions are handled more gracefully. @@ -1406,7 +1406,7 @@ Thin Mode Changes #) Added support for getting the LOB chunk size (`issue 14 `__). -#) The error `DPY-2030: LOB offset must be greater than zero` is now raised +#) The error ``DPY-2030: LOB offset must be greater than zero`` is now raised when the offset parameter to :func:`LOB.read()` is zero or negative (`issue 13 `__). #) Internally, before a connection is returned from a pool, check for control @@ -1421,8 +1421,8 @@ Thin Mode Changes when connecting to a database that the listener configuration file states exists but actually doesn't (`issue 51 `__). -#) The error `DPY-3016: python-oracledb thin mode cannot be used because the - cryptography package is not installed` is now raised when the cryptography +#) The error ``DPY-3016: python-oracledb thin mode cannot be used because the + cryptography package is not installed`` is now raised when the cryptography package is not installed, instead of an ImportError. This allows platforms that are not capable of building the cryptography package to still use Thick mode. @@ -1458,8 +1458,8 @@ oracledb 1.0.3 (August 2022) Thin Mode Changes +++++++++++++++++ -#) The error `DPY-3015: password verifier type is not supported by - python-oracledb in thin mode` is now raised when +#) The error ``DPY-3015: password verifier type is not supported by + python-oracledb in thin mode`` is now raised when the database sends a password challenge with a verifier type that is not recognized, instead of `ORA-01017: invalid username/password` (`issue 26 `__). @@ -1525,7 +1525,7 @@ Thin Mode Changes #) Fixed connection retry count handling to work in cases where the database listener is running but the service is down (`issue 3 `__). -#) Return the same value for TIMESTAMP WITH TIME ZONE columns as thick mode +#) Return the same value for TIMESTAMP WITH TIME ZONE columns as Thick mode (`issue 7 `__). #) Fixed order in which bind data is sent to the server when LONG and non-LONG column data is interspersed @@ -1692,10 +1692,10 @@ cx_Oracle 8.2 (May 2021) Initial work was done in `PR 549 `__. #) Enhanced dead connection detection. 
If an Oracle Database error indicates - that a connection is no longer usable, the error `DPI-1080: connection was - closed by ORA-%d` is now returned. The `%d` will be the Oracle error + that a connection is no longer usable, the error ``DPI-1080: connection was + closed by ORA-%d`` is now returned. The `%d` will be the Oracle error causing the connection to be closed. Using the connection after this will - give `DPI-1010: not connected`. This behavior also applies for + give ``DPI-1010: not connected``. This behavior also applies for :data:`Connection.call_timeout` errors that result in an unusable connection. #) Eliminated a memory leak when calling :meth:`SodaOperation.filter()` with a @@ -2177,8 +2177,8 @@ cx_Oracle 6.4.1 (July 2018) Oracle number format (`ODPI-C issue 67 `__). -#) Prevent error "cx_Oracle.ProgrammingError: positional and named binds - cannot be intermixed" when calling cursor.setinputsizes() without any +#) Prevent error ``cx_Oracle.ProgrammingError: positional and named binds + cannot be intermixed`` when calling cursor.setinputsizes() without any parameters and then calling cursor.execute() with named bind parameters (`issue 199 `__). @@ -2221,7 +2221,7 @@ cx_Oracle 6.4 (July 2018) (`issue 193 `__). - If the statement should be deleted from the statement cache, first check to see that there is a statement cache currently being used; otherwise, - the error "ORA-24300: bad value for mode" will be raised under certain + the error ``ORA-24300: bad value for mode`` will be raised under certain conditions. #) Added support for using the cursor as a context manager @@ -2278,8 +2278,8 @@ cx_Oracle 6.3 (April 2018) - Fixed binding of LONG data (values exceeding 32KB) when using the function :meth:`Cursor.executemany()`. - Added code to verify that a CQN subscription is open before permitting it - to be used. Error "DPI-1060: subscription was already closed" will now be - raised if an attempt is made to use a subscription that was closed + to be used. Error ``DPI-1060: subscription was already closed`` will now + be raised if an attempt is made to use a subscription that was closed earlier. - Stopped attempting to unregister a CQN subscription before it was completely registered. This prevents errors encountered during @@ -2345,8 +2345,8 @@ cx_Oracle 6.2 (March 2018) `__. - - eliminate error "DPI-1054: connection cannot be closed when open - statements or LOBs exist" (`issue 138 + - eliminate error ``DPI-1054: connection cannot be closed when open + statements or LOBs exist`` (`issue 138 `__). - avoid a round trip to the database when a connection is released back to the pool by preventing a rollback from being called when no transaction @@ -2463,7 +2463,7 @@ cx_Oracle 6.0.3 (November 2017) - Prevent use of uninitialized data in certain cases (`issue 77 `__). - Attempting to ping a database earlier than 10g results in error - "ORA-1010: invalid OCI operation", but that implies a response from the + ``ORA-1010: invalid OCI operation``, but that implies a response from the database and therefore a successful ping, so treat it that way! - Correct handling of conversion of some numbers to NATIVE_FLOAT. - Prevent use of NaN with Oracle numbers since it produces corrupt data @@ -2536,11 +2536,11 @@ cx_Oracle 6.0 (August 2017) #version-2-0-august-14-2017>`__. 
- Prevent closing the connection when there are any open statements or - LOBs and add new error "DPI-1054: connection cannot be closed when open - statements or LOBs exist" when this situation is detected; this is - needed to prevent crashes under certain conditions when statements or - LOBs are being acted upon while at the same time (in another thread) a - connection is being closed; it also prevents leaks of statements and + LOBs and add new error ``DPI-1054: connection cannot be closed when + open statements or LOBs exist`` when this situation is detected; this + is needed to prevent crashes under certain conditions when statements + or LOBs are being acted upon while at the same time (in another thread) + a connection is being closed; it also prevents leaks of statements and LOBs when a connection is returned to a session pool. - On platforms other than Windows, if the regular method for loading the Oracle Client libraries fails, try using $ORACLE_HOME/lib/libclntsh.so From 2ebf0927fc20992802857d6bce75f9fd08e95b55 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 2 Apr 2025 13:22:47 -0600 Subject: [PATCH 056/239] Use order by clause to ensure expected order. --- tests/test_7600_pipelining_async.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/test_7600_pipelining_async.py b/tests/test_7600_pipelining_async.py index 4c3e65d8..9d776864 100644 --- a/tests/test_7600_pipelining_async.py +++ b/tests/test_7600_pipelining_async.py @@ -798,7 +798,9 @@ async def test_7639(self): pipeline2 = oracledb.create_pipeline() pipeline2.add_execute("insert into TestTempTable (IntCol) values (2)") pipeline2.add_commit() - pipeline2.add_fetchall("select IntCol from TestTempTable") + pipeline2.add_fetchall( + "select IntCol from TestTempTable order by IntCol" + ) await conn1.run_pipeline(pipeline1) results = await conn2.run_pipeline(pipeline2) From 73e50635068da1fa286dfb5ba73a415a4cd9e9e6 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 2 Apr 2025 13:23:37 -0600 Subject: [PATCH 057/239] Preparing to release python-oracledb 3.1.0. --- doc/src/release_notes.rst | 4 ++-- src/oracledb/version.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 5c3920d8..ef0acd26 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,8 +11,8 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. -oracledb 3.1.0 (TBD) --------------------- +oracledb 3.1.0 (April 2025) +--------------------------- Thin Mode Changes +++++++++++++++++ diff --git a/src/oracledb/version.py b/src/oracledb/version.py index 34b0487b..ad72b1ea 100644 --- a/src/oracledb/version.py +++ b/src/oracledb/version.py @@ -30,4 +30,4 @@ # file doc/src/conf.py both reference this file directly. # ----------------------------------------------------------------------------- -__version__ = "3.1.0b1" +__version__ = "3.1.0" From 3e5c7b69537b1c171fff45f61dc657d3b1bd4c98 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 3 Apr 2025 15:54:32 -0600 Subject: [PATCH 058/239] Bump version in preparation for more changes. 
--- doc/src/release_notes.rst | 13 +++++++++++++ src/oracledb/version.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ef0acd26..2b884368 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,6 +11,19 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. +oracledb 3.2.0 (TBD) +-------------------- + +Thin Mode Changes ++++++++++++++++++ + +Thick Mode Changes +++++++++++++++++++ + +Common Changes +++++++++++++++ + + oracledb 3.1.0 (April 2025) --------------------------- diff --git a/src/oracledb/version.py b/src/oracledb/version.py index ad72b1ea..0aa11aed 100644 --- a/src/oracledb/version.py +++ b/src/oracledb/version.py @@ -30,4 +30,4 @@ # file doc/src/conf.py both reference this file directly. # ----------------------------------------------------------------------------- -__version__ = "3.1.0" +__version__ = "3.2.0b1" From 3ebf833d61f5704a8fbf6df3f14f46da38db365e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 3 Apr 2025 15:54:52 -0600 Subject: [PATCH 059/239] Added support for using Queue.deqmany() with JSON payloads using Oracle Database 21c. --- doc/src/release_notes.rst | 3 +++ src/oracledb/aq.py | 10 +++++++++- src/oracledb/impl/base/queue.pyx | 9 ++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2b884368..e61ab7c4 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,6 +17,9 @@ oracledb 3.2.0 (TBD) Thin Mode Changes +++++++++++++++++ +#) Added support for using :meth:`Queue.deqmany()` with JSON payloads using + Oracle Database 21c. + Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/aq.py b/src/oracledb/aq.py index a1238c42..2e290d35 100644 --- a/src/oracledb/aq.py +++ b/src/oracledb/aq.py @@ -146,7 +146,15 @@ def deqmany(self, max_num_messages: int) -> list: Dequeues up to the specified number of messages from the queue and returns a list of these messages. """ - message_impls = self._impl.deq_many(max_num_messages) + if self._impl._supports_deq_many(self._connection._impl): + message_impls = self._impl.deq_many(max_num_messages) + else: + message_impls = [] + while len(message_impls) < max_num_messages: + message_impl = self._impl.deq_one() + if message_impl is None: + break + message_impls.append(message_impl) return [MessageProperties._from_impl(impl) for impl in message_impls] def deqMany(self, max_num_messages: int) -> List["MessageProperties"]: diff --git a/src/oracledb/impl/base/queue.pyx b/src/oracledb/impl/base/queue.pyx index 1949675f..0bd26e37 100644 --- a/src/oracledb/impl/base/queue.pyx +++ b/src/oracledb/impl/base/queue.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -31,6 +31,13 @@ cdef class BaseQueueImpl: + def _supports_deq_many(self, BaseConnImpl conn_impl): + """ + Returns a boolean indicating if array dequeue is supported or not. JSON + payloads are not supported by array dequeue until Oracle Database 23ai. 
+ """ + return not self.is_json or conn_impl.server_version[0] >= 23 + def deq_many(self, uint32_t max_num_messages): errors._raise_not_supported("dequeuing multiple messages") From 6b86d9bc2a50c1128c8d0b99b8c18d0846150ea5 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 3 Apr 2025 15:55:31 -0600 Subject: [PATCH 060/239] Doc tweaks. --- doc/src/user_guide/asyncio.rst | 6 +- doc/src/user_guide/connection_handling.rst | 88 +++++++++++++++++++--- 2 files changed, 79 insertions(+), 15 deletions(-) diff --git a/doc/src/user_guide/asyncio.rst b/doc/src/user_guide/asyncio.rst index 76c220c8..c806c1d1 100644 --- a/doc/src/user_guide/asyncio.rst +++ b/doc/src/user_guide/asyncio.rst @@ -24,9 +24,9 @@ useful tips. The python-oracledb asynchronous API is a part of the standard python-oracledb module. All the synchronous methods that require a round-trip to the database -now have corresponding asynchronous counterparts. You can choose whether to -use the synchronous API or the asynchronous API in your code. It is -recommended to *not* use both at the same time in your application. +have corresponding asynchronous counterparts. You can choose whether to use the +synchronous API or the asynchronous API in your code. It is recommended to +*not* use both at the same time in your application. The asynchronous API classes are :ref:`AsyncConnection `, :ref:`AsyncConnectionPool `, diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 95d0d788..6c66b165 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -2060,12 +2060,77 @@ dblatest&id=GUID-06022729-9210-4895-BF04-6177713C65A7>`__. Connection Pooling ================== -Python-oracledb's connection pooling lets applications create and maintain a -pool of open connections to the database. Connection pooling is available in -both Thin and :ref:`Thick ` modes. Connection pooling is -important for performance and scalability when applications need to handle a -large number of users who do database work for short periods of time but have -relatively long periods when the connections are not needed. The high +Connection pooling can significantly improve application performance and +scalability, allows resource sharing, and lets applications use advanced Oracle +High Availability features. + +The pooling solutions available to python-oracledb applications are: + +- :ref:`Driver Connection Pools `: These are managed by the + driver layer. They provide readily available database connections that can be + shared by multiple users and are quick for applications to obtain. They help + make applications scalable and highly available. They are created with + :meth:`oracledb.create_pool()` or :meth:`oracledb.create_pool_async()`. + + The main use case is for applications that hold connections for relatively + short durations while doing database work, and that acquire and release + connections back to the pool as needed to do those database operations. + Using a driver pool is recommended for applications that need to support + multiple users. High availability benefits also make driver pools useful for + single-user applications that do infrequent database operations. + +- :ref:`drcp`: This is pooling of server processes on the database host so they + can be shared between application connections. This reduces the number of + server processes that the database host needs to manage. 
+ + DRCP is useful if there are large number of application connections, + typically from having multiple application processes, and those applications + do frequent connection acquire and release calls as needed to do database + operations. It is recommended to use DRCP in conjunction with a driver + connection pool, since this reduces the number of re-authentications and + session memory re-allocations. + +- `Proxy Resident Connection Pooling (PRCP) + `__: This is connection pooling handled by a dedicated + mid-tier connection proxy, `CMAN-TDM `__. + + This is useful for applications taking advantage of CMAN-TDM. + +- :ref:`implicitconnpool`: This can add pooling benefits to applications that + connect when they start, and only close the connection when the application + terminates — but relatively infrequently do database work. It makes use of + DRCP or PRCP, but instead of relying on the application to explicitly acquire + and release connections, Implicit Connection Pooling automatically detects + when applications are not performing database work. It then allows the + associated database server process to be used by another connection that + needs to do a database operation. + + Implicit Connection Pooling is useful for legacy applications or third-party + code that cannot be updated to use a driver connection pool. + +Python-oracledb :ref:`driver connection pools ` are the first +choice for performance, scalability, and high availability. If your database +is under memory pressure from having too many applications opening too many +connections, then consider either :ref:`DRCP ` or :ref:`Implicit +Connection Pooling `, depending on your application’s +connection life-cycle. If you are utilizing CMAN-TDM, then using `PRCP +`__ can be considered. + +.. _driverconnpool: + +Driver Connection Pooling +------------------------- + +Python-oracledb's driver connection pooling lets applications create and +maintain a pool of open connections to the database. Connection pooling is +available in both Thin and :ref:`Thick ` modes. Connection +pooling is important for performance and scalability when applications need to +handle a large number of users who do database work for short periods of time +but have relatively long periods when the connections are not needed. The high availability features of pools also make small pools useful for applications that want a few connections available for infrequent use and requires them to be immediately usable when acquired. Applications that would benefit from @@ -2081,8 +2146,8 @@ Oracle Database features, for example some advanced :ref:`high availability .. note:: - Python-oracledb connection pools must be created, used and closed within - the same process. Sharing pools or connections across processes has + Python-oracledb driver connection pools must be created, used, and closed + within the same process. Sharing pools or connections across processes has unpredictable behavior. Using connection pools in multi-threaded architectures is supported. @@ -2090,11 +2155,10 @@ Oracle Database features, for example some advanced :ref:`high availability Multi-process architectures that cannot be converted to threading may get some benefit from :ref:`drcp`. - Creating a Connection Pool --------------------------- +++++++++++++++++++++++++++ -A connection pool is created by calling :meth:`oracledb.create_pool()`. +A driver connection pool is created by calling :meth:`oracledb.create_pool()`. 
Various pool options can be specified as described in :meth:`~oracledb.create_pool()` and detailed below. @@ -2786,7 +2850,7 @@ sharing for applications which use a large number of connections that run in multiple client processes or run on multiple middle-tier application servers. By default, each connection from Python will use one database server process. DRCP allows pooling of these server processes. This reduces the amount of -memory required on the database host. The DRCP pool can be shared by multiple +memory required on the database host. A DRCP pool can be shared by multiple applications. DRCP is useful for applications which share the same database credentials, have From 3fb231baec78031311c7b1f06cbc3f934b00ad1c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 3 Apr 2025 16:12:42 -0600 Subject: [PATCH 061/239] Update tutorial. --- .pre-commit-config.yaml | 1 + ...le-Database-The-New-Wave-of-Scripting.html | 2757 ++++++++++++----- samples/tutorial/aq.py | 4 +- samples/tutorial/async_gather.py | 79 + samples/tutorial/bind_sdo.py | 4 +- samples/tutorial/connect_params2.py | 4 +- samples/tutorial/create_user.py | 6 +- samples/tutorial/db_config.py | 4 +- samples/tutorial/db_config_sys.py | 4 +- samples/tutorial/db_config_thick.py | 6 +- samples/tutorial/drcp_query.py | 18 +- samples/tutorial/json_insert.py | 41 + samples/tutorial/pipelining.py | 73 + samples/tutorial/query_pandas.py | 42 + samples/tutorial/query_scroll.py | 6 +- samples/tutorial/rowfactory.py | 5 +- samples/tutorial/run_sql_script.py | 6 +- samples/tutorial/soda.py | 4 +- samples/tutorial/solutions/aq-dequeue.py | 4 +- samples/tutorial/solutions/aq-enqueue.py | 4 +- samples/tutorial/solutions/aq-queuestart.py | 4 +- samples/tutorial/solutions/bind_sdo.py | 4 +- samples/tutorial/solutions/connect_pool2.py | 10 +- samples/tutorial/solutions/json_insert.py | 53 + samples/tutorial/solutions/query_pandas.py | 55 + samples/tutorial/solutions/query_scroll.py | 6 +- samples/tutorial/solutions/rowfactory.py | 4 +- samples/tutorial/solutions/soda.py | 4 +- samples/tutorial/solutions/subclass.py | 4 +- samples/tutorial/solutions/vector.py | 52 + samples/tutorial/solutions/vector_numpy.py | 92 + samples/tutorial/sql/db_config.sql | 4 +- samples/tutorial/sql/setup_tutorial.sql | 37 +- samples/tutorial/subclass.py | 4 +- samples/tutorial/type_input.py | 2 +- samples/tutorial/type_input_named_obj.py | 10 +- samples/tutorial/type_output_named_obj.py | 4 +- samples/tutorial/vector.py | 43 + samples/tutorial/vector_numpy.py | 48 + 39 files changed, 2610 insertions(+), 902 deletions(-) create mode 100644 samples/tutorial/async_gather.py create mode 100644 samples/tutorial/json_insert.py create mode 100644 samples/tutorial/pipelining.py create mode 100644 samples/tutorial/query_pandas.py create mode 100644 samples/tutorial/solutions/json_insert.py create mode 100644 samples/tutorial/solutions/query_pandas.py create mode 100644 samples/tutorial/solutions/vector.py create mode 100644 samples/tutorial/solutions/vector_numpy.py create mode 100644 samples/tutorial/vector.py create mode 100644 samples/tutorial/vector_numpy.py diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d8110b62..09e53feb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,3 +14,4 @@ repos: rev: v0.0.291 hooks: - id: ruff + exclude: ^samples/ diff --git a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html index 
42c78154..8734268f 100644 --- a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html +++ b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html @@ -27,14 +27,14 @@

Python and Oracle Database Tutorial: The New Wave of Scr -

-

Contents

Overview

-

This tutorial is a primary guide on using Python with Oracle Database. It contains both beginner and advanced materials. Choose the content that interests you and your skill level. The tutorial has scripts to run and modify, and has suggested solutions.

- -

Python is a popular general purpose dynamic scripting language. The python-oracledb driver provides Python APIs to access Oracle Database. It is an upgrade for the hugely popular cx_Oracle interface. -

+

This tutorial is a guide on using Python with Oracle Database. It contains + both beginner and advanced materials. Choose the content that interests you + and your skill level. The tutorial has scripts to run and modify, and has + suggested solutions.

+

Python is a popular general purpose dynamic scripting language. The + python-oracledb driver provides access to Oracle Database from Python + scripts. It is the successor to the obsolete cx_Oracle interface.

-

If you are new to Python, review the Appendix: Python Primer to gain an understanding of the language.

-

When you have finished this tutorial, we recommend reviewing the python-oracledb documentation.

+

If you are new to Python, review the Appendix: Python + Primer to gain an understanding of the language.

When you have + finished this tutorial, we recommend reviewing the python-oracledb documentation.

The original copy of these instructions that you are reading is Overview

Python-oracledb Architecture

-

The python-oracledb driver enables access to Oracle Database using either -one of two modes. Both modes have comprehensive functionality supporting the -Python Database API v2.0 Specification. By default, python-oracledb runs in a -"thin" mode, which connects directly to Oracle Database. This mode -does not need Oracle Client libraries. However, some additional features are -available when python-oracledb uses them. Python-oracledb applications that -load the Oracle Client libraries via an application script runtime option are -said to be in "thick" mode. This tutorial has examples in both -modes.

+

By default, python-oracledb runs in a "Thin" mode, which connects +directly to Oracle Database. This mode does not need Oracle Client +libraries. However, some additional features are available when python-oracledb +uses them. Python-oracledb applications that load the Oracle Client libraries +via an application script runtime option are said to be in "Thick" +mode. Both modes have comprehensive functionality supporting the Python +Database API v2.0 Specification, allowing python-oracledb to be used by popular +frameworks, ORMs, and other libraries.

-

Python python-oracledb architecture

-

The database can be on the same machine as Python, or it can be remote.

-

Setup

+

Python python-oracledb
+architecture

The database can be on the same machine as +Python, or it can be remote.

Setup

    +
  • +

    Get Access to Oracle Database

    + +

    This tutorial assumes you have DBA access to Oracle Database. This is + needed to grant some privileges and roles.

    + +

    Some examples require the latest version of Oracle Database, but most + will work with older database versions.

    + +

    If you need a database, you can install Oracle Database 23ai Free on + Linux or Windows from oracle-database-software-downloads.html#db_free.

    + +

    Alternatively use a container from container-registry.oracle.com:

    + +
    podman run -p 1521:1521 --name free -e ORACLE_PWD=mysecret
    +      container-registry.oracle.com/database/free:latest
    -
  • Install software

    +

    Variants of the container can also be found at github.com/gvenzl/oci-oracle-free/blob/main

    + +

    When the locally installed Free database or container is running, the + privileged user name is "SYSTEM" and the connection string is + "localhost/freepdb1". For examples that additionally need to connect to + the root container database (not to be confused with a database running + in a Docker container), the root connection string is + "localhost/free".
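    As an illustration of how these values fit together (this assumes the container above was started with ORACLE_PWD=mysecret, and that python-oracledb has already been installed as described in the next step), a quick privileged connection could later be made like:

        import oracledb

        # "system"/"mysecret" and "localhost/freepdb1" are the example values from this step
        con = oracledb.connect(user="system", password="mysecret", dsn="localhost/freepdb1")
        print("Connected to database version:", con.version)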

    + +
  • + +
  • +

    Install Python and python-oracledb

    Install Python 3 if not already available. It can be obtained from your operating system package library or from python.org. Use Python 3.7 or + href="https://www.python.org/">python.org. Use Python 3.9 or later.

    Install python-oracledb with - a command like pip install oracledb --upgrade

    - -

    Ensure you can access an Oracle Database.

    + href="https://pypi.org/project/oracledb/">python-oracledb with a + command like:

    python -m pip install oracledb --upgrade
    +

    or

    python3 -m pip install oracledb --upgrade
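    To confirm the installation succeeded (assuming the interpreter you installed into is the one being invoked), a quick check is:

        python -c "import oracledb; print(oracledb.__version__)"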
    -
  • Download the tutorial scripts

    -

    The Python scripts used in this example are in the python-oracledb GitHub repository.

    +

    The Python scripts used in this example are in the python-oracledb GitHub repository.

    -

    Download a zip file of the repository from here and unzip it. Alternatively you can use 'git' to clone the repository.

    -

    git clone https://github.com/oracle/python-oracledb.git

    +

    Download a zip file of the repository from here and unzip it. Alternatively you can use 'git' to clone the + repository:

    -

    The samples/tutorial directory has scripts to run and modify. The samples/tutorial/solutions directory has scripts with suggested code changes. The samples/tutorial/sql directory has all the SQL scripts used by the Python files to create database tables and other objects.

    +
    git clone https://github.com/oracle/python-oracledb.git
    -
  • -
  • -

    Review the privileged database credentials used for creating the schema

    +

    The samples/tutorial directory has scripts to run and + modify. The samples/tutorial/solutions directory has + scripts with suggested code changes. The + samples/tutorial/sql directory has all the SQL scripts + used by the Python files to create database tables and other + objects.

    -

    Review db_config_sys.py in the tutorial directory. This file is included in other Python files for creating and dropping the tutorial user.

    +
  • -

    Edit db_config_sys.py file and change the default values to match the system connection information for your environment. Alternatively, you can set the given environment variables in your terminal window. For example, the default username is "SYSTEM" unless the environment variable "PYTHON_SYSUSER" contains a different username. The default system connection string is for the "orclpdb" database service on the same machine as Python. In Python Database API terminology, the connection string parameter is called the "data source name", or "dsn". Using environment variables is convenient because you will not be asked to re-enter the password when you run scripts:

    +
  • +

    Review the privileged database credentials used for creating the + schema

    + +

    Review db_config_sys.py in the tutorial + directory. This file is included in other Python files for creating and + dropping the tutorial user.

    + +

    Edit db_config_sys.py file and change the default + values to match the system connection information for your environment. + Alternatively, you can set the given environment variables in your + terminal window. For example, the default username is "SYSTEM" + unless the environment variable "PYTHON_SYSUSER" contains a + different username. The default system connection string is for the + "freepdb1" database service on the same machine as Python. In + Python Database API terminology, the connection string parameter is + called the "data source name", or "dsn". Using environment variables + is convenient because you will not be asked to re-enter the password + when you run scripts. The file db_config_sys.py looks + like:

     user = os.environ.get("PYTHON_SYSUSER", "SYSTEM")
     
    -dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/orclpdb")
    +dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/freepdb1")
     
     pw = os.environ.get("PYTHON_SYSPASSWORD")
     if pw is None:
         pw = getpass.getpass("Enter password for %s: " % user)
     
    -

    Substitute the admin values for your environment. If you are using Oracle Autonomous Database (ADB), use the ADMIN user instead of SYSTEM. The tutorial instructions may need adjusting, depending on how you have set up your environment.

    +

    Substitute the administrator values for your environment. If you are + using Oracle Autonomous Database (ADB), use the ADMIN user + instead of SYSTEM. The tutorial instructions may need + adjusting, depending on how you have set up your environment.

    +
  • -
  • Create a database user

    +
  • +

    Create a database user

    -

    If you have an existing user, you may be able to use it for most examples (some examples may require extra permissions).

    +

    If you have an existing user, you may be able to use it for most + examples (though some examples may require extra permissions).

    -

    If you need to create a new user for this tutorial, review the grants created in samples/tutorial/sql/create_user.sql by opening it in your favorite text editor. Then open a terminal window and run create_user.py to execute the create_user.sql script and create the sample user. This tutorial uses the name pythondemo:

    -
    -python create_user.py
    +

    If you need to create a new user for this tutorial, review the + grants created in samples/tutorial/sql/create_user.sql by + opening it in your favorite text editor. Then open a terminal window + and run create_user.py to execute the + create_user.sql script and create the sample user. This + tutorial uses the name pythondemo by default:

    + +
    python create_user.py
    + +

    The example above connects as the SYSTEM (or ADMIN for + ADB) user using db_config_sys file discussed in + the earlier section. The connection string is + "localhost/freepdb1", meaning use the database service + "freepdb1" running on localhost (the computer you are running + your Python scripts on).

    + +

    If it runs successfully, you will see something similar below:

    -

    The example above connects as the SYSTEM (or ADMIN for ADB) user using db_config_sys file discussed in the earlier section. The connection string is "localhost/orclpdb", meaning use the database service "orclpdb" running on localhost (the computer you are running your Python scripts on).

    -

    If it runs successfully, you will see something similar below:

    Enter password for SYSTEM:
     Enter password for pythondemo:
     Creating user...
    -SQL File Name:  D:\python-oracledb\samples\tutorial\sql\create_user.sql
    +SQL File Name:  python-oracledb\samples\tutorial\sql\create_user.sql
     Done.
    -

    The new user pythondemo is created.

    -

    When the tutorial is finished, ensure that all the database sessions connected to the tutorial user pythondemo are closed and then run drop_user.py to remove the tutorial user.

    + +

    The new user pythondemo is created.

    + +

    When the tutorial is finished, ensure that all the database sessions + connected to the tutorial user pythondemo are closed and then + run python drop_user.py to remove the tutorial user.

    +
  • Install the tables and other database objects for the tutorial

    -

    Once you have a database user, then you can create the key tutorial tables and database objects for the tutorial by running setup_tutorial.py (the environment setup file), using your values for the tutorial username, password and connection string:

    +

    Once you have a database user, then you can create the key tutorial + tables and database objects for the tutorial by running + setup_tutorial.py (the environment setup file), using your + values for the tutorial username, password and connection string:

    -
    -python setup_tutorial.py
    -

    On successful completion of the run, You will see something like:

    -
    Setting up the sample tables and other DB objects for the tutorial...
    -SQL File Name:  D:\python-oracledb\samples\tutorial\sql\setup_tutorial.sql
    +      
    python setup_tutorial.py
    + +

    On successful completion of the run, You will see something like:

    + +
    Setting up the sample tables and other DB objects for the tutorial...
    +SQL File Name:  python-oracledb/samples/tutorial/sql/setup_tutorial.sql
     Done.
    -

    This will call the setup_tutorial.sql file from tutorials/sql directory to setup some sample tables and database objects required for running the examples in the tutorial.

    -
  • -
  • -

    Review the connection credentials used by the tutorial scripts

    +

    This will call the setup_tutorial.sql file from + tutorials/sql directory to setup some sample tables and + database objects required for running the examples in the tutorial.

    -

    Review db_config.py (thin mode), and db_config.sql files in the tutorial and tutorial/sql directories respectively. These are included in other Python and SQL files for setting up the database connection.

    +
  • -

    Edit db_config.py file and change the default values to match the connection information for your environment. Alternatively, you can set the given environment variables in your terminal window. For example, the default username is "pythondemo" unless the environment variable "PYTHON_USER" contains a different username. The default connection string is for the 'orclpdb' database service on the same machine as Python. In Python Database API terminology, the connection string parameter is called the "data source name", or "dsn". Using environment variables is convenient because you will not be asked to re-enter the password when you run scripts:

    +
  • +

    Review the connection credentials used by the tutorial scripts

    + +

    Review db_config.py (Thin mode), and + db_config.sql files in the tutorial and + tutorial/sql directories respectively. These are included + in other Python and SQL files for setting up the database + connection.

    + +

    Edit db_config.py file and change the default values to + match the connection information for your environment. Alternatively, + you can set the given environment variables in your terminal window. For + example, the default username is "pythondemo" unless the + environment variable "PYTHON_USER" contains a different + username. The default connection string is for the 'freepdb1' + database service on the same machine as Python. In Python Database API + terminology, the connection string parameter is called the "data source + name", or "dsn". Using environment variables is convenient because you + will not be asked to re-enter the password when you run scripts:

     user = os.environ.get("PYTHON_USER", "pythondemo")
     
    -dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/orclpdb")
    +dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/freepdb1")
     
     pw = os.environ.get("PYTHON_PASSWORD")
     if pw is None:
         pw = getpass.getpass("Enter password for %s: " % user)
     
    -

    Also, change the database username and connection string in the SQL configuration file db_config.sql based on your environment settings:

    +

    Also, change the database username and connection string in the SQL + configuration file db_config.sql based on your environment + settings:

     -- Default database username
     def user = "pythondemo"
     
     -- Default database connection string
    -def connect_string = "localhost/orclpdb"
    +def connect_string = "localhost/freepdb1"
     
     -- Prompt for the password
     accept pw char prompt 'Enter database password for &user: ' hide
     
    -

    The tutorial instructions may need adjusting, depending on how you have set up your environment.

    -
  • - -
  • Runtime Naming

    - -

    At runtime, the module name of the python-oracledb package is oracledb:

    +

    The tutorial instructions may need adjusting, depending on how you + have set up your environment.

    -
    import oracledb
  • - -
  • Python-oracledb defaults

    -

    A singleton oracledb.defaults contains attributes that can be used to adjust the default behavior of python-oracledb. Attributes not supported in a mode (thin or thick) will be ignored in that mode.

    -

    Open defaults.py in an editor. This will look like:

    -
    import oracledb
    +     
  • -print("Default array size:", oracledb.defaults.arraysize) -Run the script: -
    python defaults.py
    -It displays: -
    Default array size: 100
    -

    This gives the default array size tuning parameter that will be useful in Section 3.4 of this tutorial.

    -

    The default values can also be edited using the defaults attribute. All the default values that can be set and read with defaults attribute are available in the python-oracledb documentation.

1. Connecting to Oracle

-

You can connect from Python to a local, remote or cloud Oracle Database. Documentation link for further reading: You can connect from Python to a local, remote or cloud Oracle +Database. Documentation link for further reading: Connecting to Oracle Database.

-
    -
  • -

    1.1 Creating a basic connection

    -

    Review the code contained in connect.py :

    -
    +
      +
    • +

      1.1 Creating a basic connection

      + +

      Review the code contained in connect.py:

      + +
       import oracledb
       import db_config
       
      @@ -331,64 +448,85 @@ 

      1.1 Creating a basic connection

      print("Database version:", con.version)
      -

      The python-oracledb module is imported to provide the API for accessing the Oracle database. Many inbuilt and third-party modules can be included in Python scripts this way.

      +

      The python-oracledb module is imported to provide the API for accessing + the Oracle database. Many inbuilt and third-party modules can be included + in Python scripts this way.

      + +

      The username, the password and the connection string that you +configured in the db_config.py module is passed to the +connect() method. By default, Oracle's Easy Connect connection +string syntax is used. It consists of the hostname of your machine, +localhost, and the database service name +freepdb1. (In Python Database API terminology, the connection +string parameter is called the "data source name", or "dsn").

      -

      The username, the password and the connection string that you configured in the -db_config.py module is passed to the connect() method. By default, Oracle's Easy Connect connection string syntax is used. It consists of the hostname of your machine, localhost, and the database service name orclpdb. (In Python Database API terminology, the connection string parameter is called the "data source name", or "dsn").

      +

      Open a command terminal and change to the tutorial + directory:

      -

      Open a command terminal and change to the tutorial directory:

      +
      cd samples/tutorial
      -
      cd samples/tutorial
      +

      Run the Python script:

      -

      Run the Python script:

      +
      python connect.py
      -
      python connect.py
      +

      The version number of the database should be displayed. An exception is + raised if the connection fails. Adjust the username, password, or + connection string parameters to invalid values to see the exception.

      -

      The version number of the database should be displayed. An exception is raised if the connection fails. Adjust the username, password, or connection string parameters to invalid values to see the exception.

      +

      Python-oracledb also supports other authentication methods such as + "token authentication" and "external authentication", + which allow connections without needing usernames and passwords to be + embedded in the code. With external authentication, access could be + enforced by, for example, an Oracle Wallet.

      -

      Python-oracledb also supports "external authentication", which allows connections without needing usernames and passwords to be embedded in the code. Authentication would then be performed by, for - example, LDAP or Oracle Wallets.

      +
    • - +
    • +

      1.2 Indentation indicates code structure

      -
    • -

      1.2 Indentation indicates code structure

      +

      Python uses whitespace to indicate code blocks. It does not use + statement terminators, begin/end keywords, or braces.

      -

      In Python, there are no statement terminators, begin/end keywords, or braces to indicate code blocks.

      +

      Note that the sample files use spaces, not tabs.

      -

      Open connect.py in an editor. Indent the print statement with some spaces:

      +

      Open connect.py in an editor. Indent the print statement + with some spaces:

      -
      -import oracledb
      +    
      import oracledb
       import db_config
       
       con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
         print("Database version:", con.version)
       
      -

      Save the script and run it again:

      +

      Save the script and run it again:

      -
      python connect.py 
      +
      python connect.py 
      -

      This raises an exception about the indentation. The number of spaces or tabs must be consistent in each block; otherwise, the Python interpreter will either raise an exception or execute code unexpectedly.

      +

      This raises an exception about the indentation. The number of spaces or + tabs must be consistent in each block; otherwise, the Python interpreter + will either raise an exception or execute code unexpectedly.

      -

      Python may not always be able to identify accidental from deliberate indentation. Check if your indentation is correct before running each example. Make sure to indent all statement blocks equally. Note that the sample files use spaces, not tabs.

      +

      Python may not always be able to distinguish accidental from deliberate + indentation. Check if your indentation is correct before running each + example. Make sure to indent all statement blocks equally in the tutorial + with spaces (not tabs).

      -
    • + -
    • -

      1.3 Executing a query

      +
    • +

      1.3 Executing a query

      -

      Open query.py in an editor. It looks like:

      +

      Open query.py in an editor. It looks like:

      -
      +    
       import oracledb
       import db_config
       
       con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
       
      -

      Edit the file and add the code shown in bold below:

      +

      Edit the file and add the code shown in bold below:

       import oracledb
      @@ -403,30 +541,38 @@ 

      1.3 Executing a query

      print(row)
      -

      Make sure the print(row) line is indented. This tutorial uses spaces, not tabs.

      +

      Make sure the print(row) line is indented. This tutorial + uses spaces, not tabs.

      -

      The code executes a query and fetches all data.

      +

      The code executes a query and fetches all data.
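      For reference, the finished script should end up looking something like the sketch below (the same query and fetch loop also appear in the context manager example in section 1.4; a suggested solution is in the solutions directory):

        import oracledb
        import db_config

        con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)

        cur = con.cursor()
        cur.execute("select * from dept order by deptno")
        res = cur.fetchall()
        for row in res:
            print(row)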

      -

      Save the file and run it:

      +

      Save the file and run it:

      -
      python query.py
      +
      python query.py
      -

      In each loop iteration, a new row is stored in - row variable as a Python "tuple" and is displayed.

      +

      In each loop iteration, a new row is stored in row variable + as a Python "tuple" and is displayed.

      -

      Fetching data is described further in Section 3.

      -
    • +

      Fetching data is described further in Section + 3.

      -
    • -

      1.4 Closing connections

      +
    • + +
    • +

      1.4 Closing connections

      -

      Connections and other resources used by python-oracledb will automatically be closed at the end of scope. This is a common programming style that takes care of the correct order of resource closure.

      +

      Connections and other resources used by python-oracledb will + automatically be closed at the end of scope. This is a common programming + style that takes care of the correct order of resource closure.

      -

      Resources can also be explicitly closed to free up database resources if they are no longer needed. This is strongly recommended in blocks of code that remain active for some time.

      +

      Resources can also be explicitly closed to free up database resources if + they are no longer needed. This is strongly recommended in blocks of code + that remain active for some time.

      -

      Open query.py in an editor and add calls to close the cursor and connection like:

      +

      Open query.py in an editor and add calls to close the + cursor and connection like:

      -
      +    
       import oracledb
       import db_config
       
      @@ -442,19 +588,35 @@ 

      1.4 Closing connections

      con.close()
      -

      Running the script completes without error:

      +

      Running the script completes without error:

      + +
      python query.py
      -
      python query.py
      +

      If you swap the order of the two close() calls you will see + an error.

      -

      If you swap the order of the two close() calls you will see an error.

      -
    • +

      Often Python context managers are used to automatically scope resources + and automatically close them. You might encounter Python code that uses + them as with blocks like:

      -
    • -

      1.5 Checking versions

      +
      with oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn) as con:
      +    with con.cursor() as cur:
      +        cur.execute("select * from dept order by deptno")
      +        res = cur.fetchall()
      +        for row in res:
      +            print(row)
      -

      Review the code contained in versions.py:

      +

      For ease of editing and getting correct indentation this tutorial + doesn't use context managers, but they are recommended.

      -
      +  
    • + +
    • +

      1.5 Checking versions

      + +

      Review the code contained in versions.py:

      + +
       import oracledb
       import db_config
       
      @@ -462,15 +624,16 @@ 

      1.5 Checking versions

      print(oracledb.__version__) # two underscores before and after the version
      -

      Run the script:

      +

      Run the script:

      -
      python versions.py
      +
      python versions.py
      -

      This gives the version of the python-oracledb interface.

      +

      This gives the version of the python-oracledb interface.

      -

      Edit the file to print the version of the database, and the Oracle client libraries used by python-oracledb:

      +

      Edit the file to print the version of the database, and the Oracle + client libraries used by python-oracledb:

      -
      +    
       import oracledb
       import db_config
       
      @@ -480,48 +643,75 @@ 

      1.5 Checking versions

      print("Database version:", con.version)
      -

      When the script is run, it will display:

      +

      When the script is run, it will display something like:

      + +
      +3.1.0
      +Database version: 23.7.0.0.0
      + +

      Any python-oracledb installation can connect to older and newer Oracle + Database versions. By checking the Oracle Database version numbers, the + application can make use of the best Oracle features available.
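      As a minimal sketch of that idea (reusing the db_config module from this tutorial; the threshold of 23 is only an example):

        import oracledb
        import db_config

        con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)

        # con.version is a string such as "23.7.0.0.0"; compare on the major number
        if int(con.version.split(".")[0]) >= 23:
            print("Newer database features are available")
        else:
            print("Running against an older database release")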

      + +
    • -
      -1.0.0
      -Database version: 19.3.0.0.0
      +
    • +

      1.6 Using the ConnectParams builder class

      -

      Any python-oracledb installation can connect to older and newer - Oracle Database versions. By checking the Oracle Database - version numbers, the application can make use of - the best Oracle features available.

      +

      To help encapsulate connection arguments, a connection property builder + function oracledb.ConnectParams() can be used. It returns a + ConnectParams object. The object can be passed to + oracledb.connect() or oracledb.create_pool().

      -
    • -
    • -

      1.6 Using the ConnectParams builder class

      -

      - A connection property builder function oracledb.ConnectParams() has been added. It returns a new ConnectParams object. The object can be passed to oracledb.connect() or -oracledb.create_pool().

      +

      Open connect_params2.py in a text editor. It looks + like:

      -

      Open connect_params2.py in a text editor. It looks like:

      -
      import oracledb
      +    
      import oracledb
       import db_config
       
      -params = oracledb.ConnectParams(host="localhost", port=1521, service_name="orclpdb")
      +params = oracledb.ConnectParams(host="localhost", port=1521, service_name="freepdb1")
       con = oracledb.connect(user=db_config.user, password=db_config.pw, params=params)
       print("Database version:", con.version)
      - When the script is run (python connect_params2.py), it will display: -
      Database version: 19.3.0.0.
      -

      - The use of ConnectParams() is optional. Users can continue to use previous approaches. The list of parameters for the ConnectParams class is available in the python-oracledb documentation.

      -

      Notes:

      -
        -
      • If the params parameter is specified and keyword parameters are also specified, then the params parameter is updated with the values from the keyword parameters before being used to create the connection.
      • -
      • If the dsn parameter is specified and the params parameter is specified, then the params parameter is updated with the contents of the dsn parameter before being used to create the connection.
      • -
      -
    • -
    • -

      1.7 Checking Connection Health

      -

      The function Connection.is_healthy() checks the usability of a database connection locally. This function returns a boolean value indicating the health status of a connection.

      -

      Connections may become unusable in several cases, such as if the network socket is broken, if an Oracle error indicates the connection is unusable or after receiving a planned down notification from the database. -This function is best used before starting a new database request on an existing standalone connection. Pooled connections internally perform this check before returning a connection to the application. If this function returns False, the connection should be not be used by the application and a new connection should be established instead.

      -

      Open connect_health.py in a text editor. It looks like:

      -
      import oracledb
      +
      +    

      Edit the connection details to suit your environment.

      + +

      When the script is run (python + connect_params2.py), it will display something like:

      + +
      Database version: 23.7.0.0.0
      + +

      The list of parameters for the ConnectParams class is + available in the python-oracledb documentation. The use of + ConnectParams() is optional. You can continue to pass individual + parameters to connection and pool creation calls, if you like.

      + +

      Notes:

      + +
        +
      • If the params parameter is specified and keyword parameters are also specified, then the params parameter is updated with the values from the keyword parameters before being used to create the connection.
      • +
      • If the dsn parameter is specified and the params parameter is specified, then the params parameter is updated with the contents of the dsn parameter before being used to create the connection.
      • +
      +
    • + +
    • +

      1.7 Checking Connection Health

      + +

      The function Connection.is_healthy() checks the usability + of a database connection locally. This function returns a boolean value + indicating the health status of a connection.

      + +

      Connections may become unusable in several cases, such as if the network + socket is broken, if an Oracle error indicates the connection is unusable + or after receiving a planned down notification from the database. This + function is best used before starting a new database request on an existing + standalone connection. Pooled connections internally perform this check + before returning a connection to the application. If this function returns + False, the connection should not be used by the application + and a new connection should be established instead.

      + +

      Open connect_health.py in a text editor. It looks like:

      + +
      import oracledb
       import db_config
       
       con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
      @@ -529,21 +719,40 @@ 

      1.7 Checking Connection Health

      print("Healthy connection!") else: print("Unusable connection. Please check the database and network settings.")
      -

      When the script is run (python connect_health.py), it will display (when the connection is OK):

      -
      Healthy Connection!
      -

      To fully check a connection's health, use Connection.ping() which performs a round-trip to the database.

    • -
    -

    2. Connection Pooling

    -

    Connection pooling is important for performance when multi-threaded applications frequently connect and disconnect from the database. Pooling also gives the best support for Oracle's High Availability (HA) features. +

    Run the script:

    + +
    python connect_health.py
    + +

    It will display (when the connection is OK):

    + +
    Healthy Connection!
    + +

    To fully check a connection's health, use Connection.ping() + which performs a round-trip to the database and throws an exception if the + connection is not usable. Pinging the database impacts ultimate + scalability, so think carefully before adding it to your applications. + Also, explicit pinging is generally not needed when you use connection + pooling since the pool internally handles dead connection detection.
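      The difference between the two checks can be seen in a small sketch (illustrative only; it reuses the db_config module from the tutorial scripts):

        import oracledb
        import db_config

        con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)

        # Local check: no round-trip to the database is made
        if con.is_healthy():
            print("Connection looks usable")

        # Full check: performs a round-trip and raises an exception if the connection is unusable
        try:
            con.ping()
            print("Round-trip ping succeeded")
        except oracledb.Error as e:
            print("Connection is not usable:", e)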

    + +
  • +
+ +

2. Connection Pooling

+ +

Connection pooling is important for performance when multi-threaded +applications frequently connect and disconnect from the database. Pooling also +gives the best support for Oracle's High Availability (HA) features. Documentation link for further reading: Connection Pooling.

+href="https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#connection-pooling">Connection +Pooling
.

-
    -
  • 2.1 Connection pooling

    +
      +
    • 2.1 Connection pooling

      -

      Review the code contained in connect_pool.py:

      -
      +    

      Review the code contained in connect_pool.py:

      + +
       import oracledb
       import threading
       import db_config
      @@ -571,34 +780,49 @@ 

      2. Connection Pooling

      print("All done!")
      -

      The create_pool() function creates a pool of Oracle connections for the user. Connections in the pool can be used by python-oracledb by calling pool.acquire(). - The initial pool size is 2 connections. The maximum size is 5 connections. When the pool needs to grow, then a single new connection will be created at a time based on the increment parameter. The pool can shrink back to the minimum size of 2 when the connections are no longer in use.

      +

      The create_pool() function creates a pool of Oracle + connections for the user. Connections in the pool can be used by + python-oracledb by calling pool.acquire(). The initial pool + size is 2 connections. The maximum size is 5 connections. When the pool + needs to grow, then a single new connection will be created at a time based + on the increment parameter. The pool can shrink back to the + minimum size of 2 when the connections are no longer in use.

      -

      The def Query(): line creates a method that is called by each thread.

      +

      The def Query(): line creates a method that is called by + each thread.

      -

      In the Query method, the pool.acquire() call gets one connection from the pool (as long as less than 5 are already in use). This connection is used in a loop of 4 iterations to query the sequence myseq. At the end of the method, python-oracledb will automatically close the cursor and release the connection back to the pool for reuse.

      +

      In the Query() method, the pool.acquire() call + gets one connection from the pool (as long as less than 5 are already in + use). This connection is used in a loop of 4 iterations to query the + sequence myseq. At the end of the method, python-oracledb will + automatically close the cursor and release the connection back to the pool + for reuse.

      -

      The seqval, = cur.fetchone() line fetches a row and puts the single value contained in the result tuple into the variable seqval. Without the comma, the value in seqval would be a tuple like - "(1,)".

      +

      The seqval, = cur.fetchone() line fetches a row and puts + the single value contained in the result tuple into the variable + seqval. Without the comma, the value in seqval + would be a tuple like "(1,)".

      -

      Two threads are created, each invoking the - Query() method.

      +

      Two threads are created, each invoking the Query() + method.

      -

      In a command terminal, run:

      +

      In a command terminal, run:

      -
      python connect_pool.py
      +
      python connect_pool.py
      -

      The output shows the interleaved query results as each thread fetches values independently. The order of interleaving may vary from run to run.

      +

      The output shows the interleaved query results as each thread fetches + values independently. The order of interleaving may vary from run to + run.
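      A condensed sketch of the same pattern, without the threading, may make the pool calls easier to see (the parameter values are those described above, and the myseq sequence is created by the tutorial setup scripts):

        import oracledb
        import db_config

        pool = oracledb.create_pool(user=db_config.user, password=db_config.pw,
                                    dsn=db_config.dsn, min=2, max=5, increment=1)

        # acquire() borrows a connection; closing it returns it to the pool for reuse
        with pool.acquire() as con:
            with con.cursor() as cur:
                cur.execute("select myseq.nextval from dual")
                seqval, = cur.fetchone()
                print("Fetched sequence =", seqval)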

      -
    • - -
    • -

      2.2 Connection pool experiments

      +
    • +
    • +

      2.2 Connection pool experiments

      -

      Review connect_pool2.py, which has a loop for the number of threads, each iteration invoking the Query() method:

      +

      Review connect_pool2.py, which has a loop for the number of + threads, each iteration invoking the Query() method:

      -
      +    
       import oracledb
       import threading
       import db_config
      @@ -614,86 +838,111 @@ 

      2.2 Connection pool experiments

      seqval, = cur.fetchone() print("Thread", threading.current_thread().name, "fetched sequence =", seqval) -numberOfThreads = 2 -threadArray = [] +number_of_threads = 2 +thread_array = [] -for i in range(numberOfThreads): +for i in range(number_of_threads): thread = threading.Thread(name='#' + str(i), target=Query) - threadArray.append(thread) + thread_array.append(thread) thread.start() -for t in threadArray: +for t in thread_array: t.join() print("All done!")
      -

      In a command terminal, run:

      +

      In a command terminal, run:

      -
      python connect_pool2.py
      +
      python connect_pool2.py
      -

      Experiment with different values of the pool parameters and -numberOfThreads. Larger initial pool sizes will make the pool creation slower, but the connections will be available immediately when needed. -

      +

      Experiment with different values of the pool parameters and + number_of_threads.

      -

      Try changing getmode to -oracledb.POOL_GETMODE_NOWAIT. When numberOfThreads -exceeds the maximum size of the pool, the acquire() call will now -generate an error such as "ORA-24459: OCISessionGet() timed out waiting for pool to create new connections".

      +

      Try changing getmode to + oracledb.POOL_GETMODE_NOWAIT, reducing the maximum pool size to + 2, and increasing the number of threads to 3. When + number_of_threads exceeds the maximum size of the pool, the + acquire() call will now generate an error such as + "DPY-4005: timed out waiting for the connection pool to return a + connection".

      -

      Pool configurations where min is the same as -max (and increment = 0) are often -recommended as a best practice for the optimum performance. Pools with such configurations are referred to as "static pools". This configuration avoids connection storms on the database server.

      +

      Pool configurations where min is the same as + max (and increment = 0) are often recommended as + a best practice for the optimum performance. Pools with such configurations + are referred to as "static pools". This configuration + avoids connection storms on the database server.
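      A sketch of such a static configuration (the size of 4 is arbitrary and only for illustration; reuse the db_config credentials as in the earlier examples):

        pool = oracledb.create_pool(user=db_config.user, password=db_config.pw,
                                    dsn=db_config.dsn, min=4, max=4, increment=0)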

      -
    • + + +
    • +

      2.3 Creating a DRCP Connection

      + +

      Database Resident Connection Pooling allows multiple Python processes on + multiple machines to share a small pool of database server processes.

      + +

      Below left is a diagram without DRCP. Every application standalone + connection (or python-oracledb connection-pool connection) has its own + database server process. Standalone application connect() and + close calls require the expensive create and destroy of those database + server processes. Python-oracledb connection pools reduce these costs by + keeping database server processes open, but every python-oracledb + connection pool will require its own set of database server processes, even + if they are not doing database work: these idle server processes consume + database host resources. Below right is a diagram with DRCP. Scripts and + Python processes can share database servers from a pre-created pool of + servers and return them when they are not in use.

      + + + + + + +
      + Picture of 3-tier application architecture without DRCP showing connections from multiple application processes each going to a server process in the database tier +

      Without DRCP

      +
      + Picture of 3-tier application architecture with DRCP showing connections from multiple application processes going to a pool of server processes in the database tier +

      With DRCP

      +
      + +

      DRCP is useful when the database host machine does not have enough memory + to handle the number of database server processes required. If DRCP is + enabled, it is best used in conjunction with python-oracledb's connection + pooling. However, the default 'dedicated' server process model is generally + recommended if the database host memory is large enough. This can be with or + without a python-oracledb connection pool, depending on the connection + rate.

      + +

      Batch scripts doing long running jobs should generally use dedicated + connections. Both dedicated and DRCP servers can be used together in the same + application or database.

      -
    • -

      2.3 Creating a DRCP Connection

      - -

      Database Resident Connection Pooling allows multiple Python processes on multiple machines to share a small pool of database server processes.

      - -

      Below left is a diagram without DRCP. Every application standalone connection (or python-oracledb connection-pool connection) has its own database server process. Standalone application connect() and close calls require the expensive create and destroy of those database server processes. - Python-oracledb connection pools reduce these costs by keeping database server processes open, but every python-oracledb connection pool will require its own set of database server processes, even if they are not doing database work: these idle server processes consume database host resources. Below right is a diagram with DRCP. Scripts and Python processes can share database servers from a pre-created pool of servers and return them when they are not in use. -

      - - - - - - -
      - Picture of 3-tier application architecture without DRCP showing connections from multiple application processes each going to a server process in the database tier -

      Without DRCP

      -
      - Picture of 3-tier application architecture with DRCP showing connections from multiple application processes going to a pool of server processes in the database tier -

      With DRCP

      -
      - -

      DRCP is useful when the database host machine does not have enough memory to handle the number of database server processes required. If DRCP is enabled, it is best used in conjunction with python-oracledb's connection pooling. - However, the default 'dedicated' server process model is generally recommended if the database host memory is large enough. This can be with or without a python-oracledb connection pool, depending on the connection rate.

      - -

      Batch scripts doing long running jobs should generally use dedicated connections. Both dedicated and DRCP servers can be used together in the same application or database.

      Start the Database Resident Connection Pool (DRCP)

      -

      If you are running a local or remote Oracle Database (that is not an ADB), start the DRCP pool. Note that the DRCP pool is started in an Oracle Autonomous Database by default.

      +

      If you are running a local or remote Oracle Database + (that is not Oracle Autonomous Database), start the DRCP pool. Note that the + DRCP pool is started in an Oracle Autonomous Database by default.

      -

      Run SQL*Plus with SYSDBA privileges, for example:

      +

      Run SQL*Plus with SYSDBA privileges, for example:

      -sqlplus -l sys/syspassword@localhost/orclcdb as sysdba
      +sqlplus -l sys/syspassword@localhost/free as sysdba
       
      -

      and execute the command:

      +

      and execute the command:

       execute dbms_connection_pool.start_pool()
       
      -

      Note: If you are using Oracle Database 21c,

      -

      Run show parameter enable_per_pdb_drcp in SQL*Plus.

      -

      If this shows TRUE,

      -

      then you will need to run the execute command in a pluggable database, not a container database.

      -

      Connect to the Oracle Database through DRCP

      +

      Note: If you are using Oracle Database 21c, run show parameter + enable_per_pdb_drcp in SQL*Plus. If this shows TRUE, then you will + need to run the execute command in a pluggable database, not a + container database.

      + +

      Connect to the Oracle Database through DRCP

      +

      Review the code contained in connect_drcp.py:

      @@ -705,15 +954,28 @@ 

      Connect to the Oracle Database through DRCP

      print("Database version:", con.version)
      -

      This is similar to connect.py but - ":pooled" is appended to the connection string, telling - the database to use a pooled server. A Connection Class "PYTHONDEMO" is also passed into the connect() method to allow grouping of database servers to applications. Note that with Autonomous Database, the connection string has a different form, see the ADB documentation.

      - -

      The "purity" of the connection is defined as the PURITY_SELF constant, meaning the session state (such as the default date format) might be retained between connection calls, giving performance benefits. Session information will be discarded if a pooled server is later reused by an application with a different connection class name.

      - -

      Applications that should never share session information should use a different connection class and/or use PURITY_NEW to force creation of a new session. This reduces overall scalability but prevents applications from misusing the session information. The default purity for connections created with connect() is PURITY_NEW.

      +

      This is similar to connect.py but + ":pooled" is appended to the connection string, + telling the database to use a pooled server. A Connection Class "PYTHONDEMO" + is also passed into the connect() method to allow grouping of + database servers to applications. Note that with Autonomous Database, the + connection string has a different form, see the ADB documentation.

      + +

      The "purity" of the connection is defined as the + PURITY_SELF constant, meaning the session state (such as the + default date format) might be retained between connection calls, giving + performance benefits. Session information will be discarded if a pooled + server is later reused by an application with a different connection class + name.

      + +

      Applications that should never share session information should use a + different connection class and/or use PURITY_NEW to force + creation of a new session. This reduces overall scalability but prevents + applications from misusing the session information. The default purity for + connections created with connect() is + PURITY_NEW.
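      As an illustrative sketch (the ":pooled" suffix and the PYTHONDEMO connection class are the ones used by connect_drcp.py above), a standalone DRCP connection that must not share session state could be requested like:

        con = oracledb.connect(user=db_config.user, password=db_config.pw,
                               dsn="localhost/freepdb1:pooled",
                               cclass="PYTHONDEMO", purity=oracledb.PURITY_NEW)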

      Run connect_drcp.py in a terminal window.

      @@ -723,14 +985,16 @@

      Connect to the Oracle Database through DRCP

    • -
    • -

      2.4 Connection pooling and DRCP

      +
    • +

      2.4 Connection pooling and DRCP

      -

      DRCP works well with python-oracledb's connection pooling. The - default purity for pooled connections is PURITY_SELF.

      +

      DRCP works well with python-oracledb's connection pooling. The default + purity for pooled connections is PURITY_SELF.

      -

      Edit connect_pool2.py, reset any changed pool options, and modify it to use DRCP:

      -
      +    

      Edit connect_pool2.py, reset any changed pool options, and + modify it to use DRCP:

      + +
       import oracledb
       import threading
       import db_config
      @@ -761,22 +1025,34 @@ 

      2.4 Connection pooling and DRCP

      print("All done!")
      -

      The script logic does not need to be changed to benefit from - DRCP connection pooling.

      +

      The script logic does not need to be changed to benefit from DRCP + connection pooling.

      -

      Run the script:

      +

      Run the script:

      -
      python connect_pool2.py
      +
      python connect_pool2.py
      -

      Optionally, you can run drcp_query.py to check the DRCP pool statistics.

      +

      Optionally, you can run drcp_query.py to check the DRCP + pool statistics.

      -
      python drcp_query.py
      +
      python drcp_query.py
      -

      This will prompt for the SYSTEM (or ADMIN user), the password, and the database connection string. For running the file, you will need to connect to the container database in Oracle Database v19 or lower. From Oracle Database 21c onwards, you can enable DRCP in pluggable databases.

      +

      This will prompt for the SYSTEM (or ADMIN user), the password, and the + database connection string. For running the file, you will need to connect + to the root container database in Oracle Database 19c or lower. From + Oracle Database 21c onwards, you can optionally enable DRCP in pluggable + databases.

      -

      Note that with ADB, this view does not contain rows, so running this script is not useful. For other Oracle Databases, the script shows the number of connection requests made to the pool since the database was started ("NUM_REQUESTS"), how many of those reused a pooled server's session ("NUM_HITS"), and how many had to create new sessions ("NUM_MISSES"). Typically the goal is a low number of misses.

      -

      If the file is run successfully, you should see something like

      -
      Looking at DRCP Pool stats...
      +    

      Note that with ADB, this view does not contain rows, so running this + script is not useful. For other Oracle Databases, the script shows the + number of connection requests made to the pool since the database was + started ("NUM_REQUESTS"), how many of those reused a pooled server's + session ("NUM_HITS"), and how many had to create new sessions + ("NUM_MISSES"). Typically the goal is a low number of misses.

      + +

      If the file is run successfully, you should see something like:

      + +
      Looking at DRCP Pool stats...
       
       (CCLASS_NAME, NUM_REQUESTS, NUM_HITS, NUM_MISSES)
       -------------------------------------------------
      @@ -784,46 +1060,52 @@ 

      2.4 Connection pooling and DRCP

      ('PYTHONDEMO.PYTHONDEMO', 4, 2, 2) ('SYSTEM.SHARED', 11, 0, 11) Done.
      -

      To see the pool configuration, you can query DBA_CPOOL_INFO.

      -
    • -
    • -

      2.5 More DRCP investigation

      +

      To see the pool configuration, you can query DBA_CPOOL_INFO.
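      If you prefer to query these views yourself, a minimal sketch (connected as a suitably privileged user, reusing the db_config_sys module from the setup steps) might look like:

        import oracledb
        import db_config_sys

        con = oracledb.connect(user=db_config_sys.user, password=db_config_sys.pw,
                               dsn=db_config_sys.dsn)
        with con.cursor() as cur:
            # Connection class statistics, as printed by drcp_query.py
            cur.execute("select cclass_name, num_requests, num_hits, num_misses"
                        " from v$cpool_cc_stats")
            for row in cur:
                print(row)
            # Overall DRCP pool configuration
            cur.execute("select status, minsize, maxsize from dba_cpool_info")
            print(cur.fetchone())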

      -

      To further explore the behaviors of python-oracledb connection pooling and DRCP pooling, you could try changing the purity to oracledb.PURITY_NEW to see the effect on the DRCP NUM_MISSES statistic.

      +
    • -

      Another experiement is to include the time module at the file - top:

      +
    • +

      2.5 More DRCP investigation

      -
      +    

      To further explore the behaviors of python-oracledb connection pooling + and DRCP pooling, you could try changing the purity to + oracledb.PURITY_NEW to see the effect on the DRCP NUM_MISSES + statistic.

      + +

      Another experiment is to include the time module at the + file top:

      + +
       import time
      -

      and add calls to time.sleep(1) in the code, for - example in the query loop. Then look at the way the threads execute. Use - drcp_query.sql to monitor the pool's behavior.

      +

      and add calls to time.sleep(1) in the code, for example in + the query loop. Then look at the way the threads execute. Use + drcp_query.sql to monitor the pool's behavior.

    3. Fetching Data

    - -

    Executing SELECT queries is the primary way to get data from Oracle Database. Documentation link for further reading: Executing SELECT queries is the primary way to get data from Oracle + Database. Documentation link for further reading: SQL Queries.

      -
    • 3.1 A simple query

      +
    • 3.1 A simple query

      -

      There are several functions you can use to query an Oracle database, but the basics of querying are always the same:

      +

      There are several functions you can use to query an Oracle database, + but the basics of querying are always the same:

      -

      1. Execute the statement.
      - 2. Bind data values (optional).
      - 3. Fetch the results from the database.

      +

      1. Execute the statement.
      + 2. Bind data values (optional).
      + 3. Fetch the results from the database.

      -

      Review the code contained in query2.py:

      +

      Review the code contained in query2.py:

      -
      +      
       import oracledb
       import db_config
       
      @@ -837,21 +1119,23 @@ 

      3. Fetching Data

      print("Department location:", loc)
      -

      The cursor() method opens a cursor for statements to use.

      +

      The cursor() method opens a cursor for statements to + use.

      -

      The execute() method parses and executes the statement.

      +

      The execute() method parses and executes the + statement.

      -

      The loop fetches each row from the cursor and unpacks the returned - tuple into the variables deptno, dname, - loc, which are then printed.

      +

      The loop fetches each row from the cursor and unpacks the returned + tuple into the variables deptno, dname, + loc, which are then printed.

      -

      Run the script in a terminal window:

      +

      Run the script in a terminal window:

      -
      python query2.py
      +
      python query2.py
      -

      The output is:

      +

      The output is:

      -
      Department number:  10
      +      
      Department number:  10
       Department name:  ACCOUNTING
       Department location: NEW YORK
       Department number:  20
      @@ -864,15 +1148,17 @@ 

      3. Fetching Data

      Department name: OPERATIONS Department location: BOSTON
      -
    • + -
    • 3.2 Using fetchone()

      +
    • +

      3.2 Using fetchone()

      -

      When the number of rows is large, the fetchall() call may use too much memory.

      +

      When the number of rows is large, the fetchall() call may + use too much memory.

      -

      Review the code contained in query_one.py:

      +

      Review the code contained in query_one.py:

      -
      +      
       import oracledb
       import db_config
       
      @@ -887,22 +1173,24 @@ 

      3. Fetching Data

      print(row)
      -

      This uses the fetchone() method to return just a single row as a - tuple. When called multiple time, consecutive rows are returned:

      +

This uses the fetchone() method to return just a single + row as a tuple. When called multiple times, consecutive rows are + returned:
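As an illustration, a minimal sketch of the same pattern (not the query_one.py file verbatim, but using the dept table from the tutorial schema) could be:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

cur.execute("select deptno, dname from dept order by deptno")

row = cur.fetchone()   # first row, or None if no rows remain
print(row)

row = cur.fetchone()   # the next row
print(row)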

      -

      Run the script in a terminal window:

      +

      Run the script in a terminal window:

      -
      python query_one.py
      +
      python query_one.py
      -

      The first two rows of the table are printed.

      +

      The first two rows of the table are printed.

      -
    • + -
    • 3.3 Using fetchmany()

      +
    • +

      3.3 Using fetchmany()

      -

      Review the code contained in query_many.py:

      +

      Review the code contained in query_many.py:

      -
      +      
       import oracledb
       import db_config
       
      @@ -915,32 +1203,46 @@ 

      3. Fetching Data

      print(res)
      -

      The fetchmany() method returns a list of tuples. By default the maximum number of rows that can be returned is specified by the cursor attribute arraysize (which defaults to 100). Here the numRows parameter specifies that three rows should be returned.

      +

      The fetchmany() method returns a list of tuples. By + default the maximum number of rows that can be returned is specified by + the cursor attribute arraysize (which defaults to 100). + Here the num_rows parameter specifies that three rows should + be returned.
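As a hedged sketch of this behavior (again using the dept table from the tutorial schema rather than query_many.py itself), the batch size can be passed directly to fetchmany():

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

cur.execute("select deptno, dname from dept order by deptno")

res = cur.fetchmany(3)   # at most three rows in this batch
print(res)

res = cur.fetchmany(3)   # the next batch of rows
print(res)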

      -

      Run the script in a terminal window:

      +

      Run the script in a terminal window:

      -
      python query_many.py
      +
      python query_many.py
      -

      The first three rows of the table are returned as a list - (Python's name for an array) of tuples.

      +

      The first three rows of the table are returned as a list (Python's + name for an array) of tuples.

      -

      You can access elements of the lists by position indexes. To see this, - edit the file and add:

      +

      You can access elements of the lists by position indexes. To see + this, edit the file and add:

      -
      +      
       print(res[0])    # first row
       print(res[0][1]) # second element of first row
       
      -
    • -
    • 3.4 Tuning with arraysize and prefetchrows

      +
    • + +
    • +

      3.4 Tuning with arraysize and prefetchrows

      + +

      This section demonstrates a way to improve query performance by + increasing the number of rows returned in each batch from Oracle Database + to the Python program.

      -

      This section demonstrates a way to improve query performance by increasing the number of rows returned in each batch from Oracle to the Python program.

      +

      Row prefetching and array fetching are internal buffering techniques + to reduce round-trips to the database. The difference is the code layer + that is doing the buffering, and when the buffering occurs.

      -

      Row prefetching and array fetching are internal buffering techniques to reduce round-trips to the database. The difference is the code layer that is doing the buffering, and when the buffering occurs.

      +

      The environment setup file has already + created the bigtab table with a large number of rows (to be used + by the query_arraysize.py file) by internally running the + sql script below:

      -

      The environment setup file has already created the bigtab table with a large number of rows (to be used by the query_arraysize.py file) by internally running the sql script below:

      -
      create table bigtab (mycol varchar2(20));
      +      
      create table bigtab (mycol varchar2(20));
       
       begin
        for i in 1..20000
      @@ -948,12 +1250,13 @@ 

      3. Fetching Data

    insert into bigtab (mycol) values (dbms_random.string('A',20));
  end loop;
end;
      -

      The setup file has also inserted around 20000 string values in the bigtab table.

      +

      The setup file has also inserted around 20000 string values in the + bigtab table.

      -

      Review the code contained in query_arraysize.py:

      +

      Review the code contained in query_arraysize.py:

      -
      +      
       import oracledb
       import time
       import db_config
      @@ -973,50 +1276,70 @@ 

      3. Fetching Data

      print(elapsed, "seconds")
      -

      This uses the 'time' module to measure elapsed time of the query. The prefetchrows and arraysize values are 100. This causes batches of 100 records at a time to be returned from the database to a cache in Python. - These values can be tuned to reduce the number of "round-trips" - made to the database, often reducing network load and reducing the number of context switches on the database server. The fetchone(), - fetchmany() and fetchall() methods will read from the cache before requesting more data from the database.

      +

      This uses the 'time' module to measure elapsed time of the query. The + prefetchrows and arraysize values are 100. This causes + batches of 100 records at a time to be returned from the database to a + cache in Python. These values can be tuned to reduce the number of + "round-trips" made to the database, often reducing network load + and reducing the number of context switches on the database server. The + fetchone(), fetchmany() and + fetchall() methods will read from the cache before + requesting more data from the database.

      -

      In a terminal window, run:

      +

      In a terminal window, run:

      + +
      python query_arraysize.py
      -
      python query_arraysize.py
      +

      Rerun a few times to see the average times.

      -

      Rerun a few times to see the average times.

      +

      Experiment with different prefetchrows and arraysize values. For + example, edit query_arraysize.py and change the arraysize + to:

      -

      Experiment with different prefetchrows and arraysize values. For example, edit query_arraysize.py and change the arraysize - to:

      +
      cur.arraysize = 2000
      -
      cur.arraysize = 2000
      +

      Rerun the script to compare the performance of different arraysize + settings.

      -

      Rerun the script to compare the performance of different - arraysize settings.

      +

      In general, larger array sizes improve performance. Depending on how + fast your system is, you may need to use different values than those + given here to see a meaningful time difference.

      -

      In general, larger array sizes improve performance. Depending on how fast your system is, you may need to use different values than those given here to see a meaningful time difference.

      +

      There is a time/space tradeoff for increasing the values. Larger + values will require more memory in Python for buffering the records. For + queries that return a very large number of rows, you may prefer to leave + the default prefetchrows value and only tune arraysize.

      -

      There is a time/space tradeoff for increasing the values. Larger values will require more memory in Python for buffering the records.

      +

      If you know the query returns a fixed number of rows, for example, 20 + rows, then set arraysize to 20 and prefetchrows to 21. The addition of + one extra row for prefetchrows prevents a round-trip to check for + end-of-fetch. The statement execution and fetch will take a total of one + round-trip. This minimizes the load on the database.

      -

      If you know the query returns a fixed number of rows, for example, 20 rows, then set arraysize to 20 and prefetchrows to 21. The addition of one extra row for prefetchrows prevents a round-trip to check for end-of-fetch. The statement execution and fetch will take a total of one round-trip. This minimizes the load on the database.

      +

      If you know a query only returns a few records, decrease the arraysize + from the default to reduce memory usage.

      -

      If you know a query only returns a few records, - decrease the arraysize from the default to reduce memory usage.
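To make the tuning concrete, here is a small sketch. It assumes the bigtab table created by the setup file; the values 20 and 21 follow the fixed-row guideline above and are otherwise illustrative:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

# For a query known to return about 20 rows
cur.prefetchrows = 21   # one extra row avoids a round-trip to detect end-of-fetch
cur.arraysize = 20

cur.execute("select mycol from bigtab fetch first 20 rows only")
rows = cur.fetchall()
print(len(rows), "rows fetched")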

    4. Binding Data

    -

    Bind variables enable you to re-execute statements with new data values - without the overhead of re-parsing the statement. Binding improves code reusability, improves application scalability, and can reduce the risk of SQL injection attacks. Using bind variables is strongly recommended. +

    Bind variables enable you to re-execute statements with new data values +without the overhead of re-parsing the statement. Binding improves code +reusability, improves application scalability, and can reduce the risk of SQL +injection attacks. Using bind variables is strongly recommended. Documentation link for further reading: Using Bind Variables.

    +href="https://python-oracledb.readthedocs.io/en/latest/user_guide/bind.html" +>Using Bind Variables
    .

    -
      +
        -
      • 4.1 Binding in queries

        +
      • +

        4.1 Binding in queries

        -

        Review the code contained in bind_query.py:

        +

        Review the code contained in bind_query.py:

        -
        +    
         import oracledb
         import db_config
         
        @@ -1034,31 +1357,36 @@ 

        4. Binding Data

        print(res)
        -

        The statement contains a bind variable ":id" placeholder. - The statement is executed twice with different values for the - WHERE clause.

        +

        The statement contains a bind variable ":id" placeholder. + The statement is executed twice with different values for the + WHERE clause.

        -

        From a terminal window, run:

        +

        From a terminal window, run:

        -
        python bind_query.py
        +
        python bind_query.py
        -

        The output shows the details for the two departments.

        +

        The output shows the details for the two departments.

        -

        An arbitrary number of named arguments can be used in an - execute() call. Each argument name must match a bind - variable name. Alternatively, instead of passing multiple arguments you - could pass a second argument to execute() that is a sequence - or a dictionary. Later examples show these syntaxes.

        +

        An arbitrary number of named arguments can be used in an + execute() call. Each argument name must match a bind variable + name. Alternatively, instead of passing multiple arguments you could pass + a second argument to execute() that is a sequence or a + dictionary. Later examples show these syntaxes.

        -

        To bind a database NULL, use the Python value None.

        +

        To bind a database NULL, use the Python value None.
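For example, a hedged sketch inserting a NULL into the mytab table created by the setup file (the id value is illustrative) is:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

# None is bound as a database NULL in the data column
cur.execute("insert into mytab (id, data) values (:1, :2)", [199, None])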

        -

        python-oracledb uses a cache of executed statements. As long as the statement you pass to execute() is in that cache, you can use different bind values and still avoid a full statement parse. The statement cache size is configurable for each connection. To see the default statement cache size, edit bind_query.py and add a line at the end:

        +

        python-oracledb uses a cache of executed statements. As long as the + statement you pass to execute() is in that cache, you can use + different bind values and still avoid a full statement parse. The + statement cache size is configurable for each connection. To see the + default statement cache size, edit bind_query.py and add a + line at the end:

         print(con.stmtcachesize)
         
        -

        Re-run the file.

        +

        Re-run the script.

        You would set the statement cache size to the number of unique statements commonly executed in your applications.
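If you want to change the size, a minimal sketch (the value 40 is only an example) is:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)

print(con.stmtcachesize)   # the current statement cache size
con.stmtcachesize = 40     # resize the cache for this connection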

        @@ -1067,11 +1395,16 @@

        4. Binding Data

      • 4.2 Binding in inserts

        -

        The environment setup file has already created the mytab table (to be used by the bind_insert.py file) by internally running the sql script below:

        -
        create table mytab (id number, data varchar2(20), constraint my_pk primary key (id))
        -

        Now, review the code contained in bind_insert.py:

        +

        The environment setup file has already + created the mytab table (to be used by the + bind_insert.py file) by internally running the sql script + below:

        -
        +      
        create table mytab (id number, data varchar2(20), constraint my_pk primary key (id))
        + +

        Now, review the code contained in bind_insert.py:

        + +
         import oracledb
         import db_config
         
        @@ -1092,27 +1425,31 @@ 

        4. Binding Data

res = cur2.fetchall()
print(res)
        -

        The 'rows' array contains the data to be inserted into the mytab table created earlier.

        +

        The 'rows' array contains the data to be inserted into + the mytab table created earlier.

        -

        The executemany() call inserts all rows. This call uses "array binding", which is an efficient way to - insert multiple records.

        +

        The executemany() call inserts all rows. This call uses + "array binding", which is an efficient way to insert multiple + records.

        -

        The final part of the script queries the results back and displays them as a list of tuples.

        +

        The final part of the script queries the results back and displays + them as a list of tuples.

        From a terminal window, run:

        python bind_insert.py
        -

        The new results are automatically rolled back at the end of - the script. So, re-running the script will always show the same number of - rows in the table.

        +

        The new results are automatically rolled back at the end of the + script. Re-running the script will always show the same number of rows in + the table. Committing data is discussed later.

      • -
      • 4.3 Batcherrors

        +
      • +

        4.3 Batcherrors

        -

        The Batcherrors features allows invalid data to be identified - while allowing valid data to be inserted.

        +

The Batcherrors feature allows invalid data to be + identified while allowing valid data to be inserted.

        Edit the data values in bind_insert.py and create a row with a duplicate key:

        @@ -1135,7 +1472,7 @@

        4. Binding Data

        Edit the file again and enable batcherrors like:

        -
        +      
         import oracledb
         import db_config
         
        @@ -1161,38 +1498,51 @@ 

        4. Binding Data

        print(res)
        -

        Run the file:

        +

        Run the script:

        python bind_insert.py
        -

        The new code shows the offending duplicate row: "ORA-00001: unique constraint (PYTHONDEMO.MY_PK) violated at row offset 6". - This indicates the 6th data value (counting from 0) had a - problem.

        +

        The new code shows the offending duplicate row: "ORA-00001: unique + constraint (PYTHONDEMO.MY_PK) violated at row offset 6". This indicates + the 6th data value (counting from 0) had a problem.

        The other data gets inserted and is queried back.
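A condensed sketch of the pattern (not bind_insert.py itself; the sample data is illustrative and includes a deliberate duplicate key) is:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

rows = [(1, "First"), (2, "Second"), (2, "Duplicate key")]

cur.executemany("insert into mytab (id, data) values (:1, :2)", rows, batcherrors=True)

# The valid rows are inserted; each failure is reported with its row offset
for error in cur.getbatcherrors():
    print("Error:", error.message, "at row offset", error.offset)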

        -

        At the end of the script, python-oracledb will roll back an uncommitted transaction. If you want to commit results, you can use:

        +

        At the end of the script, python-oracledb will roll back an + uncommitted transaction. If you want to commit results, you can use:

        -
        con.commit()
        +
        con.commit()

        To force python-oracledb to roll back the transaction, use:

        -
        con.rollback()
        +
        con.rollback()
        -
      • +

      5. PL/SQL

      -

      PL/SQL is Oracle's procedural language extension to SQL. PL/SQL procedures and functions are stored and run in the database. Using PL/SQL lets all database applications reuse logic, no matter how the application accesses the database. Many data-related operations can be performed in PL/SQL faster than extracting the data into a program (for example, Python) and then processing it. Documentation link for further reading: PL/SQL Execution.

      +

      PL/SQL is Oracle's procedural language extension to SQL. PL/SQL procedures + and functions are stored and run in the database. Using PL/SQL lets all + database applications reuse logic, no matter how the application accesses the + database. Many data-related operations can be performed in PL/SQL faster than + extracting the data into a program (for example, Python) and then processing + it. Documentation link for further reading: PL/SQL + Execution.

      • 5.1 PL/SQL function

        -

        The environment setup file has already created the new table named ptab and a PL/SQL stored function myfunc to insert a row into ptab and return double the inserted value by internally running the sql script below:

        -
        create table ptab (mydata varchar(20), myid number);
        +
        +      

        The environment setup file has already + created the new table named ptab and a PL/SQL stored + function myfunc to insert a row into + ptab and return double the inserted value. The script to + create the table and function is below (you do not need to run this):

        + +
        create table ptab (mydata varchar(20), myid number);
         
         create or replace function myfunc(d_p in varchar2, i_p in number) return number as
           begin
        @@ -1201,10 +1551,12 @@ 

        5.1 PL/SQL function

end;
/
        -

        The myfunc PL/SQL stored function will be used by the plsql_func.py file below.

        +

        The myfunc PL/SQL stored function will be used by the + plsql_func.py file below.

        +

        Review the code contained in plsql_func.py:

        -
        +      
         import oracledb
         import db_config
         
        @@ -1216,7 +1568,11 @@ 

        5.1 PL/SQL function

        This uses the callfunc() method to execute the function. - The second parameter is the type of the returned value. It should be one of the types supported by python-oracledb or one of the type constants defined by python-oracledb (such as oracledb.NUMBER). The two PL/SQL function parameters are passed as a tuple, binding them to the function parameter arguments.

        + The second parameter is the type of the returned value. It should be one + of the types supported by python-oracledb or one of the type constants + defined by python-oracledb (such as oracledb.NUMBER). The two + PL/SQL function parameters are passed as a tuple, binding them to the + function parameter arguments.
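A hedged sketch of the call (abbreviated from plsql_func.py; the argument values are illustrative) is:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

# myfunc(d_p in varchar2, i_p in number) return number
res = cur.callfunc("myfunc", oracledb.NUMBER, ("abc", 2))
print(res)   # the function returns double the number that was inserted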

        From a terminal window, run:

        @@ -1228,18 +1584,23 @@

        5.1 PL/SQL function

      • 5.2 PL/SQL procedures

        -

        The environment setup file has already created a PL/SQL - stored procedure myproc to accept two parameters by internally running the sql script below:

        -
        create or replace procedure myproc(v1_p in number, v2_p out number) as
        +      

        The environment setup file has already + created a PL/SQL stored procedure myproc to accept two + parameters by running the SQL script below:

        + +
        create or replace procedure myproc(v1_p in number, v2_p out number) as
         begin
           v2_p := v1_p * 2;
         end;
         /
        -

        The second parameter contains an OUT return value.The myproc PL/SQL stored procedure will be used by the plsql_proc.py file below.

        -

        Review the code contained in plsql_proc.py:

        +

The second parameter contains an OUT return value. The + myproc PL/SQL stored procedure will be used by the + plsql_proc.py file below.

        -
        +      

        Review the code contained in plsql_proc.py:

        + +
         import oracledb
         import db_config
         
        @@ -1260,20 +1621,22 @@ 

        5.1 PL/SQL function

        In a terminal window, run:

        -
        python plsql_proc.py
        +
        python plsql_proc.py

        The getvalue() method displays the returned value.
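A minimal sketch of calling the procedure (abbreviated from plsql_proc.py; the input value 123 is illustrative) is:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

out_val = cur.var(int)                   # bind variable for the OUT parameter
cur.callproc("myproc", (123, out_val))   # myproc doubles the first argument
print(out_val.getvalue())                # prints 246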

        +
      -

      6. Type Handlers

      +

      6. Type Handlers

      -

      Type handlers enable applications to alter data that is fetched from, or sent to, the database. Documentation links for further reading: Changing Fetched Data Types with Output Type Handlers and Changing Bind Data Types using an Input Type Handler.

      +

      Type handlers enable applications to alter data that is fetched from, or + sent to, the database. Documentation links for further reading: Changing Fetched Data Types with Output Type Handlers and Changing Bind Data Types using an Input Type Handler.

      • @@ -1281,18 +1644,17 @@

        6.1 Basic output type handler

        Output type handlers enable applications to change how data is fetched from the database. For example, numbers can be - returned as strings or decimal objects. LOBs can be returned as - strings or bytes.

        + returned as strings or decimal objects.

        A type handler is enabled by setting the outputtypehandler attribute on either a cursor or - the connection. If set on a cursor, it only affects queries executed + a connection. If set on a cursor, it only affects queries executed by that cursor. If set on a connection, it affects all queries executed on cursors created by that connection.

        Review the code contained in type_output.py:

        -
        +      
         import oracledb
         import db_config
         
        @@ -1313,9 +1675,9 @@ 

        6.1 Basic output type handler

        Add an output type handler to the bottom of the file:

        -
        +      
         def ReturnNumbersAsStrings(cursor, metadata):
        -    if metdata.type_code is oracledb.DB_TYPE_NUMBER:
        +    if metadata.type_code is oracledb.DB_TYPE_NUMBER:
                 return cursor.var(str, 9, cursor.arraysize)
         
         print("Output type handler output...")
        @@ -1357,9 +1719,9 @@ 

        6.1 Basic output type handler

        print("Value:", value, "* 3 =", value * 3)
        -

        Run the file:

        +

        Run the script:

        -
        python type_converter.py
        +
        python type_converter.py

        The output is like:

        @@ -1367,8 +1729,7 @@

        6.1 Basic output type handler

        Edit the file and add a type handler that uses a Python decimal converter:

        -
        -import oracledb
        +      
        import oracledb
         import decimal
         import db_config
         
        @@ -1376,7 +1737,7 @@ 

        6.1 Basic output type handler

cur = con.cursor()

def ReturnNumbersAsDecimal(cursor, metadata):
-    if metadata.type_code is oracledb.NUMBER:
+    if metadata.type_code is oracledb.DB_TYPE_NUMBER:
        return cursor.var(str, 9, cursor.arraysize, outconverter=decimal.Decimal)

cur.outputtypehandler = ReturnNumbersAsDecimal

@@ -1390,7 +1751,7 @@

        6.1 Basic output type handler

        from decimal.Decimal is returned in the output tuple.

        -

        Run the file again:

        +

        Run the script again:

        python type_converter.py
        @@ -1398,22 +1759,29 @@

        6.1 Basic output type handler

        Value: 0.1 * 3 = 0.3
        -

        The code above demonstrates the use of outconverter, but in this particular case, python-oracledb offers a simple convenience attribute to do the same conversion:

        +

        The code above demonstrates the use of outconverter, but in this + particular case, python-oracledb offers a simple convenience attribute to + do this exact conversion:

        -
        +    
         import oracledb
         
         oracledb.defaults.fetch_decimals = True
        -
      • + -
      • -

        6.3 Input type handlers

        +
      • -

        Input type handlers enable applications to change how data is bound to statements, or to enable new types to be bound directly without having to be converted individually.

        +
      • +

        6.3 Input type handlers

        -

        Review type_input.py, with the addition of a new class and converter (shown in bold):

        +

        Input type handlers enable applications to change how data is bound to + statements, or to enable new types to be bound directly without having to + be converted individually.

        -
        +    

        Review type_input.py, which includes a new class and + converter (shown in bold):

        + +
         import oracledb
         import db_config
         import json
        @@ -1468,7 +1836,7 @@ 

        6.3 Input type handlers

def input_type_handler(cursor, value, num_elements):
    if isinstance(value, Building):
-        return cursor.var(oracledb.STRING, arraysize=num_elements,
+        return cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=num_elements,
                          inconverter=building_in_converter)

@@ -1487,26 +1855,31 @@

        6.3 Input type handlers

        print("Building Details in JSON format:", string_col)
        -

        In the new file, a Python class Building is defined, which holds basic information about a building. +

In the file, a Python class Building is defined to hold + basic information about a building. The Building class is used + later in the code to create a Python instance:

        -The Building class is used lower in the code to create a Python instance:

        - -
        +    
         building = Building(1, "The First Building", 5)
        -

        which is then directly bound into the INSERT statement like

        +

        which is then directly bound into the INSERT statement like:

        -
        cur.execute("insert into BuildingTable (ID, BuildingDetails) values (:1, :2)", (building.building_id, building))
        +
        cur.execute("insert into BuildingTable (ID, BuildingDetails) values (:1, :2)", (building.building_id, building))
        -

        The mapping between Python and Oracle objects is handled in -building_in_converter which creates -an Oracle STRING object from the Building Python object in a JSON format. The building_in_converter method is called by the input type handler input_type_handler,whenever an instance of Building is inserted with the cursor.

        +

        The mapping between Python and Oracle objects is handled in + building_in_converter which creates an Oracle character object + from the Building Python object in a JSON format. The + building_in_converter method is called by the input type + handler input_type_handler whenever an instance of + Building is inserted with the cursor.

        -

        To confirm the behavior, run the file:

        +

        To confirm the behavior, run the script:

        -
        python type_input.py
        -

        You should see the following output:

        -
        Querying the row just inserted...
        +    
        python type_input.py
        + +

        You should see the following output:

        + +
        Querying the row just inserted...
         Building ID: 1
         Building Details in JSON format: {"building_id": 1, "description": "The First Building", "num_floors": 5}
        @@ -1514,7 +1887,7 @@

        6.3 Input type handlers

      -

      7. LOBs

      +

      7. LOBs

      Oracle Database "LOB" long objects can be streamed using a LOB locator, or worked with directly as strings or bytes. Documentation link for further reading: 7.1 Fetching a CLOB using a locator LOB Object. Methods on LOB include size() and read().

      -

      To see the output, run the file:

      +

      To see the output, run the script:

      -
      python clob.py
      +
      python clob.py
      -

      Edit the file and experiment reading chunks of data by giving start character position and length, such as clob.read(1,10).

      +

      Edit the file and experiment reading chunks of data by giving start + character position and length, such as clob.read(1,10).

      @@ -1571,9 +1945,10 @@

      7.2 Fetching a CLOB as a string

      For CLOBs small enough to fit in the application memory, it is much faster to fetch them directly as strings.

      -

      Review the code contained in clob_string.py. The differences from clob.py are shown in bold:

      +

      Review the code contained in clob_string.py. The + differences from clob.py are shown in bold:

      -
      +      
       import oracledb
       import db_config
       
      @@ -1599,71 +1974,337 @@ 

      7.2 Fetching a CLOB as a string

      print("CLOB data:", clobdata)
      -

      Setting oracledb.defaults.fetch_lobs to False causes python-oracledb to fetch the CLOB as a - string. Standard Python string functions such as len() can be used on the result.

      +

      Setting oracledb.defaults.fetch_lobs to False causes + python-oracledb to fetch the CLOB as a string. Standard Python string + functions such as len() can be used on the result.

      -

      The output is the same as for clob.py. To - check, run the file:

      +

      The output is the same as for clob.py. To check, run the + script:

      -
      python clob_string.py
      +
      python clob_string.py
    -

    8. Rowfactory functions

    +

    8. JSON

    -

    Rowfactory functions enable queries to return objects other than - tuples. They can be used to provide names for the various columns - or to return custom objects.

    +

    Oracle Database supports a JSON data type and also has powerful + functionality to work with relational data as if it were + JSON. Documentation link for further reading: Using + JSON Data.

      -
    • 8.1 Rowfactory for mapping column names

      +
    • 8.1 Inserting JSON

      -

      Review the code contained in rowfactory.py:

      +

      Review the code contained in json_insert.py:

      -
      -import collections
      -import oracledb
      +      
      import oracledb
       import db_config
       
      -con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
      +con = oracledb.connect(
      +    user=db_config.user, password=db_config.pw, dsn=db_config.dsn
      +)
       cur = con.cursor()
       
      -cur.execute("select deptno, dname from dept")
      -rows = cur.fetchall()
      +data = dict(name="Rod", dept="Sales", location="Germany")
      +inssql = "insert into jtab (id, json_data) values (:1, :2)"
      +cur.setinputsizes(None, oracledb.DB_TYPE_JSON)
      +cur.execute(inssql, [101, data])
      +
      -print('Array indexes:') -for row in rows: - print(row[0], "->", row[1]) +

      This inserts the Python dictionary into the json_data + column which is of type JSON.

      -print('Loop target variables:') -for c1, c2 in rows: - print(c1, "->", c2) +

      The setinputsizes() call specifies that binding the first + bind placeholder id (the value 101) should use + default numeric binding, and that the value for the second bind + placeholder (the dictionary data) will be treated as JSON + data.

      + +
    • + +
    • 8.2 Fetching JSON

      + +

      To fetch JSON, edit json_insert.py and add this at the + bottom:

      + +
      sql = "select c.json_data from jtab c"
      +for (j,) in cur.execute(sql):
      +    print(j)
       
      -

      This shows two methods of accessing result set items from a data row. The first uses array indexes like row[0]. The second uses loop target variables that take each row tuple's values.

      +

      Run the script:

      -

      Run the file:

      +
      python json_insert.py
      -
      python rowfactory.py
      +

      The inserted data is displayed. You can experiment with inserting + different dictionaries.

      -

      Both access methods gives the same results.

      +

      If you want to extract parts of a JSON document, you can use Oracle + Database "dot notation".

      -

      To use a rowfactory function, edit rowfactory.py and - add this code at the bottom:

      +

      Edit json_insert.py and add this at the + bottom:

      -
      -print('Rowfactory:')
      -cur.execute("select deptno, dname from dept")
      -cur.rowfactory = collections.namedtuple("MyClass", ["DeptNumber", "DeptName"])
      +      
      sql = """select c.json_data.location
      +         from jtab c
      +         offset 0 rows fetch next 1 rows only"""
      +for (j,) in cur.execute(sql):
      +    print(j)
      +
      + +

      Run the script:

      + +
      python json_insert.py
      + +

      Only the location of the inserted data is shown.

      + +

      Refer to the Database JSON Developer's Guide for more information about Oracle's + JSON support.

      + +
    • +
    + +

    9. VECTORs

    + +

    Oracle Database 23ai supports a VECTOR data type for artificial + intelligence and machine learning search operations. Documentation link + for further reading: Using VECTOR Data.

    + +

    This section uses the table vtab created as:

    + +
    +create table vtab (
    +  id number(9) not null primary key,
    +  v64 vector(3, float64));
    +
    + +

The column v64 represents a dense vector of dimension 3 which + stores 64-bit floating-point numbers. Oracle Database also supports 8-bit + signed integers, 8-bit unsigned integers, and 32-bit floating-point number + vector formats. Vectors can be dense or sparse.

    + +
      +
    • +

      9.1 Inserting a VECTOR

      + +

      Review the code contained in vector.py:

      + +
      import array
      +import oracledb
      +import db_config
      +
      +con = oracledb.connect(
      +    user=db_config.user, password=db_config.pw, dsn=db_config.dsn
      +)
      +cur = con.cursor()
      +
      +vector_data_64 = array.array("d", [11.25, 11.75, 11.5])
      +
      +cur.execute(
      +    "insert into vtab (id, v64) values (:1, :2)", [101, vector_data_64],
      +)
      + +

      Python-oracledb uses the Python array.array() type to represent + vectors by default.

      + +
    • + +
    • +

      9.2 Fetching a VECTOR

      + +

      Edit vector.py and add this code at the bottom:

      + +
      cur.execute("select v64 from vtab")
      +
      +for (v,) in cur:
      +    print(v)
      +    print(type(v))
      +
      + +

      Run the script:

      + +
      python vector.py
      + +

      This displays the inserted vector.

      + +

      Each non-sparse vector is represented as an array.array type. + Sparse vectors (not shown) would be represented as + oracledb.SparseVector() instances.

      + +
    • + +
    • +

      9.3 Working with VECTORs and NumPy

      + +

The NumPy package is one of the most popular libraries for data analysis. It + is simple to use with the Oracle Database VECTOR data type.

      + +

      To run the example in this section, the package needs to be + installed:

      + +
      python -m pip install numpy --upgrade
      + +

      Review the code contained in vector_numpy.py:

      + +
      +import array
      +import numpy
      +import oracledb
      +import db_config
      +
      +con = oracledb.connect(
      +    user=db_config.user, password=db_config.pw, dsn=db_config.dsn
      +)
      +cur = con.cursor()
      +
      +vector_data_64 = numpy.array([11.25, 11.75, 11.5], dtype=numpy.float64)
      +
      +cur.execute(
      +    "insert into vtab (id, v64) values (:1, :2)", [202, vector_data_64],
      +)
      +
      +for (v,) in cur.execute("select v64 from vtab"):
      +    print(v)
      +    print(type(v))
      +
      + +

      As it is, this will not run because the NumPy data needs to be converted + to a Python array for python-oracledb.

      + +

      Edit vector_numpy.py and insert an inconverter and an input + type handler to convert the NumPy data into the correct format for + insertion.

      + +

      To convert queried vectors into NumPy format, also add an outconverter + and an output type handler:

      + +
      import array
      +import numpy
      +import oracledb
      +import db_config
      +
      +con = oracledb.connect(
      +    user=db_config.user, password=db_config.pw, dsn=db_config.dsn
      +)
      +cur = con.cursor()
      +
      +def numpy_converter_in(value):
      +    if value.dtype == numpy.float64:
      +        dtype = "d"
      +    elif value.dtype == numpy.float32:
      +        dtype = "f"
      +    elif value.dtype == numpy.uint8:
      +        dtype = "B"
      +    else:
      +        dtype = "b"
      +    return array.array(dtype, value)
      +
      +def input_type_handler(cur, value, arraysize):
      +    if isinstance(value, numpy.ndarray):
      +        return cur.var(
      +            oracledb.DB_TYPE_VECTOR,
      +            arraysize=arraysize,
      +            inconverter=numpy_converter_in,
      +        )
      +
      +con.inputtypehandler = input_type_handler
      +
      +def numpy_converter_out(value):
      +    return numpy.array(value, copy=False, dtype=value.typecode)
      +
      +def output_type_handler(cur, metadata):
      +    if metadata.type_code is oracledb.DB_TYPE_VECTOR:
      +        return cur.var(
      +            metadata.type_code,
      +            arraysize=cur.arraysize,
      +            outconverter=numpy_converter_out,
      +        )
      +
      +con.outputtypehandler = output_type_handler
      +
      +vector_data_64 = numpy.array([11.25, 11.75, 11.5], dtype=numpy.float64)
      +
      +cur.execute(
      +    "insert into vtab (id, v64) values (:1, :2)", [202, vector_data_64],
      +)
      +
      +for (v,) in cur.execute("select v64 from vtab"):
      +    print(v)
      +    print(type(v))
      +
      + +

Run the script:

      + +
      python vector_numpy.py
      + +

      Each vector queried is represented as a numpy.ndarray type.

      + +
    • +
    + +

    10. Rowfactory functions

    + +

    Rowfactory functions enable queries to return objects other than + tuples. They can be used to provide names for the various columns + or to return custom objects.

    + +
      +
    • 10.1 Rowfactory for mapping column names

      + +

      Review the code contained in rowfactory.py:

      + +
      +import collections
      +import oracledb
      +import db_config
      +
      +con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
      +cur = con.cursor()
      +
      +cur.execute("select deptno, dname from dept")
      +rows = cur.fetchall()
      +
      +print('Array indexes:')
      +for row in rows:
      +    print(row[0], "->", row[1])
      +
      +print('Loop target variables:')
      +for c1, c2 in rows:
      +    print(c1, "->", c2)
      +
      + +

      This shows two methods of accessing result set items from a data row. The + first uses array indexes like row[0]. The second uses loop + target variables that take each row tuple's values.

      + +

      Run the script:

      + +
      python rowfactory.py
      + +

Both access methods give the same results.

      + +

      To use a rowfactory function, edit rowfactory.py and + add this code at the bottom:

      + +
      +print('Rowfactory:')
      +cur.execute("select deptno, dname from dept")
      +cur.rowfactory = collections.namedtuple("MyClass", ["DeptNumber", "DeptName"])
       
       rows = cur.fetchall()
       for row in rows:
           print(row.DeptNumber, "->", row.DeptName)
       
      -

      This uses the Python factory function - namedtuple() to create a subclass of tuple that allows access to the elements via indexes or the given field names.

      +

      This uses the Python factory function namedtuple() to + create a subclass of tuple that allows access to the elements via indexes + or the given field names.

      The print() function shows the use of the new named tuple fields. This coding style can help reduce coding @@ -1679,17 +2320,17 @@

      8. Rowfactory functions

    -

    9. Subclassing connections and cursors

    +

    11. Subclassing connections and cursors

    -

Subclassing enables applications to "hook" connection and cursor - creation. This can be used to alter or log connection and execution - parameters, and to extend python-oracledb functionality. Documentation link for -further reading: Subclassing enables applications to "hook" connection and cursor creation. + This can be used to alter or log connection and execution parameters, and to + extend python-oracledb functionality. Documentation link for further + reading: Application Tracing.

      -
    • 9.1 Subclassing connections

      +
    • 11.1 Subclassing connections

      Review the code contained in subclass.py:

      @@ -1711,18 +2352,20 @@

      9. Subclassing connections and cursors

      print("Number of rows:", count) -

      This creates a new class "MyConnection" that inherits from the python-oracledb Connection class. The __init__ method is - invoked when an instance of the new class is created. It prints a message and calls the base class, passing the connection credentials.

      +

      This creates a new class "MyConnection" that inherits from the + python-oracledb Connection class. The __init__ method is + invoked when an instance of the new class is created. It prints a message + and calls the base class, passing the connection credentials.

      In the "normal" application, the application code:

      con = MyConnection()

      does not need to supply any credentials, as they are embedded in the - custom subclass. All the python-oracledb methods such as cursor() are - available, as shown by the query.

      + custom subclass. All the python-oracledb methods such as + cursor() are available, as shown by the query.
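A condensed sketch of the idea (not the subclass.py file verbatim) is:

import oracledb
import db_config

class MyConnection(oracledb.Connection):
    # Log connection creation and embed the credentials in the subclass
    def __init__(self):
        print("Connecting to the database")
        super().__init__(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)

con = MyConnection()   # no credentials needed at the call site
cur = con.cursor()     # the usual python-oracledb methods are available
cur.execute("select count(*) from dept")
(count,) = cur.fetchone()
print("Number of rows:", count)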

      -

      Run the file:

      +

      Run the script:

      python subclass.py
      @@ -1730,7 +2373,7 @@

      9. Subclassing connections and cursors

    • -
    • 9.2 Subclassing cursors

      +
    • 11.2 Subclassing cursors

      Edit subclass.py and extend the cursor() method with a new MyCursor class:

      @@ -1769,12 +2412,16 @@

      9. Subclassing connections and cursors

      print("Number of rows:", count) -

      When the application gets a cursor from the -MyConnection class, the new cursor() method returns an instance of our new MyCursor class.

      +

      When the application gets a cursor from the MyConnection class, +the new cursor() method returns an instance of our new +MyCursor class.

      -

      The "application" query code remains unchanged. The new execute() and fetchone() methods of the MyCursor class get invoked. They do some logging and invoke the parent methods to do the actual statement execution.

      +

      The "application" query code remains unchanged. The new +execute() and fetchone() methods of the +MyCursor class get invoked. They do some logging and invoke the +parent methods to do the actual statement execution.

      -

      To confirm this, run the file again:

      +

      To confirm this, run the script again:

      python subclass.py
      @@ -1782,140 +2429,12 @@

      9. Subclassing connections and cursors

    -

    10. Python-oracledb Thick mode

    -

    All the above examples use python-oracledb in thin mode, but there are certain features which are only available in the thick mode of the python-oracledb driver. The upcoming sections show some of these. Note that you can also run all the earlier examples in thick mode by just changing the import line in examples from import db_config to import db_config_thick as db_config.

    - -

    The following sections assume you have installed the tutorial schema as shown at the tutorial start.

    - -
      -
    • -

      10.1 Review the Oracle Client library path

      - -

      You additionally need to make Oracle Client libraries available. Follow the documentation on Installing python-oracledb.

      - -

      When you have installed Oracle Client libraries, review the library path settings in db_config_thick.py file. If python-oracledb cannot locate Oracle Client libraries, then your applications will fail with an error like "DPI-1047: Cannot locate a 64-bit Oracle Client library". For our examples, we are using Oracle Instant Client libraries.

      - -
      -# On Linux, this must be None.
      -# Instead, the Oracle environment must be set before Python starts.
      -instant_client_dir = None
      -
      -# On Windows, if your database is on the same machine, comment these lines out
      -# and let instant_client_dir be None.  Otherwise, set this to your Instant
      -# Client directory.  Note the use of the raw string r"...", which allows backslashes to
      -# be used as directory separators.
      -if platform.system() == "Windows":
      -    instant_client_dir = r"C:\Oracle\instantclient_19_14"
      -
      -# On macOS set the directory to your Instant Client directory
      -if platform.system() == "Darwin":
      -    instant_client_dir = os.environ.get("HOME")+"/Downloads/instantclient_23_3"
      -
      -# You must always call init_oracle_client() to use thick mode
      -oracledb.init_oracle_client(lib_dir=instant_client_dir)
      - -

      Important! Calling the init_oracle_client() function enables the thick mode of python-oracledb. Once python-oracledb is in thick mode, you cannot return to thin mode without removing calls to init_oracle_client() and restarting the application.

      -

      Edit db_config_thick.py and set instant_client_dir to None or to a valid path according to the following notes:

      - -
        -
      • - -

        If you are on macOS (Intel x86) or Windows, and you have installed Oracle Instant Client libraries because your database is on a remote machine, then set instant_client_dir to the path of the Instant Client libraries.

        -
      • - -
      • - -

        If you are on Windows and have a local database installed, then comment out the two Windows lines, so that instant_client_dir remains None.

        - -
      • - -
      • - -

        In all other cases (including Linux with Oracle Instant Client), make sure that instant_client_dir is set to None. In these cases you must make sure that the Oracle libraries from Instant Client or your ORACLE_HOME are in your system library search path before you start Python. On Linux, the path can be configured with ldconfig or with the LD_LIBRARY_PATH environment variable.

        -
      • -
      -
    • -
    • 10.2 Review the configuration files for thick mode

      - -

      Review db_config_thick.py (thick mode), and db_config.sql files in the tutorial directory. These are included in other Python and SQL files for setting up the database connection.

      - -

      Edit db_config_thick.py file and change the default values to match the connection information for your environment. Alternatively, you can set the given environment variables in your terminal window. For example, the default username is "pythondemo" unless the environment variable "PYTHON_USER" contains a different username. The default connection string is for the 'orclpdb' database service on the same machine as Python. In Python Database API terminology, the connection string parameter is called the "data source name" or "dsn". Using environment variables is convenient because you will not be asked to re-enter the password when you run scripts:

      - -
      -user = os.environ.get("PYTHON_USER", "pythondemo")
      -
      -dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/orclpdb")
      -
      -pw = os.environ.get("PYTHON_PASSWORD")
      -if pw is None:
      -    pw = getpass.getpass("Enter password for %s: " % user)
      -
      - -

      Also, change the default username and connection string in the SQL configuration file db_config.sql:

      - -
      --- Default database username
      -def user = "pythondemo"
      -
      --- Default database connection string
      -def connect_string = "localhost/orclpdb"
      -
      --- Prompt for the password
      -accept pw char prompt 'Enter database password for &user: ' hide
      -
      - - -

      The tutorial instructions may need adjusting, depending on how you have set up your environment.

      -
    • -
    - -

    The following sections are specific to the python-oracledb thick modes in this release of python-oracledb.

    - -

    11. Scrollable cursors

    - -

    Scrollable cursors enable python-oracledb thick mode applications to move backwards as well as forwards in query results. They can be used to skip rows as well as move to a particular row.

    -
      -
    • 11.1 Working with scrollable cursors

      -

      Review the code contained in query_scroll.py:

      - -
      -import oracledb
      -import db_config_thick as db_config
      -
      -con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
      -cur = con.cursor(scrollable=True)
      -
      -cur.execute("select * from dept order by deptno")
      -
      -cur.scroll(2, mode="absolute")  # go to second row
      -print(cur.fetchone())
      -
      -cur.scroll(-1)                    # go back one row
      -print(cur.fetchone())
      -
      - -

      Run the script in a terminal window:

      - -
      python query_scroll.py
      - -

      Edit query_scroll.py and experiment with different - scroll options and orders, such as:

      - -
      cur.scroll(1)  # go to next row
      -print(cur.fetchone())
      -
      -cur.scroll(mode="first")  # go to first row
      -print(cur.fetchone())
      - -

      Try some scroll options that go beyond the number of rows in the resultset.

      -
    • -
    - -

    12. Binding named objects

    +

10. Binding named objects

    -

    Python-oracledb's thick mode can fetch and bind named object types such as Oracle's Spatial Data Objects (SDO).

    +

    Python-oracledb can fetch and bind named object types such as Oracle's + Spatial Data Objects (SDO).

    -

    The SDO definition includes the following attributes:

    +

    The SDO definition includes the following attributes:

      Name                                      Null?    Type
    @@ -1926,13 +2445,16 @@ 

    12. Binding named objects

    SDO_ELEM_INFO MDSYS.SDO_ELEM_INFO_ARRAY SDO_ORDINATES MDSYS.SDO_ORDINATE_ARRAY
    +
      -
    • 12.1 How to bind named objects

      -

      Review the code contained in bind_sdo.py:

      +
    • +

      12.1 How to bind named objects

      + +

      Review the code contained in bind_sdo.py:

       import oracledb
      -import db_config_thick as db_config
      +import db_config
       
       con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
       cur = con.cursor()
      @@ -1971,9 +2493,14 @@ 

      12. Binding named objects

      cur.execute("select id, geometry from testgeometry"); for row in cur: print(row)
      -

      This uses gettype() to get the database types of the SDO and its object attributes. The newobject() calls create Python representations of those objects. The python object atributes are then set. Oracle VARRAY types such as SDO_ELEM_INFO_ARRAY are set with extend().

      -

      Run the file:

      +

This uses gettype() to get the database types of the SDO +and its object attributes. The newobject() calls create +Python representations of those objects. The Python object attributes are +then set. Oracle VARRAY types such as SDO_ELEM_INFO_ARRAY are set with +extend().
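A condensed sketch of that pattern (abbreviated from bind_sdo.py; the attribute values are illustrative and testgeometry is the table created earlier in that file) is:

import oracledb
import db_config

con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
cur = con.cursor()

# Look up the database object types
obj_type = con.gettype("MDSYS.SDO_GEOMETRY")
element_info_type = con.gettype("MDSYS.SDO_ELEM_INFO_ARRAY")
ordinate_type = con.gettype("MDSYS.SDO_ORDINATE_ARRAY")

# Create Python representations of the objects and set their attributes
obj = obj_type.newobject()
obj.SDO_GTYPE = 2003
obj.SDO_ELEM_INFO = element_info_type.newobject()
obj.SDO_ELEM_INFO.extend([1, 1003, 3])
obj.SDO_ORDINATES = ordinate_type.newobject()
obj.SDO_ORDINATES.extend([1, 1, 5, 7])

cur.execute("insert into testgeometry values (:1, :2)", (1, obj))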

      + +

      Run the script:

      python bind_sdo.py
      @@ -1981,9 +2508,9 @@

      12. Binding named objects

      (1, <oracledb.Object MDSYS.SDO_GEOMETRY at 0x104a76230>)
      -

      To show the attribute values, edit the query code section at -the end of the file. Add a new method that traverses the object. The file below the existing comment "# (Change below here)") -should look like:

      +

To show the attribute values, edit the query code section at the end of the +file. Add a new method that traverses the object. The file below the existing +comment "# (Change below here)" should look like:

       # (Change below here)
      @@ -2016,7 +2543,7 @@ 

      12. Binding named objects

      print("Id: ", id) dumpobject(obj)
      -

      Run the file again:

      +

      Run the script again:

      python bind_sdo.py
      @@ -2044,28 +2571,39 @@

      12. Binding named objects

      } -

      To explore further, try setting the SDO attribute SDO_POINT, which is of type SDO_POINT_TYPE.

      +

      To explore further, try setting the SDO attribute SDO_POINT, which is of +type SDO_POINT_TYPE.

      + +

      The gettype() and newobject() methods can also be +used to bind PL/SQL Records and Collections.

      -

      The gettype() and newobject() methods can also be used to bind PL/SQL Records and Collections.

      +

      Before deciding to use objects, review your performance goals because +working with scalar values can be faster.

      -

      Before deciding to use objects, review your performance goals because working with scalar values can be faster.

    13. Input and Output Type Handlers with named objects

    -

Named objects can only be used in python-oracledb's thick mode. Documentation links for further reading: Changing Fetched Data Types with Output Type Handlers and Changing Bind Data Types using an Input Type Handler.

    +
    • 13.1 Input type handlers with named objects

      Input type handlers for named objects can enable applications to change
      how data is bound to the individual attributes of the named objects.
      Review the code contained in type_input_named_obj.py, which is similar
      to the final bind_sdo.py from section 12.1, with the addition of a new
      class and converter (shown in bold):

      +
       import oracledb
      -import db_config_thick as db_config
      +import db_config
       
       con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
       cur = con.cursor()
      @@ -2141,43 +2679,56 @@ 

      13.1 Input type handlers with named objects

      print("Id: ", id) dumpobject(obj)
      The mapping between Python and Oracle objects is handled in
      SDOInConverter, which uses the python-oracledb newobject() and extend()
      methods to create an Oracle object from the Python object values. The
      SDOInConverter method is called by the input type handler
      SDOInputTypeHandler whenever an instance of mySDO is inserted with the
      cursor. A condensed sketch of these two functions is shown below.
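      A condensed sketch of the pattern (the mySDO class and the obj_type,
      element_info_type_obj and ordinate_type_obj variables are assumed to be
      created earlier in type_input_named_obj.py with con.gettype()):

       def SDOInConverter(value):
           # build a database object from the Python mySDO instance
           obj = obj_type.newobject()
           obj.SDO_GTYPE = value.gtype
           obj.SDO_ELEM_INFO = element_info_type_obj.newobject()
           obj.SDO_ELEM_INFO.extend(value.elemInfo)
           obj.SDO_ORDINATES = ordinate_type_obj.newobject()
           obj.SDO_ORDINATES.extend(value.ordinates)
           return obj

       def SDOInputTypeHandler(cursor, value, num_elements):
           # use the converter whenever a mySDO instance is bound
           if isinstance(value, mySDO):
               return cursor.var(
                   obj_type, arraysize=num_elements, inconverter=SDOInConverter
               )

       cur.inputtypehandler = SDOInputTypeHandler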


      To confirm the behavior, run the script:

      python type_input_named_obj.py
      This will show:

      +
      Querying row just inserted...
       Id:  1
          {
      -    SDO_GTYPE : 2003.0
      +    SDO_GTYPE : 2003
           SDO_SRID : None
           SDO_POINT : None
           SDO_ELEM_INFO :
              [
      -         1.0
      -         1003.0
      -         3.0
      +         1
      +         1003
      +         3
              ]
           SDO_ORDINATES :
              [
      -         1.0
      -         1.0
      -         5.0
      -         7.0
      +         1
      +         1
      +         5
      +         7
              ]
          }
      +
    • 13.2 Output type handlers with named objects

      Output type handlers enable applications to extract the data from
      database named objects into a user-defined Python object (defined by the
      mySDO class here). Review the code contained in type_output_named_obj.py
      with the output converter function shown in bold:

      +
       import oracledb
      -import db_config_thick as db_config
      +import db_config
       
       con = oracledb.connect(user=db_config.user,
                              password=db_config.pw, dsn=db_config.dsn)
      @@ -2246,37 +2797,51 @@ 

      13.2 Output type handlers with named objects

      print("SDO GYTPE:", obj.gtype) print("SDO ELEMINFO:", obj.elemInfo) print("SDO_ORDINATES:", obj.ordinates)
      Note that the Input Type Handler and the InConverter functions are the
      same as the previous example.

      The mapping between the Python and Oracle objects is handled in
      SDOOutConverter. The SDOOutConverter method is called by the output type
      handler SDOOutputTypeHandler whenever data of the named object
      (MDSYS.SDO_GEOMETRY in this case) is selected with the cursor and needs
      to be converted to a user-defined Python object (a mySDO object in this
      case). A condensed sketch of the converter is shown below.
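      A condensed sketch of the output handler pattern (assuming the mySDO
      class from the previous example and obj_type =
      con.gettype("MDSYS.SDO_GEOMETRY"); the two-argument handler signature is
      the newer metadata form):

       def SDOOutConverter(db_obj):
           # build a Python mySDO instance from the fetched database object
           return mySDO(
               int(db_obj.SDO_GTYPE),
               db_obj.SDO_ELEM_INFO.aslist(),
               db_obj.SDO_ORDINATES.aslist(),
           )

       def SDOOutputTypeHandler(cursor, metadata):
           # convert only columns whose database type is an object type
           if metadata.type_code is oracledb.DB_TYPE_OBJECT:
               return cursor.var(
                   obj_type,
                   arraysize=cursor.arraysize,
                   outconverter=SDOOutConverter,
               )

       cur.outputtypehandler = SDOOutputTypeHandler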


      To confirm the behavior, run the script:

      python type_output_named_obj.py
      This will show:

      +
      Querying the Spatial Data Object(SDO) Table using the Output Type Handler...
       ----------------------------------------------------------------------------
       SDO ID: 1
       SDO GYTPE: 2003
SDO ELEMINFO: [1, 1003, 3]
SDO_ORDINATES: [1, 1, 5, 7]

    14. Advanced Queuing

    Oracle Advanced Queuing (AQ) APIs allow messages to be passed between
    applications. Documentation link for further reading: Using Oracle
    Transactional Event Queues and Advanced Queuing.

    • 14.1 Message passing with Oracle Advanced Queuing

      Review the code contained in aq.py:

       import oracledb
       import decimal
      -import db_config_thick as db_config
      +import db_config
       
       con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
       cur = con.cursor()
      @@ -2348,27 +2913,27 @@ 

      14.1 Message passing with Oracle Advanced Queuing

      print("\nDone.")
      This file sets up Advanced Queuing using Oracle's DBMS_AQADM package.
      The queue is used for passing Oracle UDT_BOOK objects. The core
      enqueue/dequeue pattern is sketched below.
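      A condensed sketch of the pattern (the queue name BOOK_QUEUE is
      illustrative only; use the name created in aq.py, which also imports the
      decimal module used here):

       book_type = con.gettype("UDT_BOOK")
       queue = con.queue("BOOK_QUEUE", book_type)

       # enqueue one message
       book = book_type.newobject()
       book.TITLE = "Quick Brown Fox"
       book.AUTHORS = "The Dog"
       book.PRICE = decimal.Decimal("123.99")
       queue.enqone(con.msgproperties(payload=book))
       con.commit()

       # dequeue it again without blocking if the queue is empty
       queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
       msg = queue.deqone()
       if msg is not None:
           print(msg.payload.TITLE)
       con.commit()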

      Run the script:

      python aq.py

      The output shows messages being queued and dequeued.

      To experiment, split the code into three files: one to create and start
      the queue, and two other files to enqueue and dequeue messages.
      Experiment with running the enqueue and dequeue files concurrently in
      separate terminal windows.

      Try removing the commit() call in aq-dequeue.py. Now run aq-enqueue.py
      once and then aq-dequeue.py several times. The same messages will be
      available each time you try to dequeue them.

      Change aq-dequeue.py to commit in a separate transaction by changing the
      "visibility" setting:

       queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE
      @@ -2383,27 +2948,475 @@ 

      14.1 Message passing with Oracle Advanced Queuing

      queue.enqone(con.msgproperties(payload=book, expiration=4))
      Now run aq-enqueue.py and wait four seconds before you run
      aq-dequeue.py. There should be no messages to dequeue.

      If you are stuck, look in the solutions directory at the aq-dequeue.py,
      aq-enqueue.py and aq-queuestart.py files.


    15. Scrollable cursors

    Scrollable cursors enable python-oracledb applications to move backwards
    as well as forwards in query results. They can be used to skip rows as
    well as move to a particular row. Documentation link for further reading:
    Scrollable Cursors.

    + +
      +
    • 15.1 Working with scrollable cursors

      + +

      Review the code contained in query_scroll.py:

      + +
      +import oracledb
      +import db_config
      +
      +con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
      +cur = con.cursor(scrollable=True)
      +
      +cur.execute("select * from dept order by deptno")
      +
      +cur.scroll(2, mode="absolute")  # go to second row
      +print(cur.fetchone())
      +
      +cur.scroll(-1)                    # go back one row
      +print(cur.fetchone())
      +
      + +

      Run the script in a terminal window:

      + +
      python query_scroll.py
      + +

      Edit query_scroll.py and experiment with different scroll options and
      orders, such as:

      + +
      cur.scroll(1)  # go to next row
      +print(cur.fetchone())
      +
      +cur.scroll(mode="first")  # go to first row
      +print(cur.fetchone())
      + +

      Try some scroll options that go beyond the number of rows in the result
      set, as in the sketch below.
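      A minimal sketch (whether an error is raised when scrolling out of range
      is an assumption; check the Scrollable Cursors documentation for the
      exact behavior in your version):

       try:
           cur.scroll(1000, mode="absolute")   # far beyond the dept rows
           print(cur.fetchone())
       except oracledb.Error as e:
           print("Scroll failed:", e)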

      + +
    • +
    + +

    16. Dataframes

    + +

    Python-oracledb can query data directly into a dataframe format that
    exposes an Apache Arrow PyCapsule interface. This is an efficient way to
    use Python libraries such as Apache PyArrow, Pandas, Polars, NumPy,
    PyTorch, or to write files in Apache Parquet format. Documentation link
    for further reading: Fetching Data Frames.

    + +
      +
    • 16.1 Working with Dataframes

      + +

      This section shows how to efficiently fetch data for use with Pandas. It + uses the pyarrow and pandas packages, which need to be + installed:

      + +
      python -m pip install pyarrow pandas --upgrade
      + +

      Review the code contained in query_pandas.py:

      + +
      +import pandas
      +import pyarrow
      +import oracledb
      +import db_config
      +
      +con = oracledb.connect(
      +    user=db_config.user, password=db_config.pw, dsn=db_config.dsn
      +)
      +
      +odf = con.fetch_df_all(
      +    statement="select sal from emp order by empno",
      +    arraysize=100)
      +
      + +

      This uses fetch_df_all() to directly fetch data into an OracleDataFrame
      that internally exposes a PyCapsule interface. For large result sets you
      can tune the arraysize parameter, or use an iterator from the
      fetch_df_batches() method, as sketched below.
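      A minimal sketch of batch fetching (reusing the pyarrow import and the
      emp table from this section; the batch size of 50 is arbitrary):

       for odf_batch in con.fetch_df_batches(
           statement="select sal from emp order by empno", size=50
       ):
           # each batch is an OracleDataFrame; convert it with PyArrow
           tab = pyarrow.Table.from_arrays(
               odf_batch.column_arrays(), names=odf_batch.column_names()
           )
           print("Fetched a batch of", tab.num_rows, "rows")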

      + +

      To use the new dataframe in Pandas, edit query_pandas.py + and add this code at the bottom:

      + +
      +df = pyarrow.Table.from_arrays(
      +    odf.column_arrays(), names=odf.column_names()
      +).to_pandas()
      +
      +print("\nSum:")
      +print(df.sum())
      +
      +print("\nMedian:")
      +print(df.median())
      +
      + +

      This uses PyArrow functionality to convert the OracleDataFrame to a + Pandas dataframe.

      + +

      Run the script in a terminal window:

      + +
      python query_pandas.py
      + +

      The output is the expected calculations on the employee salary data.

      + +
    • +
    + +

    17. Concurrent Programming with asyncio

    + +

    The Asynchronous I/O (asyncio) Python library can be used in
    python-oracledb Thin mode for concurrent programming. This library allows
    you to run operations in parallel, for example to run a long-running
    operation in the background without blocking the rest of the application.

    + +

    All python-oracledb synchronous methods that require a round-trip to the
    database have corresponding asynchronous counterparts. The methods
    oracledb.connect_async() and oracledb.create_pool_async() are used to
    create connections and pools, respectively. Documentation link for further
    reading: Concurrent Programming with asyncio.
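    A minimal standalone sketch (using the tutorial's db_config module; the
    full pooled example follows in section 17.1):

     import asyncio
     import oracledb
     import db_config

     async def main():
         con = await oracledb.connect_async(
             user=db_config.user, password=db_config.pw, dsn=db_config.dsn
         )
         # asynchronous calls are awaited
         print(await con.fetchone("select user from dual"))
         await con.close()

     asyncio.run(main())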

    + +
      +
    • 17.1 Using asyncio

      + +

      Review the code contained in async_gather.py:

      + +
      import asyncio
      +import oracledb
      +import db_config
      +
      +CONCURRENCY = 5   # Number of coroutines to run
      +POOLSIZE = 5      # Maximum connection pool size
      +
      +SQL = """select unique current_timestamp as ct, sid||'-'||serial# as sidser
      +         from v$session_connect_info
      +         where sid = sys_context('userenv', 'sid')"""
      +
      +async def init_session(connection, requested_tag):
      +    res = await connection.fetchone(SQL)
      +    print(res[0].strftime("%H:%M:%S.%f"), "- init_session SID-SERIAL#", res[1])
      +
      +async def query(pool):
      +    async with pool.acquire() as connection:
      +        await connection.callproc("dbms_session.sleep", [1])
      +        res = await connection.fetchone(SQL)
      +        print(res[0].strftime("%H:%M:%S.%f"), "- query SID-SERIAL#", res[1])
      +
      +async def main():
      +
      +    pool = oracledb.create_pool_async(
      +        user=db_config.user,
      +        password=db_config.pw,
      +        dsn=db_config.dsn,
      +        min=1,
      +        max=POOLSIZE,
      +        session_callback=init_session,
      +    )
      +
      +    coroutines = [query(pool) for i in range(CONCURRENCY)]
       

      +    await asyncio.gather(*coroutines)
      +
      +    await pool.close()
      +
      +asyncio.run(main())
      + +

      The application creates a connection pool using create_pool_async() and
      starts multiple coroutines that get a pooled connection and execute a
      query. Note that create_pool_async() is a synchronous call and is not
      awaited.

      + +

      The init_session() function, set as the pool's session_callback, is
      invoked each time a connection is created by the pool. It displays the
      unique session identifier/serial number combination of the created
      connection. Depending on the values of CONCURRENCY and POOLSIZE, and how
      fast your machine is, you may not see the specified maximum number of
      connections created.

      + +

      Each query() coroutine sleeps for a short time and then displays the
      unique session identifier/serial number of the connection used by that
      coroutine.

      + +

      When all the awaitables executed by gather() have + completed, the pool is closed.

      + +

      Run the script:

      + +
      python async_gather.py
      + +

      The session identifier/serial numbers of connections created and used + are displayed, along with timestamps.

      + +

      Experiment with different values of CONCURRENCY and POOLSIZE to see how
      the maximum pool size affects the number of coroutines that can do work
      concurrently.


    18. Pipelining multiple operations

    + +

    Pipelining allows python-oracledb Thin mode applications to send multiple,
    independent statements to Oracle Database with one call. The database is
    kept busy processing them without waiting for the application to receive a
    result set and send the next statement. While the database processes the
    pipeline of statements, the application can continue with non-database
    work. When the database has executed all the pipelined operations, their
    results are returned to the application. Effective use of Oracle Database
    Pipelining can increase the responsiveness of an application and improve
    overall system throughput.

    + +

    Pipelining requires the use of asyncio. True pipelining only occurs when
    you are connected to Oracle Database 23ai. Documentation link for further
    reading: Pipelining Database Operations.

    + +
      +
    • 18.1 Using Pipelining

      + +

      Review the code contained in pipelining.py:

      + +
      import asyncio
      +import oracledb
      +import db_config
      +
      +async def get_weather():
      +    return "Hot and sunny"
      +
      +async def get_location():
      +    return "Melbourne"
      +
      +async def main():
      +    con = await oracledb.connect_async(
      +        user=db_config.user, password=db_config.pw, dsn=db_config.dsn
      +    )
      +
      +    pipeline = oracledb.create_pipeline()
      +    pipeline.add_fetchone(
      +        "select ename, job from emp where empno = :en", [7839]
      +    )
      +    pipeline.add_fetchall("select dname from dept order by deptno")
      +
      +    return_values = await asyncio.gather(
      +        get_weather(), get_location(), con.run_pipeline(pipeline)
      +    )
      +
      +    for r in return_values:
      +        if isinstance(r, list):  # the pipeline return list
      +            for result in r:
      +                if result.rows:
      +                    for row in result.rows:
      +                        print(*row, sep="\t")
      +        else:
      +            print(r)  # a local operation result
      +
      +    await con.close()
      +
      +asyncio.run(main())
      +
      + +

      The connection is established using oracledb.connect_async().
      Asynchronous methods are awaited.

      + +

      The script creates a pipeline with create_pipeline() and adds two
      database operations. The use of asyncio.gather() initiates parallel
      calls to the two local methods get_weather() and get_location(), and it
      also executes run_pipeline(), which sends the two database queries to
      Oracle Database. Note that although the database receives all the
      operations at the same time, it will execute each operation
      sequentially. The local Python work executes during the time the
      database is processing the queries. Other operation types, such as DML
      and a commit, can also be added to a pipeline, as sketched below.
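      A minimal sketch of mixing other operation types in a pipeline, to be
      placed inside main() of pipelining.py (the update against the tutorial's
      emp table is illustrative only):

       pipeline = oracledb.create_pipeline()
       pipeline.add_execute(
           "update emp set sal = sal + 100 where empno = :1", [7839]
       )
       pipeline.add_commit()
       pipeline.add_fetchone("select sal from emp where empno = :1", [7839])

       results = await con.run_pipeline(pipeline)
       for result in results:
           if result.rows:
               print(result.rows)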


      When all the awaitables executed by gather() have + completed, the results are displayed.

      + +

      Run the script:

      + +
      python pipelining.py
      + +

      The weather, location, employee and department information is + displayed.

      + +
    • +
    + +

    19. Python-oracledb Thick mode

    + +

    All the above examples were run in python-oracledb's default Thin mode,
    which connects directly to Oracle Database. Most could also have been run
    in Thick mode by changing each import db_config line to import
    db_config_thick as db_config. Python-oracledb Thick mode uses Oracle
    Client libraries (such as from Oracle Instant Client) to handle network
    connectivity to Oracle Database. There are some additional features these
    libraries provide which are therefore only available in python-oracledb
    Thick mode. The next example shows one of these. Other Oracle Database
    features that require python-oracledb Thick mode include Application
    Continuity and Continuous Query Notification. Documentation link for
    further reading: Enabling python-oracledb Thick mode.

    + +

    The following sections assume you have installed the tutorial schema as
    shown at the tutorial start.

    + +
      +
    • +

      19.1 Review the Oracle Client library path

      + +

      You additionally need to make Oracle Client libraries available. Follow
      the documentation on Installing python-oracledb.

      + +

      When you have installed Oracle Client libraries, review the library path
      settings in the db_config_thick.py file. If python-oracledb cannot
      locate Oracle Client libraries, then your applications will fail with an
      error like "DPI-1047: Cannot locate a 64-bit Oracle Client library". For
      our examples, we are using Oracle Instant Client libraries.

      + +
      +# On Linux, this must be None.
      +# Instead, the Oracle environment must be set before Python starts.
      +instant_client_dir = None
      +
      +# On Windows, if your database is on the same machine, comment these lines out
      +# and let instant_client_dir be None.  Otherwise, set this to your Instant
      +# Client directory.  Note the use of the raw string r"...", which allows backslashes to
      +# be used as directory separators.
      +if platform.system() == "Windows":
      +    instant_client_dir = r"C:\Oracle\instantclient_23_7"
      +
      +# On macOS set the directory to your Instant Client directory
      +if platform.system() == "Darwin":
      +    instant_client_dir = os.environ.get("HOME")+"/Downloads/instantclient_23_3"
      +
      +# You must always call init_oracle_client() to use thick mode
      +oracledb.init_oracle_client(lib_dir=instant_client_dir)
      + +

      Important! Calling the init_oracle_client() function enables the Thick
      mode of python-oracledb. Once python-oracledb is in Thick mode, you
      cannot return to Thin mode without removing calls to
      init_oracle_client() and restarting the application.

      + +

      Edit db_config_thick.py and set instant_client_dir to None or to a valid
      path according to the following notes:

      • If you are on macOS or Windows, and you have installed Oracle Instant
        Client libraries because your database is on a remote machine, then
        set instant_client_dir to the path of the Instant Client libraries.

      • If you are on Windows and have a local database installed, then
        comment out the two Windows lines, so that instant_client_dir remains
        None.

      • In all other cases (including Linux with Oracle Instant Client), make
        sure that instant_client_dir is set to None. In these cases you must
        make sure that the Oracle libraries from Instant Client or your
        ORACLE_HOME are in your system library search path before you start
        Python. On Linux, the path can be configured with ldconfig or with the
        LD_LIBRARY_PATH environment variable.
    • + +
    • +

      19.2 Review the configuration files for thick mode

      + +

      Review the db_config_thick.py (thick mode) and sql/db_config.sql files
      in the tutorial directory. These are included in other Python and SQL
      files for setting up the database connection.

      + +

      Edit the db_config_thick.py file and change the default values to match
      the connection information for your environment. Alternatively, you can
      set the given environment variables in your terminal window. For
      example, the default username is "pythondemo" unless the environment
      variable "PYTHON_USER" contains a different username. The default
      connection string is for the 'freepdb1' database service on the same
      machine as Python. In Python Database API terminology, the connection
      string parameter is called the "data source name" or "dsn". Using
      environment variables is convenient because you will not be asked to
      re-enter the password when you run scripts:

      + +
      +user = os.environ.get("PYTHON_USER", "pythondemo")
      +
      +dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/freepdb1")
      +
      +pw = os.environ.get("PYTHON_PASSWORD")
      +if pw is None:
      +    pw = getpass.getpass("Enter password for %s: " % user)
      +
      + +

      Also, change the default username and connection string in the SQL
      configuration file sql/db_config.sql:

      + +
      +-- Default database username
      +def user = "pythondemo"
      +
      +-- Default database connection string
      +def connect_string = "localhost/freepdb1"
      +
      +-- Prompt for the password
      +accept pw char prompt 'Enter database password for &user: ' hide
      +
      + + +

      The tutorial instructions may need adjusting, depending on how you have + set up your environment.

      + +
    • +
    + +

    The following section is specific to the python-oracledb Thick mode in this +release of python-oracledb.

    + +

    20. Simple Oracle Document Access (SODA)

    + +

    Simple Oracle Document Access (SODA) is a set of NoSQL-style APIs.
    Documents can be inserted, queried, and retrieved from Oracle Database.
    By default, documents are JSON strings. SODA APIs exist in many languages.
    It is usable in python-oracledb's Thick mode. Documentation link for
    further reading: Simple Oracle Document Access (SODA).

    + +
      + +
    • +

      20.1 Inserting JSON Documents

      + +

      Review soda.py :

       import oracledb
      @@ -2446,27 +3459,36 @@ 

      15.1 Inserting JSON Documents

      print('Retrieved SODA document dictionary is:')
      print(content)
      soda.createCollection() will create a new collection, or open an
      existing collection if the name is already in use. (Due to a change in
      the default "sqlType" storage for Oracle Database 21c, the metadata is
      explicitly stated to use a BLOB column. This lets the example run with
      different client and database versions.) A condensed sketch of the call
      is shown below.
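      A condensed sketch of the metadata used (the collection name
      "mycollection" is illustrative; soda.py uses its own name):

       soda = con.getSodaDatabase()
       metadata = {
           "keyColumn": {"name": "ID"},
           "contentColumn": {"name": "JSON_DOCUMENT", "sqlType": "BLOB"},
           "versionColumn": {"name": "VERSION", "method": "UUID"},
           "lastModifiedColumn": {"name": "LAST_MODIFIED"},
       }
       collection = soda.createCollection("mycollection", metadata)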

      insertOneAndGet() inserts the content of a document into the database
      and returns a SODA Document Object. This allows access to metadata such
      as the document key. By default, document keys are automatically
      generated.

      The find() method is used to begin an operation that will act upon
      documents in the collection.

      content is a dictionary. You can also get a JSON string by calling
      doc.getContentAsString().

      Run the script:

      python soda.py
      The output shows the content of the new document.

    • 20.2 Searching SODA Documents

      Extend soda.py to insert some more documents and perform a find filter
      operation:

       my_docs = [
      @@ -2488,12 +3510,13 @@ 

      15.2 Searching SODA Documents

      python soda.py
      The find operation filters the collection and returns documents where
      the city is Melbourne. Note the insertMany() method is currently in
      preview.

      SODA supports query by example (QBE) with an extensive set of operators.
      Extend soda.py with a QBE to find documents where the age is less than
      25:

       filter_spec = {'age': {'$lt': 25}}
      @@ -2504,24 +3527,28 @@ 

      15.2 Searching SODA Documents

      print(doc.getContent()["name"])
      Running the script displays the names.

    Summary

    +

    In this tutorial, you have learned how to:

    +
    • Install the python-oracledb driver and use Thin and Thick modes
    • Create and work with connections, connection pooling and Database
      Resident Connection Pooling
    • Execute queries and fetch data
    • Use bind variables
    • Use PL/SQL stored functions and procedures
    • Extend python-oracledb classes
    • Work with LOB, JSON, and VECTOR data types
    • Work with named objects
    • Use Oracle Advanced Queuing
    • Work with dataframes
    • Use asynchronous programming and Pipelining
    • Use scrollable cursors
    • Use the SODA document store API
    @@ -2530,7 +3557,9 @@

    Summary

    Appendix: Python Primer

    Python is a dynamically typed scripting language. It is most often used to
    run command-line scripts but is also used for web applications and web
    services.

    Running Python

    @@ -2538,23 +3567,42 @@

    Running Python

      • Create a file of Python commands, such as myfile.py. This can be run
        with:

        python myfile.py

      • Alternatively run the Python interpreter by executing the python
        command in a terminal, and then interactively enter commands. Use
        Ctrl-D to exit back to the operating system prompt.

    On some machines, you may need to run the python3 command instead of
    python.

    When you run scripts, Python automatically creates bytecode versions of
    them in a folder called __pycache__. These improve the performance of
    scripts that are run multiple times. They are automatically recreated if
    the source file changes.

    Indentation

    Whitespace indentation is significant in Python. When copying examples,
    use the same column alignment as shown. The samples in this tutorial use
    spaces, not tabs.

    The following indentation prints 'done' once after the loop has completed:

 for i in range(5):
@@ -2562,7 +3610,7 @@ 

Indentation

print('done')
But this indentation prints 'done' in each iteration:

 for i in range(5):
@@ -2572,8 +3620,7 @@ 

Indentation

Strings

Python strings can be enclosed in single or double quotes:

'A string constant'
 "another constant"
@@ -2583,44 +3630,56 @@

Strings

FROM EMP """
Variables

Variables do not need types declared:

count = 1
 ename = 'Arnie'
Comments

Comments can be single line:

# a short comment
Or they can be multi-line using the triple-quote token to create a string that
does nothing:
"""
 a longer
 comment
 """
 
Printing

Strings and variables can be displayed with a print() function:

print('Hello, World!')
 print('Value:', count)
-

Data Structures

+

Data Structures

-

Associative arrays are called 'dictionaries':

-
a2 = {'PI':3.1415, 'E':2.7182}
-

Ordered arrays are called 'lists':

-
a3 = [101, 4, 67]
-

Lists can be accessed via indexes.

-
+  

Associative arrays are called 'dictionaries':

+ +
a2 = {'PI':3.1415, 'E':2.7182}
+ +

Ordered arrays are called 'lists':

+ +
a3 = [101, 4, 67]
+ +

Lists can be accessed via indexes.

+ +
 print(a3[0])
 print(a3[-1])
 print(a3[1:3])
 
-

Tuples are like lists but cannot be changed once they are - created. They are created with parentheses:

+

Tuples are like lists but cannot be changed once they are created. They + are created with parentheses:

a4 = (3, 7, 10)
@@ -2628,11 +3687,11 @@

Data Structures

v1, v2, v3 = a4
Now the variable v1 contains 3, the variable v2 contains 7 and the variable v3
contains 10.

The value in a single entry tuple like "(13,)" can be assigned to a variable
by putting a comma after the variable name like:

v1, = (13,)
@@ -2642,16 +3701,19 @@

Data Structures

then v1 will contain the whole tuple "(13,)"

Objects

Everything in Python is an object. As an example, given the list a3 above, the
append() method can be used to add a value to the list.
a3.append(23)
Now a3 contains [101, 4, 67, 23]

Flow Control

Code flow can be controlled with tests and loops. The if/elif/else statements
look like:

@@ -2663,31 +3725,33 @@ 

Flow Control

print('Unknown number')
This also shows how the clauses are delimited with colons, and each sub-block
of code is indented.

Loops

A traditional loop is:

for i in range(10):
     print(i)
This prints the numbers from 0 to 9. The value of i is incremented in each
iteration.

The 'for' command can also be used to iterate over lists and tuples:
 a5 = ['Aa', 'Bb', 'Cc']
 for v in a5:
     print(v)
 
-

This sets v to each element of the list -a5 in turn.

+

This sets v to each element of the list a5 in turn.

Functions

A function may be defined as:

 def myfunc(p1, p2):
@@ -2695,56 +3759,87 @@ 

Functions

    print(p1, p2)
    return p1 + p2
Functions may or may not return values. This function could be called using:

v3 = myfunc(1, 3)

Function calls must appear after their function definition.

Functions are also objects and have attributes. The inbuilt __doc__ attribute
can be used to find the function description:

print(myfunc.__doc__)

Modules

Sub-files can be included in Python scripts with an import statement.
import os
 import sys
Many predefined modules exist, such as the os and the sys modules.

Other modules, such as python-oracledb, need to be installed separately. These
can be installed from PyPI, for example from pypi.org/project/oracledb/ (as
shown in the tutorial setup instructions). In some environments you may prefer
to install using a package repository such as yum.oracle.com for Oracle Linux.

After installing python-oracledb, your Python scripts can import the driver
using:
import oracledb

Resources


License

Copyright © 2017, 2025, Oracle and/or its affiliates.

This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.

If you elect to accept the software under the Apache License, Version 2.0, the
following applies:

Licensed under the Apache License, Version 2.0 (the "License"); you may not
use this file except in compliance with the License. You may obtain a copy of
the License at https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations under
the License.

diff --git a/samples/tutorial/aq.py b/samples/tutorial/aq.py index 3d291ce8..fb5814b4 100644 --- a/samples/tutorial/aq.py +++ b/samples/tutorial/aq.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -28,7 +28,7 @@ import oracledb import decimal -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/async_gather.py b/samples/tutorial/async_gather.py new file mode 100644 index 00000000..c60180a4 --- /dev/null +++ b/samples/tutorial/async_gather.py @@ -0,0 +1,79 @@ +# ----------------------------------------------------------------------------- +# async_gather.py (Section 17.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +import asyncio +import oracledb +import db_config + +# Number of coroutines to run +CONCURRENCY = 5 + +# Maximum connection pool size +POOLSIZE = 5 + +# Query the unique session identifier/serial number combination of a connection +SQL = """select unique current_timestamp as ct, sid||'-'||serial# as sidser + from v$session_connect_info + where sid = sys_context('userenv', 'sid')""" + + +# Show the unique session identifier/serial number of each connection that the +# pool opens +async def init_session(connection, requested_tag): + res = await connection.fetchone(SQL) + print(res[0].strftime("%H:%M:%S.%f"), "- init_session SID-SERIAL#", res[1]) + + +# The coroutine simply shows the session identifier/serial number of the +# connection returned by the pool.acquire() call +async def query(pool): + async with pool.acquire() as connection: + await connection.callproc("dbms_session.sleep", [1]) + res = await connection.fetchone(SQL) + print(res[0].strftime("%H:%M:%S.%f"), "- query SID-SERIAL#", res[1]) + + +async def main(): + + pool = oracledb.create_pool_async( + user=db_config.user, + password=db_config.pw, + dsn=db_config.dsn, + min=1, + max=POOLSIZE, + session_callback=init_session, + ) + + coroutines = [query(pool) for i in range(CONCURRENCY)] + + await asyncio.gather(*coroutines) + + await pool.close() + + +asyncio.run(main()) diff --git a/samples/tutorial/bind_sdo.py b/samples/tutorial/bind_sdo.py index b791b980..5c088e70 100644 --- a/samples/tutorial/bind_sdo.py +++ b/samples/tutorial/bind_sdo.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/connect_params2.py b/samples/tutorial/connect_params2.py index 198b5b79..1a366561 100644 --- a/samples/tutorial/connect_params2.py +++ b/samples/tutorial/connect_params2.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -30,7 +30,7 @@ import db_config params = oracledb.ConnectParams( - host="localhost", port=1521, service_name="orclpdb" + host="localhost", port=1521, service_name="freepdb1" ) con = oracledb.connect( user=db_config.user, password=db_config.pw, params=params diff --git a/samples/tutorial/create_user.py b/samples/tutorial/create_user.py index 6f6b7d3b..32918c45 100644 --- a/samples/tutorial/create_user.py +++ b/samples/tutorial/create_user.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2023, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,8 +34,8 @@ # default values PYTHON_USER = "pythondemo" -PYTHON_CONNECT_STRING = "localhost/orclpdb" -PYTHON_DRCP_CONNECT_STRING = "localhost/orclpdb:pooled" +PYTHON_CONNECT_STRING = "localhost/freepdb1" +PYTHON_DRCP_CONNECT_STRING = "localhost/freepdb1:pooled" # dictionary containing all parameters; these are acquired as needed by the # methods below (which should be used instead of consulting this dictionary diff --git a/samples/tutorial/db_config.py b/samples/tutorial/db_config.py index 664e30ff..c807a5d8 100644 --- a/samples/tutorial/db_config.py +++ b/samples/tutorial/db_config.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2023, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,7 +32,7 @@ user = os.environ.get("PYTHON_USER", "pythondemo") -dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/orclpdb") +dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/freepdb1") pw = os.environ.get("PYTHON_PASSWORD") if pw is None: diff --git a/samples/tutorial/db_config_sys.py b/samples/tutorial/db_config_sys.py index 2ffc93aa..255965b9 100644 --- a/samples/tutorial/db_config_sys.py +++ b/samples/tutorial/db_config_sys.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2023, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,7 +32,7 @@ sysuser = os.environ.get("PYTHON_SYSUSER", "SYSTEM") -dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/orclpdb") +dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/freepdb1") syspw = os.environ.get("PYTHON_SYSPASSWORD") if syspw is None: diff --git a/samples/tutorial/db_config_thick.py b/samples/tutorial/db_config_thick.py index 2c9cc9b2..a7bbdc28 100644 --- a/samples/tutorial/db_config_thick.py +++ b/samples/tutorial/db_config_thick.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2023, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -42,7 +42,7 @@ # Client directory. Note the use of the raw string r"..." so backslashes can # be used as directory separators. if platform.system() == "Windows": - instant_client_dir = r"C:\Oracle\instantclient_19_14" + instant_client_dir = r"C:\Oracle\instantclient_23_7" # On macOS set the directory to your Instant Client directory if platform.system() == "Darwin": @@ -62,7 +62,7 @@ user = os.environ.get("PYTHON_USER", "pythondemo") -dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/orclpdb") +dsn = os.environ.get("PYTHON_CONNECT_STRING", "localhost/freepdb1") pw = os.environ.get("PYTHON_PASSWORD") if pw is None: diff --git a/samples/tutorial/drcp_query.py b/samples/tutorial/drcp_query.py index aaae7e0b..637b807f 100644 --- a/samples/tutorial/drcp_query.py +++ b/samples/tutorial/drcp_query.py @@ -1,10 +1,10 @@ # ----------------------------------------------------------------------------- # drcp_query.py (Section 2.4 and 2.5) -# Look at pool statistics of the DRCP Connection +# Query DRCP pool statistics # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2023, Oracle and/or its affiliates. All rights reserved. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. All rights reserved. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,9 +33,7 @@ # default values PYTHON_SYS_USER = "SYSTEM" -PYTHON_USER = "pythondemo" -PYTHON_CONNECT_STRING = "localhost/orclpdb" -PYTHON_DRCP_CONNECT_STRING = "localhost/orclpdb:pooled" +PYTHON_DRCP_CONNECT_STRING = "localhost/free" # dictionary containing all parameters; these are acquired as needed by the # methods below (which should be used instead of consulting this dictionary @@ -63,7 +61,11 @@ def get_value(name, label, default_value=""): def get_main_user(): - return get_value("user", "Enter the DRCP User", PYTHON_SYS_USER) + return get_value( + "user", + "Enter the privileged user with access to DRCP views", + PYTHON_SYS_USER, + ) def get_main_password(): @@ -73,10 +75,10 @@ def get_main_password(): def get_drcp_connect_string(): connect_string = get_value( "DRCP_CONNECT_STRING", - "Enter the DRCP Connect String", + "Enter the connect string to access the DRCP views", PYTHON_DRCP_CONNECT_STRING, ) - return "%s/%s@%s" % (get_main_user(), get_main_password(), connect_string) + return connect_string drcp_user = get_main_user() diff --git a/samples/tutorial/json_insert.py b/samples/tutorial/json_insert.py new file mode 100644 index 00000000..ce6f436b --- /dev/null +++ b/samples/tutorial/json_insert.py @@ -0,0 +1,41 @@ +# ----------------------------------------------------------------------------- +# json_insert.py (Section 8.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. 
+# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) +cur = con.cursor() + +# Insert JSON data +data = dict(name="Rod", dept="Sales", location="Germany") +inssql = "insert into jtab (id, json_data) values (:1, :2)" +cur.setinputsizes(None, oracledb.DB_TYPE_JSON) +cur.execute(inssql, [101, data]) diff --git a/samples/tutorial/pipelining.py b/samples/tutorial/pipelining.py new file mode 100644 index 00000000..fb0a989f --- /dev/null +++ b/samples/tutorial/pipelining.py @@ -0,0 +1,73 @@ +# ----------------------------------------------------------------------------- +# pipelining.py (Section 18.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +import asyncio +import oracledb +import db_config + + +async def get_weather(): + return "Hot and sunny" + + +async def get_location(): + return "Melbourne" + + +async def main(): + con = await oracledb.connect_async( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn + ) + + pipeline = oracledb.create_pipeline() + pipeline.add_fetchone( + "select ename, job from emp where empno = :en", [7839] + ) + pipeline.add_fetchall("select dname from dept order by deptno") + + # Run the pipeline and non-database operations concurrently. + # Note although the database receives all the operations at the same time, + # it will execute each operation sequentially. The local Python work + # executes during the time the database is processing the queries. 
+ return_values = await asyncio.gather( + get_weather(), get_location(), con.run_pipeline(pipeline) + ) + + for r in return_values: + if isinstance(r, list): # the pipeline return list + for result in r: + if result.rows: + for row in result.rows: + print(*row, sep="\t") + else: + print(r) # a local operation result + + await con.close() + + +asyncio.run(main()) diff --git a/samples/tutorial/query_pandas.py b/samples/tutorial/query_pandas.py new file mode 100644 index 00000000..06f29146 --- /dev/null +++ b/samples/tutorial/query_pandas.py @@ -0,0 +1,42 @@ +# ----------------------------------------------------------------------------- +# query_pandas.py (Section 16.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +import pandas +import pyarrow +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) + +# Get an OracleDataFrame +# Adjust arraysize to tune the query fetch performance +odf = con.fetch_df_all( + statement="select sal from emp order by empno", arraysize=100 +) diff --git a/samples/tutorial/query_scroll.py b/samples/tutorial/query_scroll.py index cddf5732..5cf4539c 100644 --- a/samples/tutorial/query_scroll.py +++ b/samples/tutorial/query_scroll.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# query_scroll.py (Section 11.1) +# query_scroll.py (Section 15.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/rowfactory.py b/samples/tutorial/rowfactory.py index 0c44cde2..830be093 100644 --- a/samples/tutorial/rowfactory.py +++ b/samples/tutorial/rowfactory.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# rowfactory.py (Section 8.1) +# rowfactory.py (Section 10.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,6 +26,7 @@ # limitations under the License. # ----------------------------------------------------------------------------- +import collections import oracledb import db_config diff --git a/samples/tutorial/run_sql_script.py b/samples/tutorial/run_sql_script.py index f71f56b4..b70b82c7 100644 --- a/samples/tutorial/run_sql_script.py +++ b/samples/tutorial/run_sql_script.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2023, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,8 +33,8 @@ DEFAULT_MAIN_USER = "pythondemo" DEFAULT_EDITION_USER = "pythoneditions" DEFAULT_EDITION_NAME = "python_e1" -DEFAULT_CONNECT_STRING = "localhost/orclpdb" -DEFAULT_DRCP_CONNECT_STRING = "localhost/orclpdb:pooled" +DEFAULT_CONNECT_STRING = "localhost/freepdb1" +DEFAULT_DRCP_CONNECT_STRING = "localhost/freepdb1:pooled" def run_sql_script(conn, script_name, **kwargs): diff --git a/samples/tutorial/soda.py b/samples/tutorial/soda.py index 490c0e81..83c7b640 100644 --- a/samples/tutorial/soda.py +++ b/samples/tutorial/soda.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# soda.py (Section 15.1) +# soda.py (Section 20.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2019, 2023, Oracle and/or its affiliates. +# Copyright (c) 2019, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/tutorial/solutions/aq-dequeue.py b/samples/tutorial/solutions/aq-dequeue.py index 66b97a40..f4a1c84c 100644 --- a/samples/tutorial/solutions/aq-dequeue.py +++ b/samples/tutorial/solutions/aq-dequeue.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/solutions/aq-enqueue.py b/samples/tutorial/solutions/aq-enqueue.py index ba7e5bd7..05b26ce0 100644 --- a/samples/tutorial/solutions/aq-enqueue.py +++ b/samples/tutorial/solutions/aq-enqueue.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -28,7 +28,7 @@ import oracledb import decimal -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/solutions/aq-queuestart.py b/samples/tutorial/solutions/aq-queuestart.py index 8a5d5879..4706d8b3 100644 --- a/samples/tutorial/solutions/aq-queuestart.py +++ b/samples/tutorial/solutions/aq-queuestart.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/solutions/bind_sdo.py b/samples/tutorial/solutions/bind_sdo.py index 865db64b..8269cdc7 100644 --- a/samples/tutorial/solutions/bind_sdo.py +++ b/samples/tutorial/solutions/bind_sdo.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/solutions/connect_pool2.py b/samples/tutorial/solutions/connect_pool2.py index 0532691a..073c8daa 100644 --- a/samples/tutorial/solutions/connect_pool2.py +++ b/samples/tutorial/solutions/connect_pool2.py @@ -59,16 +59,16 @@ def Query(): # time.sleep(1) -numberOfThreads = 5 -threadArray = [] +number_of_threads = 5 +thread_array = [] -for i in range(numberOfThreads): +for i in range(number_of_threads): thread = threading.Thread(name="#" + str(i), target=Query) - threadArray.append(thread) + thread_array.append(thread) # time.sleep(4) thread.start() -for t in threadArray: +for t in thread_array: t.join() print("All done!") diff --git a/samples/tutorial/solutions/json_insert.py b/samples/tutorial/solutions/json_insert.py new file mode 100644 index 00000000..fcd08c31 --- /dev/null +++ b/samples/tutorial/solutions/json_insert.py @@ -0,0 +1,53 @@ +# ----------------------------------------------------------------------------- +# json_insert.py (Section 8.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) +cur = con.cursor() + +# Insert JSON data +data = dict(name="Rod", dept="Sales", location="Germany") +inssql = "insert into jtab (id, json_data) values (:1, :2)" +cur.setinputsizes(None, oracledb.DB_TYPE_JSON) +cur.execute(inssql, [101, data]) + +# Select JSON data +sql = "select c.json_data from jtab c" +for (j,) in cur.execute(sql): + print(j) + +# Dot-notation to extract a value from a JSON column +sql = """select c.json_data.location + from jtab c + offset 0 rows fetch next 1 rows only""" +for (j,) in cur.execute(sql): + print(j) diff --git a/samples/tutorial/solutions/query_pandas.py b/samples/tutorial/solutions/query_pandas.py new file mode 100644 index 00000000..6b6716a5 --- /dev/null +++ b/samples/tutorial/solutions/query_pandas.py @@ -0,0 +1,55 @@ +# ----------------------------------------------------------------------------- +# query_pandas.py (Section 16.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +import pandas +import pyarrow +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) + +# Get an OracleDataFrame +# Adjust arraysize to tune the query fetch performance +odf = con.fetch_df_all( + statement="select sal from emp order by empno", arraysize=100 +) + +# Get a Pandas DataFrame from the data +df = pyarrow.Table.from_arrays( + odf.column_arrays(), names=odf.column_names() +).to_pandas() + +# Perform various Pandas operations on the DataFrame + +print("\nSum:") +print(df.sum()) + +print("\nMedian:") +print(df.median()) diff --git a/samples/tutorial/solutions/query_scroll.py b/samples/tutorial/solutions/query_scroll.py index 7c1ab757..3cfb94a5 100644 --- a/samples/tutorial/solutions/query_scroll.py +++ b/samples/tutorial/solutions/query_scroll.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# query_scroll.py (Section 11.1) +# query_scroll.py (Section 15.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright 2017, 2023, Oracle and/or its affiliates. 
+# Copyright 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/solutions/rowfactory.py b/samples/tutorial/solutions/rowfactory.py index 7244f050..5220d609 100644 --- a/samples/tutorial/solutions/rowfactory.py +++ b/samples/tutorial/solutions/rowfactory.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# rowfactory.py (Section 8.1) +# rowfactory.py (Section 10.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/tutorial/solutions/soda.py b/samples/tutorial/solutions/soda.py index 931f8031..510e8979 100644 --- a/samples/tutorial/solutions/soda.py +++ b/samples/tutorial/solutions/soda.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# soda.py (Section 15.2) +# soda.py (Section 20.2) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2019, 2023, Oracle and/or its affiliates. +# Copyright (c) 2019, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/tutorial/solutions/subclass.py b/samples/tutorial/solutions/subclass.py index ae81eb04..dfbc626b 100644 --- a/samples/tutorial/solutions/subclass.py +++ b/samples/tutorial/solutions/subclass.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# subclass.py (Section 9.2) +# subclass.py (Section 11.2) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/tutorial/solutions/vector.py b/samples/tutorial/solutions/vector.py new file mode 100644 index 00000000..a9ac1d29 --- /dev/null +++ b/samples/tutorial/solutions/vector.py @@ -0,0 +1,52 @@ +# ----------------------------------------------------------------------------- +# vector.py (Section 9.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. 
+# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +import array +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) +cur = con.cursor() + +vector_data_64 = array.array("d", [11.25, 11.75, 11.5]) + +cur.execute( + "insert into vtab (id, v64) values (:1, :2)", + [101, vector_data_64], +) + +# Query +cur.execute("select v64 from vtab") + +# Each non-sparse vector is represented as an array.array type. +# Sparse vectors are represented as oracledb.SparseVector() instances +for (v,) in cur: + print(v) + print(type(v)) diff --git a/samples/tutorial/solutions/vector_numpy.py b/samples/tutorial/solutions/vector_numpy.py new file mode 100644 index 00000000..633755ad --- /dev/null +++ b/samples/tutorial/solutions/vector_numpy.py @@ -0,0 +1,92 @@ +# ----------------------------------------------------------------------------- +# vector_numpy.py (Section 9.3) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +import array +import numpy +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) +cur = con.cursor() + + +# Convert from NumPy ndarray type to array type when inserting vectors +def numpy_converter_in(value): + if value.dtype == numpy.float64: + dtype = "d" + elif value.dtype == numpy.float32: + dtype = "f" + elif value.dtype == numpy.uint8: + dtype = "B" + else: + dtype = "b" + return array.array(dtype, value) + + +def input_type_handler(cur, value, arraysize): + if isinstance(value, numpy.ndarray): + return cur.var( + oracledb.DB_TYPE_VECTOR, + arraysize=arraysize, + inconverter=numpy_converter_in, + ) + + +con.inputtypehandler = input_type_handler + + +# Convert from array types to NumPy ndarray types when fetching vectors +def numpy_converter_out(value): + return numpy.array(value, copy=False, dtype=value.typecode) + + +def output_type_handler(cur, metadata): + if metadata.type_code is oracledb.DB_TYPE_VECTOR: + return cur.var( + metadata.type_code, + arraysize=cur.arraysize, + outconverter=numpy_converter_out, + ) + + +con.outputtypehandler = output_type_handler + +# Insert +vector_data_64 = numpy.array([11.25, 11.75, 11.5], dtype=numpy.float64) + +cur.execute( + "insert into vtab (id, v64) values (:1, :2)", + [202, vector_data_64], +) + +# Each vector is represented as a numpy.ndarray type +for (v,) in cur.execute("select v64 from vtab"): + print(v) + print(type(v)) diff --git a/samples/tutorial/sql/db_config.sql b/samples/tutorial/sql/db_config.sql index 7b4e2333..27ce595a 100644 --- a/samples/tutorial/sql/db_config.sql +++ b/samples/tutorial/sql/db_config.sql @@ -1,5 +1,5 @@ /*----------------------------------------------------------------------------- - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. * * This software is dual-licensed to you under the Universal Permissive License * (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,7 +26,7 @@ def user = "pythondemo" -- Default database connection string -def connect_string = "localhost/orclpdb" +def connect_string = "localhost/freepdb1" -- Prompt for the password accept pw char prompt 'Enter database password for &user: ' hide diff --git a/samples/tutorial/sql/setup_tutorial.sql b/samples/tutorial/sql/setup_tutorial.sql index f6b46187..9d470228 100644 --- a/samples/tutorial/sql/setup_tutorial.sql +++ b/samples/tutorial/sql/setup_tutorial.sql @@ -1,5 +1,5 @@ /*----------------------------------------------------------------------------- - * Copyright (c) 2017, 2022, Oracle and/or its affiliates. + * Copyright (c) 2017, 2025, Oracle and/or its affiliates. 
* * This software is dual-licensed to you under the Universal Permissive License * (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -214,9 +214,42 @@ create or replace function myfunc(d_p in varchar2, i_p in number) return number end; / ---PL/SQL procedure for plsql_proc.py +-- PL/SQL procedure for plsql_proc.py create or replace procedure myproc(v1_p in number, v2_p out number) as begin v2_p := v1_p * 2; end; / + +-- Table for json_insert.py (requires Oracle Database 21c or later) +begin + execute immediate 'drop table jtab'; +exception +when others then + if sqlcode not in (-00942) then + raise; + end if; +end; +/ + +create table jtab ( + id number(9) not null primary key, + json_data json) +/ + +-- Table for vector.py and vector_numpy.py +-- (requires Oracle Database 23ai or later) +begin + execute immediate 'drop table vtab'; +exception +when others then + if sqlcode not in (-00942) then + raise; + end if; +end; +/ + +create table vtab ( + id number(9) not null primary key, + v64 vector(3, float64)) +/ diff --git a/samples/tutorial/subclass.py b/samples/tutorial/subclass.py index b8c16c36..2c3bd17b 100644 --- a/samples/tutorial/subclass.py +++ b/samples/tutorial/subclass.py @@ -1,9 +1,9 @@ # ----------------------------------------------------------------------------- -# subclass.py (Section 9.1 and 9.2) +# subclass.py (Section 11.1 and 11.2) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/tutorial/type_input.py b/samples/tutorial/type_input.py index 0deb7627..fa1d95a9 100644 --- a/samples/tutorial/type_input.py +++ b/samples/tutorial/type_input.py @@ -98,7 +98,7 @@ def building_in_converter(value): def input_type_handler(cursor, value, num_elements): if isinstance(value, Building): return cursor.var( - oracledb.STRING, + oracledb.DB_TYPE_VARCHAR, arraysize=num_elements, inconverter=building_in_converter, ) diff --git a/samples/tutorial/type_input_named_obj.py b/samples/tutorial/type_input_named_obj.py index 18fa801e..5bdbd02c 100644 --- a/samples/tutorial/type_input_named_obj.py +++ b/samples/tutorial/type_input_named_obj.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn @@ -55,9 +55,8 @@ """ ) -# Create a Python class for an SDO - +# Create a Python class for an SDO class mySDO(object): def __init__(self, gtype, elemInfo, ordinates): self.gtype = gtype @@ -97,9 +96,8 @@ def SDOInputTypeHandler(cursor, value, numElements): cur.inputtypehandler = SDOInputTypeHandler cur.execute("insert into testgeometry values (:1, :2)", (1, sdo)) -# Define a function to dump the contents of an Oracle object - +# Define a function to dump the contents of an Oracle object def dumpobject(obj, prefix=" "): if obj.type.iscollection: print(prefix, "[") diff --git a/samples/tutorial/type_output_named_obj.py b/samples/tutorial/type_output_named_obj.py index 2db3921e..7815b2ad 100644 --- a/samples/tutorial/type_output_named_obj.py +++ b/samples/tutorial/type_output_named_obj.py @@ -3,7 +3,7 @@ # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2023, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,7 @@ # ----------------------------------------------------------------------------- import oracledb -import db_config_thick as db_config +import db_config con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn diff --git a/samples/tutorial/vector.py b/samples/tutorial/vector.py new file mode 100644 index 00000000..157455b4 --- /dev/null +++ b/samples/tutorial/vector.py @@ -0,0 +1,43 @@ +# ----------------------------------------------------------------------------- +# vector.py (Section 9.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +import array +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) +cur = con.cursor() + +vector_data_64 = array.array("d", [11.25, 11.75, 11.5]) + +cur.execute( + "insert into vtab (id, v64) values (:1, :2)", + [101, vector_data_64], +) diff --git a/samples/tutorial/vector_numpy.py b/samples/tutorial/vector_numpy.py new file mode 100644 index 00000000..4631739c --- /dev/null +++ b/samples/tutorial/vector_numpy.py @@ -0,0 +1,48 @@ +# ----------------------------------------------------------------------------- +# vector_numpy.py (Section 9.3) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +import array +import numpy +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) +cur = con.cursor() + +vector_data_64 = numpy.array([11.25, 11.75, 11.5], dtype=numpy.float64) + +cur.execute( + "insert into vtab (id, v64) values (:1, :2)", + [202, vector_data_64], +) + +for (v,) in cur.execute("select v64 from vtab"): + print(v) + print(type(v)) From 86bbf052005ce03b6e1c05128a76edadc076c02f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 3 Apr 2025 16:14:17 -0600 Subject: [PATCH 062/239] Fixed bug with some databases when a connection is killed. --- doc/src/release_notes.rst | 3 +++ src/oracledb/errors.py | 23 ++++++++++++++++-- src/oracledb/impl/thin/messages/base.pyx | 31 +----------------------- src/oracledb/impl/thin/packet.pyx | 4 ++- src/oracledb/impl/thin/protocol.pyx | 3 +++ 5 files changed, 31 insertions(+), 33 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e61ab7c4..972dfdef 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -19,6 +19,9 @@ Thin Mode Changes #) Added support for using :meth:`Queue.deqmany()` with JSON payloads using Oracle Database 21c. +#) Fixed bug with some databases when a connection is killed. In some + scenarios the :meth:`Connection.is_healthy()` would have incorrectly + returned the value *True* and in other cases a possible hang could occur. 
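For reference, the Queue.deqmany() change noted above can be exercised with a short script along the following lines. This is a sketch only: the queue name DEMO_JSON_QUEUE is an assumption, an existing JSON queue and Oracle Database 21c or later are required, and credentials are read from a db_config module in the same style as the tutorial samples.

import oracledb
import db_config

connection = oracledb.connect(
    user=db_config.user, password=db_config.pw, dsn=db_config.dsn
)

# Open an existing JSON queue (the name here is only an example)
queue = connection.queue("DEMO_JSON_QUEUE", "JSON")

# Do not block if the queue is currently empty
queue.deqoptions.wait = oracledb.DEQ_NO_WAIT

# Dequeue up to ten messages in a single call
for props in queue.deqmany(10):
    print(props.payload)

connection.commit()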
Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index c3eec753..9ef74320 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -112,17 +112,21 @@ def _make_adjustments(self): args = {} if match is None else match.groupdict() else: driver_error_num = driver_error_info - if driver_error_num == ERR_CONNECTION_CLOSED: - self.is_session_dead = True driver_error = _get_error_text(driver_error_num, **args) self.message = f"{driver_error}\n{self.message}" self.full_code = f"{ERR_PREFIX}-{driver_error_num:04}" # determine exception class to use when raising this error + # also determine whether error is recoverable and whether the session + # is deemed "dead" if self.full_code.startswith("DPY-"): driver_error_num = int(self.full_code[4:]) + if driver_error_num == ERR_CONNECTION_CLOSED: + self.is_session_dead = self.isrecoverable = True self.exc_type = ERR_EXCEPTION_TYPES[driver_error_num // 1000] elif self.code != 0: + if self.code in ERR_RECOVERABLE_ERROR_CODES: + self.isrecoverable = True if self.code in ERR_INTEGRITY_ERROR_CODES: self.exc_type = exceptions.IntegrityError elif self.code in ERR_INTERFACE_ERROR_CODES: @@ -485,6 +489,21 @@ def _raise_not_supported(feature: str) -> None: 28511, # lost RPC connection to heterogeneous remote agent ] +# Oracle error codes that are deemed recoverable +# NOTE: this does not include the errors that are mapped to +# ERR_CONNECTION_CLOSED since those are all deemed recoverable +ERR_RECOVERABLE_ERROR_CODES = [ + 376, # file %s cannot be read at this time + 1033, # ORACLE initialization or shutdown in progress + 1034, # the Oracle instance is not available for use + 1090, # shutdown in progress + 1115, # IO error reading block from file %s (block # %s) + 12514, # Service %s is not registered with the listener + 12571, # TNS:packet writer failure + 12757, # instance does not currently know of requested service + 16456, # missing or invalid value +] + # driver error message exception types (multiples of 1000) ERR_EXCEPTION_TYPES = { 1: exceptions.InterfaceError, diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 9e1efb9c..83c05075 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -67,39 +67,10 @@ cdef class Message: connection" error is detected, the connection is forced closed immediately. 
""" - cdef bint is_recoverable = False if self.error_occurred: - if self.error_info.num in ( - 28, # session has been terminated - 31, # session marked for kill - 376, # file %s cannot be read at this time - 603, # ORACLE server session terminated - 1012, # not logged on - 1033, # ORACLE initialization or shutdown in progress - 1034, # the Oracle instance is not available for use - 1089, # immediate shutdown or close in progress - 1090, # shutdown in progress - 1092, # ORACLE instance terminated - 1115, # IO error reading block from file %s (block # %s) - 2396, # exceeded maximum idle time - 3113, # end-of-file on communication channel - 3114, # not connected to ORACLE - 3135, # connection lost contact - 12153, # TNS:not connected - 12514, # Service %s is not registered with the listener - 12537, # TNS:connection closed - 12547, # TNS:lost contact - 12570, # TNS:packet reader failure - 12571, # TNS:packet writer failure - 12583, # TNS:no reader - 12757, # instance does not currently know of requested service - 16456, # missing or invalid value - ): - is_recoverable = True error = errors._Error(self.error_info.message, code=self.error_info.num, - offset=self.error_info.pos, - isrecoverable=is_recoverable) + offset=self.error_info.pos) if error.is_session_dead: self.conn_impl._protocol._force_close() raise error.exc_type(error) diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index 21de9b62..4a2e907d 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -65,7 +65,7 @@ cdef class Packet: char *ptr ptr = cpython.PyBytes_AS_STRING(self.buf) flags = decode_uint16be( &ptr[PACKET_HEADER_SIZE]) - if flags & TNS_DATA_FLAGS_END_OF_RESPONSE: + if flags & TNS_DATA_FLAGS_END_OF_RESPONSE or flags & TNS_DATA_FLAGS_EOF: return True if self.packet_size == PACKET_HEADER_SIZE + 3 \ and ptr[PACKET_HEADER_SIZE + 2] == TNS_MSG_TYPE_END_OF_RESPONSE: @@ -231,6 +231,8 @@ cdef class ReadBuffer(Buffer): errors._raise_err(errors.ERR_UNSUPPORTED_INBAND_NOTIFICATION, err_num=self._pending_error_num) elif self._transport is None: + if self._pending_error_num == TNS_ERR_SESSION_SHUTDOWN: + errors._raise_err(errors.ERR_CONNECTION_CLOSED) errors._raise_err(errors.ERR_NOT_CONNECTED) cdef int _get_int_length_and_sign(self, uint8_t *length, diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index 63692de4..1dfbbb11 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -902,6 +902,9 @@ cdef class BaseAsyncProtocol(BaseProtocol): """ if not self._in_connect: self._transport = None + self._read_buf._transport = None + self._write_buf._transport = None + self._read_buf._pending_error_num = TNS_ERR_SESSION_SHUTDOWN if self._read_buf._waiter is not None \ and not self._read_buf._waiter.done(): error = errors._create_err(errors.ERR_CONNECTION_CLOSED) From 3b22367f497167607ba7a12c4251cb814ad2266b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 3 Apr 2025 16:26:07 -0600 Subject: [PATCH 063/239] Only skip the tutorial, not the entire set of samples! 
--- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 09e53feb..8d193308 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,4 +14,4 @@ repos: rev: v0.0.291 hooks: - id: ruff - exclude: ^samples/ + exclude: ^samples/tutorial/ From 475a51b1dd7ac05e0026c8c9092975442d1b959e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 4 Apr 2025 09:15:31 -0600 Subject: [PATCH 064/239] Spelling updates from John Bampton (#479). --- README.md | 2 +- doc/src/api_manual/module.rst | 2 +- doc/src/release_notes.rst | 7 +++++-- doc/src/user_guide/appendix_b.rst | 2 +- doc/src/user_guide/connection_handling.rst | 2 +- doc/src/user_guide/exception_handling.rst | 2 +- doc/src/user_guide/extending.rst | 4 ++-- doc/src/user_guide/initialization.rst | 2 +- samples/bind_insert.py | 2 +- samples/bind_insert_async.py | 2 +- samples/containers/app_dev/README.md | 2 +- samples/cqn.py | 2 +- samples/database_change_notification.py | 2 +- samples/json_blob.py | 2 +- samples/json_blob_async.py | 2 +- ...nd-Oracle-Database-The-New-Wave-of-Scripting.html | 12 ++++++------ samples/tutorial/setup_tutorial.py | 2 +- setup.py | 2 +- src/oracledb/driver_mode.py | 2 +- 19 files changed, 29 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index e1d35ccc..7f0483bb 100644 --- a/README.md +++ b/README.md @@ -59,7 +59,7 @@ Examples can be found in the [/samples][samples] directory and the ## Help -Questions can be asked in [Github Discussions][ghdiscussions]. +Questions can be asked in [GitHub Discussions][ghdiscussions]. Problem reports can be raised in [GitHub Issues][ghissues]. diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 59c8d2b5..45a33682 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -2664,7 +2664,7 @@ Oracledb Methods are parsed by python-oracledb itself and a generated connect descriptor is sent to the Oracle Client libraries. This value is only used in the python-oracledb Thick mode. The default value is - :attr:`defualts.thick_mode_dsn_passthrough`. For more information, see + :attr:`defaults.thick_mode_dsn_passthrough`. For more information, see :ref:`usingconfigfiles`. The ``extra_auth_params`` parameter is expected to be a dictionary diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 972dfdef..4de6672d 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -29,6 +29,9 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Miscellaneous grammar and spelling fixes by John Bampton + (`PR 479 `__). + oracledb 3.1.0 (April 2025) --------------------------- @@ -342,7 +345,7 @@ Thin Mode Changes connection string. #) Added :meth:`oracledb.enable_thin_mode()` as a means of enabling python-oracledb Thin mode without waiting for an initial connection to be - succesfully established. Since python-oracledb defaults to Thin mode, this + successfully established. Since python-oracledb defaults to Thin mode, this method is mostly useful for applications with multiple threads concurrently creating connections to databases when the application starts (`issue 408 `__). @@ -1719,7 +1722,7 @@ cx_Oracle 8.2 (May 2021) connection. #) Eliminated a memory leak when calling :meth:`SodaOperation.filter()` with a dictionary. 
-#) The distributed transaction handle assosciated with the connection is now +#) The distributed transaction handle associated with the connection is now cleared on commit or rollback (`issue 530 `__). #) Added a check to ensure that when setting variables or object attributes, diff --git a/doc/src/user_guide/appendix_b.rst b/doc/src/user_guide/appendix_b.rst index 98748205..f2725fea 100644 --- a/doc/src/user_guide/appendix_b.rst +++ b/doc/src/user_guide/appendix_b.rst @@ -148,7 +148,7 @@ differs from the python-oracledb Thick mode in the following ways: ``handle`` parameters. The parameters that are ignored in the Thick mode include ``wallet_password``, ``disable_oob``, and ``debug_jdwp`` parameters. -* The python-oracledb Thin mode only suppports :ref:`homogeneous +* The python-oracledb Thin mode only supports :ref:`homogeneous ` pools. * The python-oracledb Thin mode creates connections in a daemon thread and so diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 6c66b165..d6cb61bd 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -2512,7 +2512,7 @@ The :meth:`Connection.is_healthy()` method is an alternative to it does not perform a full connection check. If the ``getmode`` parameter in :meth:`oracledb.create_pool()` is set to -:data:`oracledb.POOL_GETMODE_TIMEDWAIT`, then the maxium amount of time an +:data:`oracledb.POOL_GETMODE_TIMEDWAIT`, then the maximum amount of time an :meth:`~ConnectionPool.acquire()` call will wait to get a connection from the pool is limited by the value of the :data:`ConnectionPool.wait_timeout` parameter. A call that cannot be immediately satisfied will wait no longer diff --git a/doc/src/user_guide/exception_handling.rst b/doc/src/user_guide/exception_handling.rst index 6fe6ad6b..1f715a20 100644 --- a/doc/src/user_guide/exception_handling.rst +++ b/doc/src/user_guide/exception_handling.rst @@ -84,7 +84,7 @@ in the examples below: DPY-4010: a bind variable replacement value for placeholder ":1" was not provided * Connection messages: The python-oracledb Thin mode connection and networking - is handled by Python itself. Some errors portable accross operating systems + is handled by Python itself. Some errors portable across operating systems and Python versions have DPY-prefixed errors displayed by python-oracledb. Other messages are returned directly from Python and may vary accordingly. The traditional Oracle connection errors with prefix "ORA" are not shown. For diff --git a/doc/src/user_guide/extending.rst b/doc/src/user_guide/extending.rst index 313c8a1f..0d0b31b2 100644 --- a/doc/src/user_guide/extending.rst +++ b/doc/src/user_guide/extending.rst @@ -16,7 +16,7 @@ Subclassing Connections ======================= Subclassing enables applications to change python-oracledb, for example by -extending connection and statement execution behvior. This can be used to +extending connection and statement execution behavior. This can be used to alter, or log, connection and execution parameters, or to further change python-oracledb functionality. @@ -220,7 +220,7 @@ strings prefixed with "myprefix://". In myhookfunc: protocol=myprefix arg=localhost/orclpdb1 host=localhost, port=1521, service name=orclpdb1 -7. To uninstall the plugin, simply remove the packge:: +7. 
To uninstall the plugin, simply remove the package:: python -m pip uninstall myplugin diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index f057f8a9..bb30a61b 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -317,7 +317,7 @@ going to be used. In one special case, you may wish to explicitly enable Thin mode to prevent Thick mode from being enabled later. To allow application portability, the driver's internal logic allows -applications to initally attempt :ref:`standalone connection +applications to initially attempt :ref:`standalone connection ` creation in Thin mode, but then lets them :ref:`enable Thick mode ` if that connection is unsuccessful. An example is when trying to connect to an Oracle Database that turns out to be an old diff --git a/samples/bind_insert.py b/samples/bind_insert.py index abd7750f..712e858e 100644 --- a/samples/bind_insert.py +++ b/samples/bind_insert.py @@ -86,7 +86,7 @@ # Inserting a single bind still needs tuples # ----------------------------------------------------------------------------- -rows = [("Eleventh",), ("Twelth",)] +rows = [("Eleventh",), ("Twelfth",)] with connection.cursor() as cursor: cursor.executemany("insert into mytab(id, data) values (12, :1)", rows) diff --git a/samples/bind_insert_async.py b/samples/bind_insert_async.py index 2e3a3660..37f0acfb 100644 --- a/samples/bind_insert_async.py +++ b/samples/bind_insert_async.py @@ -92,7 +92,7 @@ async def main(): # Inserting a single bind still needs tuples # ------------------------------------------------------------------------- - rows = [("Eleventh",), ("Twelth",)] + rows = [("Eleventh",), ("Twelfth",)] await connection.executemany( "insert into mytab(id, data) values (12, :1)", rows diff --git a/samples/containers/app_dev/README.md b/samples/containers/app_dev/README.md index f05d3257..25569ebd 100644 --- a/samples/containers/app_dev/README.md +++ b/samples/containers/app_dev/README.md @@ -23,7 +23,7 @@ It has been tested on macOS using podman and docker. By default, Apache has SSL enabled and is listening on port 8443. 
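Related to the initialization documentation touched above: an application that never needs Thick mode can lock Thin mode in before any connection is created. A minimal sketch, assuming no pool or standalone connection exists yet:

import oracledb

# Fix the driver in Thin mode so that a later call to
# init_oracle_client() cannot switch the process to Thick mode.
oracledb.enable_thin_mode()

print(oracledb.is_thin_mode())  # prints True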
-## Usage for Application Devlopment +## Usage for Application Development - Run a container: diff --git a/samples/cqn.py b/samples/cqn.py index c5c6516a..d6399f75 100644 --- a/samples/cqn.py +++ b/samples/cqn.py @@ -55,7 +55,7 @@ def callback(message): registered = False return print("Message database name:", message.dbname) - print("Message tranasction id:", message.txid) + print("Message transaction id:", message.txid) print("Message queries:") for query in message.queries: print("--> Query ID:", query.id) diff --git a/samples/database_change_notification.py b/samples/database_change_notification.py index 7861ab2c..385e73bf 100644 --- a/samples/database_change_notification.py +++ b/samples/database_change_notification.py @@ -55,7 +55,7 @@ def callback(message): registered = False return print("Message database name:", message.dbname) - print("Message tranasction id:", message.txid) + print("Message transaction id:", message.txid) print("Message tables:") for table in message.tables: print("--> Table Name:", table.name) diff --git a/samples/json_blob.py b/samples/json_blob.py index 61b8576f..e0d06658 100644 --- a/samples/json_blob.py +++ b/samples/json_blob.py @@ -57,7 +57,7 @@ client_version = oracledb.clientversion()[0] db_version = int(connection.version.split(".")[0]) -# Minimum database vesion is 12 +# Minimum database version is 12 if db_version < 12: sys.exit("This example requires Oracle Database 12.1.0.2 or later") diff --git a/samples/json_blob_async.py b/samples/json_blob_async.py index d8b9221d..5c9fb45c 100644 --- a/samples/json_blob_async.py +++ b/samples/json_blob_async.py @@ -54,7 +54,7 @@ async def main(): params=sample_env.get_connect_params(), ) - # Minimum database vesion is 12 + # Minimum database version is 12 db_version = int(connection.version.split(".")[0]) if db_version < 12: sys.exit("This example requires Oracle Database 12.1.0.2 or later") diff --git a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html index 8734268f..394d51a5 100644 --- a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html +++ b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html @@ -860,7 +860,7 @@

2.2 Connection pool experiments

numberOfThreads.

Try changing getmode to - oracledb.POOL_GETMODE_NOWAIT, reducing the maxium pool size to + oracledb.POOL_GETMODE_NOWAIT, reducing the maximum pool size to 2, and increasing the number of threads to 3. When number_of_threads exceeds the maximum size of the pool, the acquire() call will now generate an error such as @@ -1073,7 +1073,7 @@

2.5 More DRCP investigation

oracledb.PURITY_NEW to see the effect on the DRCP NUM_MISSES statistic.

-

Another experiement is to include the time module at the +

Another experiment is to include the time module at the file top:

@@ -2496,7 +2496,7 @@ 

12.1 How to bind named objects

This uses gettype() to get the database types of the SDO and its object attributes. The newobject() calls create -Python representations of those objects. The python object atributes are +Python representations of those objects. The python object attributes are then set. Oracle VARRAY types such as SDO_ELEM_INFO_ARRAY are set with extend().
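A condensed sketch of that sequence, assuming an open connection named con and a database that includes the standard MDSYS.SDO_GEOMETRY type:

# Look up the object types once per connection
obj_type = con.gettype("MDSYS.SDO_GEOMETRY")
element_info_type = con.gettype("MDSYS.SDO_ELEM_INFO_ARRAY")
ordinate_type = con.gettype("MDSYS.SDO_ORDINATE_ARRAY")

# Build the VARRAY attributes first; their values are set with extend()
element_info = element_info_type.newobject()
element_info.extend([1, 1003, 3])
ordinates = ordinate_type.newobject()
ordinates.extend([1, 1, 5, 7])

# Create the SDO object itself and set its attributes
obj = obj_type.newobject()
obj.SDO_GTYPE = 2003
obj.SDO_ELEM_INFO = element_info
obj.SDO_ORDINATES = ordinates

The resulting object can then be passed directly as a bind value to cursor.execute().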

@@ -3230,7 +3230,7 @@

18. Pipelining multiple operations

Connection is established using oracledb.connect_async(). - Asynchronous methods are awated.

+ Asynchronous methods are awaited.

The script creates a pipeline with create_pipeline() and adds two database operations. The use of asyncio.gather() @@ -3265,7 +3265,7 @@

19. Python-oracledb Thick mode

connectivity to Oracle Database. There are some additional features these libraries provide which are therefore only available in python-oracledb Thick mode. The next example shows one of these. Other Oracle Database features that -require python-oracledb Thick mode include Application Continurity, and +require python-oracledb Thick mode include Application Continuity, and Continuous Query Notification. Documentation link for further reading: Enabling python-oracledb Thick mode.
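A minimal sketch of enabling Thick mode, which simply loads the Oracle Client libraries before the first connection is made; whether a lib_dir argument is needed depends on the platform and installation:

import oracledb

# Load the Oracle Client libraries; must run before any connection or
# pool is created. Pass lib_dir="..." if the libraries are not already
# in the system library search path.
oracledb.init_oracle_client()

print(oracledb.is_thin_mode())  # prints False once Thick mode is active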

@@ -3785,7 +3785,7 @@

Modules

These can be installed from PyPI, for example from pypi.org/project/oracledb/ (as shown in the tutorial setup instructions). In some -enviroments you may prefer to install using a package repository such as yum.oracle.com for Oracle Linux.

diff --git a/samples/tutorial/setup_tutorial.py b/samples/tutorial/setup_tutorial.py index 3a64cc7c..64ba3c2a 100644 --- a/samples/tutorial/setup_tutorial.py +++ b/samples/tutorial/setup_tutorial.py @@ -35,7 +35,7 @@ user=db_config.user, password=db_config.pw, dsn=db_config.dsn ) -# create sample schemas and defintions for the tutorial +# create sample schemas and definitions for the tutorial print("Setting up the sample tables and other DB objects for the tutorial...") run_sql_script.run_sql_script( con, "setup_tutorial", user=db_config.user, pw=db_config.pw diff --git a/setup.py b/setup.py index 9729f381..9e753737 100644 --- a/setup.py +++ b/setup.py @@ -78,7 +78,7 @@ thin_depends.append(base_pxd) # if the platform is macOS: -# - target the minimim OS version that current Python packages work with. +# - target the minimum OS version that current Python packages work with. # (Use 'otool -l /path/to/python' and look for 'version' in the # LC_VERSION_MIN_MACOSX section) # - add argument required for cross-compilation for both x86_64 and arm64 diff --git a/src/oracledb/driver_mode.py b/src/oracledb/driver_mode.py index 630b9934..42594a9f 100644 --- a/src/oracledb/driver_mode.py +++ b/src/oracledb/driver_mode.py @@ -127,7 +127,7 @@ def is_thin_mode() -> bool: oracledb.init_oracle_client() is called successfully, then a subsequent call to is_thin_mode() will return False indicating that Thick mode is enabled. Once the first standalone connection or connection pool is - created succesfully, or a call to oracledb.init_oracle_client() is made + created successfully, or a call to oracledb.init_oracle_client() is made successfully, then python-oracledb's mode is fixed and the value returned by is_thin_mode() will never change for the lifetime of the process. From 54c321e4a31ce59837e9e61c7cc4e0636d045a26 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 7 Apr 2025 10:25:20 -0600 Subject: [PATCH 065/239] Tweak release notes. --- doc/src/release_notes.rst | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 4de6672d..1ee41da1 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,11 +17,12 @@ oracledb 3.2.0 (TBD) Thin Mode Changes +++++++++++++++++ -#) Added support for using :meth:`Queue.deqmany()` with JSON payloads using - Oracle Database 21c. +#) Emulate support for :meth:`Queue.deqmany()` with JSON payloads when using + Oracle Database 21c by internally calling :meth:`Queue.deqone()` as many + times as needed. #) Fixed bug with some databases when a connection is killed. In some - scenarios the :meth:`Connection.is_healthy()` would have incorrectly - returned the value *True* and in other cases a possible hang could occur. + scenarios :meth:`Connection.is_healthy()` would have incorrectly returned + the value *True* and in other cases a possible hang could occur. Thick Mode Changes ++++++++++++++++++ From 69d58b8ac7e1fd1bfaadb3aa57344a67417b9247 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 09:37:55 -0600 Subject: [PATCH 066/239] Remove unused variable. 
--- src/oracledb/impl/thin/messages/connect.pyx | 1 - 1 file changed, 1 deletion(-) diff --git a/src/oracledb/impl/thin/messages/connect.pyx b/src/oracledb/impl/thin/messages/connect.pyx index ee4cead9..d8715039 100644 --- a/src/oracledb/impl/thin/messages/connect.pyx +++ b/src/oracledb/impl/thin/messages/connect.pyx @@ -48,7 +48,6 @@ cdef class ConnectMessage(Message): const char_type *redirect_data uint32_t flags2 = 0 uint8_t flags1 - bytes db_uuid if buf._current_packet.packet_type == TNS_PACKET_TYPE_REDIRECT: if not self.read_redirect_data_len: buf.read_uint16be(&self.redirect_data_len) From 9a9e4547634270102f35d728fe4d8eefab3db103 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 09:38:44 -0600 Subject: [PATCH 067/239] Doc updates. --- doc/src/user_guide/appendix_c.rst | 195 +++++++++++---------- doc/src/user_guide/connection_handling.rst | 7 +- doc/src/user_guide/sql_execution.rst | 19 +- doc/src/user_guide/troubleshooting.rst | 5 + 4 files changed, 119 insertions(+), 107 deletions(-) diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index 64deb980..70773042 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -95,7 +95,8 @@ from cx_Oracle: - ``encoding`` and ``nencoding``: The encodings in use are always UTF-8. - ``threaded``: Threaded Oracle Call Interface (OCI) is now always enabled - in Thick mode. This option is not relevant to the Thin mode. + in python-oracledb Thick mode. This option is not relevant to the Thin + mode. See :ref:`deprecations` for more information. @@ -115,11 +116,11 @@ The :ref:`Connection object ` differences between the python-oracledb and cx_Oracle drivers are: - The attribute :attr:`Connection.maxBytesPerCharacter` is deprecated. This - will return a constant value of 4 since encodings are always UTF-8. + will return a constant value of *4* since encodings are always UTF-8. - A new boolean attribute, :attr:`Connection.thin` is available. This - attribute is True if the connection was established in the Thin mode. In - Thick mode, the value of this attribute is False. + attribute is *True* if the connection was established in python-oracledb Thin + mode. In Thick mode, the value of this attribute is *False*. - The new method signature of :attr:`Connection.outputtypehandler` is ``handler(cursor, metadata)``. The old signature ``handler(cursor, name, @@ -190,9 +191,9 @@ The SessionPool object (which is an alias for the :ref:`ConnectionPool object of ``cx_Oracle.SessionPool``. - A new boolean attribute, ``SessionPool.thin`` (see - :attr:`ConnectionPool.thin`) is available. This attribute is True if the - connection was established in the Thin mode. In Thick mode, the value of - this attribute is False. + :attr:`ConnectionPool.thin`) is available. This attribute is *True* if the + connection was established in python-oracledb Thin mode. In Thick mode, the + value of this attribute is *False*. Cursor Object Differences from cx_Oracle ---------------------------------------- @@ -211,7 +212,7 @@ python-oracledb and cx_Oracle drivers are: - ``Cursor.executemanyprepared()``: This method was previously deprecated in cx_Oracle 6.4 and has been removed in python-oracledb. Instead, use - :meth:`Cursor.executemany()`, by passing None for the statement argument and + :meth:`Cursor.executemany()`, by passing *None* for the statement argument and an integer for the parameters argument. 
- ``Cursor.bindarraysize``: This attribute is desupported and removed in @@ -219,7 +220,7 @@ python-oracledb and cx_Oracle drivers are: - :attr:`Cursor.rowcount`: After :meth:`Cursor.execute()` or :meth:`Cursor.executemany()` with PL/SQL statements, ``Cursor.rowcount`` - will return 0. If the cursor or connection are not open, then the value -1 + will return *0*. If the cursor or connection are not open, then the value *-1* will be returned as required by the Python Database API. - :attr:`Cursor.description`: This attribute was previously a sequence of @@ -393,18 +394,18 @@ to python-oracledb: See :ref:`driverdiff`. -- The python-oracledb Thin and Thick modes have the same level of support for +- python-oracledb Thin and Thick modes have the same level of support for the `Python Database API specification `_ and can be used to connect to on-premises databases and Oracle Cloud - databases. However, the python-oracledb Thin mode does not support some of - the advanced Oracle Database features such as Application Continuity (AC), - Advanced Queuing (AQ), Continuous Query Notification (CQN), and Sharding. - See :ref:`Features Supported ` for details. + databases. However, python-oracledb Thin mode does not support some + advanced Oracle Database features such as Application Continuity (AC), + Continuous Query Notification (CQN), and Sharding. See :ref:`Features + Supported ` for details. - python-oracledb can be used in SQLAlchemy, Django, Pandas, Superset and other frameworks and Object-relational Mappers (ORMs). To use python-oracledb in - versions of these libraries that don't have native support for the new name, - you can override the use of cx_Oracle with a few lines of code. See + older versions of these libraries that do not have native support for the new + name, you can override the use of cx_Oracle with a few lines of code. See :ref:`frameworks`. - python-oracledb connection and pool creation calls require keyword arguments @@ -421,32 +422,8 @@ to python-oracledb: oracledb.connect("scott", pw, "localhost/orclpdb") -- The python-oracledb Thin mode ignores all NLS environment variables. It - also ignores the ``ORA_TZFILE`` environment variable. Thick mode does use - these variables. See :ref:`globalization` for alternatives. - -- To use a ``tnsnames.ora`` file in the python-oracledb Thin mode, you must - explicitly set the environment variable ``TNS_ADMIN`` to the directory - containing the file, or set :attr:`defaults.config_dir`, or set the - ``config_dir`` parameter when connecting. - - Only python-oracledb Thick mode will read :ref:`sqlnet.ora ` - files. The Thin mode lets equivalent properties be set in the application - when connecting. - - Configuration files in a "default" location such as the Instant Client - ``network/admin/`` subdirectory, in ``$ORACLE_HOME/network/admin/``, or in - ``$ORACLE_BASE/homes/XYZ/network/admin/`` (in a read-only Oracle Database - home) are not automatically loaded in Thin mode. Default locations are - only automatically searched in Thick mode. - -- To use the python-oracledb Thin mode in an ORACLE_HOME database installation - environment, you must use an explicit connection string since the - ``ORACLE_SID``, ``TWO_TASK``, and ``LOCAL`` environment variables are not - used. They are used in Thick mode. - -- This is a major release so some previously deprecated features are no longer - available. See :ref:`deprecations`. +- Some previously deprecated features are no longer available. See + :ref:`deprecations`. .. 
_commonupgrade: @@ -501,7 +478,7 @@ following steps: You **must** replace positional parameters with keyword parameters, unless only one parameter is being passed. Python-oracledb uses keyword parameters exclusively unless a DSN containing the user, password, and connect string - combined, for example ``un/pw@cs``, is used. This change makes the driver + combined, for example ``"un/pw@cs"``, is used. This change makes the driver compliant with the Python Database API specification `PEP 249 `_. @@ -545,8 +522,8 @@ following steps: :data:`~oracledb.POOL_GETMODE_NOWAIT`. The new default value improves the behavior for most applications. If the pool is in the middle of growing, the new value prevents transient connection creation errors from occurring - when using the Thin mode, or when using the Thick mode with recent Oracle - Client libraries. + when using python-oracledb Thin mode, or when using Thick mode with recent + Oracle Client libraries. If the old default value is required, modify any pool creation code to explicitly specify ``getmode=oracledb.POOL_GETMODE_NOWAIT``. @@ -557,36 +534,45 @@ following steps: as :data:`~oracledb.POOL_GETMODE_NOWAIT` and :data:`~oracledb.PURITY_SELF` are now preferred. The old namespaces still work. -7. The method signature of the :ref:`output type handler ` - which can be specified on a - :attr:`connection ` or on a - :attr:`cursor ` is ``handler(cursor, metadata)``. - The old signature ``handler(cursor, name, default_type, length, precision, - scale)`` was deprecated in python-oracledb 1.4 but will still work and will - be removed in a future version. - -8. VARCHAR2 and LOB columns that have the ``IS JSON`` constraint enabled are +7. VARCHAR2 and LOB columns that have the ``IS JSON`` constraint enabled are fetched by default as Python objects in python-oracledb. In cx_Oracle, - VARCHAR2 and LOB columns that contain JSON data are fetched by default as + VARCHAR2 and LOB columns that contain JSON data were fetched by default as strings and LOB objects respectively. See :ref:`fetchisjson`. -9. Review the following sections to see if your application requirements are - satisfied by the python-oracledb Thin mode: +8. Review :ref:`compatibility`. + + If your code base uses an older cx_Oracle version, review the previous + :ref:`release notes ` for additional changes to modernize + the code. + +9. Modernize code as needed or desired. + + For example, replace all usages of the deprecated Advanced Queuing API with + the new API originally introduced in cx_Oracle 7.2, see + :ref:`aqusermanual`. + + The method signature of the :ref:`output type handler ` + which can be specified on a :attr:`connection + ` or on a :attr:`cursor + ` is ``handler(cursor, metadata)``. The old + signature ``handler(cursor, name, default_type, length, precision, scale)`` + was deprecated in python-oracledb 1.4 but will still work and will be + removed in a future version. + + See :ref:`deprecations` for the list of all deprecations in python-oracledb. + +10. Review the following sections to see if your application requirements are + satisfied by python-oracledb Thin mode: - :ref:`featuresummary` - :ref:`driverdiff` - If your application requirements are not supported by the Thin mode, then - use the python-oracledb Thick mode. + If so, then follow :ref:`upgradethin`. -10. Review :ref:`compatibility`. + If your application requirements are not supported by python-oracledb Thin + mode, then use Thick mode, see :ref:`upgradethick`. 
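As a hedged illustration of the pool creation changes described in the upgrade
steps above — the credentials, connect string, and pool sizing below are
placeholders, not values taken from this patch series — a pool created with
keyword arguments that explicitly restores the old cx_Oracle "no wait"
behavior might look like this:

.. code-block:: python

    import getpass
    import oracledb

    un = "scott"              # placeholder credentials
    cs = "localhost/orclpdb"  # placeholder connect string
    pw = getpass.getpass(f"Enter password for {un}@{cs}: ")

    # Keyword arguments are required in python-oracledb; getmode is passed
    # explicitly here only to keep the old cx_Oracle "no wait" default
    # described in the steps above. Omitting getmode uses the new default,
    # which waits for a connection instead of raising an error.
    pool = oracledb.create_pool(
        user=un,
        password=pw,
        dsn=cs,
        min=2,
        max=5,
        increment=1,
        getmode=oracledb.POOL_GETMODE_NOWAIT,
    )

Calls to ``pool.acquire()`` elsewhere in the application then behave like the
old cx_Oracle default, subject to the transient growth behavior noted above.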
- If your code base uses an older cx_Oracle version, review the previous - :ref:`release notes ` for additional changes to modernize - the code. - -11. Modernize code as needed or desired. See :ref:`deprecations` for the list - of deprecations in python-oracledb. +.. _upgradethin: Additional Upgrade Steps to use python-oracledb Thin Mode +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -595,7 +581,7 @@ To use python-oracledb Thin mode, the following changes need to be made in addition to the common :ref:`commonupgrade`: 1. Remove calls to :func:`~oracledb.init_oracle_client` since this turns on - the python-oracledb Thick mode. + python-oracledb Thick mode. 2. If the ``config_dir`` parameter of :func:`~oracledb.init_oracle_client` had been used, then set the new :attr:`defaults.config_dir` attribute to the @@ -628,19 +614,34 @@ addition to the common :ref:`commonupgrade`: See :ref:`otherinit`. -4. If the application is connecting using a :ref:`TNS Alias ` from - a ``tnsnames.ora`` file located in a "default" location such as the Instant - Client ``network/admin/`` subdirectory, in ``$ORACLE_HOME/network/admin/``, - or in ``$ORACLE_BASE/homes/XYZ/network/admin/`` (in a read-only Oracle - Database home), then the configuration file directory must now explicitly be - set as shown in Step 2. - -5. Remove calls to :func:`oracledb.clientversion()` which is only available in - the python-oracledb Thick mode. Oracle Client libraries are not available +4. Remove calls to :func:`oracledb.clientversion()` which is only available in + python-oracledb Thick mode. Oracle Client libraries are not available in Thin mode. -6. Ensure that any assumptions about when connections are created in the - connection pool are eliminated. The python-oracledb Thin mode creates +5. To connect using a :ref:`TNS Alias ` from a ``tnsnames.ora`` + file (see :ref:`optnetfiles`) in python-oracledb Thin mode, you should + explicitly set the environment variable ``TNS_ADMIN`` to the directory + containing the file, or set :attr:`defaults.config_dir`, or set the + ``config_dir`` parameter when connecting. + + A ``tnsnames.ora`` file in a "default" location such as the Instant Client + ``network/admin/`` subdirectory may not be automatically loaded in Thin mode + on some platforms. A ``tnsnames.ora`` file identified by the Windows + registry, or in ``$ORACLE_BASE/homes/XYZ/network/admin/`` (in a read-only + Oracle Database home) will never be automatically located by python-oracledb + Thin mode. + + Only python-oracledb Thick mode will read :ref:`sqlnet.ora ` and + :ref:`oraaccess.xml ` files. The Thin mode lets equivalent + properties be set in the application when connecting. + +6. To use python-oracledb Thin mode in an ORACLE_HOME database installation + environment, you must use an explicit connection string since the + ``ORACLE_SID``, ``TWO_TASK``, and ``LOCAL`` environment variables are not + used. They are used in Thick mode. + +7. Ensure that any assumptions about when connections are created in the + connection pool are eliminated. Python-oracledb Thin mode creates connections in a daemon thread and so the attribute :attr:`ConnectionPool.opened` will change over time and will not be equal to :attr:`ConnectionPool.min` immediately after the pool is created. Note @@ -651,9 +652,14 @@ addition to the common :ref:`commonupgrade`: :meth:`ConnectionPool.acquire()` until sufficient time has passed for connections in the pool to be created. -7. Review error handling improvements. See :ref:`errorhandling`. +8. 
Review error handling improvements. See :ref:`errorhandling`. -8. Review locale and globalization usage. See :ref:`globalization`. +9. Review locale and globalization usage. Python-oracledb Thin mode ignores + all NLS environment variables. It also ignores the ``ORA_TZFILE`` + environment variable. Thick mode does use these variables. See + :ref:`globalization`. + +.. _upgradethick: Additional Upgrade Steps to use python-oracledb Thick Mode ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -661,28 +667,25 @@ Additional Upgrade Steps to use python-oracledb Thick Mode To use python-oracledb Thick mode, the following changes need to be made in addition to the common :ref:`commonupgrade`: -1. The function :func:`oracledb.init_oracle_client()` *must* be called. It - can be called anywhere before the first call to :func:`~oracledb.connect()`, - ``oracledb.Connection()``, and ``oracledb.SessionPool()``. This enables the - Thick mode. See :ref:`enablingthick` for more details. +1. The function :func:`oracledb.init_oracle_client()` *must* be called to + enable python-oracle Thick mode. It can be called anywhere before the first + call to :func:`oracledb.connect()`, ``oracledb.Connection()``, or + ``oracledb.SessionPool()``. See :ref:`enablingthick` for more details. - The requirement to call ``init_oracle_client()`` means that Oracle Client - library loading is not automatically deferred until the driver is first - used, such as when a connection is opened. The application must explicitly - manage this if deferral is required. + The requirement to call :func:`~oracledb.init_oracle_client()` means that + Oracle Client library loading is not automatically deferred until the driver + is first used, such as when a connection is opened. The application must + explicitly manage this if deferral is required. - In python-oracledb, ``init_oracle_client()`` can be called multiple times in - a Python process as long as the arguments are the same. + In python-oracledb, :func:`~oracledb.init_oracle_client()` can be called + multiple times in a Python process as long as the arguments are the same. Note that on Linux and related operating systems, the - ``init_oracle_client()`` parameter ``lib_dir`` should not be + :func:`~oracledb.init_oracle_client()` parameter ``lib_dir`` should not be passed. Instead, set the system library search path with ``ldconfig`` or ``LD_LIBRARY_PATH`` prior to running Python. -2. Replace all usages of the deprecated Advanced Queuing API with the new API - originally introduced in cx_Oracle 7.2, see :ref:`aqusermanual`. - -3. Review error handling improvements. See :ref:`errorhandling`. +2. Review error handling improvements. See :ref:`errorhandling`. Code to Aid the Upgrade to python-oracledb ------------------------------------------ @@ -725,7 +728,7 @@ You can then choose what mode is in use by setting the environment variable export ORA_PYTHON_DRIVER_TYPE=thin python test.py -Output shows the python-oracledb Thin mode was used:: +Output shows that python-oracledb Thin mode was used:: python-oracledb thn : 3.0.0 diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index d6cb61bd..24876705 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -4869,10 +4869,9 @@ encryption. A database username and password are still required for your application connections. 
If you need to create a new database schema so you do not login as the privileged ADMIN user, refer to the relevant Oracle Cloud documentation, -for example see `Create Database Users -`__ in the Oracle -Autonomous Database manual. +for example see `Create Database Users `__ in the +Oracle Autonomous Database manual. .. _onewaytls: diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index b09b5ee9..565c7132 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -1376,13 +1376,18 @@ then: :ref:`fetching-raw-data`. Note that the encoding used for all character data in python-oracledb is "UTF-8". -* Check for corrupt data in the database. - -If data really is corrupt, you can pass options to the internal `decode() -`__ used by -python-oracledb to allow it to be selected and prevent the whole query failing. -Do this by creating an :ref:`outputtypehandler ` and -setting ``encoding_errors``. For example, to replace corrupt characters in +* Check for corrupt data in the database and fix it. For example, if you have + a table MYTABLE with a character column MYVALUE that you suspect has some + corrupt values, then you may be able to identify the problem data by using a + query like ``select id from mytable where + utl_i18n.validate_character_encoding(myvalue) > 0`` which will print out the + keys of the rows with invalid data. + +If corrupt data cannot be modified, you can pass options to the internal +`decode() `__ +used by python-oracledb to allow it to be selected and prevent the whole query +failing. Do this by creating an :ref:`outputtypehandler ` +and setting ``encoding_errors``. For example, to replace corrupt characters in character columns: .. code-block:: python diff --git a/doc/src/user_guide/troubleshooting.rst b/doc/src/user_guide/troubleshooting.rst index 028d6fd1..3093c9de 100644 --- a/doc/src/user_guide/troubleshooting.rst +++ b/doc/src/user_guide/troubleshooting.rst @@ -195,6 +195,11 @@ Client library could not be loaded. both 32-bit. The ``DPI-1047`` message will tell you whether the 64-bit or 32-bit Oracle Client is needed for your Python. +- If you are using Oracle Instant Client libraries (and not using a full + Oracle Database installation or a full Oracle Instant Client installation + (such as installed by Oracle's GUI installer)), check whether the + ``ORACLE_HOME`` environment variable is set. If it is, unset this variable. + - Set the environment variable ``DPI_DEBUG_LEVEL`` to 64 and restart python-oracledb. The trace messages will show how and where python-oracledb is looking for the Oracle Client libraries. From 15dff27547356bcd7007e64574f35cda26600dee Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 09:43:02 -0600 Subject: [PATCH 068/239] Added support for the Cython 3.1 release (#493). --- doc/src/release_notes.rst | 2 ++ src/oracledb/interchange/column.py | 47 ++++++++++++------------------ 2 files changed, 21 insertions(+), 28 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 1ee41da1..ff8348d9 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -30,6 +30,8 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Added support for using the Cython 3.1 release + (`issue 493 `__). #) Miscellaneous grammar and spelling fixes by John Bampton (`PR 479 `__). 
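As a minimal sketch of the ``encoding_errors`` approach described in the
``sql_execution.rst`` change above — the table MYTABLE, column MYVALUE, and
the connection credentials are illustrative placeholders only, not part of
this patch:

.. code-block:: python

    import getpass
    import oracledb

    def output_type_handler(cursor, metadata):
        # Only character columns need the relaxed decoding; other column
        # types keep their default handling (the handler returns None).
        if metadata.type_code is oracledb.DB_TYPE_VARCHAR:
            return cursor.var(
                metadata.type_code,
                arraysize=cursor.arraysize,
                encoding_errors="replace",
            )

    un = "scott"              # placeholder credentials
    cs = "localhost/orclpdb"  # placeholder connect string
    pw = getpass.getpass(f"Enter password for {un}@{cs}: ")

    with oracledb.connect(user=un, password=pw, dsn=cs) as connection:
        connection.outputtypehandler = output_type_handler
        with connection.cursor() as cursor:
            # Corrupt bytes in MYVALUE are replaced with the Unicode
            # replacement character instead of failing the whole query.
            for (value,) in cursor.execute("select myvalue from mytable"):
                print(value)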
diff --git a/src/oracledb/interchange/column.py b/src/oracledb/interchange/column.py index 8701b7b4..c44873dc 100644 --- a/src/oracledb/interchange/column.py +++ b/src/oracledb/interchange/column.py @@ -41,19 +41,8 @@ ) from .nanoarrow_bridge import ( - NANOARROW_TIME_UNIT_SECOND, - NANOARROW_TIME_UNIT_MILLI, - NANOARROW_TIME_UNIT_MICRO, - NANOARROW_TIME_UNIT_NANO, - NANOARROW_TYPE_BINARY, - NANOARROW_TYPE_DOUBLE, - NANOARROW_TYPE_FLOAT, - NANOARROW_TYPE_INT64, - NANOARROW_TYPE_LARGE_BINARY, - NANOARROW_TYPE_LARGE_STRING, - NANOARROW_TYPE_STRING, - NANOARROW_TYPE_TIMESTAMP, - NANOARROW_TYPE_DECIMAL128, + ArrowTimeUnit, + ArrowType, ) @@ -92,8 +81,8 @@ def _offsets_buffer(self): size_in_bytes=size_bytes, address=address, buffer_type="offsets" ) if self.ora_arrow_array.arrow_type in ( - NANOARROW_TYPE_LARGE_STRING, - NANOARROW_TYPE_LARGE_BINARY, + ArrowType.NANOARROW_TYPE_LARGE_STRING, + ArrowType.NANOARROW_TYPE_LARGE_BINARY, ): dtype = (DtypeKind.INT, 64, "l", "=") else: @@ -133,24 +122,26 @@ def dtype(self) -> Dtype: Returns the data type of the column. The returned dtype provides information on the storage format and the type of data in the column. """ - if self.ora_arrow_array.arrow_type == NANOARROW_TYPE_INT64: + arrow_type = self.ora_arrow_array.arrow_type + if arrow_type == ArrowType.NANOARROW_TYPE_INT64: return (DtypeKind.INT, 64, "l", "=") - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_DOUBLE: + elif arrow_type == ArrowType.NANOARROW_TYPE_DOUBLE: return (DtypeKind.FLOAT, 64, "g", "=") - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_FLOAT: + elif arrow_type == ArrowType.NANOARROW_TYPE_FLOAT: return (DtypeKind.FLOAT, 64, "g", "=") - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_STRING: + elif arrow_type == ArrowType.NANOARROW_TYPE_STRING: return (DtypeKind.STRING, 8, "u", "=") - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_TIMESTAMP: - if self.ora_arrow_array.time_unit == NANOARROW_TIME_UNIT_MICRO: + elif arrow_type == ArrowType.NANOARROW_TYPE_TIMESTAMP: + time_unit = self.ora_arrow_array.time_unit + if time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_MICRO: return (DtypeKind.DATETIME, 64, "tsu:", "=") - elif self.ora_arrow_array.time_unit == NANOARROW_TIME_UNIT_SECOND: + elif time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_SECOND: return (DtypeKind.DATETIME, 64, "tss:", "=") - elif self.ora_arrow_array.time_unit == NANOARROW_TIME_UNIT_MILLI: + elif time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_MILLI: return (DtypeKind.DATETIME, 64, "tsm:", "=") - elif self.ora_arrow_array.time_unit == NANOARROW_TIME_UNIT_NANO: + elif time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_NANO: return (DtypeKind.DATETIME, 64, "tsn:", "=") - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_DECIMAL128: + elif arrow_type == ArrowType.NANOARROW_TYPE_DECIMAL128: array = self.ora_arrow_array return ( DtypeKind.DECIMAL, @@ -158,11 +149,11 @@ def dtype(self) -> Dtype: f"d:{array.precision}.{array.scale}", "=", ) - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_BINARY: + elif arrow_type == ArrowType.NANOARROW_TYPE_BINARY: return (DtypeKind.STRING, 8, "z", "=") - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_LARGE_BINARY: + elif arrow_type == ArrowType.NANOARROW_TYPE_LARGE_BINARY: return (DtypeKind.STRING, 8, "Z", "=") - elif self.ora_arrow_array.arrow_type == NANOARROW_TYPE_LARGE_STRING: + elif arrow_type == ArrowType.NANOARROW_TYPE_LARGE_STRING: return (DtypeKind.STRING, 8, "U", "=") def get_buffers(self) -> ColumnBuffers: From 
1b5a4cb8d7e384ddf9014df5f56f3924e40876c8 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 09:45:39 -0600 Subject: [PATCH 069/239] Fixed a bug resulting in a segfault when attempting to use an output type handler while fetching data frames (#486). --- doc/src/release_notes.rst | 5 +++++ src/oracledb/impl/base/cursor.pyx | 6 +++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ff8348d9..f24fe4fd 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -30,6 +30,11 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Fixed a bug resulting in a segfault when attempting to use an + :ref:`output type handler ` while fetching data frames + with :meth:`Connection.fetch_df_all()` and + :meth:`Connection.fetch_df_batches()` + (`issue 486 `__). #) Added support for using the Cython 3.1 release (`issue 493 `__). #) Miscellaneous grammar and spelling fixes by John Bampton diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index 3a78fa40..5d6b11d5 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -301,11 +301,15 @@ cdef class BaseCursorImpl: """ Return the output type handler to use for the cursor. If one is not directly defined on the cursor then the one defined on the connection - is used instead. + is used instead. When fetching Arrow data, however, no output type + handlers are used since for most data no conversion to Python objects + ever takes place. """ cdef: BaseConnImpl conn_impl object type_handler + if self.fetching_arrow: + return None if self.outputtypehandler is not None: type_handler = self.outputtypehandler else: From 46cc1fec6f0187e80dcae0ea45a9faa1ce06fd19 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 09:46:33 -0600 Subject: [PATCH 070/239] Fix occasional "ORA-38810: cannot drop edition that has a parent". --- samples/sql/drop_schema.sql | 6 ++++-- tests/sql/drop_schema.sql | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/samples/sql/drop_schema.sql b/samples/sql/drop_schema.sql index 82c6c949..62e11959 100644 --- a/samples/sql/drop_schema.sql +++ b/samples/sql/drop_schema.sql @@ -1,5 +1,5 @@ /*----------------------------------------------------------------------------- - * Copyright 2017, 2022, Oracle and/or its affiliates. + * Copyright 2017, 2025, Oracle and/or its affiliates. * * This software is dual-licensed to you under the Universal Permissive License * (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -43,7 +43,9 @@ begin for r in ( select edition_name from dba_editions - where edition_name in (upper('&edition_name')) + start with edition_name = upper('&edition_name') + connect by prior edition_name = parent_edition_name + order by level desc ) loop execute immediate 'drop edition ' || r.edition_name || ' cascade'; end loop; diff --git a/tests/sql/drop_schema.sql b/tests/sql/drop_schema.sql index a1b100f3..8bfad5c0 100644 --- a/tests/sql/drop_schema.sql +++ b/tests/sql/drop_schema.sql @@ -1,5 +1,5 @@ /*----------------------------------------------------------------------------- - * Copyright (c) 2020, 2022, Oracle and/or its affiliates. + * Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
* * This software is dual-licensed to you under the Universal Permissive License * (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -43,7 +43,9 @@ begin for r in ( select edition_name from dba_editions - where edition_name in (upper('&edition_name')) + start with edition_name = upper('&edition_name') + connect by prior edition_name = parent_edition_name + order by level desc ) loop execute immediate 'drop edition ' || r.edition_name || ' cascade'; end loop; From a740a58d62a6e160e15fd9f7eb68deac4003b34e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 16:15:49 -0600 Subject: [PATCH 071/239] Put all methods in alphabetical order. --- src/oracledb/interchange/nanoarrow_bridge.pyx | 36 +++++++++---------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index dd931d8c..d266cba7 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -39,6 +39,10 @@ cdef extern from "nanoarrow/nanoarrow.c": ctypedef int ArrowErrorCode + cdef struct ArrowBuffer: + uint8_t *data + int64_t size_bytes + cdef union ArrowBufferViewData: const void* data @@ -49,10 +53,6 @@ cdef extern from "nanoarrow/nanoarrow.c": cdef struct ArrowArrayView: ArrowBufferView *buffer_views - cdef struct ArrowBuffer: - uint8_t *data - int64_t size_bytes - cdef struct ArrowDecimal: pass @@ -65,21 +65,19 @@ cdef extern from "nanoarrow/nanoarrow.c": cdef ArrowErrorCode NANOARROW_OK - void ArrowArrayRelease(ArrowArray *array) - void ArrowSchemaRelease(ArrowSchema *schema) - - ArrowErrorCode ArrowArrayInitFromType(ArrowArray* array, - ArrowType storage_type) ArrowErrorCode ArrowArrayAppendBytes(ArrowArray* array, ArrowBufferView value) - ArrowErrorCode ArrowArrayAppendDouble(ArrowArray* array, double value) - ArrowErrorCode ArrowArrayAppendNull(ArrowArray* array, int64_t n) - ArrowErrorCode ArrowArrayAppendInt(ArrowArray* array, int64_t value) ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* array, const ArrowDecimal* value) + ArrowErrorCode ArrowArrayAppendDouble(ArrowArray* array, double value) + ArrowErrorCode ArrowArrayAppendInt(ArrowArray* array, int64_t value) + ArrowErrorCode ArrowArrayAppendNull(ArrowArray* array, int64_t n) ArrowBuffer* ArrowArrayBuffer(ArrowArray* array, int64_t i) ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* array, ArrowError* error) + ArrowErrorCode ArrowArrayInitFromType(ArrowArray* array, + ArrowType storage_type) + void ArrowArrayRelease(ArrowArray *array) ArrowErrorCode ArrowArrayReserve(ArrowArray* array, int64_t additional_size_elements) ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) @@ -90,8 +88,15 @@ cdef extern from "nanoarrow/nanoarrow.c": const ArrowArray* array, ArrowError* error) int8_t ArrowBitGet(const uint8_t* bits, int64_t i) + void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, + int32_t precision, int32_t scale) + void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) + ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal* decimal, + ArrowStringView value) void ArrowSchemaInit(ArrowSchema* schema) ArrowErrorCode ArrowSchemaInitFromType(ArrowSchema* schema, ArrowType type) + void ArrowSchemaRelease(ArrowSchema *schema) + ArrowErrorCode ArrowSchemaSetName(ArrowSchema* schema, const char* name) ArrowErrorCode ArrowSchemaSetTypeDateTime(ArrowSchema* schema, ArrowType arrow_type, ArrowTimeUnit time_unit, @@ -100,15 +105,8 @@ cdef 
extern from "nanoarrow/nanoarrow.c": ArrowType type, int32_t decimal_precision, int32_t decimal_scale) - ArrowErrorCode ArrowSchemaSetName(ArrowSchema* schema, const char* name) int64_t ArrowSchemaToString(const ArrowSchema* schema, char* out, int64_t n, char recursive) - void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, - int32_t precision, int32_t scale) - void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) - ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal* decimal, - ArrowStringView value) - cdef int _check_nanoarrow(int code) except -1: """ From 6466c42a63e4bc3f31e89e46534250dfcd987ec0 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 16:16:11 -0600 Subject: [PATCH 072/239] Fixed memory corruption in data frame queries (#489) and added support for converting an OracleDataFrame object to a foreign data frame object multiple times (#470). --- doc/src/release_notes.rst | 5 + src/oracledb/interchange/nanoarrow_bridge.pxd | 5 +- src/oracledb/interchange/nanoarrow_bridge.pyx | 126 ++++++++++++++---- tests/test_8000_dataframe.py | 11 +- tests/test_8100_dataframe_async.py | 11 +- 5 files changed, 118 insertions(+), 40 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index f24fe4fd..3f5939cf 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -39,6 +39,11 @@ Common Changes (`issue 493 `__). #) Miscellaneous grammar and spelling fixes by John Bampton (`PR 479 `__). +#) Fixed memory corruption in DataFrame queries + (`issue 489 `__). +#) Added support for converting an OracleDataFrame object to a foreign data + frame object more than once + (`issue 470 `__) oracledb 3.1.0 (April 2025) diff --git a/src/oracledb/interchange/nanoarrow_bridge.pxd b/src/oracledb/interchange/nanoarrow_bridge.pxd index 479fa7d0..5c413f22 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pxd +++ b/src/oracledb/interchange/nanoarrow_bridge.pxd @@ -41,6 +41,9 @@ cdef extern from "nanoarrow.h": int64_t null_count int64_t offset int64_t n_buffers + int64_t n_children + ArrowArray** children + const void** buffers void (*release)(ArrowArray*) cdef struct ArrowSchema: @@ -57,6 +60,7 @@ cdef extern from "nanoarrow.h": NANOARROW_TYPE_LARGE_STRING NANOARROW_TYPE_STRING NANOARROW_TYPE_TIMESTAMP + NANOARROW_TYPE_UNINITIALIZED cpdef enum ArrowTimeUnit: NANOARROW_TIME_UNIT_SECOND @@ -87,7 +91,6 @@ cdef class OracleArrowArray: double factor ArrowArray *arrow_array ArrowSchema *arrow_schema - void (*actual_array_release)(ArrowArray*) noexcept cdef str _schema_to_string(self) cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index d266cba7..34fc6c0d 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -31,7 +31,6 @@ cimport cpython from libc.stdint cimport uintptr_t from libc.string cimport memcpy, strlen, strchr -from cpython.pycapsule cimport PyCapsule_New from .. 
import errors @@ -39,9 +38,15 @@ cdef extern from "nanoarrow/nanoarrow.c": ctypedef int ArrowErrorCode + ctypedef void (*ArrowBufferDeallocatorCallback) + + cdef struct ArrowBufferAllocator: + void *private_data + cdef struct ArrowBuffer: uint8_t *data int64_t size_bytes + ArrowBufferAllocator allocator cdef union ArrowBufferViewData: const void* data @@ -65,6 +70,8 @@ cdef extern from "nanoarrow/nanoarrow.c": cdef ArrowErrorCode NANOARROW_OK + ArrowErrorCode ArrowArrayAllocateChildren(ArrowArray *array, + int64_t n_children) ArrowErrorCode ArrowArrayAppendBytes(ArrowArray* array, ArrowBufferView value) ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* array, @@ -88,11 +95,15 @@ cdef extern from "nanoarrow/nanoarrow.c": const ArrowArray* array, ArrowError* error) int8_t ArrowBitGet(const uint8_t* bits, int64_t i) + ArrowBufferAllocator ArrowBufferDeallocator(ArrowBufferDeallocatorCallback, + void *private_data) void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, int32_t precision, int32_t scale) void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal* decimal, ArrowStringView value) + ArrowErrorCode ArrowSchemaDeepCopy(const ArrowSchema *schema, + ArrowSchema *schema_out) void ArrowSchemaInit(ArrowSchema* schema) ArrowErrorCode ArrowSchemaInitFromType(ArrowSchema* schema, ArrowType type) void ArrowSchemaRelease(ArrowSchema *schema) @@ -117,22 +128,13 @@ cdef int _check_nanoarrow(int code) except -1: errors._raise_err(errors.ERR_ARROW_C_API_ERROR, code=code) -cdef void array_deleter(ArrowArray *array) noexcept: - """ - Called when an external library calls the release for an Arrow array. This - method simply marks the release as completed but doesn't actually do it, so - that the handling of duplicate rows can still make use of the array, even - if the external library no longer requires it! - """ - array.release = NULL - - cdef void pycapsule_array_deleter(object array_capsule) noexcept: cdef ArrowArray* array = cpython.PyCapsule_GetPointer( array_capsule, "arrow_array" ) if array.release != NULL: ArrowArrayRelease(array) + cpython.PyMem_Free(array) cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: @@ -141,6 +143,65 @@ cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: ) if schema.release != NULL: ArrowSchemaRelease(schema) + cpython.PyMem_Free(schema) + + +cdef void arrow_buffer_dealloc_callback(ArrowBufferAllocator *allocator, + uint8_t *ptr, int64_t size): + """ + ArrowBufferDeallocatorCallback for an ArrowBuffer borrowed from + OracleArrowArray + """ + cpython.Py_DECREF( allocator.private_data) + + +cdef int copy_arrow_array(OracleArrowArray oracle_arrow_array, + ArrowArray *src, ArrowArray *dest) except -1: + """ + Shallow copy source ArrowArray to destination ArrowArray. The source + ArrowArray belongs to the wrapper OracleArrowArray. The shallow copy idea + is borrowed from nanoarrow: + https://github.com/apache/arrow-nanoarrow/main/blob/python + """ + cdef: + ArrowBuffer *dest_buffer + ssize_t i + _check_nanoarrow( + ArrowArrayInitFromType( + dest, NANOARROW_TYPE_UNINITIALIZED + ) + ) + + # Copy metadata + dest.length = src.length + dest.offset = src.offset + dest.null_count = src.null_count + + # Borrow an ArrowBuffer belonging to OracleArrowArray. The ArrowBuffer can + # belong to an immediate ArrowArray or a child (in case of nested types). + # Either way, we PY_INCREF(oracle_arrow_array), so that it is not + # prematurely garbage collected. 
The corresponding PY_DECREF happens in the + # ArrowBufferDeAllocator callback. + for i in range(src.n_buffers): + if src.buffers[i] != NULL: + dest_buffer = ArrowArrayBuffer(dest, i) + dest_buffer.data = src.buffers[i] + dest_buffer.size_bytes = 0 + dest_buffer.allocator = ArrowBufferDeallocator( + arrow_buffer_dealloc_callback, + oracle_arrow_array + ) + cpython.Py_INCREF(oracle_arrow_array) + dest.buffers[i] = src.buffers[i] + dest.n_buffers = src.n_buffers + + # shallow copy of children (recursive call) + if src.n_children > 0: + _check_nanoarrow(ArrowArrayAllocateChildren(dest, src.n_children)) + for i in range(src.n_children): + copy_arrow_array( + oracle_arrow_array, src.children[i], dest.children[i] + ) cdef class OracleArrowArray: @@ -187,8 +248,6 @@ cdef class OracleArrowArray: def __dealloc__(self): if self.arrow_array != NULL: - if self.arrow_array.release == NULL: - self.arrow_array.release = self.actual_array_release if self.arrow_array.release != NULL: ArrowArrayRelease(self.arrow_array) cpython.PyMem_Free(self.arrow_array) @@ -409,6 +468,26 @@ cdef class OracleArrowArray: def offset(self) -> int: return self.arrow_array.offset + def __arrow_c_schema__(self): + """ + Export an ArrowSchema PyCapsule + """ + cdef ArrowSchema *exported_schema = \ + cpython.PyMem_Malloc(sizeof(ArrowSchema)) + try: + _check_nanoarrow( + ArrowSchemaDeepCopy( + self.arrow_schema, + exported_schema + ) + ) + except: + cpython.PyMem_Free(exported_schema) + raise + return cpython.PyCapsule_New( + exported_schema, 'arrow_schema', &pycapsule_schema_deleter + ) + def __arrow_c_array__(self, requested_schema=None): """ Returns @@ -419,13 +498,14 @@ cdef class OracleArrowArray: """ if requested_schema is not None: raise NotImplementedError("requested_schema") - - array_capsule = PyCapsule_New( - self.arrow_array, 'arrow_array', &pycapsule_array_deleter - ) - self.actual_array_release = self.arrow_array.release - self.arrow_array.release = array_deleter - schema_capsule = PyCapsule_New( - self.arrow_schema, "arrow_schema", &pycapsule_schema_deleter - ) - return schema_capsule, array_capsule + cdef ArrowArray *exported_array = \ + cpython.PyMem_Malloc(sizeof(ArrowArray)) + try: + copy_arrow_array(self, self.arrow_array, exported_array) + array_capsule = cpython.PyCapsule_New( + exported_array, 'arrow_array', &pycapsule_array_deleter + ) + except: + cpython.PyMem_Free(exported_array) + raise + return self.__arrow_c_schema__(), array_capsule diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index d287b249..3abb84ec 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -409,18 +409,13 @@ def test_8009(self): self.__test_df_batches_interop(DATASET_4, batch_size=5, num_batches=2) def test_8010(self): - "8010 - verify passing Arrow arrays twice fails" + "8010 - verify passing Arrow arrays twice works" self.__check_interop() self.__populate_table(DATASET_1) statement = "select * from TestDataFrame order by Id" ora_df = self.conn.fetch_df_all(statement) - pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - with self.assertRaises(pyarrow.lib.ArrowInvalid): - pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) + self.__validate_df(ora_df, DATASET_1) + self.__validate_df(ora_df, DATASET_1) def test_8011(self): "8011 - verify empty data set" diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 5cebcbd0..5d9c3de1 100644 --- a/tests/test_8100_dataframe_async.py +++ 
b/tests/test_8100_dataframe_async.py @@ -420,18 +420,13 @@ async def test_8109(self): ) async def test_8110(self): - "8110 - verify passing Arrow arrays twice fails" + "8110 - verify passing Arrow arrays twice works" self.__check_interop() await self.__populate_table(DATASET_1) statement = "select * from TestDataFrame order by Id" ora_df = await self.conn.fetch_df_all(statement) - pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - with self.assertRaises(pyarrow.lib.ArrowInvalid): - pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) + self.__validate_df(ora_df, DATASET_1) + self.__validate_df(ora_df, DATASET_1) async def test_8111(self): "8111 - verify empty data set" From 13374084199a110206d4c4f83df32ab05fb9d252 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 12 May 2025 16:16:37 -0600 Subject: [PATCH 073/239] Fixed parsing of the connect string when using the Azure Config Store Provider. --- doc/src/release_notes.rst | 3 +++ src/oracledb/plugins/azure_config_provider.py | 7 ++++--- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 3f5939cf..7742778d 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -39,6 +39,9 @@ Common Changes (`issue 493 `__). #) Miscellaneous grammar and spelling fixes by John Bampton (`PR 479 `__). +#) Fixed parsing of the connection string in the + :ref:`Azure App Centralized Configuration Provider + `. #) Fixed memory corruption in DataFrame queries (`issue 489 `__). #) Added support for converting an OracleDataFrame object to a foreign data diff --git a/src/oracledb/plugins/azure_config_provider.py b/src/oracledb/plugins/azure_config_provider.py index c7cb7ca2..52719f85 100644 --- a/src/oracledb/plugins/azure_config_provider.py +++ b/src/oracledb/plugins/azure_config_provider.py @@ -188,9 +188,10 @@ def _parse_parameters(protocol_arg: str) -> dict: parameters = { key.lower(): value[0] for key, value in parsed_values.items() } - parameters["appconfigname"] = ( - protocol_arg[:pos].rstrip("/").rstrip(".azconfig.io") + ".azconfig.io" - ) + config_name = protocol_arg[:pos].rstrip("/") + if not config_name.endswith(".azconfig.io"): + config_name += ".azconfig.io" + parameters["appconfigname"] = config_name return parameters From 52035722c3df03f5ffc4f538a9ed47a29705f632 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 15 May 2025 13:44:36 -0600 Subject: [PATCH 074/239] Adjust tests for bug fix in newer databases. 
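The test adjustments that follow exercise ``oracledb.SparseVector`` payloads.
As an orientation sketch only (not part of the patch), a sparse vector like
those used in the tests can be built and inspected as follows:

.. code-block:: python

    import array
    import oracledb

    # A 16-dimension sparse vector with non-zero entries at indices 1, 3,
    # and 5, mirroring the double-precision values used in the tests.
    value = oracledb.SparseVector(
        16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5])
    )

    print(value.num_dimensions)   # 16
    print(list(value.indices))    # [1, 3, 5]
    print(list(value.values))     # [1.5, 0.25, 0.5]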
--- tests/test_7700_sparse_vector.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/test_7700_sparse_vector.py b/tests/test_7700_sparse_vector.py index 9f008713..56cd8a12 100644 --- a/tests/test_7700_sparse_vector.py +++ b/tests/test_7700_sparse_vector.py @@ -222,7 +222,7 @@ def test_7709(self): value = oracledb.SparseVector( 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "f") + self.__test_insert_and_fetch(value, "VectorFlexAllCol", "d") self.__test_insert_and_fetch_sparse( value, "SparseVectorFlexAllCol", "d" ) @@ -357,7 +357,7 @@ def test_7715(self): value = oracledb.SparseVector( 16, [1, 3, 5], array.array("b", [1, 0, 5]) ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "f") + self.__test_insert_and_fetch(value, "VectorFlexAllCol", "b") self.__test_insert_and_fetch_sparse( value, "SparseVectorFlexAllCol", "b" ) @@ -442,7 +442,7 @@ def test_7722(self): dim, [1, 3, 5], array.array(typ, [element_value] * 3) ) self.__test_insert_and_fetch( - value, "VectorFlexAllCol", "f" + value, "VectorFlexAllCol", typ ) self.__test_insert_and_fetch_sparse( value, "SparseVectorFlexAllCol", typ @@ -682,9 +682,9 @@ def test_7734(self): self.assertEqual(value.values, array.array("d")) self.assertEqual(value.indices, array.array("I")) self.assertEqual(value.num_dimensions, 0) - with self.assertRaisesFullCode("ORA-51803", "ORA-21560"): + with self.assertRaisesFullCode("ORA-51803", "ORA-21560", "ORA-51862"): self.__test_insert_and_fetch(value, "VectorFlexAllCol", "d") - with self.assertRaisesFullCode("ORA-51803", "ORA-21560"): + with self.assertRaisesFullCode("ORA-51803", "ORA-21560", "ORA-51862"): self.__test_insert_and_fetch_sparse( value, "SparseVectorFlexAllCol", "d" ) From 8c8c7bca9489333e31670cdb942c26b5b155f882 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 15 May 2025 13:45:19 -0600 Subject: [PATCH 075/239] Preparing to release python-oracledb 3.1.1. --- doc/src/release_notes.rst | 47 ++++++++++++++++++++++++++------------- 1 file changed, 32 insertions(+), 15 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 7742778d..0fc47bd6 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -20,9 +20,6 @@ Thin Mode Changes #) Emulate support for :meth:`Queue.deqmany()` with JSON payloads when using Oracle Database 21c by internally calling :meth:`Queue.deqone()` as many times as needed. -#) Fixed bug with some databases when a connection is killed. In some - scenarios :meth:`Connection.is_healthy()` would have incorrectly returned - the value *True* and in other cases a possible hang could occur. Thick Mode Changes ++++++++++++++++++ @@ -30,23 +27,43 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Fixed a bug resulting in a segfault when attempting to use an - :ref:`output type handler ` while fetching data frames - with :meth:`Connection.fetch_df_all()` and - :meth:`Connection.fetch_df_batches()` - (`issue 486 `__). +#) Improved the test suite and documentation. + + +oracledb 3.1.1 (May 2025) +------------------------- + +Thin Mode Changes ++++++++++++++++++ + +#) Fixed bug with :meth:`Connection.is_healthy()` after a session is killed, + such as by a DBA running ALTER SYSTEM KILL SESSION. Previously, in some + databases, it could incorrectly return *True*, while in other cases it + could hang. + +Common Changes +++++++++++++++ + #) Added support for using the Cython 3.1 release (`issue 493 `__). 
-#) Miscellaneous grammar and spelling fixes by John Bampton - (`PR 479 `__). +#) Improvements to data frame fetching with :meth:`Connection.fetch_df_all()` + and :meth:`Connection.fetch_df_batches()`: + + - Added support for converting an :ref:`OracleDataFrame + ` object to a foreign data frame object more than + once + (`issue 470 `__). + - Fixed a bug resulting in a segfault when attempting to use an + :ref:`output type handler ` while fetching data frames + (`issue 486 `__). + - Fixed memory corruption in data frame queries + (`issue 489 `__). + #) Fixed parsing of the connection string in the :ref:`Azure App Centralized Configuration Provider `. -#) Fixed memory corruption in DataFrame queries - (`issue 489 `__). -#) Added support for converting an OracleDataFrame object to a foreign data - frame object more than once - (`issue 470 `__) +#) Miscellaneous grammar and spelling fixes by John Bampton + (`PR 479 `__). oracledb 3.1.0 (April 2025) From a30186e8fba1617109aaf9a0099934be7ab44b3b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 16 May 2025 10:34:46 -0600 Subject: [PATCH 076/239] Refactor: simplify code for destroying the database object type cache. --- src/oracledb/impl/thin/connection.pyx | 13 ++++++++++--- src/oracledb/impl/thin/protocol.pyx | 8 ++------ 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index a92f8f85..f8859388 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -92,6 +92,15 @@ cdef class BaseThinConnImpl(BaseConnImpl): state=state) self._transaction_context = None + cdef int _clear_dbobject_type_cache(self) except -1: + """ + """ + cdef int cache_num + if self._dbobject_type_cache_num > 0: + cache_num = self._dbobject_type_cache_num + self._dbobject_type_cache_num = 0 + remove_dbobject_type_cache(cache_num) + cdef BaseThinLobImpl _create_lob_impl(self, DbType dbtype, bytes locator=None): """ @@ -167,9 +176,7 @@ cdef class BaseThinConnImpl(BaseConnImpl): cdef int _force_close(self) except -1: self._pool = None - if self._dbobject_type_cache_num > 0: - remove_dbobject_type_cache(self._dbobject_type_cache_num) - self._dbobject_type_cache_num = 0 + self._clear_dbobject_type_cache() self._protocol._force_close() cdef Statement _get_statement(self, str sql = None, diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index 1dfbbb11..15e9326d 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -195,9 +195,7 @@ cdef class Protocol(BaseProtocol): # otherwise, destroy the database object type cache, send the # logoff message and final close packet - if conn_impl._dbobject_type_cache_num > 0: - remove_dbobject_type_cache(conn_impl._dbobject_type_cache_num) - conn_impl._dbobject_type_cache_num = 0 + conn_impl._clear_dbobject_type_cache() if self._transport is not None: if not conn_impl._drcp_enabled: message = conn_impl._create_message(LogoffMessage) @@ -555,9 +553,7 @@ cdef class BaseAsyncProtocol(BaseProtocol): # otherwise, destroy the database object type cache, send the # logoff message and final close packet - if conn_impl._dbobject_type_cache_num > 0: - remove_dbobject_type_cache(conn_impl._dbobject_type_cache_num) - conn_impl._dbobject_type_cache_num = 0 + conn_impl._clear_dbobject_type_cache() if self._transport is not None: if not conn_impl._drcp_enabled: message = conn_impl._create_message(LogoffMessage) From 
39ce333b882acafb449d9166bbe88d67722fba87 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 16 May 2025 10:35:29 -0600 Subject: [PATCH 077/239] Doc and collateral updates. --- README.md | 46 ++++++++++++--- doc/src/user_guide/appendix_c.rst | 68 +++++++++++++--------- doc/src/user_guide/connection_handling.rst | 4 +- doc/src/user_guide/installation.rst | 17 +++--- doc/src/user_guide/sql_execution.rst | 5 +- samples/async_gather.py | 36 ++++++++---- samples/connection_pool.py | 58 ++++++++++-------- 7 files changed, 150 insertions(+), 84 deletions(-) diff --git a/README.md b/README.md index 7f0483bb..76c03c0f 100644 --- a/README.md +++ b/README.md @@ -1,8 +1,11 @@ # python-oracledb -python-oracledb is a [Python programming language][python] extension module +Python-oracledb is a [Python programming language][python] extension module allowing Python programs to connect to [Oracle Database][oracledb]. -Python-oracledb is the new name for Oracle's popular cx_Oracle driver. +Python-oracledb is the new name for the obsolete cx_Oracle driver. + +Python-oracledb uses the same Python DB API as cx_Oracle, and has many new +features. The module conforms to the [Python Database API 2.0 specification][pep249] with a considerable number of additions and a couple of minor exclusions, see the @@ -10,11 +13,41 @@ a considerable number of additions and a couple of minor exclusions, see the Synchronous and [concurrent][concurrent] coding styles are supported. +Python-oracledb is available under an open source license, see below. + ## Installation -Run `python -m pip install oracledb` +Run: + +``` +python -m pip install oracledb --upgrade +``` + +See [python-oracledb Installation][installation] for details. + +## Samples + +Examples can be found in the [/samples][samples] directory and the +[Python and Oracle Database Tutorial][tutorial]. -See [python-oracledb Installation][installation]. +A basic example: + +``` +import oracledb +import getpass + +un = "scott" +cs = "localhost/orclpdb" +# cs = "localhost/freepdb1" # for Oracle Database Free users +# cs = "localhost/orclpdb1" # some databases may have this service +pw = getpass.getpass(f"Enter password for {un}@{cs}: ") + +with oracledb.connect(user=un, password=pw, dsn=cs) as connection: + with connection.cursor() as cursor: + sql = "select sysdate from dual" + for r in cursor.execute(sql): + print(r) +``` ## Dependencies and Interoperability @@ -52,11 +85,6 @@ See [python-oracledb Installation][installation]. See the [python-oracledb Documentation][documentation] and [Release Notes][relnotes]. -## Samples - -Examples can be found in the [/samples][samples] directory and the -[Python and Oracle Database Tutorial][tutorial]. - ## Help Questions can be asked in [GitHub Discussions][ghdiscussions]. diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index 70773042..dcd3d4b2 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -32,7 +32,7 @@ Specification. cx_Oracle always runs in a Thick mode using Oracle Client libraries. The features in python-oracledb Thick mode and cx_Oracle 8.3 are the same, subject to the :ref:`new features `, some :ref:`deprecations -`, and to other changes noted in this section. +`, and to other changes noted in the documentation. 
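A small, hedged sketch of checking which mode is in use, based on the
``Connection.thin`` attribute discussed in these documentation changes; the
credentials and connect string are placeholders:

.. code-block:: python

    import getpass
    import oracledb

    un = "scott"              # placeholder credentials
    cs = "localhost/orclpdb"  # placeholder connect string
    pw = getpass.getpass(f"Enter password for {un}@{cs}: ")

    # oracledb.init_oracle_client()   # uncomment to switch to Thick mode

    with oracledb.connect(user=un, password=pw, dsn=cs) as connection:
        print("Thin mode" if connection.thin else "Thick mode")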
Oracle Client Library Loading Differences from cx_Oracle -------------------------------------------------------- @@ -187,8 +187,9 @@ SessionPool Object Differences The SessionPool object (which is an alias for the :ref:`ConnectionPool object `) differences between the python-oracledb and cx_Oracle drivers are: -- A Python type() will show the class as ``oracledb.ConnectionPool`` instead - of ``cx_Oracle.SessionPool``. +- A Python `type() `__ + will show the class as ``oracledb.ConnectionPool`` instead of + ``cx_Oracle.SessionPool``. - A new boolean attribute, ``SessionPool.thin`` (see :attr:`ConnectionPool.thin`) is available. This attribute is *True* if the @@ -374,9 +375,6 @@ Example error messages are: Upgrading from cx_Oracle 8.3 to python-oracledb =============================================== -This section provides the detailed steps needed to upgrade from the obsolete -cx_Oracle driver to python-oracledb. - Things to Know Before the Upgrade --------------------------------- @@ -384,23 +382,39 @@ Below is a list of some useful things to know before upgrading from cx_Oracle to python-oracledb: - You can have both cx_Oracle and python-oracledb installed, and can use both - in the same application. + in the same application. Install python-oracledb like:: + + python -m pip install oracledb --upgrade + + See :ref:`installation` for details. + +- By default, python-oracledb runs in a 'Thin' mode which connects directly to + Oracle Database. This mode does not need Oracle Client libraries to be + installed. However, some additional functionality is available when + python-oracledb uses them. Python-oracledb is said to be in 'Thick' mode + when Oracle Client libraries are used. The Thick mode is equivalent to + cx_Oracle. -- If you only want to use the python-oracledb driver in Thin mode, then you do - not need Oracle Client libraries such as from Oracle Instant Client. You - only need to :ref:`install ` the driver itself:: +- python-oracledb Thin and Thick modes have the same level of support for the + `Python Database API specification `_ and + can be used to connect to on-premises databases and Oracle Cloud + databases. See :ref:`driverdiff`. - python -m pip install oracledb + Examples can be found in the `GitHub samples directory + `__. A basic + example is: - See :ref:`driverdiff`. + .. code-block:: python + + import oracledb + import getpass -- python-oracledb Thin and Thick modes have the same level of support for - the `Python Database API specification `_ - and can be used to connect to on-premises databases and Oracle Cloud - databases. However, python-oracledb Thin mode does not support some - advanced Oracle Database features such as Application Continuity (AC), - Continuous Query Notification (CQN), and Sharding. See :ref:`Features - Supported ` for details. + pw = getpass.getpass(f"Enter password for hr@localhost/orclpdb: ") + + with oracledb.connect(user="hr", password=userpwd, dsn="localhost/orclpdb") as connection: + with connection.cursor() as cursor: + for r in cursor.execute("select sysdate from dual"): + print(r) - python-oracledb can be used in SQLAlchemy, Django, Pandas, Superset and other frameworks and Object-relational Mappers (ORMs). To use python-oracledb in @@ -414,17 +428,19 @@ to python-oracledb: .. code-block:: python - oracledb.connect(user="scott", password=pw, dsn="localhost/orclpdb") + connection = oracledb.connect(user="scott", password=pw, dsn="localhost/orclpdb") This no longer works: .. 
code-block:: python - oracledb.connect("scott", pw, "localhost/orclpdb") + connection = oracledb.connect("scott", pw, "localhost/orclpdb") - Some previously deprecated features are no longer available. See :ref:`deprecations`. +- There are many new features, see the :ref:`release notes `. + .. _commonupgrade: Steps to Upgrade to python-oracledb @@ -577,8 +593,8 @@ following steps: Additional Upgrade Steps to use python-oracledb Thin Mode +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -To use python-oracledb Thin mode, the following changes need to be made in -addition to the common :ref:`commonupgrade`: +To upgrade from cx_Oracle to python-oracledb Thin mode, the following changes +need to be made in addition to the common :ref:`commonupgrade`: 1. Remove calls to :func:`~oracledb.init_oracle_client` since this turns on python-oracledb Thick mode. @@ -635,7 +651,7 @@ addition to the common :ref:`commonupgrade`: :ref:`oraaccess.xml ` files. The Thin mode lets equivalent properties be set in the application when connecting. -6. To use python-oracledb Thin mode in an ORACLE_HOME database installation +6. To use python-oracledb Thin mode in an ``ORACLE_HOME`` database installation environment, you must use an explicit connection string since the ``ORACLE_SID``, ``TWO_TASK``, and ``LOCAL`` environment variables are not used. They are used in Thick mode. @@ -664,8 +680,8 @@ addition to the common :ref:`commonupgrade`: Additional Upgrade Steps to use python-oracledb Thick Mode ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -To use python-oracledb Thick mode, the following changes need to be made in -addition to the common :ref:`commonupgrade`: +To upgrade from cx_Oracle to python-oracledb Thick mode, the following changes +need to be made in addition to the common :ref:`commonupgrade`: 1. The function :func:`oracledb.init_oracle_client()` *must* be called to enable python-oracle Thick mode. It can be called anywhere before the first diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 24876705..f64bda28 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -633,8 +633,8 @@ known to python-oracledb are discarded and not passed to the database. .. _pyoparams: -Python-oracledb Parameters Settable in Easy Connect Strings or Central Configuration Providers ----------------------------------------------------------------------------------------------- +Python-oracledb Parameters Settable in Easy Connect Strings or Centralized Configuration Providers +-------------------------------------------------------------------------------------------------- Some python-oracledb connection and pool creation parameters can be set in :ref:`Easy Connect strings ` or via a :ref:`Centralized diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 000c9de4..a053270f 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -65,17 +65,18 @@ Python-oracledb is typically installed from Python's package repository .. 
code-block:: python - import getpass - import oracledb + import getpass - un = 'scott' - cs = 'localhost/orclpdb' - pw = getpass.getpass(f'Enter password for {un}@{cs}: ') + un = "scott" + cs = "localhost/orclpdb" + # cs = "localhost/freepdb1" # for Oracle Database Free users + # cs = "localhost/orclpdb1" # some databases may have this service + pw = getpass.getpass(f"Enter password for {un}@{cs}: ") with oracledb.connect(user=un, password=pw, dsn=cs) as connection: with connection.cursor() as cursor: - sql = """select sysdate from dual""" + sql = "select sysdate from dual" for r in cursor.execute(sql): print(r) @@ -131,8 +132,8 @@ In python-oracledb Thick mode, Oracle Database's standard client-server network interoperability allows connections between different versions of Oracle Client libraries and Oracle Database. For current or previously certified configurations, see Oracle Support's `Doc ID 207303.1 -`__. In -summary: +`__. +In summary: - Oracle Client 23 can connect to Oracle Database 19 or later - Oracle Client 21 can connect to Oracle Database 12.1 or later diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 565c7132..4cf7582f 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -816,8 +816,9 @@ support makes use of `Apache nanoarrow `__ libraries to build data frames. The following data type mapping occurs from Oracle Database types to the Arrow -types used in OracleDataFrame objects. Querying any other data types from -Oracle Database will result in an exception. +types used in OracleDataFrame objects. Querying any other data types from +Oracle Database will result in an exception. :ref:`Output type handlers +` cannot be used to map data types. .. list-table-with-summary:: :header-rows: 1 diff --git a/samples/async_gather.py b/samples/async_gather.py index 78b12937..81f45487 100644 --- a/samples/async_gather.py +++ b/samples/async_gather.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,8 +27,13 @@ # # Demonstrates using a connection pool with asyncio and gather(). # -# Multiple database sessions will be opened and used by each coroutine. The -# number of connections opened can depend on the speed of your environment. +# This also shows the use of pool_alias for connection pool caching, so the +# pool handle does not need to passed through the app. +# +# Each coroutine invocation will acquire a connection from the connection pool. +# The number of connections in the pool will depend on the speed of your +# environment. In some cases existing connections will get reused. In other +# cases up to CONCURRENCY connections will be created by the pool. 
# ----------------------------------------------------------------------------- import asyncio @@ -36,33 +41,38 @@ import oracledb import sample_env +# Pool name for the connection pool cache +POOL_ALIAS_NAME = "mypool" + # Number of coroutines to run CONCURRENCY = 5 # Query the unique session identifier/serial number combination of a connection -SQL = """SELECT UNIQUE CURRENT_TIMESTAMP AS CT, sid||'-'||serial# AS SIDSER - FROM v$session_connect_info - WHERE sid = SYS_CONTEXT('USERENV', 'SID')""" +SQL = """select unique current_timestamp as ct, sid||'-'||serial# as sidser + from v$session_connect_info + where sid = sys_context('userenv', 'sid')""" # Show the unique session identifier/serial number of each connection that the -# pool opens +# pool creates async def init_session(connection, requested_tag): res = await connection.fetchone(SQL) print(res[0].strftime("%H:%M:%S.%f"), "- init_session SID-SERIAL#", res[1]) # The coroutine simply shows the session identifier/serial number of the -# connection returned by the pool.acquire() call -async def query(pool): - async with pool.acquire() as connection: +# connection returned from the pool +async def query(): + async with oracledb.connect_async( + pool_alias=POOL_ALIAS_NAME + ) as connection: await connection.callproc("dbms_session.sleep", [1]) res = await connection.fetchone(SQL) print(res[0].strftime("%H:%M:%S.%f"), "- query SID-SERIAL#", res[1]) async def main(): - pool = oracledb.create_pool_async( + oracledb.create_pool_async( user=sample_env.get_main_user(), password=sample_env.get_main_password(), dsn=sample_env.get_connect_string(), @@ -70,12 +80,14 @@ async def main(): min=1, max=CONCURRENCY, session_callback=init_session, + pool_alias=POOL_ALIAS_NAME, ) - coroutines = [query(pool) for i in range(CONCURRENCY)] + coroutines = [query() for i in range(CONCURRENCY)] await asyncio.gather(*coroutines) + pool = oracledb.get_pool(POOL_ALIAS_NAME) await pool.close() diff --git a/samples/connection_pool.py b/samples/connection_pool.py index dd76c05e..6a7647ea 100644 --- a/samples/connection_pool.py +++ b/samples/connection_pool.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,6 +27,9 @@ # # Demonstrates the use of connection pooling using a Flask web application. # +# This also shows the use of pool_alias for connection pool caching, so the +# pool handle does not need to passed through the app. +# # Connection Pools can significantly reduce connection times for long running # applications that repeatedly open and close connections. Connection pools # allow multiple, concurrent web requests to be efficiently handled. Internal @@ -37,7 +40,7 @@ # # 1. Install Flask, for example like: # -# python -m pip install Flask +# python -m pip install flask # # 2. (Optional) Set environment variables referenced in sample_env.py # @@ -67,37 +70,42 @@ import sample_env # Port to listen on -port = int(os.environ.get("PORT", "8080")) +PORT = int(os.environ.get("PORT", "8080")) + +# Generally a fixed-size pool is recommended, i.e. POOL_MIN=POOL_MAX. Here +# the pool contains 4 connections, which will allow 4 concurrent users. 
+POOL_MIN = 4 +POOL_MAX = 4 +POOL_INC = 0 + +# Pool name for the connection pool cache +POOL_ALIAS_NAME = "mypool" # determine whether to use python-oracledb thin mode or thick mode if not sample_env.get_is_thin(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) + # ----------------------------------------------------------------------------- # start_pool(): starts the connection pool +# +# The pool is stored in the pool cache. Connections can later be acquired from +# the pool by passing the same pool_alias value to oracledb.connect() def start_pool(): - # Generally a fixed-size pool is recommended, i.e. pool_min=pool_max. Here - # the pool contains 4 connections, which will allow 4 concurrent users. - - pool_min = 4 - pool_max = 4 - pool_inc = 0 - - pool = oracledb.create_pool( + oracledb.create_pool( user=sample_env.get_main_user(), password=sample_env.get_main_password(), dsn=sample_env.get_connect_string(), params=sample_env.get_pool_params(), - min=pool_min, - max=pool_max, - increment=pool_inc, + min=POOL_MIN, + max=POOL_MAX, + increment=POOL_INC, session_callback=init_session, + pool_alias=POOL_ALIAS_NAME, ) - return pool - # init_session(): a 'session callback' to efficiently set any initial state # that each connection should have. @@ -105,9 +113,9 @@ def start_pool(): # This particular demo doesn't use dates, so sessionCallback could be omitted, # but it does show the kinds of settings many apps would use. # -# If you have multiple SQL statements, then call them all in a PL/SQL anonymous -# block with BEGIN/END so you only use execute() once. This is shown later in -# create_schema(). +# If you have multiple SQL statements, an optimization is to call them all in a +# PL/SQL anonymous block with BEGIN/END so you only use cursor.execute() once. +# This is shown later in create_schema(). # def init_session(connection, requestedTag_ignored): with connection.cursor() as cursor: @@ -125,7 +133,7 @@ def init_session(connection, requestedTag_ignored): # create_schema(): drop and create the demo table, and add a row def create_schema(): - with pool.acquire() as connection: + with oracledb.connect(pool_alias=POOL_ALIAS_NAME) as connection: with connection.cursor() as cursor: cursor.execute( """ @@ -169,7 +177,7 @@ def index(): # variable 'idbv'. @app.route("/post/") def post(username): - with pool.acquire() as connection: + with oracledb.connect(pool_alias=POOL_ALIAS_NAME) as connection: with connection.cursor() as cursor: connection.autocommit = True idbv = cursor.var(int) @@ -187,7 +195,7 @@ def post(username): # Show the username for a given id @app.route("/user/") def show_username(id): - with pool.acquire() as connection: + with oracledb.connect(pool_alias=POOL_ALIAS_NAME) as connection: with connection.cursor() as cursor: cursor.execute("select username from demo where id = :idbv", [id]) r = cursor.fetchone() @@ -198,13 +206,13 @@ def show_username(id): if __name__ == "__main__": # Start a pool of connections - pool = start_pool() + start_pool() # Create a demo table create_schema() - m = f"\nTry loading http://127.0.0.1:{port}/user/1 in a browser\n" + m = f"\nTry loading http://127.0.0.1:{PORT}/user/1 in a browser\n" sys.modules["flask.cli"].show_server_banner = lambda *x: print(m) # Start a webserver - app.run(port=port) + app.run(port=PORT) From 724717e363978a8090ef2f3799f9b51b02397a80 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 21 May 2025 11:12:16 -0600 Subject: [PATCH 078/239] Perform name resolution on the proxy instead of on the client. 
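For background, the proxy configuration affected by this change is set with the
``https_proxy`` and ``https_proxy_port`` connection parameters. A minimal
sketch of such a configuration is shown below; the host name, port numbers,
and proxy address are placeholders:

    import getpass
    import oracledb

    un = "scott"  # placeholder user name
    pw = getpass.getpass(f"Enter password for {un}: ")

    params = oracledb.ConnectParams(
        host="dbhost.example.com",          # placeholder database host
        port=1522,
        service_name="orclpdb",
        protocol="tcps",
        https_proxy="proxy.example.com",    # placeholder proxy host
        https_proxy_port=80,
    )

    connection = oracledb.connect(user=un, password=pw, params=params)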
--- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/base/connect_params.pyx | 7 ++++++- src/oracledb/impl/thin/protocol.pyx | 24 +++++++++++++++-------- 3 files changed, 25 insertions(+), 9 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 0fc47bd6..1ccd52ad 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -20,6 +20,9 @@ Thin Mode Changes #) Emulate support for :meth:`Queue.deqmany()` with JSON payloads when using Oracle Database 21c by internally calling :meth:`Queue.deqone()` as many times as needed. +#) Fixed bug when connecting with asyncio using the parameter ``https_proxy``. +#) Fixed regression when connecting where only the host specified by the + ``https_proxy`` parameter can successfully perform name resolution. Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/base/connect_params.pyx b/src/oracledb/impl/base/connect_params.pyx index 20c389af..1d0a72ce 100644 --- a/src/oracledb/impl/base/connect_params.pyx +++ b/src/oracledb/impl/base/connect_params.pyx @@ -698,12 +698,17 @@ cdef class Address(ConnectParamsNode): """ Resolve the host name associated with the address and store the IP address and family on the address. If multiple IP addresses are found, - duplicate the address and return one address for each IP address. + duplicate the address and return one address for each IP address. If a + proxy is being used, ensure that the proxy performs name resolution + instead. """ cdef: list results = [] Address address object info + if self.https_proxy is not None: + self.ip_address = self.host + return [self] for info in socket.getaddrinfo(self.host, self.port, proto=socket.IPPROTO_TCP, type=socket.SOCK_STREAM): diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index 15e9326d..faabb36c 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -357,7 +357,7 @@ cdef class Protocol(BaseProtocol): if not use_tcps and (params._token is not None or params.access_token_callback is not None): errors._raise_err(errors.ERR_ACCESS_TOKEN_REQUIRES_TCPS) - if description.use_tcp_fast_open: + if not use_proxy and description.use_tcp_fast_open: sock = socket.socket(address.ip_family, socket.SOCK_STREAM) sock.sendto(connect_string.encode(), socket.MSG_FASTOPEN, connect_info) @@ -506,6 +506,9 @@ cdef class Protocol(BaseProtocol): cdef class BaseAsyncProtocol(BaseProtocol): + cdef: + object _proxy_waiter + def __init__(self): BaseProtocol.__init__(self) self._request_lock = asyncio.Lock() @@ -727,9 +730,10 @@ cdef class BaseAsyncProtocol(BaseProtocol): # complete connection through proxy, if applicable if use_proxy: + self._proxy_waiter = self._read_buf._loop.create_future() data = f"CONNECT {host}:{port} HTTP/1.0\r\n\r\n" transport.write(data.encode()) - reply = transport.read(1024) + reply = await self._proxy_waiter m = re.search('HTTP/1.[01]\\s+(\\d+)\\s+', reply.decode()) if m is None or m.groups()[0] != '200': errors._raise_err(errors.ERR_PROXY_FAILURE, @@ -913,12 +917,16 @@ cdef class BaseAsyncProtocol(BaseProtocol): cdef: bint notify_waiter = False Packet packet - packet = self._transport.extract_packet(data) - while packet is not None: - self._read_buf._process_packet(packet, ¬ify_waiter, False) - if notify_waiter: - self._read_buf.notify_packet_received() - packet = self._transport.extract_packet() + if self._proxy_waiter is not None: + self._proxy_waiter.set_result(data) + self._proxy_waiter = None + else: + packet = 
self._transport.extract_packet(data) + while packet is not None: + self._read_buf._process_packet(packet, ¬ify_waiter, False) + if notify_waiter: + self._read_buf.notify_packet_received() + packet = self._transport.extract_packet() async def end_pipeline(self, BaseThinConnImpl conn_impl, list messages, bint continue_on_error): From b35c50b11d30bcb4ab5d93b40f809756aa3ad2b1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 21 May 2025 11:12:44 -0600 Subject: [PATCH 079/239] Doc tweaks. --- README.md | 27 +- doc/src/index.rst | 4 +- doc/src/release_notes.rst | 8 +- doc/src/user_guide/appendix_a.rst | 71 ++- doc/src/user_guide/appendix_c.rst | 818 +++++++------------------ doc/src/user_guide/appendix_d.rst | 290 +++++++++ doc/src/user_guide/batch_statement.rst | 81 ++- doc/src/user_guide/dataframes.rst | 386 ++++++++++++ doc/src/user_guide/ha.rst | 6 +- doc/src/user_guide/initialization.rst | 2 +- doc/src/user_guide/plsql_execution.rst | 17 +- doc/src/user_guide/sql_execution.rst | 377 +----------- 12 files changed, 1040 insertions(+), 1047 deletions(-) create mode 100644 doc/src/user_guide/appendix_d.rst create mode 100644 doc/src/user_guide/dataframes.rst diff --git a/README.md b/README.md index 76c03c0f..f867079f 100644 --- a/README.md +++ b/README.md @@ -1,21 +1,23 @@ # python-oracledb -Python-oracledb is a [Python programming language][python] extension module -allowing Python programs to connect to [Oracle Database][oracledb]. -Python-oracledb is the new name for the obsolete cx_Oracle driver. +Python-oracledb is an open-source [Python][python] extension module allowing +Python programs to connect to [Oracle Database][oracledb]. The module conforms +to the [Python Database API 2.0 specification][pep249] with a considerable +number of additions and a couple of minor exclusions, see the [feature +list][features]. It is maintained by Oracle. -Python-oracledb uses the same Python DB API as cx_Oracle, and has many new -features. +Python-oracledb is used for executing SQL and PL/SQL; for calling NoSQL-style +document APIs; for working with data frames; for receiving database +notifications and messages; and for starting and stopping the database. It has +features for high availability and security. It is used by many Python +Frameworks, SQL Generators, ORMs, and libraries. -The module conforms to the [Python Database API 2.0 specification][pep249] with -a considerable number of additions and a couple of minor exclusions, see the -[feature list][features]. +Synchronous and [concurrent][concurrent] coding styles are supported. Database +operations can optionally be [pipelined][pipelining]. -Synchronous and [concurrent][concurrent] coding styles are supported. +Python-oracledb is the successor to the now obsolete cx_Oracle driver. -Python-oracledb is available under an open source license, see below. 
- -## Installation +## Python-oracledb Installation Run: @@ -127,4 +129,5 @@ See [LICENSE][license], [THIRD_PARTY_LICENSES][tplicense], and [installation]: https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html [features]: https://oracle.github.io/python-oracledb/#features [concurrent]: https://python-oracledb.readthedocs.io/en/latest/user_guide/asyncio.html +[pipelining]: https://python-oracledb.readthedocs.io/en/latest/user_guide/asyncio.html#pipelining-database-operations [pypi]: https://pypi.org/project/oracledb diff --git a/doc/src/index.rst b/doc/src/index.rst index 97f535d4..43ed1d5a 100644 --- a/doc/src/index.rst +++ b/doc/src/index.rst @@ -31,14 +31,15 @@ User Guide user_guide/json_data_type.rst user_guide/xml_data_type.rst user_guide/vector_data_type.rst + user_guide/dataframes.rst user_guide/soda.rst user_guide/aq.rst user_guide/cqn.rst user_guide/two_phase_commit.rst user_guide/startup.rst user_guide/ha.rst - user_guide/globalization.rst user_guide/asyncio.rst + user_guide/globalization.rst user_guide/exception_handling.rst user_guide/tracing.rst user_guide/extending.rst @@ -46,6 +47,7 @@ User Guide user_guide/appendix_a.rst user_guide/appendix_b.rst user_guide/appendix_c.rst + user_guide/appendix_d.rst API Manual ========== diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 1ccd52ad..0f4487be 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -1619,16 +1619,16 @@ Common Changes oracledb 1.0.0 (May 2022) ------------------------- -#) Renamed cx_Oracle to python-oracledb. See :ref:`upgradecomparison`. +#) Renamed cx_Oracle to python-oracledb. See :ref:`upgrading83`. #) Python-oracledb is a 'Thin' driver by default that connects directly to Oracle Database. Optional use of Oracle Client libraries enables a :ref:`'Thick' mode ` with some additional functionality. Both modes support the Python Database API v2.0 Specification. #) Added a :attr:`Connection.thin` attribute which shows whether the connection was established in the python-oracledb Thin mode or Thick mode. -#) Creating connections or connection pools now requires :ref:`keyword - parameters ` be passed. This brings python-oracledb into - compliance with the Python Database API specification PEP 249. +#) Creating connections or connection pools now requires keyword parameters be + passed. This brings python-oracledb into compliance with the Python + Database API specification PEP 249. #) Threaded mode is now always enabled for standalone connections (Thick mode). #) The function :func:`oracledb.init_oracle_client()` must now always be diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 5560ff54..350987ae 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -15,7 +15,7 @@ enable Thick mode. The following table summarizes the Oracle Database features supported by python-oracledb Thin and Thick modes, and by the obsolete cx_Oracle driver. -For more details see :ref:`driverdiff` and :ref:`compatibility`. +For more details see :ref:`driverdiff` and :ref:`upgrading83`. .. list-table-with-summary:: Features Supported by python-oracledb and cx_Oracle 8.3 :header-rows: 1 @@ -369,137 +369,140 @@ example when binding numeric values. :header-rows: 1 :class: wy-table-responsive :align: center - :summary: The first column displays the database data type. The second column displays the python-oracledb constant Name. The third column contains notes. 
The fourth column shows Python types that can be used. + :summary: The first column displays the database data type. The second column displays the python-oracledb constant Name. The third column shows Python types that can be used. The fourth column contains notes. * - Oracle Database Type - python-oracledb Constant Name - - Notes - Supported Python Types + - Notes * - VARCHAR2 - :data:`~oracledb.DB_TYPE_VARCHAR` - - No relevant notes - bytes, str + - No relevant notes * - NVARCHAR2 - :data:`~oracledb.DB_TYPE_NVARCHAR` - - No relevant notes - bytes, str + - No relevant notes * - NUMBER, FLOAT - :data:`~oracledb.DB_TYPE_NUMBER` - - No relevant notes - bool, int, float, decimal.Decimal + - No relevant notes * - DATE - :data:`~oracledb.DB_TYPE_DATE` - - No relevant notes - datetime.date, datetime.datetime + - No relevant notes * - BOOLEAN (PL/SQL and Oracle Database 23ai SQL) - :data:`~oracledb.DB_TYPE_BOOLEAN` - - No relevant notes - Any type convertible to bool + - No relevant notes * - BINARY_DOUBLE - :data:`~oracledb.DB_TYPE_BINARY_DOUBLE` - - No relevant notes - bool, int, float, decimal.Decimal + - No relevant notes * - BINARY_FLOAT - :data:`~oracledb.DB_TYPE_BINARY_FLOAT` - - No relevant notes - bool, int, float, decimal.Decimal + - No relevant notes * - TIMESTAMP - :data:`~oracledb.DB_TYPE_TIMESTAMP` - - No relevant notes - datetime.date, datetime.datetime + - No relevant notes * - TIMESTAMP WITH TIME ZONE - :data:`~oracledb.DB_TYPE_TIMESTAMP_TZ` - - No relevant notes - datetime.date, datetime.datetime + - No relevant notes * - TIMESTAMP WITH LOCAL TIME ZONE - :data:`~oracledb.DB_TYPE_TIMESTAMP_LTZ` - - No relevant notes - datetime.date, datetime.datetime + - No relevant notes * - INTERVAL YEAR TO MONTH - :data:`~oracledb.DB_TYPE_INTERVAL_YM` - - No relevant notes - :ref:`oracledb.IntervalYM ` + - No relevant notes * - INTERVAL DAY TO SECOND - :data:`~oracledb.DB_TYPE_INTERVAL_DS` - - No relevant notes - datetime.timedelta + - No relevant notes * - RAW - :data:`~oracledb.DB_TYPE_RAW` - - No relevant notes - bytes, str + - No relevant notes * - LONG - :data:`~oracledb.DB_TYPE_LONG` - - No relevant notes - bytes, str + - No relevant notes * - LONG RAW - :data:`~oracledb.DB_TYPE_LONG_RAW` - - No relevant notes - bytes, str + - No relevant notes * - ROWID - :data:`~oracledb.DB_TYPE_ROWID` - - No relevant notes - bytes, str + - No relevant notes * - UROWID - :data:`~oracledb.DB_TYPE_ROWID`, :data:`~oracledb.DB_TYPE_UROWID` (only supported in python-oracledb Thin mode) - - May show :data:`~oracledb.DB_TYPE_UROWID` in metadata. See :ref:`Query Metadata Differences `. - bytes, str + - May show :data:`~oracledb.DB_TYPE_UROWID` in metadata. See :ref:`Query Metadata Differences `. * - CHAR - :data:`~oracledb.DB_TYPE_CHAR` - - No relevant notes - bytes, str + - No relevant notes * - BLOB - :data:`~oracledb.DB_TYPE_BLOB` - - No relevant notes - :ref:`oracledb.LOB `, bytes, str + - No relevant notes * - CLOB - :data:`~oracledb.DB_TYPE_CLOB` - - No relevant notes - :ref:`oracledb.LOB `, bytes, str + - No relevant notes * - NCHAR - :data:`~oracledb.DB_TYPE_NCHAR` - - No relevant notes - bytes, str + - No relevant notes * - NCLOB - :data:`~oracledb.DB_TYPE_NCLOB`, :data:`~oracledb.DB_TYPE_LONG_NVARCHAR` (if fetching NCLOB as a string) - - No relevant notes - :ref:`oracledb.LOB `, bytes, str + - No relevant notes * - BFILE - :data:`~oracledb.DB_TYPE_BFILE` - - Can fetch a BFILE object and insert that object in a table. Cannot create BFILE objects. 
- :ref:`oracledb.LOB `, bytes + - Can fetch a BFILE object and insert that object in a table. Cannot create BFILE objects. * - JSON - :data:`~oracledb.DB_TYPE_JSON` - - No relevant notes - Any type convertible to Oracle JSON + - No relevant notes * - REF CURSOR (PL/SQL OR nested cursor) - :data:`~oracledb.DB_TYPE_CURSOR` - - No relevant notes - :ref:`oracledb.Cursor ` + - No relevant notes * - PLS_INTEGER - :data:`~oracledb.DB_TYPE_BINARY_INTEGER` - - No relevant notes - bool, int, float, decimal.Decimal + - No relevant notes * - BINARY_INTEGER - :data:`~oracledb.DB_TYPE_BINARY_INTEGER` - - No relevant notes - bool, int, float, decimal.Decimal + - No relevant notes * - REF - n/a - - Not supported in python-oracledb Thin mode - n/a + - Not supported in python-oracledb Thin mode * - XMLType - :data:`~oracledb.DB_TYPE_XMLTYPE` - - May need to use ``xmltype.getclobval()`` to fetch in python-oracledb Thick mode. See :ref:`xmldatatype` - bytes, str + - May need to use ``xmltype.getclobval()`` to fetch in python-oracledb Thick mode. See :ref:`xmldatatype` * - User-defined types (object type, VARRAY, records, collections, SDO_*types) - :data:`~oracledb.DB_TYPE_OBJECT` - - No relevant notes - OBJECT of specific type + - No relevant notes * - VECTOR - :data:`~oracledb.DB_TYPE_VECTOR` - - No relevant notes - array.array + - No relevant notes Binding of contiguous PL/SQL Index-by BINARY_INTEGER arrays of string, number, and date are supported in python-oracledb Thin and Thick modes. Use :meth:`Cursor.arrayvar()` to build these arrays. + +When fetching :ref:`data frames `, see :ref:`Data Frame Type +Mapping ` for type information. diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index dcd3d4b2..0d8fa3d8 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -1,383 +1,17 @@ -.. _upgradecomparison: - ***************************************************** Appendix C: The python-oracledb and cx_Oracle Drivers ***************************************************** The python-oracledb driver is the renamed, major version successor to cx_Oracle 8.3. The python-oracledb driver has many :ref:`new features ` and -some :ref:`deprecations` compared with cx_Oracle. Also, see -:ref:`upgrading83`. The cx_Oracle driver is obsolete and should not be used -for new development. - -.. _compatibility: - -Differences between the python-oracledb and cx_Oracle Drivers -============================================================= - -The differences between python-oracledb and the obsolete cx_Oracle driver are -listed here. - -Mode differences from cx_Oracle -------------------------------- - -By default, python-oracledb runs in a 'Thin' mode which connects directly to -Oracle Database. This mode does not need Oracle Client libraries. However, -some :ref:`additional functionality ` is available when -python-oracledb uses them. Python-oracledb is said to be in 'Thick' mode when -Oracle Client libraries are used. See :ref:`enablingthick`. Both modes have -comprehensive functionality supporting the Python Database API v2.0 -Specification. - -cx_Oracle always runs in a Thick mode using Oracle Client libraries. The -features in python-oracledb Thick mode and cx_Oracle 8.3 are the same, subject -to the :ref:`new features `, some :ref:`deprecations -`, and to other changes noted in the documentation. 
- -Oracle Client Library Loading Differences from cx_Oracle --------------------------------------------------------- - -Oracle Client libraries are now only loaded if -:func:`oracledb.init_oracle_client()` is called in your application. This -changes python-oracledb to Thick mode. The ``init_oracle_client()`` method must -be called before any :ref:`standalone connection ` or -:ref:`connection pool ` is created. If a connection or pool is -created first in the default Thin mode, then Thick mode cannot be enabled. - -See :ref:`enablingthick` for more information. - -Calling the ``init_oracle_client()`` method immediately loads Oracle Client -libraries. To emulate the cx_Oracle behavior of deferring library loading -until the creation of the first connection (in the case when -``init_oracle_client()`` is not called), your application will need to -explicitly defer calling ``init_oracle_client()`` as appropriate. - -In python-oracledb, ``init_oracle_client()`` can now be called multiple times -in the one Python process as long as its arguments are the same each time. - -oracledb.clientversion() -++++++++++++++++++++++++ - -The :func:`oracledb.clientversion()` method shows the version of the Oracle -Client libraries being used. There is no Oracle Client used in the -python-oracledb Thin mode so this function can only be called in -python-oracledb Thick mode. If this function is called before -:func:`oracledb.init_oracle_client()`, an exception is thrown. - -Connection Differences from cx_Oracle -------------------------------------- - -.. _connectdiffs: - -oracledb.connect() Differences -++++++++++++++++++++++++++++++ - -The :func:`oracledb.connect()` function in the python-oracledb driver differs -from cx_Oracle: - -- Keyword parameters **must** be used in calls to :func:`oracledb.connect()`. - This change makes the driver compliant with the Python Database API - specification PEP 249. See - :ref:`Standalone Connections ` and :ref:`connerrors`. - -- New optional keyword arguments can be passed to :func:`~oracledb.connect()`. - For example you can pass the hostname, port and servicename as separate - parameters instead of using an Easy Connect connection string. In - python-oracledb Thin mode, some of the new arguments replace :ref:`sqlnet.ora - ` settings. - -- A new optional parameter ``params`` of type :ref:`ConnectParams ` - can be used to encapsulate connection properties. See :ref:`usingconnparams` - for more information. - -- The following parameters are desupported: - - - ``encoding`` and ``nencoding``: The encodings in use are always UTF-8. - - - ``threaded``: Threaded Oracle Call Interface (OCI) is now always enabled - in python-oracledb Thick mode. This option is not relevant to the Thin - mode. - - See :ref:`deprecations` for more information. - -The use of the class constructor method ``oracledb.Connection()`` to create -connections is no longer recommended for creating connections. Use -:func:`~oracledb.connect()` instead. - -The :meth:`oracledb.makedsn()` method for creating the ``dsn`` value has been -deprecated. New code should use :meth:`oracledb.ConnectParams()` or use the -new keyword arguments in :func:`oracledb.connect()`. - - -Connection Object Differences -+++++++++++++++++++++++++++++ - -The :ref:`Connection object ` differences between the python-oracledb -and cx_Oracle drivers are: - -- The attribute :attr:`Connection.maxBytesPerCharacter` is deprecated. This - will return a constant value of *4* since encodings are always UTF-8. 
- -- A new boolean attribute, :attr:`Connection.thin` is available. This - attribute is *True* if the connection was established in python-oracledb Thin - mode. In Thick mode, the value of this attribute is *False*. - -- The new method signature of :attr:`Connection.outputtypehandler` is - ``handler(cursor, metadata)``. The old signature ``handler(cursor, name, - default_type, length, precision, scale)`` was deprecated in python-oracledb - 1.4 but will still work and will be removed in a future version. - -See :ref:`connattrs` for more information. - -Pooling Differences from cx_Oracle ----------------------------------- - -It is recommended to use the new :ref:`ConnectionPool Object ` -instead of the equivalent SessionPool object, which is deprecated. To create a -connection pool, use :meth:`oracledb.create_pool()`, which is equivalent to -calling ``cx_Oracle.SessionPool()``. - -For more information, see :ref:`connpooling`. - -oracledb.SessionPool() Differences -++++++++++++++++++++++++++++++++++ - -The python-oracledb ``oracledb.SessionPool()`` method (which is an alias of -:func:`oracledb.create_pool()`) differs from ``cx_Oracle.SessionPool()`` as -follows: - -- Keyword parameters **must** be used in calls. This change makes the driver - compliant with the Python Database API specification PEP 249. See - :ref:`Connection pooling ` and :ref:`connerrors`. - -- Passing a value to the ``dsn`` parameter that contains the user name and - password is now supported in the same way as :func:`oracledb.connect()`. For - example ``dsn="un/pw@cs"`` can be used. - -- New optional keyword arguments can be passed to - :func:`~oracledb.create_pool()`. For example you can pass the hostname, port - and servicename as separate parameters instead of using an Easy Connect - connection string. In python-oracledb Thin mode, some of the new arguments - replace :ref:`sqlnet.ora ` settings. - -- A new optional parameter ``params`` of type :ref:`PoolParams ` - can be used to encapsulate connection properties. See :ref:`usingconnparams` - for more information. - -- The default mode is :data:`~oracledb.POOL_GETMODE_WAIT` instead of - :data:`~oracledb.POOL_GETMODE_NOWAIT`. If the mode - :data:`~oracledb.POOL_GETMODE_NOWAIT` is truly desired, modify any pool - creation code to specify this value instead. Note the namespace of - constants has been improved. Old names like ``SPOOL_ATTRVAL_NOWAIT`` can be - used but are now deprecated. - -- The ``encoding`` and ``nenecoding`` parameters are deprecated and - ignored. The encodings in use are always UTF-8. - -- New keyword arguments that are used internally to create a :ref:`PoolParams - object ` before creating the connection. - -The :meth:`oracledb.makedsn()` method for creating the ``dsn`` value has been -deprecated. New code should use :meth:`oracledb.ConnectParams()` or use the -new keyword arguments to :func:`oracledb.create_pool()`. - -SessionPool Object Differences -++++++++++++++++++++++++++++++ - -The SessionPool object (which is an alias for the :ref:`ConnectionPool object -`) differences between the python-oracledb and cx_Oracle drivers are: - -- A Python `type() `__ - will show the class as ``oracledb.ConnectionPool`` instead of - ``cx_Oracle.SessionPool``. - -- A new boolean attribute, ``SessionPool.thin`` (see - :attr:`ConnectionPool.thin`) is available. This attribute is *True* if the - connection was established in python-oracledb Thin mode. In Thick mode, the - value of this attribute is *False*. 
- -Cursor Object Differences from cx_Oracle ----------------------------------------- - -The differences between the :ref:`Cursor object ` in -python-oracledb and cx_Oracle drivers are: - -- :meth:`Cursor.fetchmany()`: The name of the size argument of ``fetchmany()`` - is ``size``. This change was done to comply with `PEP 249 - `_. The previous keyword argument name, - ``numRows`` is deprecated. - -- ``Cursor.fetchraw()``: This method was previously deprecated in cx_Oracle - 8.2 and has been removed in python-oracledb. Instead, use one of the other - fetch methods such as :meth:`Cursor.fetchmany()`. - -- ``Cursor.executemanyprepared()``: This method was previously deprecated in - cx_Oracle 6.4 and has been removed in python-oracledb. Instead, use - :meth:`Cursor.executemany()`, by passing *None* for the statement argument and - an integer for the parameters argument. - -- ``Cursor.bindarraysize``: This attribute is desupported and removed in - python-oracledb. It is not needed in the application code. - -- :attr:`Cursor.rowcount`: After :meth:`Cursor.execute()` or - :meth:`Cursor.executemany()` with PL/SQL statements, ``Cursor.rowcount`` - will return *0*. If the cursor or connection are not open, then the value *-1* - will be returned as required by the Python Database API. - -- :attr:`Cursor.description`: This attribute was previously a sequence of - 7-item sequences in cx_Oracle and python-oracledb. Each of these sequences - contained information describing one result column, that is, (name, type, - display_size, internal_size, precision, scale, null_ok). In - python-oracledb 1.4, this attribute was changed to a sequence of - :ref:`FetchInfo ` objects. Each FetchInfo object describes one - result column and can behave as a 7-tuple like before, but contains - additional information that may be helpful when using - :ref:`output type handlers `. - -- :attr:`Cursor.outputtypehandler`: The new method signature of this attribute - is ``handler(cursor, metadata)``. The old signature ``handler(cursor, name, - default_type, length, precision, scale)`` was deprecated in python-oracledb - 1.4 but will still work and will be removed in a future version. - -.. _fetchisjson: - -Fetching IS JSON Column Differences from cx_Oracle --------------------------------------------------- - -In python-oracledb, VARCHAR2 and LOB columns that have the ``IS JSON`` -constraint enabled are fetched as Python objects. These columns are fetched in -the same way that :ref:`JSON type columns ` are fetched when -using Oracle Database 21c (or later). The returned value varies depending on -the JSON data. If the JSON data is an object, then a dictionary is returned. -If it is an array, then a list is returned. If it is a scalar value, then that -particular scalar value is returned. - -In cx_Oracle, VARCHAR2 and LOB columns that have the ``IS JSON`` constraint -enabled are fetched as strings and LOB objects respectively. To enable this -same fetch behavior in python-oracledb, you must use an -:ref:`output type handler ` as shown below. - -.. code-block:: python - - def type_handler(cursor, fetch_info): - if fetch_info.is_json: - return cursor.var(fetch_info.type_code, cursor.arraysize) - -Advanced Queuing (AQ) Differences from cx_Oracle ------------------------------------------------- - -Use the new :ref:`Advanced Queuing (AQ) ` API instead of the -older API which was deprecated in cx_Oracle 7.2 and is not available in -python-oracledb. 
- -Replace: - -- ``Connection.deq()`` with :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` -- ``Connection.deqoptions()`` with attribute :attr:`Queue.deqoptions` -- ``Connection.enq()`` with :meth:`Queue.enqone()` or :meth:`Queue.enqmany()` -- ``Connection.enqoptions()`` with attribute :attr:`Queue.enqoptions` - -The AQ support in python-oracledb has the following enhancements from -cx_Oracle: - -- AQ messages can be enqueued and dequeued as a JSON payload type -- Recipient lists can be enqueued and dequeued -- Enqueue options, dequeue options, and message properties can be set - -See :ref:`Oracle Advanced Queuing (AQ) `. - -.. _errordiff: - -Error Handling Differences from cx_Oracle ------------------------------------------ - -In python-oracledb Thick mode, error messages generated by the Oracle Client -libraries and the `ODPI-C `_ layer used by -cx_Oracle and python-oracledb in Thick mode are mostly returned unchanged from -cx_Oracle 8.3. Some exceptions shown below. - -Note that the python-oracledb driver error messages can also vary between Thin -and Thick modes. See :ref:`errorhandling`. - -ConnectionPool.acquire() Message Differences -++++++++++++++++++++++++++++++++++++++++++++ - -:meth:`ConnectionPool.acquire()` ORA errors will be mapped to DPY errors. For -example:: - - DPY-4005: timed out waiting for the connection pool to return a connection - -replaces the cx_Oracle 8.3 error:: - - ORA-24459: OCISessionGet() timed out waiting for pool to create new connections - -Dead Connection Detection and Timeout Message Differences -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - -Application code which detects connection failures or statement execution -timeouts will need to check for new errors, ``DPY-4011`` and ``DPY-4024`` -respectively. The error ``DPY-1001`` is returned if an already dead connection -is attempted to be used. - -The new Error object attribute :attr:`~oracledb._Error.full_code` may be -useful for checking the error code. - -Example error messages are: - -* Scenario 1: An already closed or dead connection was attempted to be used. - - python-oracledb Thin Error:: - - DPY-1001: not connected to database - - python-oracledb Thick Error:: - - DPY-1001: not connected to database - - cx_Oracle Error:: - - not connected - -* Scenario 2: The database side of the connection was terminated while the - connection was being used. - - python-oracledb Thin Error:: - - DPY-4011: the database or network closed the connection - - python-oracledb Thick Error:: - - DPY-4011: the database or network closed the connection - DPI-1080: connection was closed by ORA-%d - - cx_Oracle Error:: - - DPI-1080: connection was closed by ORA-%d - -* Scenario 3: Statement execution exceeded the :attr:`connection.call_timeout` - value. - - python-oracledb Thin Error:: - - DPY-4024: call timeout of {timeout} ms exceeded - - python-oracledb Thick Error:: - - DPY-4024: call timeout of {timeout} ms exceeded - DPI-1067: call timeout of %u ms exceeded with ORA-%d - - cx_Oracle Error:: - - DPI-1067: call timeout of %u ms exceeded with ORA-%d +some :ref:`deprecations` compared with cx_Oracle. The cx_Oracle driver is +obsolete and should not be used for new development. .. 
_upgrading83: Upgrading from cx_Oracle 8.3 to python-oracledb =============================================== -Things to Know Before the Upgrade ---------------------------------- - Below is a list of some useful things to know before upgrading from cx_Oracle to python-oracledb: @@ -389,11 +23,17 @@ to python-oracledb: See :ref:`installation` for details. - By default, python-oracledb runs in a 'Thin' mode which connects directly to - Oracle Database. This mode does not need Oracle Client libraries to be - installed. However, some additional functionality is available when + Oracle Database. This mode does not need Oracle Client libraries. However, + some :ref:`additional functionality ` is available when python-oracledb uses them. Python-oracledb is said to be in 'Thick' mode - when Oracle Client libraries are used. The Thick mode is equivalent to - cx_Oracle. + when Oracle Client libraries are used. See :ref:`enablingthick`. Both modes + have comprehensive functionality supporting the Python Database API v2.0 + Specification. The Thick mode is equivalent to cx_Oracle. + + cx_Oracle always runs in a Thick mode using Oracle Client libraries. The + features in python-oracledb Thick mode and cx_Oracle 8.3 are the same, subject + to the :ref:`new features `, some :ref:`deprecations + `, and to other changes noted in the documentation. - python-oracledb Thin and Thick modes have the same level of support for the `Python Database API specification `_ and @@ -417,10 +57,7 @@ to python-oracledb: print(r) - python-oracledb can be used in SQLAlchemy, Django, Pandas, Superset and other - frameworks and Object-relational Mappers (ORMs). To use python-oracledb in - older versions of these libraries that do not have native support for the new - name, you can override the use of cx_Oracle with a few lines of code. See - :ref:`frameworks`. + frameworks and Object-relational Mappers (ORMs). See :ref:`frameworks`. - python-oracledb connection and pool creation calls require keyword arguments to conform with the Python Database API specification. For example you must @@ -436,6 +73,12 @@ to python-oracledb: connection = oracledb.connect("scott", pw, "localhost/orclpdb") +- New optional keyword arguments can be passed to connection and pool creation + functions. For example you can pass the hostname, port and servicename as + separate parameters instead of using an Easy Connect connection string. In + python-oracledb Thin mode, some of the new arguments replace :ref:`sqlnet.ora + ` settings. + - Some previously deprecated features are no longer available. See :ref:`deprecations`. @@ -527,14 +170,13 @@ following steps: parameter was already ignored in ``oracledb.SessionPool()`` from cx_Oracle 8.2. -5. Remove all references to :meth:`Cursor.fetchraw()` as this method was - deprecated in cx_Oracle 8.2 and has been removed in python-oracledb. - Instead, use one of the other fetch methods such as - :meth:`Cursor.fetchmany()`. +5. Remove all references to ``Cursor.fetchraw()`` as this method was deprecated + in cx_Oracle 8.2 and has been removed in python-oracledb. Instead, use one + of the other fetch methods such as :meth:`Cursor.fetchmany()`. -6. The default value of the ``oracledb.SessionPool()`` parameter - :attr:`~Connection.getmode` now waits for an available connection. That - is, the default is now :data:`~oracledb.POOL_GETMODE_WAIT` instead of +6. The default value of the ``oracledb.SessionPool()`` parameter ``getmode`` + now waits for an available connection. 
That is, the default is now + :data:`~oracledb.POOL_GETMODE_WAIT` instead of :data:`~oracledb.POOL_GETMODE_NOWAIT`. The new default value improves the behavior for most applications. If the pool is in the middle of growing, the new value prevents transient connection creation errors from occurring @@ -550,43 +192,148 @@ following steps: as :data:`~oracledb.POOL_GETMODE_NOWAIT` and :data:`~oracledb.PURITY_SELF` are now preferred. The old namespaces still work. -7. VARCHAR2 and LOB columns that have the ``IS JSON`` constraint enabled are - fetched by default as Python objects in python-oracledb. In cx_Oracle, - VARCHAR2 and LOB columns that contain JSON data were fetched by default as - strings and LOB objects respectively. See :ref:`fetchisjson`. +7. A Python `type() `__ + will show the class of a connection pool as ``oracledb.ConnectionPool`` + instead of ``cx_Oracle.SessionPool``. Update code as needed. + +8. Use the new :ref:`Advanced Queuing (AQ) ` API instead of the + older API which was deprecated in cx_Oracle 7.2 and is not available in + python-oracledb. + + Replace: + + - ``Connection.deq()`` with :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` + - ``Connection.deqoptions()`` with attribute :attr:`Queue.deqoptions` + - ``Connection.enq()`` with :meth:`Queue.enqone()` or :meth:`Queue.enqmany()` + - ``Connection.enqoptions()`` with attribute :attr:`Queue.enqoptions` + + See :ref:`aqusermanual`. + +9. Remove calls to ``Cursor.executemanyprepared()``. This method was previously + deprecated in cx_Oracle 6.4 and has been removed in + python-oracledb. Instead, use :meth:`Cursor.executemany()` by passing *None* + for the statement argument and an integer for the ``parameters`` argument. + +10. Remove the use of the ``Cursor.bindarraysize``. It is desupported and not + needed in the application code. + +11. In python-oracledb, VARCHAR2 and LOB columns that have the ``IS JSON`` + constraint enabled are fetched by default as Python objects. These columns + are fetched in the same way that :ref:`JSON type columns ` are + fetched when using Oracle Database 21c (or later). The returned value + varies depending on the JSON data. If the JSON data is an object, then a + dictionary is returned. If it is an array, then a list is returned. If it + is a scalar value, then that particular scalar value is returned. + + In cx_Oracle, VARCHAR2 and LOB columns that have the ``IS JSON`` constraint + enabled are fetched by default as strings and LOB objects respectively. To + enable this same fetch behavior in python-oracledb, you can use an + :ref:`output type handler ` as shown below. + + .. code-block:: python + + def type_handler(cursor, fetch_info): + if fetch_info.is_json: + return cursor.var(fetch_info.type_code, cursor.arraysize) + +12. Review uses of :attr:`Cursor.rowcount`. After :meth:`Cursor.execute()` or + :meth:`Cursor.executemany()` with PL/SQL statements, :attr:`Cursor.rowcount` + will return *0*. If the cursor or connection are not open, then the value + *-1* will be returned as required by the Python Database API. + +13. In python-oracledb Thick mode, error messages generated by the Oracle + Client libraries and the `ODPI-C `_ layer + used by cx_Oracle and python-oracledb in Thick mode are mostly returned + unchanged from cx_Oracle 8.3. Some exceptions shown below. + + Note that the python-oracledb driver error messages can also vary between Thin + and Thick modes. See :ref:`errorhandling`. 
+ + **ConnectionPool.acquire() Message Differences** + + :meth:`ConnectionPool.acquire()` ORA errors will be mapped to DPY errors. For + example:: + + DPY-4005: timed out waiting for the connection pool to return a connection + + replaces the cx_Oracle 8.3 error:: + + ORA-24459: OCISessionGet() timed out waiting for pool to create new connections + + **Dead Connection Detection and Timeout Message Differences** + + Application code which detects connection failures or statement execution + timeouts will need to check for new errors, ``DPY-4011`` and ``DPY-4024`` + respectively. The error ``DPY-1001`` is returned if an already dead connection + is attempted to be used. + + The new Error object attribute :attr:`~oracledb._Error.full_code` may be + useful for checking the error code. + + Example error messages are: + + * Scenario 1: An already closed or dead connection was attempted to be used. -8. Review :ref:`compatibility`. + python-oracledb Thin mode Error:: - If your code base uses an older cx_Oracle version, review the previous - :ref:`release notes ` for additional changes to modernize - the code. + DPY-1001: not connected to database -9. Modernize code as needed or desired. + python-oracledb Thick mode Error:: - For example, replace all usages of the deprecated Advanced Queuing API with - the new API originally introduced in cx_Oracle 7.2, see - :ref:`aqusermanual`. + DPY-1001: not connected to database - The method signature of the :ref:`output type handler ` - which can be specified on a :attr:`connection - ` or on a :attr:`cursor - ` is ``handler(cursor, metadata)``. The old - signature ``handler(cursor, name, default_type, length, precision, scale)`` - was deprecated in python-oracledb 1.4 but will still work and will be - removed in a future version. + cx_Oracle Error:: - See :ref:`deprecations` for the list of all deprecations in python-oracledb. + not connected -10. Review the following sections to see if your application requirements are + * Scenario 2: The database side of the connection was terminated while the + connection was being used. + + python-oracledb Thin mode Error:: + + DPY-4011: the database or network closed the connection + + python-oracledb Thick mode Error:: + + DPY-4011: the database or network closed the connection + DPI-1080: connection was closed by ORA-%d + + cx_Oracle Error:: + + DPI-1080: connection was closed by ORA-%d + + * Scenario 3: Statement execution exceeded the :attr:`connection.call_timeout` + value. + + python-oracledb Thin mode Error:: + + DPY-4024: call timeout of {timeout} ms exceeded + + python-oracledb Thick mode Error:: + + DPY-4024: call timeout of {timeout} ms exceeded + DPI-1067: call timeout of %u ms exceeded with ORA-%d + + cx_Oracle Error:: + + DPI-1067: call timeout of %u ms exceeded with ORA-%d + +14. If your code base uses an older cx_Oracle version, review + :ref:`deprecations` for additional changes that may be necessary. + +15. Modernize code to take advantage of new features, if desired. See the + :ref:`release notes `. + +16. Review the following sections to see if your application requirements are satisfied by python-oracledb Thin mode: - - :ref:`featuresummary` - - :ref:`driverdiff` + - :ref:`featuresummary` + - :ref:`driverdiff` - If so, then follow :ref:`upgradethin`. + If so, then follow :ref:`upgradethin`. - If your application requirements are not supported by python-oracledb Thin - mode, then use Thick mode, see :ref:`upgradethick`. 
+ If your application requirements are not supported by python-oracledb Thin + mode, then use Thick mode, see :ref:`upgradethick`. .. _upgradethin: @@ -630,9 +377,13 @@ need to be made in addition to the common :ref:`commonupgrade`: See :ref:`otherinit`. -4. Remove calls to :func:`oracledb.clientversion()` which is only available in - python-oracledb Thick mode. Oracle Client libraries are not available - in Thin mode. +4. Remove calls to :func:`oracledb.clientversion()`. + + The :func:`oracledb.clientversion()` function shows the version of the + Oracle Client libraries being used. Since Oracle Client libraries are not + used in the python-oracledb Thin mode, this function cannot be called. If it + is called before calling :func:`oracledb.init_oracle_client()`, an exception + is thrown. 5. To connect using a :ref:`TNS Alias ` from a ``tnsnames.ora`` file (see :ref:`optnetfiles`) in python-oracledb Thin mode, you should @@ -668,9 +419,7 @@ need to be made in addition to the common :ref:`commonupgrade`: :meth:`ConnectionPool.acquire()` until sufficient time has passed for connections in the pool to be created. -8. Review error handling improvements. See :ref:`errorhandling`. - -9. Review locale and globalization usage. Python-oracledb Thin mode ignores +8. Review locale and globalization usage. Python-oracledb Thin mode ignores all NLS environment variables. It also ignores the ``ORA_TZFILE`` environment variable. Thick mode does use these variables. See :ref:`globalization`. @@ -680,28 +429,67 @@ need to be made in addition to the common :ref:`commonupgrade`: Additional Upgrade Steps to use python-oracledb Thick Mode ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -To upgrade from cx_Oracle to python-oracledb Thick mode, the following changes -need to be made in addition to the common :ref:`commonupgrade`: +To upgrade from cx_Oracle to python-oracledb Thick mode, in addition to the +common :ref:`commonupgrade`, the function :func:`oracledb.init_oracle_client()` +*must* be called to enable the Thick mode. It can be called anywhere before +the first call to :func:`oracledb.connect()`, ``oracledb.Connection()``, or +``oracledb.SessionPool()``. If a connection or pool is created first in the +default Thin mode, then Thick mode cannot be enabled. See +:ref:`enablingthick` for more details. + +The requirement to call :func:`~oracledb.init_oracle_client()` means that +Oracle Client library loading is not automatically deferred until the driver +is first used, such as when a connection is opened. To emulate the cx_Oracle +behavior of deferring library loading until the creation of the first +connection (in the case when :func:`~oracledb.init_oracle_client()` is not +called), your application will need to explicitly defer calling +:func:`~oracledb.init_oracle_client()` as appropriate. + +In python-oracledb, :func:`~oracledb.init_oracle_client()` can be called +multiple times in a Python process as long as the arguments are the same. + +Note that on Linux and related operating systems, the +:func:`~oracledb.init_oracle_client()` parameter ``lib_dir`` should not be +passed. Instead, set the system library search path with ``ldconfig`` or +``LD_LIBRARY_PATH`` prior to running Python. + +Modernizing Code +---------------- + +Many significant new features have been added to python-oracledb. You may want +to take advantage of them when upgrading from cx_Oracle. See the rest of the +documentation, the :ref:`release notes `, and +:ref:`featuresummary`. 
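Before looking at smaller changes, note that the Thick mode enablement
described above can be a one-line addition. The sketch below uses placeholder
credentials; the commented ``lib_dir`` value is an example that is only
appropriate on Windows or macOS:

.. code-block:: python

    import getpass
    import oracledb

    # Enable python-oracledb Thick mode before any connection or pool is
    # created.  On Linux, omit lib_dir and set the system library search path
    # with ldconfig or LD_LIBRARY_PATH instead.
    oracledb.init_oracle_client()
    # for example: oracledb.init_oracle_client(lib_dir=r"C:\oracle\instantclient_23_7")

    un = "scott"                 # placeholder user name
    cs = "localhost/orclpdb"     # placeholder connect string
    pw = getpass.getpass(f"Enter password for {un}@{cs}: ")

    connection = oracledb.connect(user=un, password=pw, dsn=cs)

    # clientversion() can only be called after Thick mode has been enabled
    print(oracledb.clientversion())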
+ +The following points summarize some of the smaller changes that you may find +interesting: + +- The :meth:`oracledb.makedsn()` method for creating the ``dsn`` value has been + deprecated. New code should use keyword arguments when creating connections + or connection pools, or make use of a ``params`` object described below. -1. The function :func:`oracledb.init_oracle_client()` *must* be called to - enable python-oracle Thick mode. It can be called anywhere before the first - call to :func:`oracledb.connect()`, ``oracledb.Connection()``, or - ``oracledb.SessionPool()``. See :ref:`enablingthick` for more details. +- A new optional parameter ``params`` of type :ref:`ConnectParams ` + can be used to encapsulate connection properties. Similarly a new optional + parameter ``params`` of type :ref:`PoolParams ` can be used to + encapsulate pool creation properties. See :ref:`usingconnparams` for more + information. - The requirement to call :func:`~oracledb.init_oracle_client()` means that - Oracle Client library loading is not automatically deferred until the driver - is first used, such as when a connection is opened. The application must - explicitly manage this if deferral is required. +- The use of the class constructor method ``oracledb.Connection()`` to create + connections is no longer recommended for creating connections. Use + :func:`~oracledb.connect()` instead. - In python-oracledb, :func:`~oracledb.init_oracle_client()` can be called - multiple times in a Python process as long as the arguments are the same. +- The new method signature of :attr:`Connection.outputtypehandler` is + ``handler(cursor, metadata)``. The old signature ``handler(cursor, name, + default_type, length, precision, scale)`` was deprecated in python-oracledb + 1.4 but will still work and will be removed in a future version. - Note that on Linux and related operating systems, the - :func:`~oracledb.init_oracle_client()` parameter ``lib_dir`` should not be - passed. Instead, set the system library search path with ``ldconfig`` or - ``LD_LIBRARY_PATH`` prior to running Python. +- The attribute :attr:`Connection.maxBytesPerCharacter` is deprecated. This + will return a constant value of *4* since encodings are always UTF-8. -2. Review error handling improvements. See :ref:`errorhandling`. +- In python-oracledb, the name of the size argument of + :meth:`Cursor.fetchmany()` is ``size``. This change was done to comply with + `PEP 249 `_. The previous keyword + argument name, ``numRows`` is deprecated. Code to Aid the Upgrade to python-oracledb ------------------------------------------ @@ -714,8 +502,8 @@ Toggling between Drivers The sample `oracledb_upgrade.py `__ shows a way to toggle applications between cx_Oracle and the two python-oracledb modes. Note this script cannot -map some functionality such as :ref:`obsolete cx_Oracle ` -features or error message changes. +map some functionality such as obsolete cx_Oracle features or error message +changes. An example application showing this module in use is: @@ -773,165 +561,3 @@ similar to: Another method that can be used to check which driver is in use is to query the view V$SESSION_CONNECT_INFO, see :ref:`vsessconinfo`. - -.. _frameworks: - -Python Frameworks, SQL Generators, and ORMs -------------------------------------------- - -Python-oracledb's Thin and :ref:`Thick ` modes cover the feature -needs of frameworks that depend upon the Python Database API. 
- -For versions of SQLAlchemy, Django, Superset, other frameworks, -object-relational mappers (ORMs), and libraries that do not have native support -for python-oracledb, you can add code like this to use python-oracledb instead -of cx_Oracle: - -.. code-block:: python - - import sys - import oracledb - oracledb.version = "8.3.0" - sys.modules["cx_Oracle"] = oracledb - -.. note:: - - This must occur before any import of cx_Oracle by your code or the library. - -To use Thick mode, for example if you need to connect to Oracle Database 11gR2, -also add a call to :meth:`oracledb.init_oracle_client()` with the appropriate -parameters for your environment, see :ref:`enablingthick`. - -SQLAlchemy 2 and Django 5 have native support for python-oracledb so the above -code snippet is not needed in those versions. - -Connecting with SQLAlchemy -++++++++++++++++++++++++++ - -**SQLAlchemy 1.4** - -.. code-block:: python - - # Using python-oracledb in SQLAlchemy 1.4 - - import os - import getpass - import oracledb - from sqlalchemy import create_engine - from sqlalchemy import text - - import sys - oracledb.version = "8.3.0" - sys.modules["cx_Oracle"] = oracledb - - # Uncomment to use python-oracledb Thick mode - # Review the doc for the appropriate parameters - #oracledb.init_oracle_client() - - un = os.environ.get("PYTHON_USERNAME") - cs = os.environ.get("PYTHON_CONNECTSTRING") - pw = getpass.getpass(f'Enter password for {un}@{cs}: ') - - # Note the first argument is different for SQLAlchemy 1.4 and 2 - engine = create_engine('oracle://@', - connect_args={ - # Pass any python-oracledb connect() parameters - "user": un, - "password": pw, - "dsn": cs - } - ) - - with engine.connect() as connection: - print(connection.scalar(text( - """select unique client_driver - from v$session_connect_info - where sid = sys_context('userenv', 'sid')"""))) - - -Note that the ``create_engine()`` argument driver declaration uses -``oracle://`` for SQLAlchemy 1.4 and ``oracle+oracledb://`` for SQLAlchemy 2. - -The ``connect_args`` dictionary can use any appropriate -:meth:`oracledb.connect()` parameter. - -**SQLAlchemy 2** - -.. code-block:: python - - # Using python-oracledb in SQLAlchemy 2 - - import os - import getpass - import oracledb - from sqlalchemy import create_engine - from sqlalchemy import text - - # Uncomment to use python-oracledb Thick mode - # Review the doc for the appropriate parameters - #oracledb.init_oracle_client() - - un = os.environ.get("PYTHON_USERNAME") - cs = os.environ.get("PYTHON_CONNECTSTRING") - pw = getpass.getpass(f'Enter password for {un}@{cs}: ') - - # Note the first argument is different for SQLAlchemy 1.4 and 2 - engine = create_engine('oracle+oracledb://@', - connect_args={ - # Pass any python-oracledb connect() parameters - "user": un, - "password": pw, - "dsn": cs - } - ) - - with engine.connect() as connection: - print(connection.scalar(text( - """select unique client_driver - from v$session_connect_info - where sid = sys_context('userenv', 'sid')"""))) - - -Note that the ``create_engine()`` argument driver declaration uses -``oracle://`` for SQLAlchemy 1.4 and ``oracle+oracledb://`` for SQLAlchemy 2. - -The ``connect_args`` dictionary can use any appropriate -:meth:`oracledb.connect()` parameter. - -**SQLAlchemy Connection Pools** - -Most multi-user applications should use a :ref:`connection pool `. -The python-oracledb pool is preferred because of its high availability support. -For example: - -.. 
code-block:: python - - # Using python-oracledb in SQLAlchemy 2 - - import os, platform - import getpass - import oracledb - from sqlalchemy import create_engine - from sqlalchemy import text - from sqlalchemy.pool import NullPool - - # Uncomment to use python-oracledb Thick mode - # Review the doc for the appropriate parameters - #oracledb.init_oracle_client() - - un = os.environ.get("PYTHON_USERNAME") - cs = os.environ.get("PYTHON_CONNECTSTRING") - pw = getpass.getpass(f'Enter password for {un}@{cs}: ') - - pool = oracledb.create_pool(user=un, password=pw, dsn=cs, - min=4, max=4, increment=0) - engine = create_engine("oracle+oracledb://", creator=pool.acquire, poolclass=NullPool) - - with engine.connect() as connection: - print(connection.scalar(text("""select unique client_driver - from v$session_connect_info - where sid = sys_context('userenv', 'sid')"""))) - - -You can also use python-oracledb connection pooling with SQLAlchemy 1.4. Use -the appropriate name mapping code and first argument to ``create_engine()``. diff --git a/doc/src/user_guide/appendix_d.rst b/doc/src/user_guide/appendix_d.rst new file mode 100644 index 00000000..3487de72 --- /dev/null +++ b/doc/src/user_guide/appendix_d.rst @@ -0,0 +1,290 @@ +.. _frameworks: + +******************************************************* +Appendix D: Python Frameworks, SQL Generators, and ORMs +******************************************************* + +Python-oracledb's Thin and :ref:`Thick ` modes cover the feature +needs of frameworks that depend upon the Python Database API. + +Using python-oracledb with Data Frame Libraries +=============================================== + +Python-oracledb can fetch directly to data frames that expose an Apache Arrow +PyCapsule interface. This is an efficient way to work with data using Python +libraries such as `Apache PyArrow +`__, `Pandas +`__, `Polars `__, `NumPy +`__, `PyTorch `__, or to write files +in `Apache Parquet `__ format. + +See :ref:`dataframeformat` for more information. + +.. _flask: + +Connecting with Flask +===================== + +The Flask web application framework works well with python-oracledb, either +directly or by using a library such as :ref:`SQLAlchemy `. + +Examples using python-oracledb directly are available in `connection_pool.py +`__, `drcp_pool.py `__, and `session_callback.py +`__. + +.. _sqlalchemy: + +Connecting with SQLAlchemy +========================== + +`SQLAlchemy `__, and libraries such as `Pandas +`__ that internally use SQLAlchemy, can connect +easily in python-oracledb as shown in this section. + +Also, see `SQLAlchemy documentation on connecting `__ and +`SQLAlchemy general documentation about Oracle Database +`__. + +Connecting with SQLAlchemy 2 +---------------------------- + +SQLAlchemy 2 supports python-oracledb directly. + +Standalone Connections in SQLAlchemy +++++++++++++++++++++++++++++++++++++ + +An example of creating a standalone connection in SQLAlchemy 2 is: + +.. 
code-block:: python
+
+    # Using python-oracledb in SQLAlchemy 2
+
+    import os
+    import getpass
+    import oracledb
+    from sqlalchemy import create_engine
+    from sqlalchemy import text
+
+    # Uncomment to use python-oracledb Thick mode
+    # Review the doc for the appropriate parameters
+    #oracledb.init_oracle_client()
+
+    un = os.environ.get("PYTHON_USERNAME")
+    cs = os.environ.get("PYTHON_CONNECTSTRING")
+    pw = getpass.getpass(f'Enter password for {un}@{cs}: ')
+
+    # Note the first argument is different for SQLAlchemy 1.4 and 2
+    engine = create_engine('oracle+oracledb://@',
+                           connect_args={
+                               # Pass any python-oracledb connect() parameters
+                               "user": un,
+                               "password": pw,
+                               "dsn": cs
+                           }
+                           )
+
+    with engine.connect() as connection:
+        print(connection.scalar(text(
+            """select unique client_driver
+               from v$session_connect_info
+               where sid = sys_context('userenv', 'sid')""")))
+
+
+Note that the ``create_engine()`` argument driver declaration uses
+``oracle+oracledb://`` for SQLAlchemy 2 but ``oracle://`` for SQLAlchemy 1.4.
+
+The ``connect_args`` dictionary can use any appropriate
+:meth:`oracledb.connect()` parameter.
+
+.. _sqlalchemy2conpool:
+
+Pooled Connections in SQLAlchemy
+++++++++++++++++++++++++++++++++
+
+Most multi-user applications should use a :ref:`connection pool `.
+The python-oracledb pool is preferred because of its high availability
+support. Some single-user applications may also benefit from these availability
+features.
+
+To use a python-oracledb connection pool in SQLAlchemy:
+
+.. code-block:: python
+
+    # Using python-oracledb in SQLAlchemy 2
+
+    import os, platform
+    import getpass
+    import oracledb
+    from sqlalchemy import create_engine
+    from sqlalchemy import text
+    from sqlalchemy.pool import NullPool
+
+    # Uncomment to use python-oracledb Thick mode
+    # Review the doc for the appropriate parameters
+    #oracledb.init_oracle_client()
+
+    un = os.environ.get("PYTHON_USERNAME")
+    cs = os.environ.get("PYTHON_CONNECTSTRING")
+    pw = getpass.getpass(f'Enter password for {un}@{cs}: ')
+
+    pool = oracledb.create_pool(user=un, password=pw, dsn=cs,
+                                min=4, max=4, increment=0)
+    engine = create_engine("oracle+oracledb://", creator=pool.acquire, poolclass=NullPool)
+
+    with engine.connect() as connection:
+        print(connection.scalar(text("""select unique client_driver
+                                         from v$session_connect_info
+                                         where sid = sys_context('userenv', 'sid')""")))
+
+
+.. _sqlalchemy1:
+
+Connecting with SQLAlchemy 1.4
+------------------------------
+
+SQLAlchemy 1.4 can use python-oracledb with the help of the module name mapping
+code shown in :ref:`boilerplatemapping`. An example is:
+
+.. 
code-block:: python
+
+    # Using python-oracledb in SQLAlchemy 1.4
+
+    import os
+    import getpass
+    import oracledb
+    from sqlalchemy import create_engine
+    from sqlalchemy import text
+
+    import sys
+    oracledb.version = "8.3.0"
+    sys.modules["cx_Oracle"] = oracledb
+
+    # Uncomment to use python-oracledb Thick mode
+    # Review the doc for the appropriate parameters
+    #oracledb.init_oracle_client()
+
+    un = os.environ.get("PYTHON_USERNAME")
+    cs = os.environ.get("PYTHON_CONNECTSTRING")
+    pw = getpass.getpass(f'Enter password for {un}@{cs}: ')
+
+    # Note the first argument is different for SQLAlchemy 1.4 and 2
+    engine = create_engine('oracle://@',
+                           connect_args={
+                               # Pass any python-oracledb connect() parameters
+                               "user": un,
+                               "password": pw,
+                               "dsn": cs
+                           }
+                           )
+
+    with engine.connect() as connection:
+        print(connection.scalar(text(
+            """select unique client_driver
+               from v$session_connect_info
+               where sid = sys_context('userenv', 'sid')""")))
+
+
+Note that the ``create_engine()`` argument driver declaration uses
+``oracle://`` for SQLAlchemy 1.4 but ``oracle+oracledb://`` for SQLAlchemy 2.
+
+The ``connect_args`` dictionary can use any appropriate
+:meth:`oracledb.connect()` parameter.
+
+You can also use python-oracledb connection pooling with SQLAlchemy 1.4. This
+is similar to :ref:`pooled connections in SQLAlchemy 2 `
+but use the appropriate :ref:`name mapping code ` and first
+argument to ``create_engine()``.
+
+.. _django:
+
+Connecting with Django
+======================
+
+Django 5 supports python-oracledb directly. Earlier versions should use
+:ref:`name mapping code `.
+
+See `Django 5.2 documentation for Oracle Database
+`__.
+
+Standalone Connections
+----------------------
+
+To connect in Django 5, an example settings.py file is:
+
+.. code-block:: python
+
+    DATABASES = {
+        "default": {
+            "ENGINE": "django.db.backends.oracle",
+            "NAME": "example.com:1521/orclpdb",
+            "USER": "hr",
+            "PASSWORD": "the-hr-password"
+        }
+    }
+
+Pooled Connections
+------------------
+
+Django 5.2 supports python-oracledb :ref:`connection pools `.
+Most multi-user applications should use a connection pool. The python-oracledb
+pool is preferred because of its high availability support. Some single-user
+applications may also benefit from these availability features.
+
+.. _djangoconpool:
+
+To use a connection pool in Django 5.2, an example settings.py file is:
+
+.. code-block:: python
+
+    DATABASES = {
+        "default": {
+            "ENGINE": "django.db.backends.oracle",
+            "NAME": "example.com:1521/orclpdb",
+            "USER": "hr",
+            "PASSWORD": "the-hr-password",
+            "OPTIONS": {
+                "pool": {
+                    "min": 0,
+                    "max": 4,
+                    "increment": 1,
+                    # Additional python-oracledb pool parameters can be added here
+                }
+            }
+        },
+    }
+
+.. _boilerplatemapping:
+
+Older Versions of Python Frameworks, SQL Generators, and ORMs
+=============================================================
+
+For versions of SQLAlchemy, Django, Superset, other frameworks,
+object-relational mappers (ORMs), and libraries that support the obsolete
+cx_Oracle driver but do not have native support for python-oracledb, you can
+add code like this to use python-oracledb:
+
+.. code-block:: python
+
+    import sys
+    import oracledb
+    oracledb.version = "8.3.0"
+    sys.modules["cx_Oracle"] = oracledb
+
+.. note::
+
+    This must occur before any import of cx_Oracle by your code or the library.
+
+See :ref:`sqlalchemy1` for an example.
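+
+As a further sketch of how this mapping can be combined with an older
+framework (a Django release earlier than 5 is assumed here, and the connection
+details are placeholders), the mapping can be placed at the top of the
+project's ``settings.py`` so that it runs before Django's Oracle backend
+imports cx_Oracle:
+
+.. code-block:: python
+
+    # settings.py for an older Django version (sketch only)
+
+    import sys
+    import oracledb
+
+    # Make the python-oracledb module satisfy "import cx_Oracle"
+    oracledb.version = "8.3.0"
+    sys.modules["cx_Oracle"] = oracledb
+
+    DATABASES = {
+        "default": {
+            "ENGINE": "django.db.backends.oracle",
+            "NAME": "example.com:1521/orclpdb",
+            "USER": "hr",
+            "PASSWORD": "the-hr-password",
+        }
+    }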
+ +To use Thick mode, for example, if you need to connect to Oracle Database +11gR2, add a call to :meth:`oracledb.init_oracle_client()` with the appropriate +parameters for your environment, see :ref:`enablingthick`. + +SQLAlchemy 2 and Django 5 have native support for python-oracledb so the above +code snippet is not needed in those versions. Check your preferred library for +which Oracle Database driver it requires. diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index 3d0061dd..17a2831a 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -1,8 +1,11 @@ .. _batchstmnt: -******************************************* -Executing Batch Statements and Bulk Loading -******************************************* +**************************************** +Batch Statement and Bulk Copy Operations +**************************************** + +Batch Statement Execution +========================= Inserting, updating or deleting multiple rows can be performed efficiently with :meth:`Cursor.executemany()`, making it easy to work with large data sets with @@ -36,7 +39,7 @@ The following tables will be used in the samples that follow: Batch Execution of SQL -====================== +---------------------- The following example inserts five rows into the table ``ParentTable``: @@ -156,7 +159,7 @@ With named bind variables, use named parameters when calling .. _batchplsql: Batch Execution of PL/SQL -========================= +------------------------- Using :meth:`~Cursor.executemany()` can improve performance when PL/SQL functions, procedures, or anonymous blocks need to be called multiple times. @@ -285,7 +288,7 @@ The equivalent code using named binds is: .. _batcherrors: Handling Data Errors -==================== +-------------------- Large datasets may contain some invalid data. When using batch execution as discussed above, the entire batch will be discarded if a single error is @@ -331,7 +334,7 @@ committing. Identifying Affected Rows -========================= +------------------------- When executing a DML statement using :meth:`~Cursor.execute()`, the number of rows affected can be examined by looking at the attribute @@ -361,7 +364,7 @@ is as follows:: DML RETURNING -============= +------------- DML statements like INSERT, UPDATE, DELETE, and MERGE can return values by using the DML RETURNING syntax. A bind variable can be created to accept this data. @@ -396,8 +399,18 @@ arraysize large enough to hold data for each row that is processed. Also, the call to :meth:`Cursor.setinputsizes()` binds this variable immediately so that it does not have to be passed in each row of data. +Bulk Copy Operations +==================== + +Bulk copy operations are facilitated with the use of +:meth:`Cursor.executemany()`, the use of appropriate SQL statements, and the +use of Python modules. + +Also, see :ref:`dataframeformat` and :ref:`Oracle Database Pipelining +`. + Loading CSV Files into Oracle Database -====================================== +-------------------------------------- The :meth:`Cursor.executemany()` method and Python's `csv module `__ can be used to @@ -461,15 +474,38 @@ Depending on data sizes and business requirements, database changes such as temporarily disabling redo logging on the table, or disabling indexes may also be beneficial. -See `load_csv.py `__ for a runnable example. 
+Creating CSV Files from Oracle Database +--------------------------------------- + +Python's `csv module `__ +can be used to efficiently create CSV (Comma Separated Values) files. For +example: + +.. code-block:: python + + cursor.arraysize = 1000 # tune this for large queries + print(f"Writing to {FILE_NAME}") + with open(FILE_NAME, "w") as f: + writer = csv.writer( + f, lineterminator="\n", quoting=csv.QUOTE_NONNUMERIC + ) + cursor.execute("""select rownum, sysdate, mycol from BigTab""") + writer.writerow(info.name for info in cursor.description) + writer.writerows(cursor) + + +See `samples/write_csv.py `__ for a runnable example. -Copying Data between Databases -============================== -The :meth:`Cursor.executemany()` function is useful for efficiently copying -data from one database to another: +Bulk Copying Data between Databases +----------------------------------- + +The :meth:`Cursor.executemany()` function is useful for copying data from one +database to another: .. code-block:: python @@ -498,7 +534,16 @@ Tune the :attr:`~Cursor.arraysize` value according to notes in :ref:`tuningfetch`. Use ``setinputsizes()`` according to `Predefining Memory Areas`_. -Note that it may be preferable to create a `database link -`__ -between the databases and use an INSERT INTO SELECT statement so that data is -not copied to, and back from, the Python process. +Note that when copying data to another table in the same database, it may be +preferable to use INSERT INTO SELECT or CREATE AS SELECT to avoid the overhead +of copying data to, and back from, the Python process. This also avoids any +data type changes. For example to create a complete copy of a table: + +.. code-block:: python + + cursor.execute("create table new_table as select * from old_table") + +Similarly, when copying to a different database, consider creating a `database +link `__ between the databases and using +INSERT INTO SELECT or CREATE AS SELECT. diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst new file mode 100644 index 00000000..95987f45 --- /dev/null +++ b/doc/src/user_guide/dataframes.rst @@ -0,0 +1,386 @@ +.. _dataframeformat: + +************************ +Working with Data Frames +************************ + +Python-oracledb can fetch directly to data frames that expose an Apache Arrow +PyCapsule Interface. This can reduce application memory requirements and allow +zero-copy data interchanges between Python data frame libraries. It is an +efficient way to work with data using Python libraries such as `Apache PyArrow +`__, `Pandas +`__, `Polars `__, `NumPy +`__, `PyTorch `__, or to write files +in `Apache Parquet `__ format. + +.. note:: + + The data frame support in python-oracledb 3.1 is a pre-release and may + change in a future version. + +**Fetching Data Frames** + +The method :meth:`Connection.fetch_df_all()` fetches all rows from a query. +The method :meth:`Connection.fetch_df_batches()` implements an iterator for +fetching batches of rows. The methods return :ref:`OracleDataFrame +` objects. + +For example, to fetch all rows from a query and print some information about +the results: + +.. 
code-block:: python
+
+    sql = "select * from departments"
+    # Adjust arraysize to tune the query fetch performance
+    odf = connection.fetch_df_all(statement=sql, arraysize=100)
+
+    print(odf.column_names())
+    print(f"{odf.num_columns()} columns")
+    print(f"{odf.num_rows()} rows")
+
+With Oracle Database's standard DEPARTMENTS table, this would display::
+
+    ['DEPARTMENT_ID', 'DEPARTMENT_NAME', 'MANAGER_ID', 'LOCATION_ID']
+    4 columns
+    27 rows
+
+**Inserting OracleDataFrames into Oracle Database**
+
+To insert data currently in :ref:`OracleDataFrame ` format
+into Oracle Database requires it to be converted. For example, you could
+convert it into a Pandas DataFrame for insert with the Pandas method
+``to_sql()``. Or convert into a Python list via the PyArrow
+``Table.to_pylist()`` method and then use standard python-oracledb
+functionality to execute a SQL INSERT statement.
+
+.. _dftypemapping:
+
+Data Frame Type Mapping
+-----------------------
+
+Internally, python-oracledb's :ref:`OracleDataFrame `
+support makes use of `Apache nanoarrow `__
+libraries to build data frames.
+
+The following data type mapping occurs from Oracle Database types to the Arrow
+types used in OracleDataFrame objects. Querying any other data types from
+Oracle Database will result in an exception. :ref:`Output type handlers
+` cannot be used to map data types.
+
+.. list-table-with-summary::
+    :header-rows: 1
+    :class: wy-table-responsive
+    :widths: 1 1
+    :width: 100%
+    :align: left
+    :summary: The first column is the Oracle Database type. The second column is the Arrow data type used in the OracleDataFrame object.
+
+    * - Oracle Database Type
+      - Arrow Data Type
+    * - DB_TYPE_NUMBER
+      - DECIMAL128, INT64, or DOUBLE
+    * - DB_TYPE_CHAR
+      - STRING
+    * - DB_TYPE_VARCHAR
+      - STRING
+    * - DB_TYPE_BINARY_FLOAT
+      - FLOAT
+    * - DB_TYPE_BINARY_DOUBLE
+      - DOUBLE
+    * - DB_TYPE_BOOLEAN
+      - BOOLEAN
+    * - DB_TYPE_DATE
+      - TIMESTAMP
+    * - DB_TYPE_TIMESTAMP
+      - TIMESTAMP
+    * - DB_TYPE_TIMESTAMP_LTZ
+      - TIMESTAMP
+    * - DB_TYPE_TIMESTAMP_TZ
+      - TIMESTAMP
+    * - DB_TYPE_CLOB
+      - LARGE_STRING
+    * - DB_TYPE_BLOB
+      - LARGE_BINARY
+    * - DB_TYPE_RAW
+      - BINARY
+
+When converting Oracle Database NUMBERs:
+
+- If the column has been created without a precision and scale, then the Arrow
+  data type will be DOUBLE.
+
+- If :attr:`defaults.fetch_decimals` is set to *True*, then the Arrow data
+  type is DECIMAL128.
+
+- If the column has been created with a scale of *0*, and a precision value
+  that is less than or equal to *18*, then the Arrow data type is INT64.
+
+- In all other cases, the Arrow data type is DOUBLE.
+
+When converting Oracle Database CLOBs and BLOBs:
+
+- The LOBs must be no more than 1 GB in length.
+
+When converting Oracle Database DATEs and TIMESTAMPs:
+
+- For Oracle Database DATE types, the Arrow TIMESTAMP will have a time unit of
+  "seconds".
+
+- For Oracle Database TIMESTAMP types, the Arrow TIMESTAMP time unit depends on
+  the Oracle type's fractional precision as shown in the table below:
+
+  .. list-table-with-summary::
+     :header-rows: 1
+     :class: wy-table-responsive
+     :widths: 1 1
+     :align: left
+     :summary: The first column is the Oracle Database TIMESTAMP-type fractional second precision. The second column is the resulting Arrow TIMESTAMP time unit.
+
+     * - Oracle Database TIMESTAMP fractional second precision range
+       - Arrow TIMESTAMP time unit
+     * - 0
+       - seconds
+     * - 1 - 3
+       - milliseconds
+     * - 4 - 6
+       - microseconds
+     * - 7 - 9
+       - nanoseconds
+
+Arrow TIMESTAMPs will not have timezone data.
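+
+As an illustration of the NUMBER rules above, the following sketch (the table
+``SALES`` and its columns are hypothetical, and ``connection`` is an open
+python-oracledb connection) shows how :attr:`defaults.fetch_decimals` changes
+the Arrow type used for NUMBER columns:
+
+.. code-block:: python
+
+    import oracledb
+
+    # Hypothetical table: SALES(AMOUNT NUMBER(12,2), QTY NUMBER(8))
+    odf = connection.fetch_df_all(statement="select amount, qty from sales")
+    # Using the rules above: AMOUNT (scale 2) maps to DOUBLE and
+    # QTY (scale 0, precision <= 18) maps to INT64
+
+    oracledb.defaults.fetch_decimals = True
+    odf = connection.fetch_df_all(statement="select amount, qty from sales")
+    # With fetch_decimals enabled, the NUMBER columns map to DECIMAL128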
+
+Converting OracleDataFrame to Other Data Frames
+-----------------------------------------------
+
+To do more extensive operations, :ref:`OracleDataFrames `
+can be converted to your chosen library data frame, and then methods of that
+library can be used.
+
+Some examples are shown in the following sections. Other libraries will have
+similar methods.
+
+**Conversion Overview**
+
+To convert :ref:`OracleDataFrame ` to a `PyArrow Table
+`__, use
+`pyarrow.Table.from_arrays()
+`__
+which leverages the Arrow PyCapsule interface.
+
+To convert :ref:`OracleDataFrame ` to a `Pandas DataFrame
+`__,
+use `pyarrow.Table.to_pandas()
+`__.
+
+If you want to use a data frame library other than Pandas or PyArrow, use the
+library's ``from_arrow()`` method to convert a PyArrow Table to the applicable
+data frame, if your library supports this. For example, with `Polars
+`__ use `polars.from_arrow()
+`__.
+
+Lastly, if your data frame library does not support ``from_arrow()``, then use
+``from_dataframe()`` if the library supports it. This can be slower, depending
+on the implementation.
+
+The general recommendation is to use Apache Arrow as much as possible but if
+there are no options, then use ``from_dataframe()``.
+
+Creating PyArrow Tables
++++++++++++++++++++++++
+
+An example that creates and uses a `PyArrow Table
+`__ is:
+
+.. code-block:: python
+
+    # Get an OracleDataFrame
+    # Adjust arraysize to tune the query fetch performance
+    sql = "select id, name from SampleQueryTab order by id"
+    odf = connection.fetch_df_all(statement=sql, arraysize=100)
+
+    # Create a PyArrow table
+    pyarrow_table = pyarrow.Table.from_arrays(
+        arrays=odf.column_arrays(), names=odf.column_names()
+    )
+
+    print("\nNumber of rows and columns:")
+    (r, c) = pyarrow_table.shape
+    print(f"{r} rows, {c} columns")
+
+This makes use of :meth:`OracleDataFrame.column_arrays()` which returns a list
+of :ref:`OracleArrowArray Objects `.
+
+Internally `pyarrow.Table.from_arrays() `__ leverages the Apache
+Arrow PyCapsule interface that :ref:`OracleDataFrame `
+exposes.
+
+See `samples/dataframe_pyarrow.py `__ for a runnable example.
+
+Creating Pandas DataFrames
+++++++++++++++++++++++++++
+
+An example that creates and uses a `Pandas DataFrame `__ is:
+
+.. code-block:: python
+
+    import pandas
+    import pyarrow
+
+    # Get an OracleDataFrame
+    # Adjust arraysize to tune the query fetch performance
+    sql = "select * from mytable where id = :1"
+    myid = 12345  # the bind variable value
+    odf = connection.fetch_df_all(statement=sql, parameters=[myid], arraysize=1000)
+
+    # Get a Pandas DataFrame from the data.
+    df = pyarrow.Table.from_arrays(
+        odf.column_arrays(), names=odf.column_names()
+    ).to_pandas()
+
+    # Perform various Pandas operations on the DataFrame
+    print(df.T)        # transpose
+    print(df.tail(3))  # last three rows
+
+The `to_pandas() `__ method supports arguments like
+``types_mapper=pandas.ArrowDtype`` and ``deduplicate_objects=False``, which may
+be useful for some data sets.
+
+See `samples/dataframe_pandas.py `__ for a runnable example.
+
+Creating Polars DataFrames
+++++++++++++++++++++++++++
+
+An example that creates and uses a `Polars DataFrame
+`__ is:
+
+.. 
code-block:: python
+
+    import pyarrow
+    import polars
+
+    # Get an OracleDataFrame
+    # Adjust arraysize to tune the query fetch performance
+    sql = "select id from SampleQueryTab order by id"
+    odf = connection.fetch_df_all(statement=sql, arraysize=100)
+
+    # Convert to a Polars DataFrame
+    pyarrow_table = pyarrow.Table.from_arrays(
+        odf.column_arrays(), names=odf.column_names()
+    )
+    df = polars.from_arrow(pyarrow_table)
+
+    # Perform various Polars operations on the DataFrame
+    r, c = df.shape
+    print(f"{r} rows, {c} columns")
+    print(df.sum())
+
+See `samples/dataframe_polars.py `__ for a runnable example.
+
+Writing Apache Parquet Files
+++++++++++++++++++++++++++++
+
+To write output in `Apache Parquet `__ file
+format, you can use data frames as an efficient intermediary. Use the
+:meth:`Connection.fetch_df_batches()` iterator and convert to a `PyArrow Table
+`__ that can
+be written by the PyArrow library.
+
+.. code-block:: python
+
+    import pyarrow
+    import pyarrow.parquet as pq
+
+    FILE_NAME = "sample.parquet"
+
+    # Tune the fetch batch size for your query
+    BATCH_SIZE = 10000
+
+    sql = "select * from mytable"
+    pqwriter = None
+    for odf in connection.fetch_df_batches(statement=sql, size=BATCH_SIZE):
+
+        # Get a PyArrow table from the query results
+        pyarrow_table = pyarrow.Table.from_arrays(
+            arrays=odf.column_arrays(), names=odf.column_names()
+        )
+
+        if not pqwriter:
+            pqwriter = pq.ParquetWriter(FILE_NAME, pyarrow_table.schema)
+
+        pqwriter.write_table(pyarrow_table)
+
+    pqwriter.close()
+
+See `samples/dataframe_parquet_write.py `__
+for a runnable example.
+
+The DLPack Protocol
++++++++++++++++++++
+
+The DataFrame format facilitates working with query results as
+tensors. Conversion can be done using the standard `DLPack Protocol
+`__ implemented by PyArrow.
+
+**Using NumPy Arrays**
+
+For example, to convert to `NumPy `__ ``ndarray`` format:
+
+.. code-block:: python
+
+    import pyarrow
+    import numpy
+
+    SQL = "select id from SampleQueryTab order by id"
+
+    # Get an OracleDataFrame
+    # Adjust arraysize to tune the query fetch performance
+    odf = connection.fetch_df_all(statement=SQL, arraysize=100)
+
+    # Convert to an ndarray via the Python DLPack specification
+    pyarrow_array = pyarrow.array(odf.get_column_by_name("ID"))
+    np = numpy.from_dlpack(pyarrow_array)
+
+    # Perform various numpy operations on the ndarray
+
+    print(numpy.sum(np))
+    print(numpy.log10(np))
+
+
+See `samples/dataframe_numpy.py `__ for a runnable example.
+
+Using Torch
++++++++++++
+
+An example of working with data as a `Torch tensor
+`__ is:
+
+.. code-block:: python
+
+    import pyarrow
+    import torch
+
+    SQL = "select id from SampleQueryTab order by id"
+
+    # Get an OracleDataFrame
+    # Adjust arraysize to tune the query fetch performance
+    odf = connection.fetch_df_all(statement=SQL, arraysize=100)
+
+    # Convert to a Torch tensor via the Python DLPack specification
+    pyarrow_array = pyarrow.array(odf.get_column_by_name("ID"))
+    tt = torch.from_dlpack(pyarrow_array)
+
+    # Perform various Torch operations on the tensor
+
+    print(torch.sum(tt))
+    print(torch.log10(tt))
+
+See `samples/dataframe_torch.py `__ for a runnable example.
diff --git a/doc/src/user_guide/ha.rst b/doc/src/user_guide/ha.rst
index 42474b7c..e8ce25d8 100644
--- a/doc/src/user_guide/ha.rst
+++ b/doc/src/user_guide/ha.rst
@@ -1,8 +1,8 @@
 .. 
_highavailability: -********************************************* -Using High Availability with python-oracledb -********************************************* +************************************** +High Availability with python-oracledb +************************************** Applications can use many features for high availability (HA) during planned and unplanned outages in order to: diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index bb30a61b..bc860224 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -102,7 +102,7 @@ More details and options are shown in the later sections: platform-specific instructions below or see :ref:`DPI-1047 `. Alternatively, remove the call to :meth:`~oracledb.init_oracle_client()` and use Thin mode. The features supported by Thin mode can be found in - :ref:`driverdiff`. + :ref:`featuresummary`. - On any operating system, if you set ``lib_dir`` to the library directory of a full database or full client installation (such as from running diff --git a/doc/src/user_guide/plsql_execution.rst b/doc/src/user_guide/plsql_execution.rst index 1129022d..918fc16f 100644 --- a/doc/src/user_guide/plsql_execution.rst +++ b/doc/src/user_guide/plsql_execution.rst @@ -4,14 +4,27 @@ Executing PL/SQL **************** -PL/SQL stored procedures, functions, and anonymous blocks can be called from -python-oracledb. +PL/SQL is a procedural language used for creating user-defined procedures, +functions, and anonymous blocks. PL/SQL program units are compiled and run +inside Oracle Database, letting them efficiently work on data. Procedures and +functions can be stored in the database, encapsulating business logic for reuse +in other applications. + +PL/SQL code can be stored in the database, and executed using python-oracledb. Examples in this chapter show single invocations using :meth:`Cursor.callproc()`, :meth:`Cursor.callfunc()`, or :meth:`Cursor.execute()`. Examples of repeated calls using :meth:`Cursor.executemany()` are shown in :ref:`batchplsql`. +**User-defined procedures in JavaScript** + +You may also be interested in creating user-defined procedures in JavaScript +instead of PL/SQL, see `Introduction to Oracle Database Multilingual Engine for +JavaScript `__. These procedures can be invoked in +python-oracledb the same way PL/SQL is. + .. _plsqlproc: PL/SQL Stored Procedures diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 4cf7582f..4ecc2ed4 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -131,7 +131,7 @@ Rows can be fetched in various ways. The fetch methods return data as tuples. To return results as dictionaries, see :ref:`rowfactories`. -- Data can also be fetched in Arrow data format, see :ref:`dataframeformat`. +- Data can also be fetched in data frame format, see :ref:`dataframeformat`. Closing Cursors --------------- @@ -735,381 +735,6 @@ Performance-sensitive applications should consider using scalar types instead of objects. If you do use objects, avoid calling :meth:`Connection.gettype()` unnecessarily, and avoid objects with large numbers of attributes. -.. _dataframeformat: - -Fetching Data Frames --------------------- - -Python-oracledb can fetch directly to data frames that expose an Apache Arrow -PyCapsule Interface. This can reduce application memory requirements and allow -zero-copy data interchanges between Python data frame libraries. 
It is an -efficient way to work with data using Python libraries such as `Apache PyArrow -`__, `Pandas -`__, `Polars `__, `NumPy -`__, `PyTorch `__, or to write files -in `Apache Parquet `__ format. - -.. note:: - - The data frame support in python-oracledb 3.1 is a pre-release and may - change in a future version. - -The method :meth:`Connection.fetch_df_all()` fetches all rows from a query. -The method :meth:`Connection.fetch_df_batches()` implements an iterator for -fetching batches of rows. The methods return :ref:`OracleDataFrame -` objects. - -For example, to fetch all rows from a query and print some information about -the results: - -.. code-block:: python - - sql = "select * from departments" - # Adjust arraysize to tune the query fetch performance - odf = connection.fetch_df_all(statement=sql, arraysize=100) - - print(odf.column_names()) - print(f"{odf.num_columns()} columns") - print(f"{odf.num_rows()} rows") - -With Oracle Database's standard DEPARTMENTS table, this would display:: - - ['DEPARTMENT_ID', 'DEPARTMENT_NAME', 'MANAGER_ID', 'LOCATION_ID'] - 4 columns - 27 rows - -**Summary of Converting OracleDataFrame to Other Data Frames** - -To do more extensive operations, :ref:`OracleDataFrames ` -can be converted to your chosen library data frame, and then methods of that -library can be used. This section has an overview of how best to do -conversions. Some examples are shown in subsequent sections. - -To convert :ref:`OracleDataFrame ` to a `PyArrow Table -`__, use -`pyarrow.Table.from_arrays() -`__ -which leverages the Arrow PyCapsule interface. - -To convert :ref:`OracleDataFrame ` to a `Pandas DataFrame -`__, -use `pyarrow.Table.to_pandas() -`__. - -If you want to use a data frame library other than Pandas or PyArrow, use the -library's ``from_arrow()`` method to convert a PyArrow Table to the applicable -data frame, if your library supports this. For example, with `Polars -`__ use `polars.from_arrow() -`__. - -Lastly, if your data frame library does not support ``from_arrow()``, then use -``from_dataframe()`` if the library supports it. This can be slower, depending -on the implementation. - -The general recommendation is to use Apache Arrow as much as possible but if -there are no options, then use ``from_dataframe()``. - -**Data Frame Type Mapping** - -Internally, python-oracledb's :ref:`OracleDataFrame ` -support makes use of `Apache nanoarrow `__ -libraries to build data frames. - -The following data type mapping occurs from Oracle Database types to the Arrow -types used in OracleDataFrame objects. Querying any other data types from -Oracle Database will result in an exception. :ref:`Output type handlers -` cannot be used to map data types. - -.. list-table-with-summary:: - :header-rows: 1 - :class: wy-table-responsive - :widths: 1 1 - :width: 100% - :align: left - :summary: The first column is the Oracle Database type. The second column is the Arrow data type used in the OracleDataFrame object. 
- - * - Oracle Database Type - - Arrow Data Type - * - DB_TYPE_NUMBER - - DECIMAL128, INT64, or DOUBLE - * - DB_TYPE_CHAR - - STRING - * - DB_TYPE_VARCHAR - - STRING - * - DB_TYPE_BINARY_FLOAT - - FLOAT - * - DB_TYPE_BINARY_DOUBLE - - DOUBLE - * - DB_TYPE_BOOLEAN - - BOOLEAN - * - DB_TYPE_DATE - - TIMESTAMP - * - DB_TYPE_TIMESTAMP - - TIMESTAMP - * - DB_TYPE_TIMESTAMP_LTZ - - TIMESTAMP - * - DB_TYPE_TIMESTAMP_TZ - - TIMESTAMP - * - DB_TYPE_CLOB - - LARGE_STRING - * - DB_TYPE_BLOB - - LARGE_BINARY - * - DB_TYPE_RAW - - BINARY - -When converting Oracle Database NUMBERs: - -- If the column has been created without a precision and scale, then the Arrow - data type will be DOUBLE. - -- If :attr:`defaults.fetch_decimals` is set to *True*, then the Arrow data - type is DECIMAL128. - -- If the column has been created with a scale of *0*, and a precision value - that is less than or equal to *18*, then the Arrow data type is INT64. - -- In all other cases, the Arrow data type is DOUBLE. - -When converting Oracle Database CLOBs and BLOBs: - -- The LOBs must be no more than 1 GB in length. - -When converting Oracle Database DATEs and TIMESTAMPs: - -- For Oracle Database DATE types, the Arrow TIMESTAMP will have a time unit of - "seconds". - -- For Oracle Database TIMESTAMP types, the Arrow TIMESTAMP time unit depends on - the Oracle type's fractional precision as shown in the table below: - - .. list-table-with-summary:: - :header-rows: 1 - :class: wy-table-responsive - :widths: 1 1 - :align: left - :summary: The first column is the Oracle Database TIMESTAMP-type fractional second precision. The second column is the resulting Arrow TIMESTAMP time unit. - - * - Oracle Database TIMESTAMP fractional second precision range - - Arrow TIMESTAMP time unit - * - 0 - - seconds - * - 1 - 3 - - milliseconds - * - 4 - 6 - - microconds - * - 7 - 9 - - nanoseconds - -Arrow TIMESTAMPs will not have timezone data. - -**Inserting OracleDataFrames into Oracle Database** - -To insert data currently in :ref:`OracleDataFrame ` format -into Oracle Database requires it to be converted. For example, you could -convert it into a Pandas DataFrame for insert with the Pandas method -``to_sql()``. Or convert into a Python list via the PyArrow -``Table.to_pylist()`` method and then use standard python-oracledb -functionality to execute a SQL INSERT statement. - -Creating PyArrow Tables -+++++++++++++++++++++++ - -An example that creates and uses a `PyArrow Table -`__ is: - -.. code-block:: python - - # Get an OracleDataFrame - # Adjust arraysize to tune the query fetch performance - sql = "select id, name from SampleQueryTab order by id" - odf = connection.fetch_df_all(statement=sql, arraysize=100) - - # Create a PyArrow table - pyarrow_table = pyarrow.Table.from_arrays( - arrays=odf.column_arrays(), names=odf.column_names() - ) - - print("\nNumber of rows and columns:") - (r, c) = pyarrow_table.shape - print(f"{r} rows, {c} columns") - -This makes use of :meth:`OracleDataFrame.column_arrays()` which returns a list -of :ref:`OracleArrowArray Objects `. - -Internally `pyarrow.Table.from_arrays() `__ leverages the Apache -Arrow PyCapsule interface that :ref:`OracleDataFrame ` -exposes. - -See `samples/dataframe_pyarrow.py `__ for a runnable example. - -Creating Pandas DataFrames -++++++++++++++++++++++++++ - -An example that creates and uses a `Pandas DataFrame `__ is: - -.. 
code-block:: python - - import pandas - import pyarrow - - # Get an OracleDataFrame - # Adjust arraysize to tune the query fetch performance - sql = "select * from mytable where id = :1" - myid = 12345 # the bind variable value - odf = connection.fetch_df_all(statement=sql, parameters=[myid], arraysize=1000) - - # Get a Pandas DataFrame from the data. - df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() - - # Perform various Pandas operations on the DataFrame - print(df.T) # transform - print(df.tail(3)) # last three rows - -The `to_pandas() `__ method supports arguments like -``types_mapper=pandas.ArrowDtype`` and ``deduplicate_objects=False``, which may -be useful for some data sets. - -See `samples/dataframe_pandas.py `__ for a runnable example. - -Creating Polars DataFrames -++++++++++++++++++++++++++ - -An example that creates and uses a `Polars DataFrame -`__ is: - -.. code-block:: python - - import pyarrow - import polars - - # Get an OracleDataFrame - # Adjust arraysize to tune the query fetch performance - sql = "select id from SampleQueryTab order by id" - odf = connection.fetch_df_all(statement=sql, arraysize=100) - - # Convert to a Polars DataFrame - pyarrow_table = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ) - df = polars.from_arrow(pyarrow_table) - - # Perform various Polars operations on the DataFrame - r, c = df.shape - print(f"{r} rows, {c} columns") - print(p.sum()) - -See `samples/dataframe_polars.py `__ for a runnable example. - -Writing Apache Parquet Files -++++++++++++++++++++++++++++ - -To write output in `Apache Parquet `__ file -format, you can use data frames as an efficient intermediary. Use the -:meth:`Connection.fetch_df_batches()` iterator and convert to a `PyArrow Table -`__ that can -be written by the PyArrow library. - -.. code-block:: python - - import pyarrow - import pyarrow.parquet as pq - - FILE_NAME = "sample.parquet" - - # Tune the fetch batch size for your query - BATCH_SIZE = 10000 - - sql = "select * from mytable" - pqwriter = None - for odf in connection.fetch_df_batches(statement=sql, size=BATCH_SIZE): - - # Get a PyArrow table from the query results - pyarrow_table = pyarrow.Table.from_arrays( - arrays=odf.column_arrays(), names=odf.column_names() - ) - - if not pqwriter: - pqwriter = pq.ParquetWriter(FILE_NAME, pyarrow_table.schema) - - pqwriter.write_table(pyarrow_table) - - pqwriter.close() - -See `samples/dataframe_parquet_write.py `__ -for a runnable example. - -The DLPack Protocol -+++++++++++++++++++ - -The DataFrame format facilitates working with query results as -tensors. Conversion can be done using the standard `DLPack Protocol -`__ implemented by PyArrow. - -**Using NumPy Arrays** - -For example, to convert to `NumPy `__ ``ndarray`` format: - -.. code-block:: python - - import pyarrow - import numpy - - SQL = "select id from SampleQueryTab order by id" - - # Get an OracleDataFrame - # Adjust arraysize to tune the query fetch performance - odf = connection.fetch_df_all(statement=SQL, arraysize=100) - - # Convert to an ndarray via the Python DLPack specification - pyarrow_array = pyarrow.array(odf.get_column_by_name("ID")) - np = numpy.from_dlpack(pyarrow_array) - - # Perform various numpy operations on the ndarray - - print(numpy.sum(np)) - print(numpy.log10(np)) - - -See `samples/dataframe_numpy.py `__ for a runnable example. - -**Using Torch** - -An example of working with data as a `Torch tensor -`__ is: - -.. 
code-block:: python - - import pyarrow - import torch - - SQL = "select id from SampleQueryTab order by id" - - # Get an OracleDataFrame - # Adjust arraysize to tune the query fetch performance - odf = connection.fetch_df_all(statement=SQL, arraysize=100) - - # Convert to a Torch tensor via the Python DLPack specification - pyarrow_array = pyarrow.array(odf.get_column_by_name("ID")) - tt = torch.from_dlpack(pyarrow_array) - - # Perform various Torch operations on the tensor - - print(torch.sum(tt)) - print(torch.log10(tt)) - -See `samples/dataframe_torch.py `__ for a runnable example. - .. _rowlimit: Limiting Rows From c74575caa4921db17c0e41ea0fb0b52b2579b9a0 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 21 May 2025 11:13:05 -0600 Subject: [PATCH 080/239] Use GitHub ARM Linux runner for builds (PR #496) and create source package independently in order to avoid package corruption (#495). --- .github/workflows/build.yaml | 43 +++++++++++++++++++++++++++--------- doc/src/release_notes.rst | 2 ++ 2 files changed, 35 insertions(+), 10 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 3628a3ca..a0db8524 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -8,15 +8,43 @@ on: jobs: + build_source_package: + name: Build source package + runs-on: ubuntu-latest + steps: + + - uses: actions/checkout@v4 + with: + submodules: true + + - uses: actions/setup-python@v5 + with: + cache: 'pip' + python-version: 3.13 + + - name: Ensure build package is present + run: python -m pip install setuptools + + - name: Build wheel for Python ${{ matrix.python-version }} + run: python setup.py sdist + + - name: Upload the artifact + uses: actions/upload-artifact@v4 + with: + name: python-oracledb-source + path: dist + build_linux_wheels: name: Build wheels for Linux - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} strategy: matrix: include: - - platform: amd64 + - os: ubuntu-24.04 + platform: amd64 container: quay.io/pypa/manylinux_2_28_x86_64 - - platform: arm64 + - os: ubuntu-24.04-arm + platform: arm64 container: quay.io/pypa/manylinux_2_28_aarch64 steps: @@ -25,11 +53,6 @@ jobs: with: submodules: true - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 - with: - platforms: ${{ matrix.platform }} - - name: Generate script run: | echo export PYO_COMPILE_ARGS=-g0 > build.sh @@ -57,7 +80,7 @@ jobs: uses: actions/upload-artifact@v4 with: name: Linux_${{ matrix.platform }}_wheels - path: dist + path: dist/*.whl build_non_linux_wheels: name: Build wheels for ${{ matrix.os }} Python-${{ matrix.python-version}}-${{ matrix.arch }} @@ -97,7 +120,7 @@ jobs: combine_artifacts: name: Combine artifacts into single artifact - needs: [build_linux_wheels, build_non_linux_wheels] + needs: [build_source_package, build_linux_wheels, build_non_linux_wheels] runs-on: ubuntu-latest steps: diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 0f4487be..ecb506ec 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -30,6 +30,8 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai + (`PR 496 `__). #) Improved the test suite and documentation. From f7292352d4e28166653d34b0fe7c95a4c0bbcdb4 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 28 May 2025 09:42:28 -0600 Subject: [PATCH 081/239] Add release note for second fix. 
--- doc/src/release_notes.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ecb506ec..ac7d43bf 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -32,6 +32,8 @@ Common Changes #) Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai (`PR 496 `__). +#) Fix bug with GitHub build action merge artifacts step + (`issue 495 `__). #) Improved the test suite and documentation. From 2701ab214ef5eed1b14876a7f48bcdcf64d57392 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 28 May 2025 09:43:15 -0600 Subject: [PATCH 082/239] Fixed a bug resulting in error "DPI-1046: parameter value cannot be a NULL pointer" when attributes "condition", "consumername", "correlation", "msgid", "transformation" and "exceptionq" are set on the dequeue options, enqueue options and message properties objects. --- doc/src/release_notes.rst | 7 ++++++ src/oracledb/impl/thick/queue.pyx | 16 ++++++------ tests/test_7800_aq_raw.py | 41 +++++++++++++++++++++++++++++++ 3 files changed, 56 insertions(+), 8 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ac7d43bf..a52f1d25 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -27,6 +27,13 @@ Thin Mode Changes Thick Mode Changes ++++++++++++++++++ +#) Fixed a bug resulting in error ``DPI-1046: parameter value cannot be a NULL + pointer`` when the attributes :attr:`DeqOptions.condition`, + :attr:`DeqOptions.consumername`, :attr:`DeqOptions.correlation`, + :attr:`DeqOptions.msgid`, :attr:`DeqOptions.transformation`, + :attr:`EnqOptions.transformation`, :attr:`MessageProperties.correlation`, + or :attr:`MessageProperties.exceptionq` are set to ``None``. + Common Changes ++++++++++++++ diff --git a/src/oracledb/impl/thick/queue.pyx b/src/oracledb/impl/thick/queue.pyx index 2a969558..dd34e431 100644 --- a/src/oracledb/impl/thick/queue.pyx +++ b/src/oracledb/impl/thick/queue.pyx @@ -263,7 +263,7 @@ cdef class ThickDeqOptionsImpl(BaseDeqOptionsImpl): Internal method for setting the condition. """ cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiDeqOptions_setCondition(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() @@ -272,7 +272,7 @@ cdef class ThickDeqOptionsImpl(BaseDeqOptionsImpl): Internal method for setting the consumer name. """ cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiDeqOptions_setConsumerName(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() @@ -282,7 +282,7 @@ cdef class ThickDeqOptionsImpl(BaseDeqOptionsImpl): Internal method for setting the correlation. """ cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiDeqOptions_setCorrelation(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() @@ -305,7 +305,7 @@ cdef class ThickDeqOptionsImpl(BaseDeqOptionsImpl): Internal method for setting the message id. """ cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiDeqOptions_setMsgId(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() @@ -321,7 +321,7 @@ cdef class ThickDeqOptionsImpl(BaseDeqOptionsImpl): Internal method for setting the transformation. 
""" cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiDeqOptions_setTransformation(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() @@ -383,7 +383,7 @@ cdef class ThickEnqOptionsImpl(BaseEnqOptionsImpl): Internal method for setting the transformation. """ cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiEnqOptions_setTransformation(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() @@ -543,7 +543,7 @@ cdef class ThickMsgPropsImpl(BaseMsgPropsImpl): Internal method for setting the correlation. """ cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiMsgProps_setCorrelation(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() @@ -559,7 +559,7 @@ cdef class ThickMsgPropsImpl(BaseMsgPropsImpl): Internal method for setting the exception queue. """ cdef StringBuffer buf = StringBuffer() - buf.set_value(value) + buf.set_value(value or "") if dpiMsgProps_setExceptionQ(self._handle, buf.ptr, buf.length) < 0: _raise_from_odpi() diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index 04bbc572..2795a703 100644 --- a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -421,6 +421,47 @@ def test_7825(self): with self.assertRaisesFullCode("DPY-2062"): queue.enqone(props) + def test_7826(self): + "7826 - test providing null values on queue dequeue options" + queue = self.conn.queue("TEST_RAW_QUEUE") + str_value = "test - 7826" + bytes_value = str_value.encode() + for name in [ + "condition", + "consumername", + "correlation", + "msgid", + "transformation", + ]: + value = bytes_value if name == "msgid" else str_value + with self.subTest(name=name): + setattr(queue.deqoptions, name, value) + self.assertEqual(getattr(queue.deqoptions, name), value) + setattr(queue.deqoptions, name, None) + self.assertIsNone(getattr(queue.deqoptions, name)) + + def test_7827(self): + "7827 - test providing null values on queue enqueue options" + queue = self.conn.queue("TEST_RAW_QUEUE") + value = "test - 7827" + for name in ["transformation"]: + with self.subTest(name=name): + setattr(queue.enqoptions, name, value) + self.assertEqual(getattr(queue.enqoptions, name), value) + setattr(queue.enqoptions, name, None) + self.assertIsNone(getattr(queue.enqoptions, name)) + + def test_7828(self): + "7828 - test providing null correlation on message properties" + props = self.conn.msgproperties() + value = "test - 7828" + for name in ["correlation", "exceptionq"]: + with self.subTest(name=name): + setattr(props, name, value) + self.assertEqual(getattr(props, name), value) + setattr(props, name, None) + self.assertIsNone(getattr(props, name)) + if __name__ == "__main__": test_env.run_test_cases() From 41adca10154855bad33ec23f50400edf962dfb3f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 28 May 2025 09:44:48 -0600 Subject: [PATCH 083/239] Refactor: perform calculation of whether a connection is healthy in one place. 
--- src/oracledb/impl/thin/connection.pyx | 3 +-- src/oracledb/impl/thin/protocol.pyx | 26 +++++++++++++------------- 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index f8859388..d57e18a9 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -269,8 +269,7 @@ cdef class BaseThinConnImpl(BaseConnImpl): return self._internal_name def get_is_healthy(self): - return self._protocol._transport is not None \ - and self._protocol._read_buf._pending_error_num == 0 + return self._protocol._get_is_healthy() def get_ltxid(self): return self._ltxid or b'' diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index faabb36c..01669912 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -88,6 +88,17 @@ cdef class BaseProtocol: self._write_buf._transport = None transport.disconnect() + cdef bint _get_is_healthy(self): + """ + Returns a boolean indicating if the connection is considered healthy. + """ + # if a read failed on the socket earlier, clear the socket + if self._read_buf._transport is None \ + or self._read_buf._transport._transport is None: + self._transport = None + return self._transport is not None \ + and self._read_buf._pending_error_num == 0 + cdef int _post_connect(self, BaseThinConnImpl conn_impl, AuthMessage auth_message) except -1: """" @@ -155,15 +166,9 @@ cdef class Protocol(BaseProtocol): with self._request_lock: - # if a read failed on the socket earlier, clear the socket - if self._read_buf._transport is None \ - or self._read_buf._transport._transport is None: - self._transport = None - # if the session was marked as needing to be closed, force it # closed immediately (unless it was already closed) - if self._read_buf._pending_error_num != 0 \ - and self._transport is not None: + if not self._get_is_healthy() and self._transport is not None: self._force_close() # rollback any open transaction and release the DRCP session, if @@ -529,14 +534,9 @@ cdef class BaseAsyncProtocol(BaseProtocol): async with self._request_lock: - # if a read failed on the socket earlier, clear the socket - if self._read_buf._transport is None: - self._transport = None - # if the session was marked as needing to be closed, force it # closed immediately (unless it was already closed) - if self._read_buf._pending_error_num != 0 \ - and self._transport is not None: + if not self._get_is_healthy() and self._transport is not None: self._force_close() # rollback any open transaction and release the DRCP session, if From 2012b3d2458af4d177c252e9d21d88bbec305127 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 28 May 2025 09:45:10 -0600 Subject: [PATCH 084/239] Added Instance Principal authentication support when using OCI Cloud Native Authentication. --- doc/src/release_notes.rst | 2 + doc/src/user_guide/connection_handling.rst | 19 +++++-- src/oracledb/plugins/oci_tokens.py | 59 +++++++++++----------- 3 files changed, 47 insertions(+), 33 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index a52f1d25..942672d8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -37,6 +37,8 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Added Instance Principal authentication support when using + :ref:`OCI Cloud Native Authentication `. #) Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai (`PR 496 `__). 
#) Fix bug with GitHub build action merge artifacts step diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index f64bda28..df7e0fd1 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -4528,11 +4528,15 @@ the following table. - Description - Required or Optional * - ``auth_type`` - - The authentication type. The value should be the string "ConfigFileAuthentication" or "SimpleAuthentication". + - The authentication type. The value should be the string "ConfigFileAuthentication", "SimpleAuthentication", or "InstancePrincipal". - In Configuration File Authentication, the location of the configuration file containing the necessary information must be provided. By default, this file is located at */home/username/.oci/config*, unless a custom location is specified during OCI IAM setup. + With Configuration File Authentication, the location of a configuration file containing the necessary information must be provided. By default, this file is located at */home/username/.oci/config*, unless a custom location is specified during OCI IAM setup. - In Simple Authentication, the individual configuration parameters can be provided at runtime. + With Simple Authentication, the individual configuration parameters can be provided at runtime. + + With Instance Principal Authentication, OCI compute instances can be authorized to access services on Oracle Cloud such as Oracle Autonomous Database. Python-oracledb applications running on such a compute instance are automatically authenticated, eliminating the need to provide database user credentials. This authentication method will only work on compute instances where internal network endpoints are reachable. For more information on OCI compute instances, see `OCI Compute Instances `__, `Creating a Compute Instance `__, and `Calling Services from a Compute Instance `__. + + See `OCI SDK Authentication Methods `__ for more information. - Required * - ``user`` - The Oracle Cloud Identifier (OCID) of the user invoking the API. For example, *ocid1.user.oc1..*. @@ -4571,6 +4575,15 @@ the following table. This parameter can be specified when the value of the ``auth_type`` key is "ConfigFileAuthentication". - Optional + * - ``scope`` + - This parameter identifies all databases in the cloud tenancy of the authenticated user. The default value is *urn:oracle:db::id::**. + + A scope that authorizes access to all databases within a compartment has the format *urn:oracle:db::id::*, for example, urn:oracle:db::id::ocid1.compartment.oc1..xxxxxxxx. + + A scope that authorizes access to a single database within a compartment has the format *urn:oracle:db::id::::*, for example, urn:oracle:db::id::ocid1.compartment.oc1..xxxxxx::ocid1.autonomousdatabase.oc1.phx.xxxxxx. + + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication", "ConfigFileAuthentication", or "InstancePrincipal". 
+ - Optional All keys and values other than ``auth_type`` are used by the `OCI SDK `__ API diff --git a/src/oracledb/plugins/oci_tokens.py b/src/oracledb/plugins/oci_tokens.py index f7b31220..50723de2 100644 --- a/src/oracledb/plugins/oci_tokens.py +++ b/src/oracledb/plugins/oci_tokens.py @@ -44,6 +44,8 @@ def generate_token(token_auth_config, refresh=False): return _config_file_based_authentication(token_auth_config) elif auth_type == "simpleauthentication": return _simple_authentication(token_auth_config) + elif auth_type == "instanceprincipal": + return _instance_principal_authentication(token_auth_config) else: raise ValueError( f"Unrecognized auth_type authentication method {user_auth_type}" @@ -86,6 +88,23 @@ def _get_key_pair(): return {"private_key": private_key_pem, "public_key": public_key_pem} +def _generate_access_token(client, token_auth_config): + """ + Token generation logic used by authentication methods. + """ + key_pair = _get_key_pair() + scope = token_auth_config.get("scope", "urn:oracle:db::id::*") + + details = oci.identity_data_plane.models.GenerateScopedAccessTokenDetails( + scope=scope, public_key=key_pair["public_key"] + ) + response = client.generate_scoped_access_token( + generate_scoped_access_token_details=details + ) + + return (response.data.token, key_pair["private_key"]) + + def _config_file_based_authentication(token_auth_config): """ Config file base authentication implementation: config parameters @@ -103,21 +122,7 @@ def _config_file_based_authentication(token_auth_config): # Initialize service client with default config file client = oci.identity_data_plane.DataplaneClient(config) - key_pair = _get_key_pair() - - response = client.generate_scoped_access_token( - generate_scoped_access_token_details=oci.identity_data_plane.models.GenerateScopedAccessTokenDetails( - scope="urn:oracle:db::id::*", public_key=key_pair["public_key"] - ) - ) - - # access_token is a tuple holding token and private key - access_token = ( - response.data.token, - key_pair["private_key"], - ) - - return access_token + return _generate_access_token(client, token_auth_config) def _simple_authentication(token_auth_config): @@ -134,24 +139,18 @@ def _simple_authentication(token_auth_config): } oci.config.validate_config(config) - # Initialize service client with given configuration client = oci.identity_data_plane.DataplaneClient(config) + return _generate_access_token(client, token_auth_config) - key_pair = _get_key_pair() - response = client.generate_scoped_access_token( - generate_scoped_access_token_details=oci.identity_data_plane.models.GenerateScopedAccessTokenDetails( - scope="urn:oracle:db::id::*", public_key=key_pair["public_key"] - ) - ) - - # access_token is a tuple holding token and private key - access_token = ( - response.data.token, - key_pair["private_key"], - ) - - return access_token +def _instance_principal_authentication(token_auth_config): + """ + Instance principal authentication: for compute instances + with dynamic group access. + """ + signer = oci.auth.signers.InstancePrincipalsSecurityTokenSigner() + client = oci.identity_data_plane.DataplaneClient(config={}, signer=signer) + return _generate_access_token(client, token_auth_config) def oci_token_hook(params: oracledb.ConnectParams): From 0a46264f269b88645d061f038fa942b444017255 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 28 May 2025 09:45:48 -0600 Subject: [PATCH 085/239] Added support for recipient lists in Oracle Avanced Queueing in thin mode. 
--- doc/src/release_notes.rst | 2 ++ doc/src/user_guide/aq.rst | 4 +-- src/oracledb/aq.py | 3 -- src/oracledb/impl/thin/messages/aq_array.pyx | 6 +++- src/oracledb/impl/thin/messages/aq_base.pyx | 30 +++++++++++++++----- src/oracledb/impl/thin/messages/aq_enq.pyx | 10 +++++-- src/oracledb/impl/thin/packet.pyx | 27 +++++++++--------- tests/test_2700_aq_dbobject.py | 3 -- 8 files changed, 53 insertions(+), 32 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 942672d8..1f5550fb 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,6 +17,8 @@ oracledb 3.2.0 (TBD) Thin Mode Changes +++++++++++++++++ +#) Added support for :ref:`recipient lists ` in Oracle Advanced + Queuing. #) Emulate support for :meth:`Queue.deqmany()` with JSON payloads when using Oracle Database 21c by internally calling :meth:`Queue.deqone()` as many times as needed. diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index c99fa7d2..123de2c5 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -51,8 +51,8 @@ types which are detailed below. **Usage Notes** For classic queues, the use of :data:`oracledb.ENQ_IMMEDIATE` with bulk -enqueuing, JMS payloads, and :ref:`Recipient Lists ` are only -supported in python-oracledb :ref:`Thick mode `. +enqueuing, and JMS payloads are only supported in python-oracledb +:ref:`Thick mode `. Transactional Event Queues do not support :attr:`EnqOptions.transformation`, :attr:`DeqOptions.transformation`, or :ref:`Recipient Lists `. diff --git a/src/oracledb/aq.py b/src/oracledb/aq.py index 2e290d35..d67e41e8 100644 --- a/src/oracledb/aq.py +++ b/src/oracledb/aq.py @@ -68,9 +68,6 @@ def _verify_message(self, message: "MessageProperties") -> None: else: if not isinstance(message.payload, (str, bytes)): errors._raise_err(errors.ERR_PAYLOAD_CANNOT_BE_ENQUEUED) - if self.connection.thin: - if message.recipients: - errors._raise_not_supported("specifying AQ message recipients") @property def connection(self) -> "connection_module.Connection": diff --git a/src/oracledb/impl/thin/messages/aq_array.pyx b/src/oracledb/impl/thin/messages/aq_array.pyx index 2dc511ad..ebad7f69 100644 --- a/src/oracledb/impl/thin/messages/aq_array.pyx +++ b/src/oracledb/impl/thin/messages/aq_array.pyx @@ -159,7 +159,11 @@ cdef class AqArrayMessage(AqBaseMessage): buf.write_uint8(TNS_MSG_TYPE_ROW_DATA) buf.write_ub4(flags) # aqi flags self._write_msg_props(buf, props_impl) - buf.write_ub4(0) # num recipients + if props_impl.recipients is None: + buf.write_ub4(0) # num recipients + else: + buf.write_ub4(3 * len(props_impl.recipients)) + self._write_recipients(buf, props_impl) buf.write_sb4(self.enq_options_impl.visibility) buf.write_ub4(0) # relative msg id buf.write_sb4(0) # seq deviation diff --git a/src/oracledb/impl/thin/messages/aq_base.pyx b/src/oracledb/impl/thin/messages/aq_base.pyx index af22f8ba..4a432e8e 100644 --- a/src/oracledb/impl/thin/messages/aq_base.pyx +++ b/src/oracledb/impl/thin/messages/aq_base.pyx @@ -179,13 +179,13 @@ cdef class AqBaseMessage(Message): self._write_value_with_length(buf, props_impl.enq_txn_id) buf.write_ub4(4) # number of extensions buf.write_uint8(0x0e) # unknown extra byte - buf.write_extension_values(None, None, TNS_AQ_EXT_KEYWORD_AGENT_NAME) - buf.write_extension_values(None, None, - TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS) - buf.write_extension_values(None, b'\x00', - TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL) - buf.write_extension_values(None, None, - TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID) + 
buf.write_keyword_value_pair(None, None, TNS_AQ_EXT_KEYWORD_AGENT_NAME) + buf.write_keyword_value_pair(None, None, + TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS) + buf.write_keyword_value_pair(None, b'\x00', + TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL) + buf.write_keyword_value_pair(None, None, + TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID) buf.write_ub4(0) # user property buf.write_ub4(0) # cscn buf.write_ub4(0) # dscn @@ -206,6 +206,22 @@ cdef class AqBaseMessage(Message): else: buf.write_bytes(props_impl.payload_obj) + + cdef int _write_recipients(self, WriteBuffer buf, + ThinMsgPropsImpl props_impl) except -1: + """ + Write the recipient list of the message property object to the + buffer. + """ + cdef: + uint16_t index = 0 + str recipient + for recipient in props_impl.recipients: + buf.write_keyword_value_pair(recipient, None, index) + buf.write_keyword_value_pair(None, None, index + 1) + buf.write_keyword_value_pair(None, b'\x00', index + 2) + index += 3 + cdef int _write_value_with_length(self, WriteBuffer buf, object value) except -1: """ diff --git a/src/oracledb/impl/thin/messages/aq_enq.pyx b/src/oracledb/impl/thin/messages/aq_enq.pyx index ed226766..0f0b5163 100644 --- a/src/oracledb/impl/thin/messages/aq_enq.pyx +++ b/src/oracledb/impl/thin/messages/aq_enq.pyx @@ -64,8 +64,12 @@ cdef class AqEnqMessage(AqBaseMessage): buf.write_uint8(1) # queue name (pointer) buf.write_ub4(len(queue_name_bytes)) # queue name length self._write_msg_props(buf, self.props_impl) - buf.write_uint8(0) # recipients (pointer) - buf.write_ub4(0) # number of key/value pairs + if self.props_impl.recipients is None: + buf.write_uint8(0) # recipients (pointer) + buf.write_ub4(0) # number of key/value pairs + else: + buf.write_uint8(1) # recipients (pointer) + buf.write_ub4(3 * len(self.props_impl.recipients)) buf.write_ub4(self.enq_options_impl.visibility) buf.write_uint8(0) # relative message id buf.write_ub4(0) # relative message length @@ -115,5 +119,7 @@ cdef class AqEnqMessage(AqBaseMessage): buf.write_uint8(0) # JSON payload (pointer) buf.write_bytes_with_length(queue_name_bytes) + if self.props_impl.recipients is not None: + self._write_recipients(buf, self.props_impl) buf.write_bytes(self.queue_impl.payload_toid) self._write_payload(buf, self.props_impl) diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index 4a2e907d..76bcd8e9 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -895,24 +895,23 @@ cdef class WriteBuffer(Buffer): self.write_ub4(obj_impl.flags) # flags self.write_bytes_with_length(packed_data) - cdef int write_extension_values(self, str txt_value, bytes bytes_value, - uint16_t keyword) except -1: + cdef int write_keyword_value_pair(self, str text_value, bytes binary_value, + uint16_t keyword) except -1: """ - Writes extension's text value, binary value and keyword entry to the - buffer. + Writes a keyword/value pair (text and binary values) to the buffer. 
""" - cdef bytes txt_value_bytes - if txt_value is None: - self.write_uint8(0) + cdef bytes text_value_bytes + if text_value is None: + self.write_ub4(0) else: - txt_value_bytes = txt_value.encode() - self.write_ub4(len(txt_value_bytes)) - self.write_bytes_with_length(txt_value_bytes) - if bytes_value is None: - self.write_uint8(0) + text_value_bytes = text_value.encode() + self.write_ub4(len(text_value_bytes)) + self.write_bytes_with_length(text_value_bytes) + if binary_value is None: + self.write_ub4(0) else: - self.write_ub4(len(bytes_value)) - self.write_bytes_with_length(bytes_value) + self.write_ub4(len(binary_value)) + self.write_bytes_with_length(binary_value) self.write_ub2(keyword) cdef int write_lob_with_length(self, BaseThinLobImpl lob_impl) except -1: diff --git a/tests/test_2700_aq_dbobject.py b/tests/test_2700_aq_dbobject.py index de47fdd5..d6400d51 100644 --- a/tests/test_2700_aq_dbobject.py +++ b/tests/test_2700_aq_dbobject.py @@ -410,9 +410,6 @@ def test_2718(self): props = queue.deqone() self.assertEqual(props.msgid, actual_msgid) - @unittest.skipIf( - test_env.get_is_thin(), "Thin mode doesn't support recipient list yet" - ) def test_2719(self): "2719 - verify use of recipients property" books_type = self.conn.gettype(self.book_type_name) From 4e4a3da2c97a524dfba638874e1a61058725d0e2 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 28 May 2025 09:46:21 -0600 Subject: [PATCH 086/239] Fixed bug resulting in explicit request boundaries to aid planned database maintenance not being sent when using connection pools with asyncio; pooled connections that are no longer needed are now closed normally if possible instead of simply having the socket disconnected (#393). --- doc/src/release_notes.rst | 5 + src/oracledb/base_impl.pxd | 1 + src/oracledb/connection.py | 45 +++- src/oracledb/impl/base/connect_params.pyx | 11 + src/oracledb/impl/base/pool.pyx | 3 + src/oracledb/impl/thick/pool.pyx | 6 + src/oracledb/impl/thin/connection.pyx | 40 +-- src/oracledb/impl/thin/messages/auth.pyx | 2 +- src/oracledb/impl/thin/messages/base.pyx | 2 +- src/oracledb/impl/thin/pool.pyx | 148 +++++------ src/oracledb/impl/thin/protocol.pyx | 291 +++++++++++++--------- src/oracledb/pool.py | 7 +- utils/templates/connection.py | 45 +++- utils/templates/pool.py | 7 +- 14 files changed, 374 insertions(+), 239 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 1f5550fb..afb8d992 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -25,6 +25,11 @@ Thin Mode Changes #) Fixed bug when connecting with asyncio using the parameter ``https_proxy``. #) Fixed regression when connecting where only the host specified by the ``https_proxy`` parameter can successfully perform name resolution. +#) Fixed bug resulting in explicit request boundaries to aid planned database + maintenance not being sent when using connection pools with asyncio. +#) Pooled connections that are no longer needed are now closed normally if + possible instead of simply having the socket disconnected + (`issue 393 `__). 
Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 44daa751..31d3fcc5 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -586,6 +586,7 @@ cdef class ConnectParamsImpl: cdef str _get_token(self) cdef object _get_public_instance(self) cdef object _get_token_expires(self, str token) + cdef bint _get_uses_drcp(self) cdef str _get_wallet_password(self) cdef int _parse_connect_string(self, str connect_string) except -1 cdef int _set_access_token(self, object val, int error_num) except -1 diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 56424ab8..01c31edb 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -610,6 +610,7 @@ def __init__( """ super().__init__() + self._pool = pool # determine if thin mode is being used with driver_mode.get_manager() as mode_mgr: @@ -664,8 +665,7 @@ def __init__( def __del__(self): if self._impl is not None: - self._impl.close(in_del=True) - self._impl = None + self._close(in_del=True) def __enter__(self): self._verify_connected() @@ -673,8 +673,21 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, exc_tb): if self._impl is not None: - self._impl.close(in_del=True) - self._impl = None + self._close() + + def _close(self, in_del=False): + """ + Closes the connection and makes it unusable for further operations. An + Error exception will be raised if any operation is attempted with this + connection after this method completes successfully. + """ + if self._pool is not None: + pool_impl = self._pool._impl + if pool_impl is not None: + pool_impl.return_connection(self._impl, in_del) + else: + self._impl.close(in_del) + self._impl = None def _create_queue(self, impl): """ @@ -743,8 +756,7 @@ def close(self) -> None: connection after this method completes successfully. """ self._verify_connected() - self._impl.close() - self._impl = None + self._close() def commit(self) -> None: """ @@ -1574,6 +1586,7 @@ def __init__( directly but only indirectly through async_connect(). """ super().__init__() + self._pool = pool self._connect_coroutine = self._connect(dsn, pool, params, kwargs) def __await__(self): @@ -1590,8 +1603,21 @@ async def __aenter__(self): async def __aexit__(self, *exc_info): if self._impl is not None: - await self._impl.close() - self._impl = None + await self._close() + + async def _close(self, in_del=False): + """ + Closes the connection and makes it unusable for further operations. An + Error exception will be raised if any operation is attempted with this + connection after this method completes successfully. + """ + if self._pool is not None: + pool_impl = self._pool._impl + if pool_impl is not None: + await pool_impl.return_connection(self._impl, in_del) + else: + await self._impl.close(in_del) + self._impl = None async def _connect(self, dsn, pool, params, kwargs): """ @@ -1718,8 +1744,7 @@ async def close(self) -> None: Closes the connection. 
""" self._verify_connected() - await self._impl.close() - self._impl = None + await self._close() async def commit(self) -> None: """ diff --git a/src/oracledb/impl/base/connect_params.pyx b/src/oracledb/impl/base/connect_params.pyx index 1d0a72ce..1ad6b32f 100644 --- a/src/oracledb/impl/base/connect_params.pyx +++ b/src/oracledb/impl/base/connect_params.pyx @@ -294,6 +294,17 @@ cdef class ConnectParamsImpl: header = json.loads(base64.b64decode(header_seg)) return datetime.datetime.utcfromtimestamp(header["exp"]) + cdef bint _get_uses_drcp(self): + """ + Returns a boolean indicating if any of the descriptions associated with + the parameters make use of DRCP. + """ + cdef Description description + for description in self.description_list.children: + if description.server_type == "pooled": + return True + return False + cdef str _get_wallet_password(self): """ Returns the wallet password, after removing the obfuscation. diff --git a/src/oracledb/impl/base/pool.pyx b/src/oracledb/impl/base/pool.pyx index 0ead6d92..702d01b1 100644 --- a/src/oracledb/impl/base/pool.pyx +++ b/src/oracledb/impl/base/pool.pyx @@ -89,6 +89,9 @@ cdef class BasePoolImpl: def reconfigure(self, uint32_t min, uint32_t max, uint32_t increment): errors._raise_not_supported("reconfiguring a pool") + def return_connection(self, BaseConnImpl conn_impl, in_del=False): + errors._raise_not_supported("returning a connection to a pool") + def set_getmode(self, uint8_t value): errors._raise_not_supported("setting the 'get' mode of a pool") diff --git a/src/oracledb/impl/thick/pool.pyx b/src/oracledb/impl/thick/pool.pyx index 9fbd87a5..b068b180 100644 --- a/src/oracledb/impl/thick/pool.pyx +++ b/src/oracledb/impl/thick/pool.pyx @@ -320,6 +320,12 @@ cdef class ThickPoolImpl(BasePoolImpl): self.max = max self.increment = increment + def return_connection(self, ThickConnImpl conn_impl, bint in_del=False): + """ + Internal method for returning a connection to the pool. 
+ """ + conn_impl.close(in_del) + def set_getmode(self, uint8_t value): """ Internal method for setting the method by which connections are diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index d57e18a9..ad3ce333 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -46,7 +46,6 @@ cdef class BaseThinConnImpl(BaseConnImpl): bint _client_identifier_modified str _module bint _module_modified - BaseThinPoolImpl _pool bytes _ltxid str _current_schema bint _current_schema_modified @@ -70,6 +69,7 @@ cdef class BaseThinConnImpl(BaseConnImpl): int _dbobject_type_cache_num bytes _combo_key str _connection_id + bint _is_pooled bint _is_pool_extra bytes _transaction_context uint8_t pipeline_mode @@ -174,11 +174,6 @@ cdef class BaseThinConnImpl(BaseConnImpl): message.context = self._transaction_context return message - cdef int _force_close(self) except -1: - self._pool = None - self._clear_dbobject_type_cache() - self._protocol._force_close() - cdef Statement _get_statement(self, str sql = None, bint cache_statement = False): """ @@ -198,11 +193,8 @@ cdef class BaseThinConnImpl(BaseConnImpl): self._drcp_enabled = description.server_type == "pooled" if self._cclass is None: self._cclass = description.cclass - if self._cclass is None and self._pool is not None \ - and self._drcp_enabled: - gen_uuid = uuid.uuid4() - self._cclass = f"DPY:{base64.b64encode(gen_uuid.bytes).decode()}" - params._default_description.cclass = self._cclass + if self._cclass is None: + self._cclass = params._default_description.cclass cdef int _post_connect_phase_two(self, ConnectParamsImpl params) except -1: """ @@ -350,6 +342,13 @@ cdef class ThinConnImpl(BaseThinConnImpl): BaseThinConnImpl.__init__(self, dsn, params) self._protocol = Protocol() + cdef int _close(self): + """ + Internal method for closing the connection. + """ + cdef Protocol protocol = self._protocol + protocol._close(self) + cdef int _connect_with_address(self, Address address, Description description, ConnectParamsImpl params, @@ -441,9 +440,12 @@ cdef class ThinConnImpl(BaseThinConnImpl): protocol._process_single_message(message) def close(self, bint in_del=False): + """ + Internal method for closing the connection to the database. 
+ """ cdef Protocol protocol = self._protocol try: - protocol._close(self) + protocol.close(self, in_del) except (ssl.SSLError, exceptions.DatabaseError): pass @@ -455,17 +457,17 @@ cdef class ThinConnImpl(BaseThinConnImpl): protocol._process_single_message(message) def connect(self, ConnectParamsImpl params): - # specify that binding a string to a LOB value is possible in thin - # mode without the use of asyncio (will be removed in a future release) - self._allow_bind_str_to_lob = True - + cdef Protocol protocol = self._protocol try: self._pre_connect(params) self._connect_with_params(params) self._post_connect_phase_two(params) except: - self._force_close() + protocol._disconnect() raise + # specify that binding a string to a LOB value is possible in thin + # mode without the use of asyncio (will be removed in a future release) + self._allow_bind_str_to_lob = True def create_queue_impl(self): return ThinQueueImpl.__new__(ThinQueueImpl) @@ -954,7 +956,7 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): """ cdef BaseAsyncProtocol protocol = self._protocol try: - await protocol._close(self) + await protocol.close(self, in_del) except (ssl.SSLError, exceptions.DatabaseError): pass @@ -979,7 +981,7 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): await self._connect_with_params(params) self._post_connect_phase_two(params) except: - self._force_close() + protocol._disconnect() raise def create_queue_impl(self): diff --git a/src/oracledb/impl/thin/messages/auth.pyx b/src/oracledb/impl/thin/messages/auth.pyx index 0c957c61..cf48925f 100644 --- a/src/oracledb/impl/thin/messages/auth.pyx +++ b/src/oracledb/impl/thin/messages/auth.pyx @@ -271,7 +271,7 @@ cdef class AuthMessage(Message): # to a pool if description.purity == PURITY_DEFAULT \ and self.conn_impl._drcp_enabled: - if self.conn_impl._pool is None: + if self.conn_impl._is_pooled: self.purity = PURITY_NEW else: self.purity = PURITY_SELF diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 83c05075..5853b210 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -72,7 +72,7 @@ cdef class Message: code=self.error_info.num, offset=self.error_info.pos) if error.is_session_dead: - self.conn_impl._protocol._force_close() + self.conn_impl._protocol._disconnect() raise error.exc_type(error) cdef int _initialize(self, BaseThinConnImpl conn_impl) except -1: diff --git a/src/oracledb/impl/thin/pool.pyx b/src/oracledb/impl/thin/pool.pyx index beee9c53..4377540d 100644 --- a/src/oracledb/impl/thin/pool.pyx +++ b/src/oracledb/impl/thin/pool.pyx @@ -79,6 +79,10 @@ cdef class BaseThinPoolImpl(BasePoolImpl): self._requests = [] self._num_to_create = self.min self._auth_mode = AUTH_MODE_DEFAULT + if params._default_description.cclass is None \ + and params._get_uses_drcp(): + params._default_description.cclass = \ + f"DPY:{base64.b64encode(uuid.uuid4().bytes).decode()}" self._open = True cdef int _add_request(self, PooledConnRequest request) except -1: @@ -102,30 +106,36 @@ cdef class BaseThinPoolImpl(BasePoolImpl): and self._open_count > self.min: self._start_timeout_task() - cdef int _close_helper(self, bint force) except -1: + cdef int _close_all_connections(self) except -1: """ - Helper function that closes all of the connections in the pool. + Closes all connections in the pool and marks the pool as closed. The + background task is notified to perform the work of closing the + connections, if applicable. 
""" cdef BaseThinConnImpl conn_impl - - # if force parameter is not True and busy connections exist in the - # pool or there are outstanding requests, raise an exception - if not force and (self.get_busy_count() > 0 or self._requests): - errors._raise_err(errors.ERR_POOL_HAS_BUSY_CONNECTIONS) - - # close all connections in the pool; this is done by simply adding - # to the list of connections that require closing and then notifying - # the background task to perform the work self._open = False for lst in (self._free_used_conn_impls, self._free_new_conn_impls, self._busy_conn_impls): self._conn_impls_to_drop.extend(lst) for conn_impl in lst: - conn_impl._pool = None + conn_impl._is_pooled = False lst.clear() self._notify_bg_task() + cdef int _close_helper(self, bint force) except -1: + """ + Helper function that closes all of the connections in the pool. + """ + + # if force parameter is not True and busy connections exist in the + # pool or there are outstanding requests, raise an exception + if not force and (self.get_busy_count() > 0 or self._requests): + errors._raise_err(errors.ERR_POOL_HAS_BUSY_CONNECTIONS) + + # close all connections in the pool + self._close_all_connections() + cdef PooledConnRequest _create_request(self, ConnectParamsImpl params): """ Returns a poooled connection request suitable for establishing a @@ -150,24 +160,12 @@ cdef class BaseThinPoolImpl(BasePoolImpl): Helper method which adds a connection to the list of connections to be closed and notifies the background task. """ - conn_impl._pool = None + conn_impl._is_pooled = False if conn_impl._protocol._transport is not None: self._conn_impls_to_drop.append(conn_impl) self._notify_bg_task() self._ensure_min_connections() - cdef int _drop_conn_impls_helper(self, list conn_impls_to_drop) except -1: - """ - Helper method which drops the requested list of connections. Exceptions - that take place while attempting to close the connection are ignored. 
- """ - cdef BaseThinConnImpl conn_impl - for conn_impl in conn_impls_to_drop: - try: - conn_impl._force_close() - except: - pass - cdef int _ensure_min_connections(self) except -1: """ Ensure that the minimum number of connections in the pool is @@ -214,7 +212,7 @@ cdef class BaseThinPoolImpl(BasePoolImpl): if conn_impl is None: self._num_to_create = 0 elif not self._open: - conn_impl._force_close() + conn_impl._protocol._disconnect() else: self._open_count += 1 if self._num_to_create > 0: @@ -274,7 +272,7 @@ cdef class BaseThinPoolImpl(BasePoolImpl): conn_impl._cclass = params._default_description.cclass else: conn_impl._cclass = self.connect_params._default_description.cclass - conn_impl._pool = self + conn_impl._is_pooled = True conn_impl._time_created = time.monotonic() conn_impl._time_returned = conn_impl._time_created @@ -302,15 +300,9 @@ cdef class BaseThinPoolImpl(BasePoolImpl): """ cdef: bint is_open = conn_impl._protocol._transport is not None - BaseThinDbObjectTypeCache type_cache PooledConnRequest request double tstamp - int cache_num self._busy_conn_impls.remove(conn_impl) - if conn_impl._dbobject_type_cache_num > 0: - cache_num = conn_impl._dbobject_type_cache_num - type_cache = get_dbobject_type_cache(cache_num) - type_cache._clear_cursors() if not is_open: self._open_count -= 1 self._ensure_min_connections() @@ -324,7 +316,6 @@ cdef class BaseThinPoolImpl(BasePoolImpl): self._drop_conn_impl(conn_impl) is_open = False if is_open: - conn_impl.warning = None conn_impl._time_returned = time.monotonic() if self._max_lifetime_session != 0: tstamp = conn_impl._time_created + self._max_lifetime_session @@ -351,20 +342,12 @@ cdef class BaseThinPoolImpl(BasePoolImpl): cdef int _shutdown(self) except -1: """ Called when the main interpreter has completed and only shutdown code - is being executed. All connections in the pool are marked as non-pooled - and the pool itself terminated. + is being executed. """ cdef BaseThinConnImpl conn_impl with self._condition: - self._open = False - for lst in (self._free_used_conn_impls, - self._free_new_conn_impls, - self._busy_conn_impls): - for conn_impl in lst: - conn_impl._pool = None - lst.clear() self._requests.clear() - self._notify_bg_task() + self._close_all_connections() self._bg_task.join() cdef int _start_timeout_task(self) except -1: @@ -440,6 +423,19 @@ cdef class BaseThinPoolImpl(BasePoolImpl): return self._wait_timeout return 0 + def return_connection(self, BaseThinConnImpl conn_impl, bint in_del=False): + """ + Internal method for returning a connection to the pool. 
+ """ + cdef Protocol protocol = conn_impl._protocol + with self._condition: + try: + protocol._end_request(conn_impl) + except: + if not in_del: + raise + self._return_connection_helper(conn_impl) + def set_getmode(self, uint32_t value): """ Internal method for setting the method by which connections are @@ -507,8 +503,7 @@ cdef class ThinPoolImpl(BaseThinPoolImpl): """ cdef: PooledConnRequest request = None - BaseThinConnImpl conn_impl - list conn_impls_to_drop + ThinConnImpl conn_impl uint32_t num_to_create # add to the list of pools that require closing @@ -542,11 +537,13 @@ cdef class ThinPoolImpl(BaseThinPoolImpl): # check to see if there are any connections to drop with self._condition: - conn_impls_to_drop = self._conn_impls_to_drop - self._conn_impls_to_drop = [] - if conn_impls_to_drop: - self._drop_conn_impls_helper(conn_impls_to_drop) - continue + if self._conn_impls_to_drop: + conn_impl = self._conn_impls_to_drop.pop() + try: + conn_impl._close() + except: + pass + continue # otherwise, nothing to do yet, wait for notifications! with self._bg_task_condition: @@ -590,24 +587,17 @@ cdef class ThinPoolImpl(BaseThinPoolImpl): request.conn_impl.ping() request.conn_impl.set_call_timeout(0) except exceptions.Error: - request.conn_impl._force_close() + request.conn_impl._protocol.disconnect() request.conn_impl = None else: conn_impl = self._create_conn_impl(request.params) if request.conn_impl is not None: - request.conn_impl._force_close() + self._drop_conn_impl(request.conn_impl) request.conn_impl = conn_impl request.conn_impl._is_pool_extra = request.is_extra except Exception as e: request.exception = e - cdef int _return_connection(self, BaseThinConnImpl conn_impl) except -1: - """ - Returns the connection to the pool. - """ - with self._condition: - self._return_connection_helper(conn_impl) - cdef int _start_timeout_task(self) except -1: """ Starts the task for checking timeouts. The timeout value is increased @@ -737,11 +727,13 @@ cdef class AsyncThinPoolImpl(BaseThinPoolImpl): # check to see if there are any connections to drop async with self._condition: - conn_impls_to_drop = self._conn_impls_to_drop - self._conn_impls_to_drop = [] - if conn_impls_to_drop: - self._drop_conn_impls_helper(conn_impls_to_drop) - continue + if self._conn_impls_to_drop: + conn_impl = self._conn_impls_to_drop.pop() + try: + await conn_impl._protocol._close(conn_impl) + except: + pass + continue # otherwise, nothing to do yet, wait for notifications! async with self._bg_task_condition: @@ -785,24 +777,17 @@ cdef class AsyncThinPoolImpl(BaseThinPoolImpl): await request.conn_impl.ping() request.conn_impl.set_call_timeout(0) except exceptions.Error: - request.conn_impl._force_close() + request.conn_impl._protocol._disconnect() request.conn_impl = None else: conn_impl = await self._create_conn_impl(request.params) if request.conn_impl is not None: - request.conn_impl._force_close() + self._drop_conn_impl(request.conn_impl) request.conn_impl = conn_impl request.conn_impl._is_pool_extra = request.is_extra except Exception as e: request.exception = e - async def _return_connection(self, BaseThinConnImpl conn_impl): - """ - Returns the connection to the pool. - """ - async with self._condition: - self._return_connection_helper(conn_impl) - cdef int _start_timeout_task(self) except -1: """ Starts the task for checking timeouts. 
The timeout value is increased @@ -857,6 +842,21 @@ cdef class AsyncThinPoolImpl(BaseThinPoolImpl): self._drop_conn_impl(conn_impl) self._condition.notify() + async def return_connection(self, AsyncThinConnImpl conn_impl, + bint in_del=False): + """ + Internal method for returning a connection to the pool. + """ + cdef BaseAsyncProtocol protocol + async with self._condition: + try: + protocol = conn_impl._protocol + await protocol._end_request(conn_impl) + except: + if not in_del: + raise + self._return_connection_helper(conn_impl) + @cython.freelist(20) cdef class PooledConnRequest: diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index 01669912..ceefcd6b 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -68,18 +68,18 @@ cdef class BaseProtocol: buf = WriteBuffer(self._transport, self._caps) self._send_marker(buf, TNS_MARKER_TYPE_INTERRUPT) - cdef int _final_close(self, WriteBuffer buf) except -1: + cdef int _check_is_healthy(self) except -1: """ - Send the final close packet to the server and close the socket. + Checks to see if the connection is healthy. If a read failed on the + transport earlier, the transport is marked closed. """ - buf.start_request(TNS_PACKET_TYPE_DATA, 0, TNS_DATA_FLAGS_EOF) - buf.end_request() - self._force_close() + if self._read_buf._transport is None \ + or self._read_buf._transport._transport is None: + self._transport = None - cdef int _force_close(self) except -1: + cdef int _disconnect(self) except -1: """ - Forces the connection closed. This is used when an unrecoverable error - has taken place. + Disconnects from the transport. """ cdef Transport transport = self._transport if transport is not None: @@ -92,13 +92,80 @@ cdef class BaseProtocol: """ Returns a boolean indicating if the connection is considered healthy. """ - # if a read failed on the socket earlier, clear the socket - if self._read_buf._transport is None \ - or self._read_buf._transport._transport is None: - self._transport = None + self._check_is_healthy() return self._transport is not None \ and self._read_buf._pending_error_num == 0 + cdef Message _on_close_phase_one(self, BaseThinConnImpl conn_impl): + """ + Called when the connection to the database is being closed. The + database object type cache will be destroyed. If the connection is not + a DRCP session and is still open, a logoff message will be returned for + processing. + """ + conn_impl._clear_dbobject_type_cache() + self._check_is_healthy() + if self._transport is not None and not conn_impl._drcp_enabled: + return conn_impl._create_message(LogoffMessage) + + cdef int _on_close_phase_two(self, BaseThinConnImpl conn_impl): + """ + Called when the connection to the database is being closed. The final + close will be sent if the connection is still open. + """ + cdef WriteBuffer buf = self._write_buf + self._check_is_healthy() + if self._transport is not None: + buf.start_request(TNS_PACKET_TYPE_DATA, 0, TNS_DATA_FLAGS_EOF) + buf.end_request() + + cdef Message _on_request_end_phase_one(self, BaseThinConnImpl conn_impl): + """ + Called when a request to the database is ending. A check is made to see + if there is an open transaction and, if one exists, a rollback message + is returned. If a request is actually in progress, a rollback message + will always be returned in order to ensure that the database is aware + of the request being ended. 
+ """ + cdef: + BaseThinDbObjectTypeCache type_cache + int cache_num + if conn_impl._dbobject_type_cache_num > 0: + cache_num = conn_impl._dbobject_type_cache_num + type_cache = get_dbobject_type_cache(cache_num) + type_cache._clear_cursors() + self._check_is_healthy() + if self._transport is not None: + if conn_impl._in_request and conn_impl._session_state_desired != 0: + conn_impl._in_request = False + if self._txn_in_progress or conn_impl._in_request: + if conn_impl._in_request: + conn_impl._session_state_desired = \ + TNS_SESSION_STATE_REQUEST_END + conn_impl._in_request = False + if conn_impl._transaction_context is not None: + conn_impl._transaction_context = None + return conn_impl._create_tpc_rollback_message() + else: + return conn_impl._create_message(RollbackMessage) + + cdef int _on_request_end_phase_two(self, + BaseThinConnImpl conn_impl) except -1: + """ + Called when a request to the database is ending. A check is made to see + if DRCP is in use, and if it is, a release takes place. Any warnings + that were set are cleared. + """ + cdef SessionReleaseMessage message + self._check_is_healthy() + if self._transport is not None and conn_impl._drcp_enabled: + message = conn_impl._create_message(SessionReleaseMessage) + if not conn_impl._is_pooled: + message.release_mode = DRCP_DEAUTHENTICATE + message.send(self._write_buf) + conn_impl._drcp_establish_session = True + conn_impl.warning = None + cdef int _post_connect(self, BaseThinConnImpl conn_impl, AuthMessage auth_message) except -1: """" @@ -113,17 +180,6 @@ cdef class BaseProtocol: self._read_buf._pending_error_num = 0 self._in_connect = False - cdef int _release_drcp_session(self, BaseThinConnImpl conn_impl, - uint32_t release_mode) except -1: - """ - Release the session back to DRCP. Standalone sessions are marked for - deauthentication. - """ - cdef SessionReleaseMessage message - message = conn_impl._create_message(SessionReleaseMessage) - message.release_mode = release_mode - message.send(self._write_buf) - cdef int _send_marker(self, WriteBuffer buf, uint8_t marker_type): """ Sends a marker of the specified type to the server. @@ -151,61 +207,18 @@ cdef class Protocol(BaseProtocol): BaseProtocol.__init__(self) self._request_lock = threading.Lock() - cdef int _close(self, ThinConnImpl conn_impl) except -1: + cdef int _close(self, BaseThinConnImpl conn_impl) except -1: """ - Closes the connection. If a transaction is in progress it will be - rolled back. DRCP sessions will be released. For standalone - connections, the session will be logged off. For pooled connections, - the connection will be returned to the pool for subsequent use. + Closes the connection to the database. 
""" - cdef: - uint32_t release_mode = DRCP_DEAUTHENTICATE \ - if conn_impl._pool is None else 0 - ThinPoolImpl pool_impl - Message message - - with self._request_lock: - - # if the session was marked as needing to be closed, force it - # closed immediately (unless it was already closed) - if not self._get_is_healthy() and self._transport is not None: - self._force_close() - - # rollback any open transaction and release the DRCP session, if - # applicable; end the request, if one was started (and that - # information made it to the database) - if self._transport is not None: - if conn_impl._in_request \ - and conn_impl._session_state_desired != 0: - conn_impl._in_request = False - if self._txn_in_progress or conn_impl._in_request: - if conn_impl._in_request: - conn_impl._session_state_desired = \ - TNS_SESSION_STATE_REQUEST_END - conn_impl._in_request = False - if conn_impl._transaction_context is not None: - message = conn_impl._create_tpc_rollback_message() - else: - message = conn_impl._create_message(RollbackMessage) - self._process_message(message) - conn_impl._transaction_context = None - if conn_impl._drcp_enabled: - self._release_drcp_session(conn_impl, release_mode) - conn_impl._drcp_establish_session = True - - # if the connection is part of a pool, return it to the pool - if conn_impl._pool is not None: - pool_impl = conn_impl._pool - return pool_impl._return_connection(conn_impl) - - # otherwise, destroy the database object type cache, send the - # logoff message and final close packet - conn_impl._clear_dbobject_type_cache() - if self._transport is not None: - if not conn_impl._drcp_enabled: - message = conn_impl._create_message(LogoffMessage) - self._process_message(message) - self._final_close(self._write_buf) + cdef Message message + try: + message = self._on_close_phase_one(conn_impl) + if message is not None: + self._process_message(message) + self._on_close_phase_two(conn_impl) + finally: + self._disconnect() cdef int _connect_phase_one(self, ThinConnImpl conn_impl, ConnectParamsImpl params, @@ -389,6 +402,22 @@ cdef class Protocol(BaseProtocol): self._transport.create_ssl_context(params, description, address) self._transport.negotiate_tls(sock, address, description) + cdef int _end_request(self, BaseThinConnImpl conn_impl) except -1: + """ + Ends the request on the database. This rolls back any open transaction + and releases any DRCP session, if applicable. + """ + cdef Message message + message = self._on_request_end_phase_one(conn_impl) + if message is not None: + self._process_message(message) + self._on_request_end_phase_two(conn_impl) + if not self._get_is_healthy(): + try: + self._close(conn_impl) + except: + pass + cdef int _process_message(self, Message message) except -1: cdef uint32_t timeout = message.conn_impl._call_timeout try: @@ -404,7 +433,7 @@ cdef class Protocol(BaseProtocol): errors._raise_err(errors.ERR_CALL_TIMEOUT_EXCEEDED, timeout=timeout) except socket.timeout: - self._force_close() + self._disconnect() errors._raise_err(errors.ERR_CONNECTION_CLOSED, "socket timed out while recovering from " \ "previous socket timeout") @@ -508,6 +537,20 @@ cdef class Protocol(BaseProtocol): packet_type = self._read_buf._current_packet.packet_type self._break_in_progress = False + cdef int close(self, ThinConnImpl conn_impl, bint in_del) except -1: + """ + Closes the connection. If a transaction is in progress it will be + rolled back. DRCP sessions will be released. For standalone + connections, the session will be logged off. 
+ """ + with self._request_lock: + try: + self._end_request(conn_impl) + self._close(conn_impl) + except: + if not in_del: + raise + cdef class BaseAsyncProtocol(BaseProtocol): @@ -519,49 +562,18 @@ cdef class BaseAsyncProtocol(BaseProtocol): self._request_lock = asyncio.Lock() self._transport._is_async = True - async def _close(self, AsyncThinConnImpl conn_impl): + async def _close(self, BaseThinConnImpl conn_impl): """ - Closes the connection. If a transaction is in progress it will be - rolled back. DRCP sessions will be released. For standalone - connections, the session will be logged off. For pooled connections, - the connection will be returned to the pool for subsequent use. + Closes the connection to the database. """ - cdef: - uint32_t release_mode = DRCP_DEAUTHENTICATE \ - if conn_impl._pool is None else 0 - AsyncThinPoolImpl pool_impl - Message message - - async with self._request_lock: - - # if the session was marked as needing to be closed, force it - # closed immediately (unless it was already closed) - if not self._get_is_healthy() and self._transport is not None: - self._force_close() - - # rollback any open transaction and release the DRCP session, if - # applicable - if self._transport is not None: - if self._txn_in_progress: - message = conn_impl._create_message(RollbackMessage) - await self._process_message(message) - if conn_impl._drcp_enabled: - self._release_drcp_session(conn_impl, release_mode) - conn_impl._drcp_establish_session = True - - # if the connection is part of a pool, return it to the pool - if conn_impl._pool is not None: - pool_impl = conn_impl._pool - return await pool_impl._return_connection(conn_impl) - - # otherwise, destroy the database object type cache, send the - # logoff message and final close packet - conn_impl._clear_dbobject_type_cache() - if self._transport is not None: - if not conn_impl._drcp_enabled: - message = conn_impl._create_message(LogoffMessage) - await self._process_message(message) - self._final_close(self._write_buf) + cdef Message message + try: + message = self._on_close_phase_one(conn_impl) + if message is not None: + await self._process_message(message) + self._on_close_phase_two(conn_impl) + finally: + self._disconnect() async def _connect_phase_one(self, AsyncThinConnImpl conn_impl, @@ -749,6 +761,22 @@ cdef class BaseAsyncProtocol(BaseProtocol): return await self._transport.negotiate_tls_async(self, address, description) + async def _end_request(self, BaseThinConnImpl conn_impl): + """ + Ends the request on the database. This rolls back any open transaction + and releases any DRCP session, if applicable. + """ + cdef Message message + message = self._on_request_end_phase_one(conn_impl) + if message is not None: + await self._process_message(message) + self._on_request_end_phase_two(conn_impl) + if not self._get_is_healthy(): + try: + await self._close(conn_impl) + except: + pass + async def _process_message(self, Message message): """ Sends a message to the server and processes its response. 
@@ -764,7 +792,7 @@ cdef class BaseAsyncProtocol(BaseProtocol): coroutine = self._process_timeout_helper(message, timeout) await asyncio.wait_for(coroutine, timeout_obj) except asyncio.TimeoutError: - self._force_close() + self._disconnect() errors._raise_err(errors.ERR_CONNECTION_CLOSED, "socket timed out while recovering from " \ "previous socket timeout") @@ -790,7 +818,7 @@ cdef class BaseAsyncProtocol(BaseProtocol): coroutine = self._receive_packet(message) await asyncio.wait_for(coroutine, timeout_obj) except asyncio.TimeoutError: - self._force_close() + self._disconnect() errors._raise_err(errors.ERR_CONNECTION_CLOSED, "socket timed out while awaiting break " \ "response from server") @@ -893,6 +921,29 @@ cdef class BaseAsyncProtocol(BaseProtocol): packet_type = self._read_buf._current_packet.packet_type self._break_in_progress = False + async def close(self, AsyncThinConnImpl conn_impl, bint in_del): + """ + Closes the connection. If a transaction is in progress it will be + rolled back. DRCP sessions will be released. For standalone + connections, the session will be logged off. + """ + async with self._request_lock: + try: + await self._end_request(conn_impl) + await self._close(conn_impl) + except: + if not in_del: + raise + + # otherwise, destroy the database object type cache, send the + # logoff message and final close packet + conn_impl._clear_dbobject_type_cache() + if self._transport is not None: + if not conn_impl._drcp_enabled: + message = conn_impl._create_message(LogoffMessage) + await self._process_message(message) + self._final_close(self._write_buf) + def connection_lost(self, exc): """ Called when a connection has been lost. The presence of an exception diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index 216a2ad8..baba8834 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -487,9 +487,11 @@ def release( if not isinstance(connection, connection_module.Connection): message = "connection must be an instance of oracledb.Connection" raise TypeError(message) + connection._verify_connected() if tag is not None: connection.tag = tag - connection.close() + self._impl.return_connection(connection._impl) + connection._impl = None def reconfigure( self, @@ -1096,7 +1098,8 @@ async def release( raise TypeError(message) if tag is not None: connection.tag = tag - await connection.close() + await self._impl.return_connection(connection._impl) + connection._impl = None def _async_pool_factory( diff --git a/utils/templates/connection.py b/utils/templates/connection.py index 7b2f7ae6..fd2599cd 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -608,6 +608,7 @@ def __init__( """ super().__init__() + self._pool = pool # determine if thin mode is being used with driver_mode.get_manager() as mode_mgr: @@ -662,8 +663,7 @@ def __init__( def __del__(self): if self._impl is not None: - self._impl.close(in_del=True) - self._impl = None + self._close(in_del=True) def __enter__(self): self._verify_connected() @@ -671,8 +671,21 @@ def __enter__(self): def __exit__(self, exc_type, exc_value, exc_tb): if self._impl is not None: - self._impl.close(in_del=True) - self._impl = None + self._close() + + def _close(self, in_del=False): + """ + Closes the connection and makes it unusable for further operations. An + Error exception will be raised if any operation is attempted with this + connection after this method completes successfully. 
+ """ + if self._pool is not None: + pool_impl = self._pool._impl + if pool_impl is not None: + pool_impl.return_connection(self._impl, in_del) + else: + self._impl.close(in_del) + self._impl = None def _create_queue(self, impl): """ @@ -741,8 +754,7 @@ def close(self) -> None: connection after this method completes successfully. """ self._verify_connected() - self._impl.close() - self._impl = None + self._close() def commit(self) -> None: """ @@ -1324,6 +1336,7 @@ def __init__( directly but only indirectly through async_connect(). """ super().__init__() + self._pool = pool self._connect_coroutine = self._connect(dsn, pool, params, kwargs) def __await__(self): @@ -1340,8 +1353,21 @@ async def __aenter__(self): async def __aexit__(self, *exc_info): if self._impl is not None: - await self._impl.close() - self._impl = None + await self._close() + + async def _close(self, in_del=False): + """ + Closes the connection and makes it unusable for further operations. An + Error exception will be raised if any operation is attempted with this + connection after this method completes successfully. + """ + if self._pool is not None: + pool_impl = self._pool._impl + if pool_impl is not None: + await pool_impl.return_connection(self._impl, in_del) + else: + await self._impl.close(in_del) + self._impl = None async def _connect(self, dsn, pool, params, kwargs): """ @@ -1468,8 +1494,7 @@ async def close(self) -> None: Closes the connection. """ self._verify_connected() - await self._impl.close() - self._impl = None + await self._close() async def commit(self) -> None: """ diff --git a/utils/templates/pool.py b/utils/templates/pool.py index ed86d431..b9839b38 100644 --- a/utils/templates/pool.py +++ b/utils/templates/pool.py @@ -485,9 +485,11 @@ def release( if not isinstance(connection, connection_module.Connection): message = "connection must be an instance of oracledb.Connection" raise TypeError(message) + connection._verify_connected() if tag is not None: connection.tag = tag - connection.close() + self._impl.return_connection(connection._impl) + connection._impl = None def reconfigure( self, @@ -777,7 +779,8 @@ async def release( raise TypeError(message) if tag is not None: connection.tag = tag - await connection.close() + await self._impl.return_connection(connection._impl) + connection._impl = None def _async_pool_factory( From 3604bc9ccf0a0a397fe4d3838b78c61d77a93aac Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 31 May 2025 14:48:13 -0600 Subject: [PATCH 087/239] Doc updates. --- doc/src/api_manual/module.rst | 5 +- doc/src/user_guide/appendix_d.rst | 6 +- doc/src/user_guide/connection_handling.rst | 184 +++++---- doc/src/user_guide/extending.rst | 6 +- doc/src/user_guide/tracing.rst | 443 ++++++++++++++++----- doc/src/user_guide/troubleshooting.rst | 72 +++- doc/src/user_guide/tuning.rst | 59 ++- doc/src/user_guide/txn_management.rst | 33 +- 8 files changed, 604 insertions(+), 204 deletions(-) diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 45a33682..0dfc0ac1 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -2974,7 +2974,10 @@ of the :ref:`message properties object ` passed as the .. data:: MSG_BUFFERED This constant is used to specify that enqueue or dequeue operations should - enqueue or dequeue buffered messages, respectively. + enqueue or dequeue buffered messages, respectively. For multi-consumer + queues, a `subscriber `__ with buffered delivery + mode needs to be created prior to enqueuing buffered messages. 
This mode is not supported for bulk array operations in python-oracledb Thick mode. diff --git a/doc/src/user_guide/appendix_d.rst b/doc/src/user_guide/appendix_d.rst index 3487de72..b9e113e4 100644 --- a/doc/src/user_guide/appendix_d.rst +++ b/doc/src/user_guide/appendix_d.rst @@ -264,7 +264,7 @@ Older Versions of Python Frameworks, SQL Generators, and ORMs ============================================================= For versions of SQLAlchemy, Django, Superset, other frameworks, -object-relational mappers (ORMs), and libraries that support the obsolere +object-relational mappers (ORMs), and libraries that support the obsolete cx_Oracle driver but do not have native support for python-oracledb, you can add code like this to use python-oracledb: @@ -288,3 +288,7 @@ parameters for your environment, see :ref:`enablingthick`. SQLAlchemy 2 and Django 5 have native support for python-oracledb so the above code snippet is not needed in those versions. Check your preferred library for which Oracle Database driver it requires. + +For details on using Superset with python-oracledb, refer to the blog post +`Steps to use Apache Superset and Oracle Database +`__. diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index df7e0fd1..1ca7db4a 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -2884,20 +2884,22 @@ id=GUID-82FF6896-F57E-41CF-89F7-755F3BC9C924>`__. Using DRCP with python-oracledb applications involves the following steps: -1. Configuring and enabling DRCP in the database -2. Configuring the application to use a DRCP connection -3. Deploying the application +1. Enabling DRCP in the database +2. Configuring the application to use DRCP pooled servers Enabling DRCP in Oracle Database -------------------------------- -Every Oracle Database uses a single, default DRCP connection pool. From Oracle -Database 21c, each pluggable database can optionally have its own pool. Note -that DRCP is already enabled in Oracle Autonomous Database and pool management -is different to the steps below. +Oracle Database versions prior to 21c can have a single DRCP connection pool. +From Oracle Database 21c, each pluggable database can optionally have its own +pool, or can use the container level pool. From Oracle Database 23ai, you can +create multiple pools at the pluggable, or container, database level. -DRCP pools can be configured and administered by a DBA using the -``DBMS_CONNECTION_POOL`` package: +Note that DRCP is already enabled in Oracle Autonomous Database and pool +management is different to the steps below. + +In the basic scenario, DRCP pools can be configured and administered by a DBA +using the ``DBMS_CONNECTION_POOL`` package: .. code-block:: sql @@ -2942,13 +2944,14 @@ Otherwise, server processes will continue to use old settings. There is a ``DBMS_CONNECTION_POOL.RESTORE_DEFAULTS()`` procedure to reset all values. -When DRCP is used with RAC, each database instance has its own connection -broker and pool of servers. Each pool has the identical configuration. For -example, all pools start with ``minsize`` server processes. A single -DBMS_CONNECTION_POOL command will alter the pool of each instance at the same -time. The pool needs to be started before connection requests begin. 
The -command below does this by bringing up the broker, which registers itself with -the database listener: +When DRCP is used with `Oracle RAC +`__, each database +instance has its own connection broker and pool of servers. Each pool has the +identical configuration. For example, all pools start with ``minsize`` server +processes. A single DBMS_CONNECTION_POOL command will alter the pool of each +instance at the same time. The pool needs to be started before connection +requests begin. The command below does this by bringing up the broker, which +registers itself with the database listener: .. code-block:: sql @@ -2962,29 +2965,46 @@ instance restarts, unless explicitly stopped with the EXECUTE DBMS_CONNECTION_POOL.STOP_POOL() -The pool cannot be stopped while connections are open. +Oracle Database 23ai allows a ``DRAINTIME`` argument to be passed to +``STOP_POOL()``, indicating that the pool will only be closed after the +specified time. This allows in-progress application work to continue. A +draintime value of 0 can be used to immediately close the pool. See the +database documentation on `DBMS_CONNECTION_POOL.STOP_POOL() +`__. + +In older database versions, the pool cannot be stopped while connections are +open. Coding Applications to use DRCP ------------------------------- To use DRCP, application connection establishment must request a DRCP pooled -server. The best practice is also to specify a user-chosen connection class -name when creating a connection pool. A 'purity' of the connection session -state can optionally be specified. See the Oracle Database documentation on -`benefiting from scalability `__ for more information on purity and connection classes. -Note that when using DRCP with a python-oracledb local :ref:`connection pool -` in Thick mode, the local connection pool ``min`` value is -ignored and the pool will be created with zero connections. +The best practice is to use DRCP in conjunction with a local driver +:ref:`connection pool ` created with +:meth:`oracledb.create_pool()` or :meth:`oracledb.create_pool_async()`. The +python-oracledb connection pool size does not need to match the DRCP pool size. +The limit on overall execution parallelism is determined by the DRCP pool +size. Note that when using DRCP with a python-oracledb local connection pool in +Thick mode, the local connection pool ``min`` value is ignored and the pool +will be created with zero connections. -**Requesting a Pooled Server** +See `drcp_pool.py +`__ +for a runnable example of DRCP. + +**Requesting Pooled Servers be Used** -To request a DRCP pooled server, you can: +To enable connections to use DRCP pooled servers, you can: - Use a specific connection string in :meth:`oracledb.create_pool()` or - :meth:`oracledb.connect()`. For example with the + :meth:`oracledb.connect()` to request a pooled server. For example with the :ref:`Easy Connect syntax `: .. code-block:: python @@ -3013,43 +3033,23 @@ To request a DRCP pooled server, you can: server_type="pooled", cclass="MYAPP") - **DRCP Connection Class Names** The best practice is to specify a ``cclass`` class name when creating a python-oracledb connection pool. This user-chosen name provides some partitioning of DRCP session memory so reuse is limited to similar applications. It provides maximum pool sharing if multiple application -processes are started. A class name also allows better DRCP usage tracking in -the database. 
In the database monitoring views, the class name shown will be -the value specified in the application prefixed with the user name. +processes are started and use the same class name. A class name also allows +better DRCP usage tracking in the database. In the database monitoring views, +the class name shown will be the value specified in the application prefixed +with the user name. If ``cclass`` was not specified during pool creation, then the python-oracledb Thin mode generates a unique connection class with the prefix "DPY" while the Thick mode generates a unique connection class with the prefix "OCI". -To create a connection pool requesting a DRCP pooled server and specifying a -class name, you can call: - -.. code-block:: python - - pool = oracledb.create_pool(user="hr", password=userpwd, dsn="dbhost.example.com/orclpdb:pooled", - min=2, max=5, increment=1, - cclass="MYAPP") - -Once the pool has been created, your application can get a connection from it -by calling: - -.. code-block:: python - - connection = pool.acquire() - -The python-oracledb connection pool size does not need to match the DRCP pool -size. The limit on overall execution parallelism is determined by the DRCP -pool size. - -Connection class names can also be passed to :meth:`~ConnectionPool.acquire()`, -if you want to use a connection with a different class: +To create a connection pool requesting DRCP pooled servers be used, and +specifying a class name, you can call: .. code-block:: python @@ -3057,12 +3057,6 @@ if you want to use a connection with a different class: min=2, max=5, increment=1, cclass="MYAPP") - connection = mypool.acquire(cclass="OTHERAPP") - -If a pooled server of a requested class is not available, a server with new -session state is used. If the DRCP pool cannot grow, a server with a different -class may be used and its session state cleared. - If ``cclass`` is not set, then the pooled server sessions will not be reused optimally, and the DRCP statistic views may record large values for NUM_MISSES. @@ -3085,24 +3079,30 @@ allocated each time :meth:`~ConnectionPool.acquire()` is called: min=2, max=5, increment=1, cclass="MYAPP", purity=oracledb.PURITY_NEW) -**Setting the Connection Class and Purity in the Connection String** +**Acquiring a DRCP Connection** -Using python-oracledb Thin mode with Oracle Database 21c, or later, you can -specify the class and purity in the connection string itself. This removes the -need to modify an existing application when you want to use DRCP: +Once DRCP has been enabled and the driver connection pool has been created with +the appropriate connection string, then your application can get a connection +that uses DRCP by calling: .. code-block:: python - dsn = "localhost/orclpdb:pooled?pool_connection_class=MYAPP&pool_purity=self" + connection = pool.acquire() + +Connection class names can also be passed to :meth:`~ConnectionPool.acquire()` +if you want to use a connection with a different class: -For python-oracledb Thick mode, this syntax is supported if you are using -Oracle Database 21c (or later) and Oracle Client 19c (or later). However, -explicitly specifying the purity as *SELF* in this way may cause some unusable -connections in a python-oracledb Thick mode connection pool to not be -terminated. In summary, if you cannot programmatically set the class name and -purity, or cannot use python-oracledb Thin mode, then avoid explicitly setting -the purity as a connection string parameter when using a python-oracledb -connection pooling in Thick mode. +.. 
code-block:: python + + pool = oracledb.create_pool(user="hr", password=userpwd, dsn="dbhost.example.com/orclpdb:pooled", + min=2, max=5, increment=1, + cclass="MYAPP") + + connection = mypool.acquire(cclass="OTHERAPP") + +If a pooled server of a requested class is not available, a server with new +session state is used. If the DRCP pool cannot grow, a server with a different +class may be used and its session state cleared. **Closing Connections when using DRCP** @@ -3122,7 +3122,7 @@ other users: # Do some database operations connection = mypool.acquire() . . . - connection.close(); # <- Add this to release the DRCP pooled server + connection.close() # <- Add this to release the DRCP pooled server # Do lots of non-database work . . . @@ -3130,11 +3130,41 @@ other users: # Do some more database operations connection = mypool.acquire() # <- And get a new pooled server only when needed . . . - connection.close(); + connection.close() -See `drcp_pool.py -`__ -for a runnable example of DRCP. +Setting the DRCP Connection Class and Purity in the Connection String +--------------------------------------------------------------------- + +Although setting the DRCP connection class and purity in the application is +preferred, sometimes it is not possible to modify an existing code base. For +these applications, you can specify the class and purity along with the pooled +server option in the connection string. + +For example with the :ref:`Easy Connect ` syntax:: + + dbhost.example.com/orclpdb:pooled?pool_connection_class=MYAPP&pool_purity=self + +or by using a :ref:`TNS Alias ` in a +:ref:`tnsnames.ora ` file:: + + customerpool = (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp) + (HOST=dbhost.example.com) + (PORT=1521))(CONNECT_DATA=(SERVICE_NAME=orclpdb) + (SERVER=POOLED) + (POOL_CONNECTION_CLASS=MYAPP) + (POOL_PURITY=SELF))) + +You can specify the class and purity options in connection strings when using +python-oracledb Thin mode with Oracle Database 21c, or later. + +For python-oracledb Thick mode, setting these options in the connection string +is supported if you are using Oracle Database 21c (or later) and Oracle Client +19c (or later). However, explicitly specifying the purity as *SELF* in this way +may cause some unusable connections in a python-oracledb Thick mode connection +pool to not be terminated. In summary, if you cannot programmatically set the +class name and purity, or cannot use python-oracledb Thin mode, then avoid +explicitly setting the purity as a connection string parameter when using a +local python-oracledb connection pool in Thick mode. .. _monitoringdrcp: diff --git a/doc/src/user_guide/extending.rst b/doc/src/user_guide/extending.rst index 0d0b31b2..3cba483c 100644 --- a/doc/src/user_guide/extending.rst +++ b/doc/src/user_guide/extending.rst @@ -208,7 +208,7 @@ strings prefixed with "myprefix://". import oracledb import oracledb.plugins.myplugin - cs = 'myprefix://localhost/orclpdb1' + cs = "myprefix://localhost/orclpdb" cp = oracledb.ConnectParams() cp.parse_connect_string(cs) @@ -217,8 +217,8 @@ strings prefixed with "myprefix://". Running this will print:: - In myhookfunc: protocol=myprefix arg=localhost/orclpdb1 - host=localhost, port=1521, service name=orclpdb1 + In myhookfunc: protocol=myprefix arg=localhost/orclpdb + host=localhost, port=1521, service name=orclpdb 7. 
To uninstall the plugin, simply remove the package:: diff --git a/doc/src/user_guide/tracing.rst b/doc/src/user_guide/tracing.rst index c2e637c5..9f7c5bc4 100644 --- a/doc/src/user_guide/tracing.rst +++ b/doc/src/user_guide/tracing.rst @@ -24,11 +24,15 @@ There are multiple approaches for application tracing and monitoring: - The Java Debug Wire Protocol (JDWP) for debugging PL/SQL can be used. See :ref:`jdwp`. +- Instrumentation libraries such as OpenTelemetry allow sophisticated + monitoring, see :ref:`opentelemetry`. + - Python-oracledb in Thick mode can dump a trace of SQL statements executed. See :ref:`lowlevelsqltrace`. -- The connection identifiers that appear in the traces and logs can be used - to resolve connectivity errors. See :ref:`connectionid`. +- The unique connection identifiers that appear in connection error messages, + and in Oracle Database traces and logs, can be used to resolve connectivity + errors. See :ref:`connectionid`. .. _endtoendtracing: @@ -48,13 +52,17 @@ utilities. Values may appear in logs and audit trails. Also see :ref:`appcontext` for information about setting Application Contexts. -The :attr:`Connection.client_identifier` attribute is typically set to the -name (or identifier) of the actual end user initiating a query. This allows -the database to distinguish, and trace, end users for applications that connect -to a common database username. It can also be used by `Oracle Virtual Private -Database (VPD) `__ policies to automatically limit -data access. +data access. Oracle Database’s `DBMS_MONITOR +`__ package can take advantage of the +client identifer to enable statistics and tracing at an individual level. The :attr:`Connection.module` and :attr:`Connection.action` attributes can be set to user-chosen, descriptive values identifying your code architecture. @@ -108,7 +116,9 @@ round-trips to the database which reduces application scalability: The :attr:`Connection.dbop` attribute can be used for Real-Time SQL Monitoring, see `Monitoring Database Operations `__. The value will -be shown in the DBOP_NAME column of the V$SQL_MONITOR view: +be shown in the DBOP_NAME column of the `V$SQL_MONITOR `__ +view: .. code-block:: python @@ -201,15 +211,23 @@ diagnosing of connection failures. For example:: DPY-6005: cannot connect to database (CONNECTION_ID=m0PfUY6hYSmWPcgrHZCQIQ==) +Depending on the Oracle Database version in use, the information that is shown +in logs varies. + You can define a prefix value which is added to the beginning of the -``CONNECTION_ID``. This prefix aids in identifying the connections from a +``CONNECTION_ID`` value. This prefix aids in identifying the connections from a specific application. -In python-oracledb Thin mode, you can specify a prefix in the -``connection_id_prefix`` parameter when creating -:meth:`standalone connections `, or -:meth:`pooled connections `. Also, you can specify -the connection identifier in :meth:`oracledb.ConnectParams()` or +See `Troubleshooting Oracle Net Services `_ for more +information on connection identifiers. + +**Python-oracledb Thin mode** + +In python-oracledb Thin mode, you can specify a prefix using the +``connection_id_prefix`` parameter when creating :meth:`standalone connections +` or :meth:`pooled connections `, +or alternatively set a prefix when calling :meth:`oracledb.ConnectParams()` or :meth:`oracledb.PoolParams()`. For example: .. 
code-block:: python @@ -219,12 +237,14 @@ the connection identifier in :meth:`oracledb.ConnectParams()` or connection_id_prefix="MYAPP") If this connection to the database fails, ``MYAPP`` is added as a prefix to the -``CONNECTION_ID`` as shown in the error message below:: +``CONNECTION_ID`` value shown in the error message, for example:: DPY-6005: cannot connect to database (CONNECTION_ID=MYAPPm0PfUY6hYSmWPcgrHZCQIQ==). -In python-oracledb Thick mode, you can specify the connection identifier prefix in -a connection string. For example:: +**Python-oracledb Thick mode** + +In python-oracledb Thick mode, you can specify the connection identifier prefix +in the connection string or connect descriptor. For example:: mydb = (DESCRIPTION = (ADDRESS_LIST= (ADDRESS=...) (ADDRESS=...)) @@ -234,12 +254,310 @@ a connection string. For example:: ) ) -Depending on the Oracle Database version in use, the information that is shown -in logs varies. +.. _tracingbind: -See `Troubleshooting Oracle Net Services `_ for more -information on connection identifiers. +Tracing Bind Values +------------------- + +Several methods for tracing bind variable values can be used. When tracing bind +variable values, be careful not to leak information and create a security +problem. + +In Oracle Database, the view `V$SQL_BIND_CAPTURE `__ +can capture bind information. Tracing with Oracle Database’s `DBMS_MONITOR +`__ +package may also be useful. + +You can additionally :ref:`subclass python-oracledb classes ` and +log any bind values. + +OpenTelemetry can also be used, see :ref:`opentelemetry`. + +.. _dbviews: + +Database Views for Tracing python-oracledb +------------------------------------------ + +This section shows some of the Oracle Database views useful for tracing and +monitoring python-oracledb. Other views and columns not described here also +contain useful information, such as the :ref:`drcp` views discussed in +:ref:`monitoringdrcp`, and the views discussed in :ref:`endtoendtracing` and +:ref:`tracingbind`. + +V$SESSION ++++++++++ + +The following table shows sample values for some `V$SESSION +`__ columns. You may see other values +if you have changed the defaults using the :ref:`Defaults object ` +before connecting, set the equivalent connection or pool creation parameters, +or set the attribute :attr:`Connection.module` as shown in +:ref:`endtoendtracing`. + +.. list-table-with-summary:: Sample V$SESSION column values + :header-rows: 1 + :class: wy-table-responsive + :widths: 10 15 15 + :name: V$SESSION_COLUMN_VALUES + :summary: The first column is the name of the column. The second column lists a sample python-oracledb Thick mode value. The third column lists a sample python-oracledb Thin mode value. + + * - Column + - Sample Thin mode value + - Sample Thick mode value + * - MACHINE + - "myusername-mac" + - "myusername-mac" + * - MODULE + - The value of Python's ``sys.executable``, such as `/Users/myusername/.pyenv/versions/3.13.3/bin/python` + - Similar to `python@myusername-mac (TNS V1-V3)` + * - OSUSER + - "myusername" + - "myusername" + * - PROGRAM + - The value of Python's ``sys.executable``, such as `/Users/myusername/.pyenv/versions/3.13.3/bin/python` + - Similar to `python@myusername-mac (TNS V1-V3)` + * - TERMINAL + - "unknown" + - Similar to `ttys001` + +V$SESSION_CONNECT_INFO +++++++++++++++++++++++ + +The following table shows sample values for some `V$SESSION_CONNECT_INFO +`__ columns. 
You may see other +values if you have changed the defaults using the :ref:`Defaults object +` before connecting, set the equivalent connection or pool creation +parameters, or set the ``driver_name`` parameter in +:meth:`oracledb.init_oracle_client()`. + +.. list-table-with-summary:: Sample V$SESSION_CONNECT_INFO column values + :header-rows: 1 + :class: wy-table-responsive + :widths: 10 15 15 + :name: V$SESSION_CONNECT_INFO + :summary: The first column is the name of V$SESSION_CONNECT_INFO view's column. The second column lists a sample python-oracledb Thick mode value. The third column list a sample python-oracledb Thin mode value. + + * - Column + - Sample Thin mode value + - Sample Thick mode value + * - CLIENT_DRIVER + - "python-oracledb thn : 3.2.0" + - "python-oracledb thk : 3.2.0" + * - CLIENT_OCI_LIBRARY + - "Unknown" + - The Oracle Client or Instant Client type, such as "Full Instant Client" + * - CLIENT_VERSION + - "3.2.0.0.0" (the python-oracledb version number with an extra .0.0) + - The Oracle Client library version number + * - OSUSER + - "myusername" + - "myusername" + +.. _opentelemetry: + +Using python-oracledb with OpenTelemetry +======================================== + +The OpenTelemetry observability framework is useful for monitoring applications +and identifying bottlenecks. Python-oracledb conforms to the `Python DB API +specification `__ allowing standard Python +modules for OpenTelemetry to be used to instrument your applications. + +OpenTelemetry's `backend trace exporters +`__ can provide +graphic and intuitive representation of OpenTelemetry trace information. Visual +exporters include Zipkin, Jaeger, and Prometheus. Simple tracing can also be +directed to the console by making use of the exporter ConsoleSpanExporter from +the opentelemetry-sdk package. + +For details on using OpenTelemetry in Python, see `Python OpenTelemetry +documentation `_. + +Example of Using python-oracledb with OpenTelemetry +--------------------------------------------------- + +This example shows a python-oracledb application using OpenTelemetry's +ConsoleSpanExporter exporter to display trace information to the console. + +**Installing OpenTelemetry Modules** + +For this example, install:: + + python -m pip install opentelemetry-sdk opentelemetry-api opentelemetry-instrumentation-dbapi + +**Sample Application** + +This simple application performs two queries in a custom span. It also sets the +service name and system attributes to user-chosen values. It uses the +``capture_parameters`` option to enable bind variable tracing. + +.. warning:: + + The trace integration setting ``capture_parameters=True`` captures + :ref:`bind variable values ` and is a security risk. + +The sample code is: + +.. 
code-block:: python + + import oracledb + + from opentelemetry import trace + from opentelemetry.sdk.trace import TracerProvider + from opentelemetry.sdk.trace.export import ( + BatchSpanProcessor, + ConsoleSpanExporter, + ) + from opentelemetry.sdk.resources import Resource + + user = "hr" + password = userpwd + host = "dbhost.example.com" + service_name = "orclpdb" + + resource = Resource(attributes={ + "service.name": service_name, # displayed as a resource attribute "service.name" + }) + + provider = TracerProvider(resource=resource) + processor = BatchSpanProcessor(ConsoleSpanExporter()) + provider.add_span_processor(processor) + trace.set_tracer_provider(provider) + + from opentelemetry.instrumentation.dbapi import trace_integration + + trace_integration( + oracledb, + connect_method_name="connect", + database_system="oracle", # displayed as attribute "db.system" + capture_parameters=True, # displayed as attribute "db.statement.parameters" + # WARNING: this shows bind variable values + ) + + connection = oracledb.connect(user=user, password=password, + host=host, service_name=service_name) + + with connection.cursor() as cursor: + tracer = trace.get_tracer("HR-tracer-name") + with tracer.start_as_current_span("HR-span-1") as span: + sql = "select city from locations where location_id = :1" + for r, in cursor.execute(sql, [2200]): + print(r) + sql = "select 'Hello World!' from dual" + for r, in cursor.execute(sql): + print(r) + +**Sample Output** + +The sample output will be like:: + + Sydney + Hello World! + { + "name": "select", + "context": { + "trace_id": "0xb24817cd2ea38ffa523c2ee2778508f7", + "span_id": "0xacfd82ed60e8976d", + "trace_state": "[]" + }, + "kind": "SpanKind.CLIENT", + "parent_id": "0x19027598c301cfac", + "start_time": "2025-05-29T08:40:10.194645Z", + "end_time": "2025-05-29T08:40:10.209815Z", + "status": { + "status_code": "UNSET" + }, + "attributes": { + "db.system": "oracle", + "db.name": "", + "db.statement": "select city from locations where location_id = :1", + "db.statement.parameters": "[2200]" + }, + "events": [], + "links": [], + "resource": { + "attributes": { + "service.name": "orclpdb1" + }, + "schema_url": "" + } + } + { + "name": "select", + "context": { + "trace_id": "0xb24817cd2ea38ffa523c2ee2778508f7", + "span_id": "0x376dff430f66b14f", + "trace_state": "[]" + }, + "kind": "SpanKind.CLIENT", + "parent_id": "0x19027598c301cfac", + "start_time": "2025-05-29T08:40:10.210799Z", + "end_time": "2025-05-29T08:40:10.214694Z", + "status": { + "status_code": "UNSET" + }, + "attributes": { + "db.system": "oracle", + "db.name": "", + "db.statement": "select 'Hello World!' from dual" + }, + "events": [], + "links": [], + "resource": { + "attributes": { + "service.name": "orclpdb1" + }, + "schema_url": "" + } + } + { + "name": "HR-span-1", + "context": { + "trace_id": "0xb24817cd2ea38ffa523c2ee2778508f7", + "span_id": "0x19027598c301cfac", + "trace_state": "[]" + }, + "kind": "SpanKind.INTERNAL", + "parent_id": null, + "start_time": "2025-05-29T08:40:10.194536Z", + "end_time": "2025-05-29T08:40:10.214732Z", + "status": { + "status_code": "UNSET" + }, + "attributes": {}, + "events": [], + "links": [], + "resource": { + "attributes": { + "service.name": "orclpdb1" + }, + "schema_url": "" + } + } + +The two query results precede OpenTelemetry's tracing. The tracing then shows: + +- The start and end time of each operation. + +- Each "select" trace block's association to the span "HR-span-1" via their + ``parent_id`` values, which match the span's ``span_id`` value. 
+ +- The bind variable value *2200* in the attribute + ``db.statement.parameters``. *Warning*: it is a security risk to monitor bind + variable values this way. Keep the ``capture_parameters`` option set to + *False*. + +- The system and service name as set in the application. + +The Python OpenTelemetry modules allow further customization for tracing. See +their documentation for more information. + +When a graphical provider is used instead of ConsoleSpanExporter, the database +query relationships and timings are easier to analyze. .. _vsessconinfo: @@ -264,7 +582,9 @@ The python-oracledb version can be shown with :data:`oracledb.__version__`: print(oracledb.__version__) Version and mode information can also be seen in the Oracle Database data -dictionary table V$SESSION_CONNECT_INFO: +dictionary table `V$SESSION_CONNECT_INFO +`__: .. code-block:: python @@ -277,87 +597,16 @@ dictionary table V$SESSION_CONNECT_INFO: In python-oracledb Thin mode, the output will be like:: - python-oracledb thn : 3.0.0 + python-oracledb thn : 3.2.0 In python-oracledb Thick mode, the output will be like:: - python-oracledb thk : 3.0.0 + python-oracledb thk : 3.2.0 Note that you may not see these values if you have set :attr:`oracledb.defaults.driver_name ` or the ``driver_name`` parameter in :meth:`oracledb.init_oracle_client()`. -.. _dbviews: - -Database Views -============== - -This section shows some sample column values for database views useful for -tracing and monitoring python-oracledb. Other views also contain useful -information, such as the :ref:`drcp` views discussed in :ref:`monitoringdrcp`. - -V$SESSION_CONNECT_INFO ----------------------- - -The following table lists sample default values for some -`V$SESSION_CONNECT_INFO `__ columns. You may not see -values with these formats if you have changed the defaults using the -:ref:`Defaults object `, set the equivalent connection or pool -creation parameters, or set the ``driver_name`` parameter in -:meth:`oracledb.init_oracle_client()`. - -.. list-table-with-summary:: Sample V$SESSION_CONNECT_INFO column values - :header-rows: 1 - :class: wy-table-responsive - :widths: 15 10 10 - :name: V$SESSION_CONNECT_INFO - :summary: The first column is the name of V$SESSION_CONNECT_INFO view's column. The second column lists a sample python-oracledb Thick mode value. The third column list a sample python-oracledb Thin mode value. - - * - Column - - Thick value - - Thin value - * - CLIENT_OCI_LIBRARY - - The Oracle Client or Instant Client type, such as "Full Instant Client" - - "Unknown" - * - CLIENT_VERSION - - The Oracle Client library version number - - "3.0.0.0.0" (the python-oracledb version number with an extra .0.0) - * - CLIENT_DRIVER - - "python-oracledb thk : 3.0.0" - - "python-oracledb thn : 3.0.0" - -V$SESSION ---------- - -The following table lists sample default values for columns with differences in -`V$SESSION `__. You may not see values with -these formats if you have changed the defaults using the -:ref:`Defaults object `, set the equivalent connection or pool -creation parameters, or set the attribute :attr:`Connection.module` as -shown in :ref:`endtoendtracing`. - -.. list-table-with-summary:: Sample V$SESSION column values - :header-rows: 1 - :class: wy-table-responsive - :widths: 15 10 10 - :name: V$SESSION_COLUMN_VALUES - :summary: The first column is the name of the column. The second column lists a sample python-oracledb Thick mode value. The third column lists a sample python-oracledb Thin mode value. 
- - * - Column - - Thick value - - Thin value - * - TERMINAL - - similar to `ttys001` - - the string "unknown" - * - PROGRAM - - similar to `python@myuser-mac2 (TNS V1-V3)` - - the contents of Python's ``sys.executable``, such as `/Users/myuser/.pyenv/versions/3.9.6/bin/python` - * - MODULE - - similar to `python@myuser-mac2 (TNS V1-V3)` - - the contents of Python's ``sys.executable``, such as `/Users/myuser/.pyenv/versions/3.9.6/bin/python` - Low Level Python-oracledb Driver Tracing ======================================== diff --git a/doc/src/user_guide/troubleshooting.rst b/doc/src/user_guide/troubleshooting.rst index 3093c9de..ec67ee59 100644 --- a/doc/src/user_guide/troubleshooting.rst +++ b/doc/src/user_guide/troubleshooting.rst @@ -247,8 +247,7 @@ DPI-1072 **Cause:** The connection to Oracle Database failed because the Oracle Client library version used is not supported by python-oracledb Thick mode. The Thick -mode needs Oracle Client library 11.2 or later. Note that version 19 is not -supported on Windows 7. +mode needs Oracle Client library 11.2 or later. **Action:** Review the :ref:`instreq`. You can either: @@ -333,8 +332,8 @@ or later. Oracle Client libraries and call :meth:`oracledb.init_oracle_client()` in your code. -- Upgrade your Oracle database to python-oracledb Thin mode supported versions - 12.1 or later. +- Upgrade Oracle Database to 12.1, or later, if you want to use python-oracledb + Thin mode. DPY-3015 ++++++++ @@ -342,10 +341,10 @@ DPY-3015 **Message:** ``DPY-3015: password verifier type 0x939 is not supported by python-oracledb in thin mode`` -**Cause:** The connection to Oracle Database with python-oracledb Thin mode -failed because your user account was only created with the 10G password -verifier. The python-oracledb Thin mode supports password verifiers 11G and -later. +**Cause:** The connection to Oracle Database with python-oracledb Thin mode +failed because your database user account was only created with the Oracle +Database 10G password verifier. Python-oracledb Thin mode supports password +verifiers 11G and later. **Action:** You can either: @@ -376,7 +375,7 @@ later. You can reset passwords for these users with commands like:: - alter user x identified by y + alter user x identified by y; .. seealso:: @@ -414,6 +413,61 @@ only supported in python-oracledb Thick mode. If additional messages indicate a reason, follow their guidance. +DPY-4027 +++++++++ + +**Message:** ``DPY-4027: no configuration directory specified`` + +**Cause:** The :ref:`connection string ` specified in your connection +or pool creation ``dsn`` parameter was interpreted by python-oracledb to be a +:ref:`TNS Alias ` which it needed to look up in a +:ref:`tnsnames.ora ` file. However, python-oracledb did not know +where to find that file. + +**Action:** You need to either tell python-oracledb where to find the +:ref:`tnsnames.ora ` file, or use a different connection string +syntax. Perform one of the following: + +- Use the equivalent :ref:`Easy Connect syntax ` or :ref:`Connect + Descriptor `: + + .. code-block:: python + + c = oracledb.connect(user="hr", password=userpw, dsn="localhost:1521/orclpdb") + + Or: + + .. code-block:: python + + c = oracledb.connect(user="hr", password=userpw, dsn="(DESCRIPTION=(ADDRESS=(...))") + +- Review the :attr:`defaults.config_dir` documentation for the heuristics used + by python-oracledb to automatically locate :ref:`tnsnames.ora + `. 
Ensure that your file is in an expected location, that the + file is readable by Python, and that any necessary environment variables such + as ``TNS_ADMIN`` are accessible by the Python process. + +- If you have problems with the heuristics, then you can explicitly specify the + location of :ref:`tnsnames.ora `. For example, if the file is at + ``/opt/myconfigdir/tnsnames.ora``, then: + + - In python-oracledb's default Thin mode, or when + :attr:`defaults.thick_mode_dsn_passthrough` is *False*, you can use: + + .. code-block:: python + + c = oracledb.connect(user="hr", password=userpw, dsn="MYDB", + config_dir="/opt/myconfigdir") + + - In python-oracledb's :ref:`Thick mode ` (which is the mode + when your application calls :func:`oracledb.init_oracle_client()`), then you + can use: + + .. code-block:: python + + oracledb.init_oracle_client(config_dir="/opt/myconfigdir") + c = oracledb.connect(user="hr", password=userpw, dsn="MYDB") + .. _oraerr: ORA Error Messages diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index 9202c753..84d20f7f 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -17,9 +17,11 @@ Some general tuning tips are: see :ref:`Session Callbacks for Setting Pooled Connection State `. - Make use of efficient python-oracledb functions. For example, to insert + Make use of efficient python-oracledb functions. For example, to insert multiple rows use :meth:`Cursor.executemany()` instead of - :meth:`Cursor.execute()`. + :meth:`Cursor.execute()`. Another example is to fetch data directly as + :ref:`data frames ` when working with packages like Pandas + and NumPy. * Tune your SQL statements. See the `SQL Tuning Guide `__. @@ -311,17 +313,62 @@ The ``arraysize`` value can also be set before calling the procedure: Also see `Avoiding Premature Prefetching`_. -Tuning Fetching for DataFrames ------------------------------- +Tuning Fetching for Data Frames +------------------------------- When fetching :ref:`data frames ` with :meth:`Connection.fetch_df_all()` or :meth:`Connection.fetch_df_batches()`, -tuning of data transfer across the network is controlled by the methods -``arraysize`` or ``size`` parameters, respectively. +tuning of data transfer across the network is controlled by the respective +methods ``arraysize`` or ``size`` parameters. Any :attr:`defaults.prefetchrows` value is ignored since these methods always set the internal prefetch size to the relevant ``arraysize`` or ``size`` value. +Parallelizing Data Fetches from a Single Table +---------------------------------------------- + +Before trying to improve the performance of querying a single table by issuing +multiple SQL queries in multiple threads, where each query extracts a different +range of data, you should do careful benchmarking. + +Factors that will impact such a solution: + +- How heavily loaded is the database? The parallel solution may appear to be + fast but it could be inefficient, thereby impacting, or eventually being + limited by, everyone else. + +- A single python-oracledb connection can only do one database operation at a + time, so you need to use a different connection for each executed SQL + statement, for example, using connections from a :ref:`pool + `. This will cause extra database load that needs to be + assessed. + +- A naive solution using the OFFSET FETCH syntax to fetch sections of a table + in individual queries will still cause table blocks to be scanned even though + not all data is returned. + +- Is the table partitioned? 
+ +- Are zone maps being used? + +- Maybe the real performance bottleneck cannot be solved by parallelism. + Perhaps you have function based indexes that are being invoked for every + row. + +- Is the data in the database spread across multiple spindles or is the one + disk having to seek? + +- Is Exadata with storage indexes being used? + +- What is the impact of Python’s Global Interpreter Lock (GIL)? Maybe multiple + Python processes should be used instead of threads. + +- What is the application doing with the data? Can the receiving end + efficiently process it? + +- Is it better to execute a single query in Python but use a PARALLEL query + hint? Or will this overload the database. + Database Round-trips ==================== diff --git a/doc/src/user_guide/txn_management.rst b/doc/src/user_guide/txn_management.rst index 3c25fe48..84e035f9 100644 --- a/doc/src/user_guide/txn_management.rst +++ b/doc/src/user_guide/txn_management.rst @@ -5,31 +5,39 @@ Managing Transactions ********************* A database transaction is a grouping of SQL statements that make a logical data -change to the database. - -When :meth:`Cursor.execute()` or :meth:`Cursor.executemany()` executes a SQL -statement, a transaction is started or continued. By default, python-oracledb -does not commit this transaction to the database. The methods -:meth:`Connection.commit()` and :meth:`Connection.rollback()` methods can be -used to explicitly commit or rollback a transaction: +change to the database. When statements like :meth:`Cursor.execute()` or +:meth:`Cursor.executemany()` execute SQL statements like INSERT or UPDATE, a +transaction is started or continued. By default, python-oracledb does not +commit this transaction to the database. You can explictly commit or roll it +back using the methods :meth:`Connection.commit()` and +:meth:`Connection.rollback()`. For example to commit a new row: .. code-block:: python - cursor.execute("INSERT INTO mytab (name) VALUES ('John')") + cursor = connection.cursor() + cursor.execute("insert into mytab (name) values ('John')") connection.commit() +Transactions are handled at the connection level, meaning changes performed by +all cursors obtained from a connection will be committed or rolled back +together. + When a database connection is closed, such as with :meth:`Connection.close()`, or when variables referencing the connection go out of scope, any uncommitted transaction will be rolled back. +When `Data Definition Language (DDL) `__ statements such +as CREATE are executed, Oracle Database will always perform a commit. + Autocommitting ============== An alternative way to commit is to set the attribute :attr:`Connection.autocommit` of the connection to ``True``. This ensures all -:ref:`DML ` statements (INSERT, UPDATE, and so on) are committed as they are -executed. Unlike :meth:`Connection.commit()`, this does not require an +:ref:`DML ` statements (INSERT, UPDATE, and so on) are committed as they +are executed. Unlike :meth:`Connection.commit()`, this does not require an additional :ref:`round-trip ` to the database so it is more efficient when used appropriately. @@ -80,3 +88,8 @@ See the Oracle documentation for more details. Note that in order to make use of global (distributed) transactions, the attributes :attr:`Connection.internal_name` and :attr:`Connection.external_name` attributes must be set. + +Distributed Transactions +======================== + +For information on distributed transactions, see the chapter :ref:`tpc`. 
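The revised transaction text above also describes ``Connection.autocommit``. As a
minimal, non-authoritative sketch of that behaviour (an already-open ``connection``
and the documentation's example ``mytab`` table are assumed):

.. code-block:: python

    # With autocommit enabled, each DML statement is committed as it executes,
    # so no explicit Connection.commit() round-trip is needed afterwards.
    connection.autocommit = True

    with connection.cursor() as cursor:
        cursor.execute("insert into mytab (name) values ('Sally')")
        # the new row is already committed at this point
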
From 94bef53b420b5aeae8e22751df490bb5f80ba165 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 31 May 2025 14:48:29 -0600 Subject: [PATCH 088/239] Correct default purity. --- src/oracledb/impl/thin/messages/auth.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oracledb/impl/thin/messages/auth.pyx b/src/oracledb/impl/thin/messages/auth.pyx index cf48925f..b6a3f621 100644 --- a/src/oracledb/impl/thin/messages/auth.pyx +++ b/src/oracledb/impl/thin/messages/auth.pyx @@ -272,9 +272,9 @@ cdef class AuthMessage(Message): if description.purity == PURITY_DEFAULT \ and self.conn_impl._drcp_enabled: if self.conn_impl._is_pooled: - self.purity = PURITY_NEW - else: self.purity = PURITY_SELF + else: + self.purity = PURITY_NEW else: self.purity = description.purity From b18ce9ed3f57d4bccce8af3e344c0ad70367cac9 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 31 May 2025 14:48:51 -0600 Subject: [PATCH 089/239] Ensure that max >= min when creating pools. --- doc/src/release_notes.rst | 7 +++++++ src/oracledb/errors.py | 4 ++++ src/oracledb/impl/base/pool_params.pyx | 4 ++++ tests/test_2400_pool.py | 5 +++++ tests/test_4700_pool_params.py | 9 ++++++--- tests/test_5500_pool_async.py | 5 +++++ 6 files changed, 31 insertions(+), 3 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index afb8d992..5ded8f8e 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -50,6 +50,13 @@ Common Changes (`PR 496 `__). #) Fix bug with GitHub build action merge artifacts step (`issue 495 `__). +#) Error ``DPY-2064: parameter 'max' should be greater than or equal to + parameter 'min'`` is now raised when a call to + :meth:`oracledb.create_pool()`, :meth:`oracledb.create_pool_async()` + or :meth:`oracledb.PoolParams()` is made with parameter "max" less than the + parameter "min". Previously python-oracledb Thin mode did not raise an + error and python-oracledb Thick mode raised the exception + ``ORA-24413: Invalid number of sessions specified``. #) Improved the test suite and documentation. 
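To illustrate the new validation noted in the release note above, a minimal sketch
of what an application would now see when ``max`` is smaller than ``min``; the
credentials and DSN below are placeholders:

.. code-block:: python

    import oracledb

    try:
        # max < min is now rejected up front in both Thin and Thick modes
        pool = oracledb.create_pool(user="hr", password=userpwd,
                                    dsn="dbhost.example.com/orclpdb",
                                    min=3, max=2)
    except oracledb.Error as e:
        print(e)
        # DPY-2064: parameter 'max' should be greater than or equal to
        # parameter 'min'
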
diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 9ef74320..3c434c13 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -287,6 +287,7 @@ def _raise_not_supported(feature: str) -> None: ERR_PARAMS_HOOK_HANDLER_FAILED = 2061 ERR_PAYLOAD_CANNOT_BE_ENQUEUED = 2062 ERR_SCROLL_OUT_OF_RESULT_SET = 2063 +ERR_POOL_MAX_LESS_THAN_MIN = 2064 # error numbers that result in NotSupportedError ERR_TIME_NOT_SUPPORTED = 3000 @@ -781,6 +782,9 @@ def _raise_not_supported(feature: str) -> None: ERR_POOL_HAS_BUSY_CONNECTIONS: ( "connection pool cannot be closed because connections are busy" ), + ERR_POOL_MAX_LESS_THAN_MIN: ( + "parameter 'max' should be greater than or equal to parameter 'min'" + ), ERR_POOL_NO_CONNECTION_AVAILABLE: ( "timed out waiting for the connection pool to return a connection" ), diff --git a/src/oracledb/impl/base/pool_params.pyx b/src/oracledb/impl/base/pool_params.pyx index dd22b038..8bc45bc0 100644 --- a/src/oracledb/impl/base/pool_params.pyx +++ b/src/oracledb/impl/base/pool_params.pyx @@ -94,6 +94,10 @@ cdef class PoolParamsImpl(ConnectParamsImpl): _set_int_param(args, "ping_interval", &self.ping_interval) _set_uint_param(args, "ping_timeout", &self.ping_timeout) + # verify that max >= min + if self.max < self.min: + errors._raise_err(errors.ERR_POOL_MAX_LESS_THAN_MIN) + # if the pool is dynamically sized (min != max) then ensure that the # increment value is non-zero (as otherwise the pool would never grow!) if self.max != self.min and self.increment == 0: diff --git a/tests/test_2400_pool.py b/tests/test_2400_pool.py index 08813d7b..d420133a 100644 --- a/tests/test_2400_pool.py +++ b/tests/test_2400_pool.py @@ -1079,6 +1079,11 @@ def hook(params): self.assertEqual(conn.stmtcachesize, orig_stmtcachesize) pool.close() + def test_2456(self): + "2456 - test creation of pool with min > max" + with self.assertRaisesFullCode("DPY-2064"): + test_env.get_pool(min=3, max=2) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_4700_pool_params.py b/tests/test_4700_pool_params.py index 2630889a..21dab38c 100644 --- a/tests/test_4700_pool_params.py +++ b/tests/test_4700_pool_params.py @@ -35,12 +35,13 @@ class TestCase(test_env.BaseTestCase): requires_connection = False - def __test_writable_parameter(self, name, value): + def __test_writable_parameter(self, name, value, params=None): """ Tests that a writable parameter can be written to and the modified value read back successfully. 
""" - params = oracledb.PoolParams() + if params is None: + params = oracledb.PoolParams() orig_value = getattr(params, name) copied_params = params.copy() args = {} @@ -54,7 +55,7 @@ def __test_writable_parameter(self, name, value): def test_4700(self): "4700 - test writable parameters" - self.__test_writable_parameter("min", 8) + self.__test_writable_parameter("min", 8, oracledb.PoolParams(max=10)) self.__test_writable_parameter("max", 12) self.__test_writable_parameter("increment", 2) self.__test_writable_parameter("connectiontype", oracledb.Connection) @@ -164,6 +165,8 @@ def test_4702(self): conn_string = f"{host}/{service_name}?pyo.{name}={str_value}" with self.subTest(name=name, value=str_value): params = oracledb.PoolParams() + if name == "min" and actual_value > params.max: + params.set(max=actual_value) params.parse_connect_string(conn_string) self.assertEqual(params.host, host) self.assertEqual(params.service_name, service_name) diff --git a/tests/test_5500_pool_async.py b/tests/test_5500_pool_async.py index 2a510d3d..fa20b0bf 100644 --- a/tests/test_5500_pool_async.py +++ b/tests/test_5500_pool_async.py @@ -630,6 +630,11 @@ async def test_5541(self): with self.assertRaises(TypeError): test_env.get_pool_async(pool_alias=alias) + async def test_5542(self): + "5542 - test creation of pool with min > max" + with self.assertRaisesFullCode("DPY-2064"): + test_env.get_pool_async(min=3, max=2) + if __name__ == "__main__": test_env.run_test_cases() From 9b6216f78731e6b549123d3e71764488ec17631d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 31 May 2025 14:49:38 -0600 Subject: [PATCH 090/239] Refactor tests to eliminate the "fake" client version for thin mode. --- tests/create_schema.py | 8 ++-- tests/test_1100_connection.py | 34 +++++--------- tests/test_1700_error.py | 4 +- tests/test_2200_number_var.py | 6 +-- tests/test_2400_pool.py | 68 ++++++++++----------------- tests/test_2500_string_var.py | 6 +-- tests/test_2700_aq_dbobject.py | 2 +- tests/test_3000_subscription.py | 50 ++++++++++---------- tests/test_3100_boolean_var.py | 34 ++++---------- tests/test_3200_features_12_1.py | 6 +-- tests/test_3300_soda_database.py | 36 +++++++------- tests/test_3400_soda_collection.py | 42 ++++++++--------- tests/test_3500_json.py | 10 ++-- tests/test_3700_var.py | 10 ++-- tests/test_3800_typehandler.py | 22 ++++----- tests/test_3900_cursor_execute.py | 4 +- tests/test_4300_cursor_other.py | 18 ++++--- tests/test_5200_sql_parser.py | 6 +-- tests/test_5300_connection_async.py | 13 ++--- tests/test_5500_pool_async.py | 15 +++--- tests/test_6000_typehandler_async.py | 6 +-- tests/test_6300_cursor_other_async.py | 6 +-- tests/test_6400_vector_var.py | 10 ++-- tests/test_6500_vector_interop.py | 8 ++-- tests/test_6700_json_23.py | 10 ++-- tests/test_6900_oson.py | 10 ++-- tests/test_7500_binary_vector.py | 10 ++-- tests/test_7700_sparse_vector.py | 8 +--- tests/test_8000_dataframe.py | 8 +--- tests/test_8100_dataframe_async.py | 4 +- tests/test_env.py | 63 ++++++++++++------------- 31 files changed, 217 insertions(+), 320 deletions(-) diff --git a/tests/create_schema.py b/tests/create_schema.py index 2fbc6e37..98fd236e 100644 --- a/tests/create_schema.py +++ b/tests/create_schema.py @@ -49,19 +49,19 @@ proxy_password=test_env.get_proxy_password(), edition_name=test_env.get_edition_name(), ) -if test_env.get_server_version() >= (21, 0): +if test_env.has_server_version(21): test_env.run_sql_script( conn, "create_schema_21", main_user=test_env.get_main_user() ) -if 
test_env.get_server_version() >= (23, 4): +if test_env.has_server_version(23, 4): test_env.run_sql_script( conn, "create_schema_23_4", main_user=test_env.get_main_user() ) -if test_env.get_server_version() >= (23, 5): +if test_env.has_server_version(23, 5): test_env.run_sql_script( conn, "create_schema_23_5", main_user=test_env.get_main_user() ) -if test_env.get_server_version() >= (23, 7): +if test_env.has_server_version(23, 7): test_env.run_sql_script( conn, "create_schema_23_7", main_user=test_env.get_main_user() ) diff --git a/tests/test_1100_connection.py b/tests/test_1100_connection.py index 89bec886..bbca631a 100644 --- a/tests/test_1100_connection.py +++ b/tests/test_1100_connection.py @@ -113,14 +113,12 @@ def test_1102(self): def test_1103(self): "1103 - test connection end-to-end tracing attributes" conn = test_env.get_connection() - if test_env.get_client_version() >= ( - 12, - 1, - ) and not self.is_on_oracle_cloud(conn): - sql = """select dbop_name from v$sql_monitor - where sid = sys_context('userenv', 'sid') - and status = 'EXECUTING'""" - self.__verify_attributes(conn, "dbop", "oracledb_dbop", sql) + if test_env.has_client_version(12, 1): + if not self.is_on_oracle_cloud(conn): + sql = """select dbop_name from v$sql_monitor + where sid = sys_context('userenv', 'sid') + and status = 'EXECUTING'""" + self.__verify_attributes(conn, "dbop", "oracledb_dbop", sql) sql = "select sys_context('userenv', 'action') from dual" self.__verify_attributes(conn, "action", "oracledb_Action", sql) self.__verify_attributes(conn, "action", None, sql) @@ -326,7 +324,7 @@ def test_1117(self): def test_1118(self): "1118 - test connection attribute values" conn = test_env.get_connection() - if test_env.get_client_version() >= (12, 1): + if test_env.has_client_version(12, 1): self.assertEqual(conn.ltxid, b"") self.assertFalse(conn.autocommit) conn.autocommit = True @@ -359,7 +357,7 @@ def test_1119(self): "stmtcachesize", "warning", ] - if test_env.get_client_version() >= (12, 1): + if test_env.has_client_version(12, 1): attr_names.append("ltxid") for name in attr_names: with self.assertRaisesFullCode("DPY-1001"): @@ -627,10 +625,7 @@ def test_1130(self): cursor.callproc("dbms_output.get_line", (string_var, number_var)) self.assertEqual(string_var.getvalue(), test_string) - @unittest.skipIf( - not test_env.get_is_thin() and test_env.get_client_version() < (18, 1), - "unsupported client", - ) + @unittest.skipUnless(test_env.has_client_version(18), "unsupported client") def test_1131(self): "1131 - test connection call_timeout" conn = test_env.get_connection() @@ -699,8 +694,8 @@ def test_1135(self): (instance_name,) = cursor.fetchone() self.assertEqual(conn.instance_name.upper(), instance_name) - @unittest.skipIf( - test_env.get_client_version() < (18, 1), "not supported on this client" + @unittest.skipUnless( + test_env.has_client_version(18), "not supported on this client" ) def test_1136(self): "1136 - test deprecated attributes" @@ -709,11 +704,8 @@ def test_1136(self): self.assertEqual(conn.callTimeout, 500) @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") - @unittest.skipIf( - test_env.get_server_version() < (23, 0) - or test_env.get_client_version() < (23, 0), - "unsupported client/server", - ) + @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") + @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") def test_1137(self): "1137 - test maximum allowed length for password" conn = test_env.get_connection() diff --git 
a/tests/test_1700_error.py b/tests/test_1700_error.py index 84c440f1..201ca334 100644 --- a/tests/test_1700_error.py +++ b/tests/test_1700_error.py @@ -92,9 +92,7 @@ def test_1702(self): (error_obj,) = cm.exception.args self.assertEqual(error_obj.full_code, "DPI-1037") - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "unsupported client" - ) + @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") def test_1703(self): "1703 - test generation of error help portal URL" cursor = self.conn.cursor() diff --git a/tests/test_2200_number_var.py b/tests/test_2200_number_var.py index 173ca90d..f98af3fd 100644 --- a/tests/test_2200_number_var.py +++ b/tests/test_2200_number_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -74,8 +74,8 @@ def setUp(self): self.raw_data.append(data_tuple) self.data_by_key[i] = data_tuple - @unittest.skipIf( - test_env.get_client_version() < (12, 1), "not supported on this client" + @unittest.skipUnless( + test_env.has_client_version(12, 1), "not supported on this client" ) def test_2200(self): "2200 - test binding in a boolean" diff --git a/tests/test_2400_pool.py b/tests/test_2400_pool.py index d420133a..556028e3 100644 --- a/tests/test_2400_pool.py +++ b/tests/test_2400_pool.py @@ -113,13 +113,13 @@ def __perform_reconfigure_test( ping_interval=ping_interval, getmode=getmode, ) - if test_env.get_client_version() >= (12, 1): + if test_env.has_client_version(12, 1): creation_args["max_lifetime_session"] = max_lifetime_session - if test_env.get_client_version() >= (12, 2): + if test_env.has_client_version(12, 2): creation_args["wait_timeout"] = wait_timeout - if test_env.get_client_version() >= (18, 3): + if test_env.has_client_version(18, 3): creation_args["max_sessions_per_shard"] = max_sessions_per_shard - if test_env.get_client_version() >= (19, 11): + if test_env.has_client_version(19, 11): creation_args["soda_metadata_cache"] = soda_metadata_cache pool = test_env.get_pool(**creation_args) @@ -167,27 +167,21 @@ def __verify_create_arg(self, arg_name, arg_value, sql): self.assertEqual(fetched_value, arg_value) pool.close() - @unittest.skipIf( - test_env.get_client_version() < (19, 1), "not supported on this client" - ) def test_2400(self): "2400 - test getting default pool parameters" pool = test_env.get_pool() self.assertEqual(pool.busy, 0) self.assertEqual(pool.dsn, test_env.get_connect_string()) self.assertEqual(pool.tnsentry, pool.dsn) - if test_env.get_client_version() >= (12, 2): + if test_env.has_client_version(12, 2): self.assertEqual(pool.getmode, oracledb.POOL_GETMODE_WAIT) self.assertIs(pool.getmode, oracledb.PoolGetMode.WAIT) self.assertTrue(pool.homogeneous) self.assertEqual(pool.increment, 1) self.assertEqual(pool.max, 2) - if test_env.get_client_version() >= (12, 1): + if test_env.has_client_version(12, 1): self.assertEqual(pool.max_lifetime_session, 0) - if not test_env.get_is_thin() and test_env.get_client_version() >= ( - 18, - 3, - ): + if not test_env.get_is_thin() and test_env.has_client_version(18, 3): self.assertEqual(pool.max_sessions_per_shard, 0) self.assertEqual(pool.min, 1) if test_env.get_is_thin(): @@ -196,10 +190,7 @@ def test_2400(self): self.assertRegex(pool.name, 
"^OCI:SP:.+") self.assertEqual(pool.ping_interval, 60) self.assertEqual(pool.stmtcachesize, oracledb.defaults.stmtcachesize) - if not test_env.get_is_thin() and test_env.get_client_version() >= ( - 19, - 11, - ): + if not test_env.get_is_thin() and test_env.has_client_version(19, 11): self.assertFalse(pool.soda_metadata_cache) self.assertEqual(pool.thin, test_env.get_is_thin()) self.assertEqual(pool.timeout, 0) @@ -234,9 +225,6 @@ def test_2401(self): self.assertEqual(user, test_env.get_proxy_user().upper()) conn.close() - @unittest.skipIf( - test_env.get_client_version() < (19, 1), "not supported on this client" - ) def test_2402(self): "2402 - test setting pool attributes" pool = test_env.get_pool() @@ -248,26 +236,20 @@ def test_2402(self): ((12, 1), "max_lifetime_session", 3), ] for version, attr_name, value in test_values: - if test_env.get_client_version() >= version: + if test_env.has_client_version(*version): setattr(pool, attr_name, value) self.assertEqual(getattr(pool, attr_name), value) self.assertRaises( TypeError, setattr, pool, attr_name, "invalid value" ) - if not test_env.get_is_thin() and test_env.get_client_version() >= ( - 18, - 3, - ): + if not test_env.get_is_thin() and test_env.has_client_version(18, 3): self.assertEqual(pool.max_sessions_per_shard, 0) self.assertRaises( TypeError, setattr, pool, "max_sessions_per_shard", "bad_val" ) - if not test_env.get_is_thin() and test_env.get_client_version() >= ( - 19, - 11, - ): + if not test_env.get_is_thin() and test_env.has_client_version(19, 11): pool.soda_metadata_cache = True self.assertTrue(pool.soda_metadata_cache) self.assertRaises( @@ -471,7 +453,7 @@ def test_2410(self): ) def test_2411(self): "2411 - test PL/SQL session callbacks" - if test_env.get_client_version() < (12, 2): + if not test_env.has_client_version(12, 2): self.skipTest("PL/SQL session callbacks not supported before 12.2") callback = "pkg_SessionCallback.TheCallback" pool = test_env.get_pool( @@ -526,7 +508,7 @@ def test_2412(self): pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_NOWAIT) conn = pool.acquire() self.assertRaises(TypeError, pool.release, conn, tag=12345) - if test_env.get_client_version() >= (12, 2): + if test_env.has_client_version(12, 2): with self.assertRaisesFullCode("ORA-24488"): pool.release(conn, tag="INVALID_TAG") @@ -577,13 +559,13 @@ def test_2415(self): self.__perform_reconfigure_test( "getmode", oracledb.POOL_GETMODE_NOWAIT ) - if test_env.get_client_version() >= (12, 1): + if test_env.has_client_version(12, 1): self.__perform_reconfigure_test("max_lifetime_session", 2000) - if test_env.get_client_version() >= (12, 2): + if test_env.has_client_version(12, 2): self.__perform_reconfigure_test("wait_timeout", 8000) - if test_env.get_client_version() >= (18, 3): + if test_env.has_client_version(18, 3): self.__perform_reconfigure_test("max_sessions_per_shard", 5) - if test_env.get_client_version() >= (19, 11): + if test_env.has_client_version(19, 11): self.__perform_reconfigure_test("soda_metadata_cache", True) @unittest.skipIf( @@ -676,8 +658,8 @@ def test_2421(self): with self.assertRaisesFullCode("DPY-1002"): pool.acquire() - @unittest.skipIf( - test_env.get_client_version() < (19, 1), "not supported on this client" + @unittest.skipUnless( + test_env.has_client_version(19), "not supported on this client" ) def test_2422(self): "2422 - using the pool beyond max limit raises an error" @@ -862,11 +844,11 @@ def test_2437(self): with self.assertRaisesFullCode("DPY-2023"): test_env.get_pool(connectiontype=int) - 
@unittest.skipIf( - test_env.get_server_version() < (12, 2), "not supported on this server" + @unittest.skipUnless( + test_env.has_server_version(12, 2), "not supported on this server" ) - @unittest.skipIf( - test_env.get_client_version() < (19, 1), "not supported on this client" + @unittest.skipUnless( + test_env.has_client_version(19), "not supported on this client" ) def test_2438(self): "2438 - ensure that timed wait times out with appropriate exception" @@ -876,8 +858,8 @@ def test_2438(self): with self.assertRaisesFullCode("DPY-4005"): pool.acquire() - @unittest.skipIf( - test_env.get_client_version() < (18, 1), "not supported on this client" + @unittest.skipUnless( + test_env.has_client_version(18), "not supported on this client" ) def test_2439(self): "2439 - ensure call timeout is reset on connections returned by pool" diff --git a/tests/test_2500_string_var.py b/tests/test_2500_string_var.py index 5b7cce4c..c9b363a1 100644 --- a/tests/test_2500_string_var.py +++ b/tests/test_2500_string_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -424,8 +424,8 @@ def test_2527(self): self.cursor.fetchall(), [(1, short_string), (2, long_string)] ) - @unittest.skipIf( - test_env.get_server_version() < (12, 2), "not supported on this server" + @unittest.skipUnless( + test_env.has_server_version(12, 2), "not supported on this server" ) def test_2528(self): "2528 - test issue 50 - avoid error ORA-24816" diff --git a/tests/test_2700_aq_dbobject.py b/tests/test_2700_aq_dbobject.py index d6400d51..23a0bd3b 100644 --- a/tests/test_2700_aq_dbobject.py +++ b/tests/test_2700_aq_dbobject.py @@ -358,7 +358,7 @@ def test_2715(self): f"{self.conn.username}.transform1" ) queue.enqoptions.transformation = transformation_str - if test_env.get_client_version() >= (23, 1): + if test_env.has_client_version(23): self.assertEqual( queue.enqoptions.transformation, transformation_str ) diff --git a/tests/test_3000_subscription.py b/tests/test_3000_subscription.py index 73a7c753..1b33f869 100644 --- a/tests/test_3000_subscription.py +++ b/tests/test_3000_subscription.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -82,8 +82,8 @@ def _process_message(self, message): test_env.get_is_thin(), "thin mode doesn't support subscriptions" ) class TestCase(test_env.BaseTestCase): - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3000(self): "3000 - test subscription for insert, update, delete and truncate" @@ -187,8 +187,8 @@ def test_3001(self): with self.assertRaisesFullCode("DPY-2014"): self.conn.subscribe(client_initiated=True, clientInitiated=True) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3002(self): "3002 - test subscription for AQ" @@ -217,8 +217,8 @@ def test_3002(self): # wait for all messages to be sent data.wait_for_messages() - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3003(self): "3003 - test verifying what registerquery returns" @@ -244,8 +244,8 @@ def test_3003(self): conn.unsubscribe(sub) conn.close() - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3004(self): "3004 - test Subscription repr()" @@ -255,8 +255,8 @@ def test_3004(self): self.assertEqual(repr(sub), f"") conn.unsubscribe(sub) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3005(self): "3005 - test registerquery with invalid parameters" @@ -275,8 +275,8 @@ def test_3005(self): sub.registerquery("insert into TestTempTable (IntCol) values (1)") conn.unsubscribe(sub) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3006(self): "3006 - test getting subscription attributes" @@ -304,8 +304,8 @@ def test_3006(self): conn.unsubscribe(sub) conn.close() - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3007(self): "3007 - test getting Message, MessageQuery, MessageTable attributes" @@ -352,8 +352,8 @@ def callback_handler(message): self.assertTrue(condition.wait(5)) conn.unsubscribe(sub) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3008(self): "3008 - test unsubscribe with invalid parameter" @@ -364,8 +364,8 @@ def test_3008(self): with self.assertRaisesFullCode("DPI-1002"): conn.unsubscribe(sub) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3010(self): "3010 - test registerquery in the middle of an active transaction" @@ -380,8 +380,8 @@ def test_3010(self): sub.registerquery("select * from TestTempTable") connection.unsubscribe(sub) - 
@unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3011(self): "3011 - test registerquery with aq subscription" @@ -395,8 +395,8 @@ def test_3011(self): sub.registerquery("select * from TestTempTable") connection.unsubscribe(sub) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3013(self): "3013 - test subscription with SUBSCR_QOS_DEREG_NFY deregisters" @@ -427,8 +427,8 @@ def callback(message): self.assertTrue(condition.wait(5)) conn.unsubscribe(sub) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "crashes in older clients" + @unittest.skipUnless( + test_env.has_client_version(23), "crashes in older clients" ) def test_3014(self): "3014 - test adding a consumer to a single consumer queue (negative)" diff --git a/tests/test_3100_boolean_var.py b/tests/test_3100_boolean_var.py index 51a7b2b3..41bb8a83 100644 --- a/tests/test_3100_boolean_var.py +++ b/tests/test_3100_boolean_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,12 +32,8 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (12, 1), "unsupported client" -) -@unittest.skipUnless( - test_env.get_server_version() >= (12, 1), "unsupported server" -) +@unittest.skipUnless(test_env.has_client_version(12, 1), "unsupported client") +@unittest.skipUnless(test_env.has_server_version(12, 1), "unsupported server") class TestCase(test_env.BaseTestCase): def __test_bind_value_as_boolean(self, value): expected_result = str(bool(value)).upper() @@ -106,12 +102,8 @@ def test_3108(self): ) self.assertIsNone(result) - @unittest.skipUnless( - test_env.get_client_version() >= (23, 1), "unsupported client" - ) - @unittest.skipUnless( - test_env.get_server_version() >= (23, 1), "unsupported server" - ) + @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") + @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") def test_3109(self): "3109 - test binding and fetching boolean with 23ai" for value in (True, False): @@ -121,12 +113,8 @@ def test_3109(self): self.assertIsInstance(fetched_value, bool) self.assertEqual(fetched_value, not value) - @unittest.skipUnless( - test_env.get_client_version() >= (23, 1), "unsupported client" - ) - @unittest.skipUnless( - test_env.get_server_version() >= (23, 1), "unsupported server" - ) + @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") + @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") def test_3110(self): "3110 - test binding and fetching string literals that represent True" self.cursor.execute("truncate table TestBooleans") @@ -141,12 +129,8 @@ def test_3110(self): expected_values = [(True, True) for _ in true_values] self.assertEqual(self.cursor.fetchall(), expected_values) - @unittest.skipUnless( - test_env.get_client_version() >= (23, 1), "unsupported client" - ) - @unittest.skipUnless( - test_env.get_server_version() >= (23, 1), "unsupported server" - ) + 
@unittest.skipUnless(test_env.has_client_version(23), "unsupported client") + @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") def test_3111(self): "3111 - test binding and fetching string literals that represent False" self.cursor.execute("truncate table TestBooleans") diff --git a/tests/test_3200_features_12_1.py b/tests/test_3200_features_12_1.py index 65090393..db8da2fa 100644 --- a/tests/test_3200_features_12_1.py +++ b/tests/test_3200_features_12_1.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,9 +33,7 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (12, 1), "unsupported client" -) +@unittest.skipUnless(test_env.has_client_version(12, 1), "unsupported client") class TestCase(test_env.BaseTestCase): def test_3200(self): "3200 - test executing with arraydmlrowcounts mode disabled" diff --git a/tests/test_3300_soda_database.py b/tests/test_3300_soda_database.py index 1d9b9b59..f01423f8 100644 --- a/tests/test_3300_soda_database.py +++ b/tests/test_3300_soda_database.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -62,10 +62,10 @@ def test_3300(self): "3300 - test creating documents with JSON data" soda_db = self.get_soda_database() val = {"testKey1": "testValue1", "testKey2": "testValue2"} - if test_env.get_client_version() < (23, 4): - str_val = json.dumps(val) - else: + if test_env.has_client_version(23, 4): str_val = str(val) + else: + str_val = json.dumps(val) bytes_val = str_val.encode() key = "MyKey" media_type = "text/plain" @@ -138,8 +138,8 @@ def test_3305(self): soda_db.createCollection("CollMetadata", 7) self.assertRaises(TypeError, soda_db.getCollectionNames, 1) - @unittest.skipIf( - test_env.get_client_version() < (23, 4), "unsupported data types" + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported data types" ) def test_3306(self): "3306 - test creating documents with JSON data using extended types" @@ -169,9 +169,9 @@ def test_3307(self): doc = soda_db.createDocument(val, key, media_type) self.__verify_doc(doc, bytes_val, str_val, val, key, media_type) - @unittest.skipIf( - test_env.get_client_version() < (23, 4) - and test_env.get_server_version() < (23, 4), + @unittest.skipUnless( + test_env.has_client_version(23, 4) + and test_env.has_server_version(23, 4), "data types serialized differently", ) def test_3308(self): @@ -192,9 +192,9 @@ def test_3308(self): doc, bytes_val, str_val, decimal_val, key, media_type ) - @unittest.skipIf( - test_env.get_client_version() < (23, 4) - and test_env.get_server_version() < (23, 4), + @unittest.skipUnless( + test_env.has_client_version(23, 4) + and test_env.has_server_version(23, 4), "unsupported data types", ) def test_3309(self): @@ -225,9 +225,9 @@ def test_3309(self): doc, bytes_val, str_val, decimal_val, key, media_type ) - @unittest.skipIf( - test_env.get_client_version() < (23, 4) - and 
test_env.get_server_version() < (23, 4), + @unittest.skipUnless( + test_env.has_client_version(23, 4) + and test_env.has_server_version(23, 4), "data types serialized differently", ) def test_3310(self): @@ -244,9 +244,9 @@ def test_3310(self): doc = soda_db.createDocument(val, key, media_type) self.__verify_doc(doc, bytes_val, str_val, val, key, media_type) - @unittest.skipIf( - test_env.get_client_version() < (23, 4) - and test_env.get_server_version() < (23, 4), + @unittest.skipUnless( + test_env.has_client_version(23, 4) + and test_env.has_server_version(23, 4), "data types serialized differently", ) def test_3311(self): diff --git a/tests/test_3400_soda_collection.py b/tests/test_3400_soda_collection.py index 5b159f16..6e7dcc2e 100644 --- a/tests/test_3400_soda_collection.py +++ b/tests/test_3400_soda_collection.py @@ -340,9 +340,7 @@ def test_3412(self): doc = coll.insertOneAndGet(data) self.assertEqual(doc.createdOn, doc.lastModified) - @unittest.skipIf( - test_env.get_client_version() < (20, 1), "unsupported client" - ) + @unittest.skipUnless(test_env.has_client_version(20), "unsupported client") def test_3413(self): "3413 - test Soda truncate" soda_db = self.get_soda_database() @@ -360,8 +358,8 @@ def test_3413(self): coll.truncate() self.assertEqual(coll.find().count(), 0) - @unittest.skipIf( - test_env.get_client_version() < (19, 11), + @unittest.skipUnless( + test_env.has_client_version(19, 11), "client version not supported.. min required 19.11", ) def test_3414(self): @@ -443,7 +441,7 @@ def test_3417(self): coll.insertMany([]) @unittest.skipIf( - test_env.get_client_version() > (23, 0), + test_env.has_client_version(23), "save() is not implemented in Oracle Database 23ai", ) def test_3418(self): @@ -467,7 +465,7 @@ def test_3418(self): ) @unittest.skipIf( - test_env.get_client_version() > (23, 0), + test_env.has_client_version(23), "save() is not implemented in Oracle Database 23ai", ) def test_3419(self): @@ -498,7 +496,7 @@ def test_3419(self): self.assertIn(hint, result.read()) @unittest.skipIf( - test_env.get_client_version() > (23, 0), + test_env.has_client_version(23), "save() is not implemented in Oracle Database 23ai", ) def test_3420(self): @@ -578,7 +576,7 @@ def test_3424(self): coll.find().skip(10).count() @unittest.skipIf( - test_env.get_client_version() > (23, 0), + test_env.has_client_version(23), "map mode not supported with native collections in Oracle Database 23", ) def test_3425(self): @@ -605,7 +603,7 @@ def test_3425(self): self.assertFalse(mapped_coll.drop()) @unittest.skipIf( - test_env.get_client_version() > (23, 0), + test_env.has_client_version(23), "map mode not supported with native collections in Oracle Database 23", ) def test_3426(self): @@ -722,7 +720,7 @@ def test_3431(self): self.assertFalse(coll.drop()) @unittest.skipIf( - test_env.get_client_version() > (23, 0), + test_env.has_client_version(23), "map mode not supported with native collections in Oracle Database 23", ) def test_3432(self): @@ -759,8 +757,8 @@ def test_3434(self): with self.assertRaisesFullCode("ORA-40734"): coll.find().keys(keys).replaceOneAndGet({"data": "new"}) - @unittest.skipIf( - test_env.get_client_version() < (19, 9), + @unittest.skipUnless( + test_env.has_client_version(19, 9), "client version not supported.. 
min required 19.9", ) def test_3435(self): @@ -829,9 +827,9 @@ def test_3438(self): data_guide = coll.getDataGuide().getContent() - client_version = test_env.get_client_version() - server_version = test_env.get_server_version() - if client_version >= (23, 4) and server_version >= (23, 4): + if test_env.has_client_version(23, 4) and test_env.has_server_version( + 23, 4 + ): self.assertEqual(data_guide["properties"]["_id"]["type"], "id") values = [ @@ -973,9 +971,9 @@ def test_3444(self): UnicodeDecodeError, fetched_doc.getContentAsString ) - @unittest.skipIf( - test_env.get_client_version() < (23, 4) - or test_env.get_server_version() < (23, 4), + @unittest.skipUnless( + test_env.has_client_version(23, 4) + or test_env.has_server_version(23, 4), "unsupported data types", ) def test_3445(self): @@ -996,9 +994,9 @@ def test_3445(self): self.assertEqual(fetched_content, val) self.assertEqual(doc.getContent(), val) - @unittest.skipIf( - test_env.get_client_version() < (23, 4) - or test_env.get_server_version() < (23, 4), + @unittest.skipUnless( + test_env.has_client_version(23, 4) + or test_env.has_server_version(23, 4), "unsupported data types", ) def test_3446(self): diff --git a/tests/test_3500_json.py b/tests/test_3500_json.py index 12a0b01f..b706ab18 100644 --- a/tests/test_3500_json.py +++ b/tests/test_3500_json.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,12 +34,8 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (21, 0), "unsupported client" -) -@unittest.skipUnless( - test_env.get_server_version() >= (21, 0), "unsupported server" -) +@unittest.skipUnless(test_env.has_client_version(21), "unsupported client") +@unittest.skipUnless(test_env.has_server_version(21), "unsupported server") class TestCase(test_env.BaseTestCase): json_data = [ True, diff --git a/tests/test_3700_var.py b/tests/test_3700_var.py index d7dbc63b..f5fa17a7 100644 --- a/tests/test_3700_var.py +++ b/tests/test_3700_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -361,12 +361,8 @@ def test_3721(self): wrong_obj_type = self.conn.gettype("UDT_OBJECTARRAY") self._test_negative_set_and_get(wrong_obj_type, obj) - @unittest.skipIf( - test_env.get_client_version() < (21, 0), "unsupported client" - ) - @unittest.skipIf( - test_env.get_server_version() < (21, 0), "unsupported server" - ) + @unittest.skipUnless(test_env.has_client_version(21), "unsupported client") + @unittest.skipUnless(test_env.has_server_version(21), "unsupported server") def test_3722(self): "3722 - setting values on variables of type DB_TYPE_JSON" json_data = [ diff --git a/tests/test_3800_typehandler.py b/tests/test_3800_typehandler.py index 2098279f..032b6e74 100644 --- a/tests/test_3800_typehandler.py +++ b/tests/test_3800_typehandler.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -217,9 +217,7 @@ def output_type_handler(cursor, metadata): expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] self.assertEqual(self.cursor.fetchall(), expected_data) - @unittest.skipUnless( - test_env.get_server_version() >= (21, 0), "unsupported server" - ) + @unittest.skipUnless(test_env.has_server_version(21), "unsupported server") def test_3806(self): "3806 - output type handler for fetching 21c JSON" @@ -246,20 +244,16 @@ def output_type_handler(cursor, metadata): dict(name="Sam", city="Mumbai"), ] data_to_insert = list(enumerate(json_data)) - json_as_string = self.conn.thin or test_env.get_client_version() < ( - 21, - 0, - ) - if json_as_string: - # insert data as JSON string - json_string_data = [(i, json.dumps(j)) for i, j in data_to_insert] - self.cursor.executemany(insert_sql, json_string_data) - else: + if test_env.has_client_version(21): # take advantage of direct binding self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) self.cursor.executemany(insert_sql, data_to_insert) + else: + # insert data as JSON string + json_string_data = [(i, json.dumps(j)) for i, j in data_to_insert] + self.cursor.executemany(insert_sql, json_string_data) - if json_as_string: + if not test_env.has_client_version(21): self.cursor.outputtypehandler = output_type_handler self.cursor.execute("select * from TestJson") self.assertEqual(self.cursor.fetchall(), data_to_insert) diff --git a/tests/test_3900_cursor_execute.py b/tests/test_3900_cursor_execute.py index 29cc5b8d..494aecd6 100644 --- a/tests/test_3900_cursor_execute.py +++ b/tests/test_3900_cursor_execute.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -517,7 +517,7 @@ def test_3931(self): self.assertIsInstance(fetch_info, oracledb.FetchInfo) self.assertEqual(fetch_info.display_size, display_size) self.assertEqual(fetch_info.internal_size, internal_size) - if test_env.get_server_version() > (12, 2): + if test_env.has_server_version(12, 2): self.assertEqual(fetch_info.is_json, is_json) self.assertEqual(fetch_info.name, name) self.assertEqual(fetch_info.null_ok, null_ok) diff --git a/tests/test_4300_cursor_other.py b/tests/test_4300_cursor_other.py index ae88f793..6f1f1a9d 100644 --- a/tests/test_4300_cursor_other.py +++ b/tests/test_4300_cursor_other.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -879,11 +879,11 @@ def test_4359(self): fetched_data = [(n, c.read()) for n, c in self.cursor] self.assertEqual(fetched_data, data) - @unittest.skipIf( - test_env.get_server_version() <= (12, 2), "unsupported database" + @unittest.skipUnless( + test_env.has_server_version(12, 2), "unsupported database" ) - @unittest.skipIf( - test_env.get_client_version() <= (12, 2), "unsupported database" + @unittest.skipUnless( + test_env.has_client_version(12, 2), "unsupported database" ) def test_4360(self): "4360 - fetch JSON columns as Python objects" @@ -894,12 +894,10 @@ def test_4360(self): self.cursor.execute("select * from TestJsonCols order by IntCol") self.assertEqual(self.cursor.fetchall(), expected_data) - @unittest.skipIf( - test_env.get_server_version() < (23, 1), "unsupported database" - ) - @unittest.skipIf( - test_env.get_client_version() < (23, 1), "unsupported client" + @unittest.skipUnless( + test_env.has_server_version(23), "unsupported database" ) + @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") def test_4361(self): "4361 - fetch table with domain and annotations" self.cursor.execute("select * from TableWithDomainAndAnnotations") diff --git a/tests/test_5200_sql_parser.py b/tests/test_5200_sql_parser.py index 363dd12e..39c00b32 100644 --- a/tests/test_5200_sql_parser.py +++ b/tests/test_5200_sql_parser.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -161,9 +161,7 @@ def test_5211(self): self.cursor.bindnames(), ["A", "B", "C", "D", "E", "F"] ) - @unittest.skipUnless( - test_env.get_client_version() >= (19, 1), "unsupported client" - ) + @unittest.skipUnless(test_env.has_client_version(19), "unsupported client") def test_5212(self): "5212 - bind variables between JSON constants" self.cursor.prepare( diff --git a/tests/test_5300_connection_async.py b/tests/test_5300_connection_async.py index 1233168f..19c69356 100644 --- a/tests/test_5300_connection_async.py +++ b/tests/test_5300_connection_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -295,8 +295,7 @@ async def test_5317(self): async def test_5318(self): "5318 - test connection attribute values" async with test_env.get_connection_async() as conn: - if test_env.get_client_version() >= (12, 1): - self.assertEqual(conn.ltxid, b"") + self.assertEqual(conn.ltxid, b"") self.assertIsNone(conn.current_schema) conn.current_schema = "test_schema" self.assertEqual(conn.current_schema, "test_schema") @@ -320,11 +319,10 @@ async def test_5319(self): "edition", "external_name", "internal_name", + "ltxid", "stmtcachesize", "warning", ] - if test_env.get_client_version() >= (12, 1): - attr_names.append("ltxid") for name in attr_names: with self.assertRaisesFullCode("DPY-1001"): getattr(conn, name) @@ -514,10 +512,7 @@ async def test_5335(self): self.assertEqual(conn.instance_name.upper(), instance_name) @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") - @unittest.skipIf( - test_env.get_server_version() < (23, 0), - "unsupported server", - ) + @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") async def test_5337(self): "5337 - test maximum allowed length for password" async with test_env.get_connection_async() as conn: diff --git a/tests/test_5500_pool_async.py b/tests/test_5500_pool_async.py index fa20b0bf..60af4312 100644 --- a/tests/test_5500_pool_async.py +++ b/tests/test_5500_pool_async.py @@ -117,12 +117,11 @@ async def test_5501(self): ] try: for version, attr_name, value in test_values: - if test_env.get_client_version() >= version: - setattr(pool, attr_name, value) - self.assertEqual(getattr(pool, attr_name), value) - self.assertRaises( - TypeError, setattr, pool, attr_name, "invalid value" - ) + setattr(pool, attr_name, value) + self.assertEqual(getattr(pool, attr_name), value) + self.assertRaises( + TypeError, setattr, pool, attr_name, "invalid value" + ) finally: await pool.close(force=True) @@ -495,8 +494,8 @@ async def test_5527(self): with self.assertRaisesFullCode("DPY-2023"): test_env.get_pool_async(connectiontype=int) - @unittest.skipIf( - test_env.get_server_version() < (12, 2), "not supported on this server" + @unittest.skipUnless( + test_env.has_server_version(12, 2), "not supported on this server" ) async def test_5528(self): "5528 - ensure that timed wait times out with appropriate exception" diff --git a/tests/test_6000_typehandler_async.py b/tests/test_6000_typehandler_async.py index 3e45c542..9a423d27 100644 --- 
a/tests/test_6000_typehandler_async.py +++ b/tests/test_6000_typehandler_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -232,9 +232,7 @@ def output_type_handler(cursor, metadata): expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] self.assertEqual(await self.cursor.fetchall(), expected_data) - @unittest.skipUnless( - test_env.get_server_version() >= (21, 0), "unsupported server" - ) + @unittest.skipUnless(test_env.has_server_version(21), "unsupported server") async def test_6006(self): "6006 - output type handler for fetching 21c JSON" diff --git a/tests/test_6300_cursor_other_async.py b/tests/test_6300_cursor_other_async.py index dee39758..64937149 100644 --- a/tests/test_6300_cursor_other_async.py +++ b/tests/test_6300_cursor_other_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -803,8 +803,8 @@ async def test_6346(self): fetched_data = [(n, await c.read()) async for n, c in self.cursor] self.assertEqual(fetched_data, data) - @unittest.skipIf( - test_env.get_server_version() < (23, 1), "unsupported database" + @unittest.skipUnless( + test_env.has_server_version(23), "unsupported database" ) async def test_6347(self): "6347 - fetch table with domain and annotations" diff --git a/tests/test_6400_vector_var.py b/tests/test_6400_vector_var.py index 4670db81..4bdbc1be 100644 --- a/tests/test_6400_vector_var.py +++ b/tests/test_6400_vector_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,12 +33,8 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (23, 4), "unsupported client" -) -@unittest.skipUnless( - test_env.get_server_version() >= (23, 4), "unsupported server" -) +@unittest.skipUnless(test_env.has_client_version(23, 4), "unsupported client") +@unittest.skipUnless(test_env.has_server_version(23, 4), "unsupported server") class TestCase(test_env.BaseTestCase): def __test_insert_and_fetch(self, value, column_name, expected_typecode): """ diff --git a/tests/test_6500_vector_interop.py b/tests/test_6500_vector_interop.py index 665e1841..5ccb6549 100644 --- a/tests/test_6500_vector_interop.py +++ b/tests/test_6500_vector_interop.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,12 +34,10 @@ @unittest.skipIf( - test_env.get_client_version() >= (23, 4), + test_env.has_client_version(23, 4), "client supports vectors directly", ) -@unittest.skipUnless( - test_env.get_server_version() >= (23, 4), "unsupported server" -) +@unittest.skipUnless(test_env.has_server_version(23, 4), "unsupported server") class TestCase(test_env.BaseTestCase): def test_6500(self): "6500 - verify fetch information for older clients" diff --git a/tests/test_6700_json_23.py b/tests/test_6700_json_23.py index b3cda4b2..58614a51 100644 --- a/tests/test_6700_json_23.py +++ b/tests/test_6700_json_23.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,12 +33,8 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (23, 0), "unsupported client" -) -@unittest.skipUnless( - test_env.get_server_version() >= (23, 0), "unsupported server" -) +@unittest.skipUnless(test_env.has_client_version(23), "unsupported client") +@unittest.skipUnless(test_env.has_server_version(23), "unsupported server") class TestCase(test_env.BaseTestCase): def __test_fetch_json(self, value, table_name="TestJson"): """ diff --git a/tests/test_6900_oson.py b/tests/test_6900_oson.py index f6ac57bb..4120af9a 100644 --- a/tests/test_6900_oson.py +++ b/tests/test_6900_oson.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,12 +32,8 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (21, 0), "unsupported client" -) -@unittest.skipUnless( - test_env.get_server_version() >= (21, 0), "unsupported server" -) +@unittest.skipUnless(test_env.has_client_version(21), "unsupported client") +@unittest.skipUnless(test_env.has_server_version(21), "unsupported server") class TestCase(test_env.BaseTestCase): def test_6900(self): "6900 - test OSON metadata" diff --git a/tests/test_7500_binary_vector.py b/tests/test_7500_binary_vector.py index 702297d4..9be1a80c 100644 --- a/tests/test_7500_binary_vector.py +++ b/tests/test_7500_binary_vector.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,12 +34,8 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (23, 5), "unsupported client" -) -@unittest.skipUnless( - test_env.get_server_version() >= (23, 5), "unsupported server" -) +@unittest.skipUnless(test_env.has_client_version(23, 5), "unsupported client") +@unittest.skipUnless(test_env.has_server_version(23, 5), "unsupported server") class TestCase(test_env.BaseTestCase): def test_7500(self): diff --git a/tests/test_7700_sparse_vector.py b/tests/test_7700_sparse_vector.py index 56cd8a12..bc52d3d6 100644 --- a/tests/test_7700_sparse_vector.py +++ b/tests/test_7700_sparse_vector.py @@ -34,12 +34,8 @@ import test_env -@unittest.skipUnless( - test_env.get_client_version() >= (23, 7), "unsupported client" -) -@unittest.skipUnless( - test_env.get_server_version() >= (23, 7), "unsupported server" -) +@unittest.skipUnless(test_env.has_client_version(23, 7), "unsupported client") +@unittest.skipUnless(test_env.has_server_version(23, 7), "unsupported client") class TestCase(test_env.BaseTestCase): def __test_insert_and_fetch(self, vector, column_name, expected_typecode): """ diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 3abb84ec..2b2fad04 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -576,12 +576,8 @@ def test_8025(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - @unittest.skipUnless( - test_env.get_client_version() >= (23, 1), "unsupported client" - ) - @unittest.skipUnless( - test_env.get_server_version() >= (23, 1), "unsupported server" - ) + @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") + @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") def test_8026(self): "8026 - fetch boolean" data = [(True,), (False,), (False,), (True,), (True,)] diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 5d9c3de1..947a92fa 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -531,9 +531,7 @@ async def test_8121(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - @unittest.skipUnless( - test_env.get_server_version() >= (23, 1), "unsupported server" - ) + @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") async def test_8122(self): "8122 - fetch boolean" data = [(True,), (False,), (False,), (True,), (True,)] diff --git a/tests/test_env.py b/tests/test_env.py index f89a1786..f6243dc2 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -226,11 +226,8 @@ def get_client_version(): name = "CLIENT_VERSION" value = PARAMETERS.get(name) if value is None: - if get_is_thin(): - value = (23, 7) - else: - _initialize() - value = oracledb.clientversion()[:2] + _initialize() + value = oracledb.clientversion()[:2] PARAMETERS[name] = value return value @@ -349,10 +346,9 @@ def get_proxy_user(): def get_sleep_proc_name(): - server_version = get_server_version() - return ( - "dbms_session.sleep" if server_version[0] >= 18 else "dbms_lock.sleep" - ) + if not has_server_version(18): + return "dbms_lock.sleep" + return "dbms_session.sleep" def get_server_version(): @@ -394,9 +390,23 @@ def get_random_string(length=10): return "".join(secrets.choice(string.ascii_letters) for i in range(length)) +def 
has_client_version(major_version, minor_version=0): + if get_is_thin(): + return True + return get_client_version() >= (major_version, minor_version) + + +def has_server_version(major_version, minor_version=0): + return get_server_version() >= (major_version, minor_version) + + +async def has_server_version_async(major_version, minor_version=0): + await get_server_version_async() + return has_server_version(major_version, minor_version) + + def is_on_oracle_cloud(connection): - server = get_server_version() - if server < (18, 0): + if not has_server_version(18): return False cursor = connection.cursor() cursor.execute( @@ -410,8 +420,7 @@ def is_on_oracle_cloud(connection): async def is_on_oracle_cloud_async(connection): - server = await get_server_version_async() - if server < (18, 0): + if not await has_server_version_async(18): return False cursor = connection.cursor() await cursor.execute( @@ -472,13 +481,11 @@ def run_test_cases(): def skip_soda_tests(): if get_is_thin(): return True - client = get_client_version() - if client < (18, 3): + if not has_client_version(18, 3): return True - server = get_server_version() - if server < (18, 0): + if not has_server_version(18): return True - if server > (20, 1) and client < (20, 1): + if has_server_version(20, 1) and not has_client_version(20, 1): return True return False @@ -602,11 +609,7 @@ def get_and_clear_queue( message="not supported with this client/server combination", ): if payload_type == "JSON": - min_version = (21, 0) - if ( - get_client_version() < min_version - or get_server_version() < min_version - ): + if not has_client_version(21) or not has_server_version(21): self.skipTest(message) elif isinstance(payload_type, str): payload_type = self.conn.gettype(payload_type) @@ -660,18 +663,14 @@ def get_sid_serial(self, conn=None): def get_soda_database( self, - minclient=(18, 3), - minserver=(18, 0), + minclient=None, + minserver=None, message="not supported with this client/server combination", drop_collections=True, ): - client = get_client_version() - if client < minclient: - self.skipTest(message) - server = get_server_version() - if server < minserver: + if minclient is not None and not has_client_version(*minclient): self.skipTest(message) - if server > (20, 1) and client < (20, 1): + if minserver is not None and not has_server_version(*minserver): self.skipTest(message) soda_db = self.conn.getSodaDatabase() if drop_collections: @@ -740,7 +739,7 @@ async def get_and_clear_queue( message="not supported with this client/server combination", ): if payload_type == "JSON": - if get_server_version() < (21, 0): + if not has_server_version(21): self.skipTest(message) elif isinstance(payload_type, str): payload_type = await self.conn.gettype(payload_type) From 073fa6a8bb74ebfa22bfa15fb40aeea71f1b26f8 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 4 Jun 2025 11:49:23 -0600 Subject: [PATCH 091/239] Fix typo and add tests to check for that scenario. 
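Roughly, the scenario the new tests exercise looks like the sketch below. It is
illustrative only: the credentials and DSN are placeholders, the administrative
account is assumed to have the ALTER SYSTEM privilege, and the pooled account is
assumed to be able to query V$SESSION.

    import oracledb

    admin = oracledb.connect(user="system", password="admin_password",
                             dsn="dbhost.example.com/orclpdb")
    pool = oracledb.create_pool(user="hr", password="hr_password",
                                dsn="dbhost.example.com/orclpdb",
                                min=1, max=1, ping_interval=0)

    # find the session identifiers of the single pooled connection
    with pool.acquire() as conn:
        with conn.cursor() as cursor:
            cursor.execute(
                "select sid, serial# from v$session "
                "where sid = sys_context('userenv', 'sid')"
            )
            sid, serial = cursor.fetchone()

    # kill that session so the pooled connection becomes unusable
    with admin.cursor() as cursor:
        cursor.execute(f"alter system kill session '{sid},{serial}'")

    # the next acquire() pings the dead connection, discards it and returns
    # a freshly created connection instead of raising an error
    with pool.acquire() as conn:
        with conn.cursor() as cursor:
            cursor.execute("select user from dual")
            print(cursor.fetchone())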
--- src/oracledb/impl/thin/pool.pyx | 2 +- tests/test_2400_pool.py | 20 ++++++++++++++++++++ tests/test_5500_pool_async.py | 20 ++++++++++++++++++++ 3 files changed, 41 insertions(+), 1 deletion(-) diff --git a/src/oracledb/impl/thin/pool.pyx b/src/oracledb/impl/thin/pool.pyx index 4377540d..77c1bbc6 100644 --- a/src/oracledb/impl/thin/pool.pyx +++ b/src/oracledb/impl/thin/pool.pyx @@ -587,7 +587,7 @@ cdef class ThinPoolImpl(BaseThinPoolImpl): request.conn_impl.ping() request.conn_impl.set_call_timeout(0) except exceptions.Error: - request.conn_impl._protocol.disconnect() + request.conn_impl._protocol._disconnect() request.conn_impl = None else: conn_impl = self._create_conn_impl(request.params) diff --git a/tests/test_2400_pool.py b/tests/test_2400_pool.py index 556028e3..aab8e5f8 100644 --- a/tests/test_2400_pool.py +++ b/tests/test_2400_pool.py @@ -1066,6 +1066,26 @@ def test_2456(self): with self.assertRaisesFullCode("DPY-2064"): test_env.get_pool(min=3, max=2) + @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + def test_2457(self): + "2457 - ping pooled connection on receiving dead connection error" + admin_conn = test_env.get_admin_connection() + pool = test_env.get_pool(min=1, max=1, ping_interval=0) + + # kill connection in pool + with admin_conn.cursor() as admin_cursor: + with pool.acquire() as conn: + sid, serial = self.get_sid_serial(conn) + sql = f"alter system kill session '{sid},{serial}'" + admin_cursor.execute(sql) + + # acquire connection which should succeed without failure + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + self.assertEqual(user, test_env.get_main_user().upper()) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_5500_pool_async.py b/tests/test_5500_pool_async.py index 60af4312..520c5ed0 100644 --- a/tests/test_5500_pool_async.py +++ b/tests/test_5500_pool_async.py @@ -634,6 +634,26 @@ async def test_5542(self): with self.assertRaisesFullCode("DPY-2064"): test_env.get_pool_async(min=3, max=2) + @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + async def test_5543(self): + "5543 - ping pooled connection on receiving dead connection error" + admin_conn = await test_env.get_admin_connection_async() + pool = test_env.get_pool_async(min=1, max=1, ping_interval=0) + + # kill connection in pool + with admin_conn.cursor() as admin_cursor: + async with pool.acquire() as conn: + sid, serial = await self.get_sid_serial(conn) + sql = f"alter system kill session '{sid},{serial}'" + await admin_cursor.execute(sql) + + # acquire connection which should succeed without failure + async with pool.acquire() as conn: + with conn.cursor() as cursor: + await cursor.execute("select user from dual") + (user,) = await cursor.fetchone() + self.assertEqual(user, test_env.get_main_user().upper()) + if __name__ == "__main__": test_env.run_test_cases() From d8c38e49432a901865819cc78b76b6885d82040e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 4 Jun 2025 12:00:01 -0600 Subject: [PATCH 092/239] Fixed bug when a connection pool internally makes an attempt to ping a closed connection (#482). 
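As a rough illustration only (the credentials and DSN are placeholders and this
snippet is not part of the fix itself), a pool whose internal ping finds an
already-closed connection now replaces it transparently, for example with
asyncio:

    import asyncio
    import oracledb

    async def main():
        # ping_interval=0 makes every acquire() ping the pooled connection
        pool = oracledb.create_pool_async(user="hr", password="hr_password",
                                          dsn="dbhost.example.com/orclpdb",
                                          min=1, max=1, ping_interval=0)
        # if the pooled connection's transport was already closed, the
        # internal ping discards it and a replacement connection is
        # returned here instead of an error being raised
        async with pool.acquire() as conn:
            with conn.cursor() as cursor:
                await cursor.execute("select user from dual")
                print(await cursor.fetchone())
        await pool.close()

    asyncio.run(main())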
--- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/thin/packet.pyx | 11 +++++++---- src/oracledb/impl/thin/transport.pyx | 7 ++++++- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 5ded8f8e..5274ebe9 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -22,6 +22,9 @@ Thin Mode Changes #) Emulate support for :meth:`Queue.deqmany()` with JSON payloads when using Oracle Database 21c by internally calling :meth:`Queue.deqone()` as many times as needed. +#) Fixed bug when a connection pool internally makes an attempt to ping a + closed connection + (`issue 482 `__). #) Fixed bug when connecting with asyncio using the parameter ``https_proxy``. #) Fixed regression when connecting where only the host specified by the ``https_proxy`` parameter can successfully perform name resolution. diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index 76bcd8e9..03ff915c 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -660,10 +660,13 @@ cdef class ReadBuffer(Buffer): cdef: bint notify_waiter Packet packet - packet = self._transport.read_packet() - self._process_packet(packet, ¬ify_waiter, False) - if notify_waiter: - self._start_packet() + packet = self._transport.read_packet(raise_exc=False) + if packet is None: + self._pending_error_num = TNS_ERR_SESSION_SHUTDOWN + else: + self._process_packet(packet, ¬ify_waiter, False) + if notify_waiter: + self._start_packet() cdef bint has_response(self): """ diff --git a/src/oracledb/impl/thin/transport.pyx b/src/oracledb/impl/thin/transport.pyx index 2d84c6fc..825e1429 100644 --- a/src/oracledb/impl/thin/transport.pyx +++ b/src/oracledb/impl/thin/transport.pyx @@ -332,7 +332,7 @@ cdef class Transport: self._transport = transport self._transport_num = sock.fileno() - cdef Packet read_packet(self): + cdef Packet read_packet(self, bint raise_exc=True): """ Reads a packet from the transport. """ @@ -344,10 +344,15 @@ cdef class Transport: try: data = self._transport.recv(self._max_packet_size) except ConnectionResetError as e: + self._transport = None + if not raise_exc: + return None errors._raise_err(errors.ERR_CONNECTION_CLOSED, str(e), cause=e) if len(data) == 0: self.disconnect() + if not raise_exc: + return None errors._raise_err(errors.ERR_CONNECTION_CLOSED) packet = self.extract_packet(data) return packet From e49269484f58f7a1cc7ff70c1e43bf91df2824bf Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 4 Jun 2025 19:25:22 -0600 Subject: [PATCH 093/239] Fixed date handling to match PyArrow's and avoid localization issues (#499) which also corrects the bug on Windows when fetching dates prior to 1970 and after 2038 (#483). --- doc/src/release_notes.rst | 8 +++++ src/oracledb/base_impl.pxd | 3 +- src/oracledb/base_impl.pyx | 3 ++ src/oracledb/impl/base/converters.pyx | 24 ++++++++++--- tests/test_8000_dataframe.py | 51 +++++++++++++++++++++------ tests/test_8100_dataframe_async.py | 44 +++++++++++++++++++---- 6 files changed, 110 insertions(+), 23 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 5274ebe9..0102630a 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -60,6 +60,14 @@ Common Changes parameter "min". Previously python-oracledb Thin mode did not raise an error and python-oracledb Thick mode raised the exception ``ORA-24413: Invalid number of sessions specified``. 
+#) Improvements to :ref:`data frames `: + + - Fixed date handling to match PyArrow's and avoid localization issues + (`issue 499 `__). + + - Fixed bug on Windows when fetching dates prior to 1970 and after 2038 + (`issue 483 `__). + #) Improved the test suite and documentation. diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 31d3fcc5..82cf1359 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -35,6 +35,7 @@ from libc.stdint cimport int8_t, int16_t, int32_t, int64_t from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from libc.stdlib cimport abs from cpython cimport array +cimport cpython.datetime as cydatetime ctypedef unsigned char char_type @@ -982,7 +983,7 @@ cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, OracleData* data, const char* encoding_errors, bint from_dbobject) -cdef object convert_date_to_python(OracleDataBuffer *buffer) +cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer) cdef uint16_t decode_uint16be(const char_type *buf) cdef uint32_t decode_uint32be(const char_type *buf) cdef uint16_t decode_uint16le(const char_type *buf) diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index cc5a9a84..a4ac37e0 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -116,6 +116,9 @@ cdef const char* DRIVER_INSTALLATION_URL = \ cdef const char* ENCODING_UTF8 = "UTF-8" cdef const char* ENCODING_UTF16 = "UTF-16BE" +# variables needed for dates when using pyarrow +cdef cydatetime.datetime EPOCH_DATE = datetime.datetime(1970, 1, 1) + # protocols registered with the library REGISTERED_PROTOCOLS = {} diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 3614a2b1..0843b711 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -29,13 +29,14 @@ # form returned by the decoders to an appropriate Python value. #------------------------------------------------------------------------------ -cdef object convert_date_to_python(OracleDataBuffer *buffer): +cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer): """ Converts a DATE, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE or TIMESTAMP WITH TIMEZONE value stored in the buffer to Python datetime.datetime(). """ cdef: OracleDate *value = &buffer.as_date + cydatetime.datetime output int32_t seconds output = cydatetime.datetime_new(value.year, value.month, value.day, value.hour, value.minute, value.second, @@ -46,6 +47,22 @@ cdef object convert_date_to_python(OracleDataBuffer *buffer): return output +cdef int convert_date_to_arrow_timestamp(OracleArrowArray arrow_array, + OracleDataBuffer *buffer) except -1: + """ + Converts a DATE, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE or TIMESTAMP + WITH TIMEZONE value stored in the buffer to Arrow timestamp. 
+ """ + cdef: + cydatetime.timedelta td + cydatetime.datetime dt + int64_t ts + dt = convert_date_to_python(buffer) + td = dt - EPOCH_DATE + ts = int(cydatetime.total_seconds(td) * arrow_array.factor) + arrow_array.append_int64(ts) + + cdef object convert_interval_ds_to_python(OracleDataBuffer *buffer): """ Converts an INTERVAL DAYS TO SECONDS value stored in the buffer to Python @@ -215,7 +232,6 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, ArrowType arrow_type uint32_t db_type_num OracleRawBytes* rb - int64_t ts # NULL values if data.is_null: @@ -243,9 +259,7 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, rb = &data.buffer.as_raw_bytes arrow_array.append_bytes( rb.ptr, rb.num_bytes) elif arrow_type == NANOARROW_TYPE_TIMESTAMP: - ts = int(convert_date_to_python(&data.buffer).timestamp() * - arrow_array.factor) - arrow_array.append_int64(ts) + convert_date_to_arrow_timestamp(arrow_array, &data.buffer) elif arrow_type == NANOARROW_TYPE_DECIMAL128: convert_number_to_arrow_decimal(arrow_array, &data.buffer) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 2b2fad04..ccf35ec2 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -25,6 +25,7 @@ """ Module for testing dataframes """ + import datetime import decimal import unittest @@ -49,7 +50,7 @@ "Doe", "San Francisco", "USA", - datetime.date(1989, 8, 22), + datetime.date(1955, 7, 1), # summer(before 1970) 12132.40, 400, datetime.datetime.now(), @@ -60,7 +61,7 @@ "Hero", "San Fransokyo", "Japansa", - datetime.date(1988, 8, 22), + datetime.date(1955, 1, 1), # winter(before 1970) 234234.32, 400, datetime.datetime.now(), @@ -75,7 +76,7 @@ "Doe", "San Francisco", "USA", - datetime.date(1989, 8, 22), + datetime.date(2000, 7, 1), # summer(between) None, 400, datetime.datetime.now(), @@ -86,7 +87,29 @@ "Hero", "San Fransokyo", None, - datetime.date(1988, 8, 22), + datetime.date(2000, 1, 1), # winter(between) + -12312.1, + 0, + datetime.datetime.now(), + ), + ( + 3, + "Johns", + "Does", + "San Franciscos", + "USAs", + datetime.date(2040, 7, 1), # summer(after) + None, + 500, + datetime.datetime.now(), + ), + ( + 4, + "Bigs", + "Heros", + "San Fransokyos", + None, + datetime.date(2040, 1, 1), # winter(after) -12312.1, 0, datetime.datetime.now(), @@ -221,6 +244,12 @@ def __check_interop(self): if not HAS_INTEROP: self.skipTest("missing pandas or pyarrow modules") + def __convert_date(self, value): + """ + Converts a date to the format required by Arrow. + """ + return (value - datetime.datetime(1970, 1, 1)).total_seconds() + def __convert_to_array(self, data, typ): """ Convert raw data to an Arrow array using pyarrow. 
@@ -233,11 +262,13 @@ def __convert_to_array(self, data, typ): elif isinstance(typ, pyarrow.TimestampType): if typ.unit == "s": data = [ - datetime.datetime(v.year, v.month, v.day).timestamp() + self.__convert_date( + datetime.datetime(v.year, v.month, v.day) + ) for v in data ] else: - data = [value.timestamp() * 1000000 for value in data] + data = [self.__convert_date(value) * 1000000 for value in data] mask = [value is None for value in data] return pyarrow.array(data, typ, mask=mask) @@ -459,7 +490,7 @@ def test_8015(self): ora_df = self.conn.fetch_df_all(statement) col = ora_df.get_column_by_name("SALARY") self.assertEqual(col.size(), len(DATASET_2)) - self.assertEqual(col.null_count, 1) + self.assertEqual(col.null_count, 2) def test_8016(self): "8016 - check unsupported error" @@ -506,16 +537,16 @@ def test_8020(self): ora_col = ora_df.get_column(0) self.assertEqual(ora_col.describe_null[0], 0) self.assertEqual(ora_col.dtype[0], 0) - metadata = {"name": "ID", "size": 2, "num_chunks": 1} + metadata = {"name": "ID", "size": 4, "num_chunks": 1} self.assertEqual(metadata, ora_col.metadata) self.assertEqual(ora_col.null_count, 0) ora_col = ora_df.get_column(4) self.assertEqual(ora_col.describe_null[0], 3) self.assertEqual(ora_col.dtype[0], 21) - metadata = {"name": "COUNTRY", "size": 2, "num_chunks": 1} + metadata = {"name": "COUNTRY", "size": 4, "num_chunks": 1} self.assertEqual(metadata, ora_col.metadata) - self.assertEqual(ora_col.null_count, 1) + self.assertEqual(ora_col.null_count, 2) def test_8021(self): "8021 - batches with size that has duplicate rows across batches" diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 947a92fa..007e24d6 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -50,7 +50,7 @@ "Doe", "San Francisco", "USA", - datetime.date(1989, 8, 22), + datetime.date(1955, 7, 1), # summer(before 1970) 12132.40, 400, datetime.datetime.now(), @@ -61,7 +61,7 @@ "Hero", "San Fransokyo", "Japansa", - datetime.date(1988, 8, 22), + datetime.date(1955, 1, 1), # winter(before 1970) 234234.32, 400, datetime.datetime.now(), @@ -76,7 +76,7 @@ "Doe", "San Francisco", "USA", - datetime.date(1989, 8, 22), + datetime.date(2000, 7, 1), # summer(between) None, 400, datetime.datetime.now(), @@ -87,7 +87,29 @@ "Hero", "San Fransokyo", None, - datetime.date(1988, 8, 22), + datetime.date(2000, 1, 1), # winter(between) + -12312.1, + 0, + datetime.datetime.now(), + ), + ( + 3, + "Johns", + "Does", + "San Franciscos", + "USAs", + datetime.date(2040, 7, 1), # summer(after) + None, + 500, + datetime.datetime.now(), + ), + ( + 4, + "Bigs", + "Heros", + "San Fransokyos", + None, + datetime.date(2040, 1, 1), # winter(after) -12312.1, 0, datetime.datetime.now(), @@ -225,6 +247,12 @@ def __check_interop(self): if not HAS_INTEROP: self.skipTest("missing pandas or pyarrow modules") + def __convert_date(self, value): + """ + Converts a date to the format required by Arrow. + """ + return (value - datetime.datetime(1970, 1, 1)).total_seconds() + def __convert_to_array(self, data, typ): """ Convert raw data to an Arrow array using pyarrow. 
@@ -237,11 +265,13 @@ def __convert_to_array(self, data, typ): elif isinstance(typ, pyarrow.TimestampType): if typ.unit == "s": data = [ - datetime.datetime(v.year, v.month, v.day).timestamp() + self.__convert_date( + datetime.datetime(v.year, v.month, v.day) + ) for v in data ] else: - data = [value.timestamp() * 1000000 for value in data] + data = [self.__convert_date(value) * 1000000 for value in data] mask = [value is None for value in data] return pyarrow.array(data, typ, mask=mask) @@ -470,7 +500,7 @@ async def test_8115(self): ora_df = await self.conn.fetch_df_all(statement) col = ora_df.get_column_by_name("SALARY") self.assertEqual(col.size(), len(DATASET_2)) - self.assertEqual(col.null_count, 1) + self.assertEqual(col.null_count, 2) async def test_8116(self): "8116 - check unsupported error" From ba425c48cdd1198b196b29ba83b8175afd793730 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 9 Jun 2025 19:29:20 -0600 Subject: [PATCH 094/239] Remove unnecessary definitions from shared file and put definitions in alphabetical order in the private file. --- src/oracledb/base_impl.pxd | 10 ++-------- src/oracledb/base_impl.pyx | 4 ++-- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 82cf1359..4334eb9d 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -186,18 +186,13 @@ cpdef enum: VECTOR_FORMAT_FLOAT64 = 3 VECTOR_FORMAT_INT8 = 4 -cdef type PY_TYPE_ASYNC_CURSOR cdef type PY_TYPE_ASYNC_LOB -cdef type PY_TYPE_BOOL -cdef type PY_TYPE_CURSOR cdef type PY_TYPE_DATE cdef type PY_TYPE_DATETIME -cdef type PY_TYPE_DECIMAL cdef type PY_TYPE_DB_OBJECT -cdef type PY_TYPE_DB_OBJECT_TYPE -cdef type PY_TYPE_FETCHINFO -cdef type PY_TYPE_JSON_ID +cdef type PY_TYPE_DECIMAL cdef type PY_TYPE_INTERVAL_YM +cdef type PY_TYPE_JSON_ID cdef type PY_TYPE_LOB cdef type PY_TYPE_MESSAGE cdef type PY_TYPE_MESSAGE_QUERY @@ -205,7 +200,6 @@ cdef type PY_TYPE_MESSAGE_ROW cdef type PY_TYPE_MESSAGE_TABLE cdef type PY_TYPE_SPARSE_VECTOR cdef type PY_TYPE_TIMEDELTA -cdef type PY_TYPE_VAR cdef str DRIVER_NAME cdef str DRIVER_VERSION diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index a4ac37e0..3ed00048 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -88,9 +88,10 @@ cdef type PY_TYPE_CURSOR cdef object PY_TYPE_DATAFRAME cdef type PY_TYPE_DATE = datetime.date cdef type PY_TYPE_DATETIME = datetime.datetime -cdef type PY_TYPE_DECIMAL = decimal.Decimal cdef type PY_TYPE_DB_OBJECT +cdef type PY_TYPE_DECIMAL = decimal.Decimal cdef type PY_TYPE_DB_OBJECT_TYPE +cdef type PY_TYPE_FETCHINFO cdef type PY_TYPE_JSON_ID cdef type PY_TYPE_INTERVAL_YM cdef type PY_TYPE_LOB @@ -101,7 +102,6 @@ cdef type PY_TYPE_MESSAGE_TABLE cdef type PY_TYPE_SPARSE_VECTOR cdef type PY_TYPE_TIMEDELTA = datetime.timedelta cdef type PY_TYPE_VAR -cdef type PY_TYPE_FETCHINFO # enumerations used by the driver in connect parameters cdef object ENUM_AUTH_MODE From 91ea42b87c2aa3eff11728706173d5dee4154fdc Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 9 Jun 2025 19:30:14 -0600 Subject: [PATCH 095/239] Added support for multiple pools with DRCP. 
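A minimal sketch of the new parameter follows; it assumes Oracle Database 23.4
or later with a DRCP pool named MYPOOL already started, and the credentials and
host are placeholders:

    import oracledb

    pool = oracledb.create_pool(user="hr", password="hr_password",
                                dsn="dbhost.example.com/orclpdb:pooled",
                                min=2, max=5, increment=1,
                                cclass="MYAPP", pool_name="MYPOOL")

    with pool.acquire() as conn:
        with conn.cursor() as cursor:
            cursor.execute(
                "select sys_context('userenv', 'service_name') from dual"
            )
            print(cursor.fetchone())

Setting a connection class alongside the pool name keeps session reuse within
the named DRCP pool efficient.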
--- doc/src/api_manual/connect_params.rst | 16 ++++- doc/src/api_manual/module.rst | 66 +++++++++++++++++-- doc/src/api_manual/pool_params.rst | 6 +- doc/src/release_notes.rst | 2 + doc/src/user_guide/connection_handling.rst | 76 +++++++++++++++------- src/oracledb/base_impl.pxd | 1 + src/oracledb/connect_params.py | 20 +++++- src/oracledb/connection.py | 8 +++ src/oracledb/impl/base/connect_params.pyx | 4 ++ src/oracledb/impl/base/parsers.pyx | 2 + src/oracledb/pool.py | 8 +++ src/oracledb/pool_params.py | 11 +++- tests/test_4500_connect_params.py | 3 + tests/test_4700_pool_params.py | 1 + utils/fields.cfg | 7 ++ 15 files changed, 197 insertions(+), 34 deletions(-) diff --git a/doc/src/api_manual/connect_params.rst b/doc/src/api_manual/connect_params.rst index 5e5237bd..ad09094b 100644 --- a/doc/src/api_manual/connect_params.rst +++ b/doc/src/api_manual/connect_params.rst @@ -62,11 +62,15 @@ ConnectParams Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=None, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=None) + extra_auth_params=None, pool_Name=None, handle=None) Sets the values for one or more of the parameters of a ConnectParams object. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. versionchanged:: 3.0.0 The ``use_sni``, ``thick_mode_dsn_passthrough``, ``extra_auth_params`` @@ -333,6 +337,16 @@ ConnectParams Attributes .. versionadded:: 2.1.0 +.. attribute:: ConnectParams.pool_name + + This read-only attribute is a string that specifies the name of the pool + when using multiple DRCP pools with Oracle Database 23.4 or later. See + :ref:`DRCP Pool Names `. + + This attribute is supported in both python-oracledb Thin and Thick modes. + + .. versionadded:: 3.2.0 + .. attribute:: ConnectParams.port This read-only attribute is an integer that returns the port number on diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 0dfc0ac1..1bad3c95 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -47,7 +47,7 @@ Oracledb Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=False, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=0) + extra_auth_params=None, pool_name=None, handle=0) Constructor for creating a connection to the database. Returns a :ref:`Connection Object `. All parameters are optional and can be @@ -410,6 +410,11 @@ Oracledb Methods used in both the python-oracledb Thin and Thick modes. See :ref:`tokenauth`. + The ``pool_name`` parameter is expected to be a string which specifies the + name of the pool when using multiple DRCP pools with Oracle Database 23.4 + or later. This value is used in both python-oracledb Thin and Thick modes. + See :ref:`DRCP Pool Names `. + If the ``handle`` parameter is specified, it must be of type OCISvcCtx\* and is only of use when embedding Python in an application (like PowerBuilder) which has already made the connection. The connection thus @@ -418,6 +423,10 @@ Oracledb Methods is ignored in the Thin mode. It should be used with extreme caution. The default value is *0*. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. 
versionchanged:: 3.0.0 The ``pool_alias``, ``instance_name``, ``use_sni``, @@ -470,7 +479,7 @@ Oracledb Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=False, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=0) + extra_auth_params=None, pool_name=None, handle=0) Constructor for creating a connection to the database. Returns an :ref:`AsyncConnection Object `. All parameters are optional @@ -760,9 +769,18 @@ Oracledb Methods This value is used in both the python-oracledb Thin and Thick modes. See :ref:`tokenauth`. + The ``pool_name`` parameter is expected to be a string which specifies the + name of the pool when using multiple DRCP pools with Oracle Database 23.4 + or later. This value is used in both python-oracledb Thin and Thick modes. + See :ref:`DRCP Pool Names `. + The ``thick_mode_dsn_passthrough`` and ``handle`` parameters are ignored in python-oracledb Thin mode. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. versionchanged:: 3.0.0 The ``pool_alias``, ``instance_name``, ``use_sni``, @@ -813,7 +831,7 @@ Oracledb Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=False, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=0) + extra_auth_params=None, pool_name=None, handle=0) Contains all the parameters that can be used to establish a connection to the database. @@ -1130,11 +1148,20 @@ Oracledb Methods used in both the python-oracledb Thin and Thick modes. See :ref:`tokenauth`. + The ``pool_name`` parameter is expected to be a string which specifies the + name of the pool when using multiple DRCP pools with Oracle Database 23.4 + or later. This value is used in both python-oracledb Thin and Thick modes. + See :ref:`DRCP Pool Names `. + The ``handle`` parameter is expected to be an integer which represents a pointer to a valid service context handle. This value is only used in the python-oracledb Thick mode. It should be used with extreme caution. The default value is *0*. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. versionchanged:: 3.0.0 The ``instance_name``, ``use_sni``, ``thick_mode_dsn_passthrough`` and @@ -1196,7 +1223,7 @@ Oracledb Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=False, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=0) + extra_auth_params=None, pool_name=None, handle=0) Creates a connection pool with the supplied parameters and returns the :ref:`ConnectionPool object ` for the pool. See :ref:`Connection @@ -1634,6 +1661,11 @@ Oracledb Methods used in both the python-oracledb Thin and Thick modes. See :ref:`tokenauth`. + The ``pool_name`` parameter is expected to be a string which specifies the + name of the pool when using multiple DRCP pools with Oracle Database 23.4 + or later. This value is used in both python-oracledb Thin and Thick modes. + See :ref:`DRCP Pool Names `. + If the ``handle`` parameter is specified, it must be of type OCISvcCtx\* and is only of use when embedding Python in an application (like PowerBuilder) which has already made the connection. The connection thus @@ -1642,6 +1674,10 @@ Oracledb Methods is ignored in the Thin mode. 
It should be used with extreme caution. The default value is *0*. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. versionchanged:: 3.0.0 The ``pool_alias``, ``instance_name``, ``use_sni``, @@ -1698,7 +1734,7 @@ Oracledb Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=False, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=0) + extra_auth_params=None, pool_name=None, handle=0) Creates a connection pool with the supplied parameters and returns the :ref:`AsyncConnectionPool object ` for the pool. @@ -2047,9 +2083,18 @@ Oracledb Methods used in both the python-oracledb Thin and Thick modes. See :ref:`tokenauth`. + The ``pool_name`` parameter is expected to be a string which specifies the + name of the pool when using multiple DRCP pools with Oracle Database 23.4 + or later. This value is used in both python-oracledb Thin and Thick modes. + See :ref:`DRCP Pool Names `. + The ``handle`` and ``thick_mode_dsn_passthrough`` parameters are ignored in python-oracledb Thin mode. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. versionchanged:: 3.0.0 The ``pool_alias``, ``instance_name``, ``use_sni``, @@ -2293,7 +2338,7 @@ Oracledb Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=False, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=0) + extra_auth_params=None, pool_name=None, handle=0) Creates and returns a :ref:`PoolParams Object `. The object can be passed to :meth:`oracledb.create_pool()`. @@ -2674,11 +2719,20 @@ Oracledb Methods used in both the python-oracledb Thin and Thick modes. See :ref:`tokenauth`. + The ``pool_name`` parameter is expected to be a string which specifies the + name of the pool when using multiple DRCP pools with Oracle Database 23.4 + or later. This value is used in both python-oracledb Thin and Thick modes. + See :ref:`DRCP Pool Names `. + The ``handle`` parameter is expected to be an integer which represents a pointer to a valid service context handle. This value is only used in the python-oracledb Thick mode. It should be used with extreme caution. The default value is *0*. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. versionchanged:: 3.0.0 The ``use_sni``, ``instance_name``, ``thick_mode_dsn_passthrough``, diff --git a/doc/src/api_manual/pool_params.rst b/doc/src/api_manual/pool_params.rst index 5a802522..76d3168d 100644 --- a/doc/src/api_manual/pool_params.rst +++ b/doc/src/api_manual/pool_params.rst @@ -53,10 +53,14 @@ PoolParams Methods terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ driver_name=oracledb.defaults.driver_name, use_sni=None, \ thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, handle=None) + extra_auth_params=None, pool_name=None, handle=None) Sets one or more of the parameters. + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + .. 
versionchanged:: 3.0.0 The ``use_sni``, ``thick_mode_dsn_passthrough``, diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 0102630a..9f3da8e1 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -47,6 +47,8 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Added parameter ``pool_name`` to connection and pool creation methods to + support Oracle Database 23ai multi-pool :ref:`drcp`. #) Added Instance Principal authentication support when using :ref:`OCI Cloud Native Authentication `. #) Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 1ca7db4a..9cabf5d7 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -2893,7 +2893,11 @@ Enabling DRCP in Oracle Database Oracle Database versions prior to 21c can have a single DRCP connection pool. From Oracle Database 21c, each pluggable database can optionally have its own pool, or can use the container level pool. From Oracle Database 23ai, you can -create multiple pools at the pluggable, or container, database level. +create multiple pools at the pluggable, or container, database level. This +multi-pool feature is useful where different applications connect to the same +database, but there is a concern that one application's use of the pool may +impact other applications. If this is not the case, a single pool may allow +best resource sharing on the database host. Note that DRCP is already enabled in Oracle Autonomous Database and pool management is different to the steps below. @@ -3033,7 +3037,7 @@ To enable connections to use DRCP pooled servers, you can: server_type="pooled", cclass="MYAPP") -**DRCP Connection Class Names** +**DRCP Connection Classes** The best practice is to specify a ``cclass`` class name when creating a python-oracledb connection pool. This user-chosen name provides some @@ -3071,7 +3075,9 @@ default, python-oracledb pooled connections use ``PURITY_SELF`` and standalone connections use ``PURITY_NEW``. To limit session sharing, you can explicitly require that new session memory be -allocated each time :meth:`~ConnectionPool.acquire()` is called: +allocated each time :meth:`~ConnectionPool.acquire()` is called. Do this when +creating a driver connection pool by specifying the ``purity`` as +``PURITY_NEW``: .. code-block:: python @@ -3079,6 +3085,27 @@ allocated each time :meth:`~ConnectionPool.acquire()` is called: min=2, max=5, increment=1, cclass="MYAPP", purity=oracledb.PURITY_NEW) +The overheads can impact ultimate scalability. + +.. _poolnames: + +**DRCP Pool Names** + +From Oracle Database 23ai, multiple DRCP pools can be created by setting a pool +name at DRCP pool creation time. Applications can then specifiy which DRCP pool +to use by passing the ``pool_name`` parameter during connection, or connection +pool, creation, for example: + +.. code-block:: python + + pool = oracledb.create_pool(user="hr", password=userpwd, + dsn="dbhost.example.com/orclpdb:pooled", + min=2, max=5, increment=1, + cclass="MYAPP", pool_name="MYPOOL") + +When specifying a pool name, you should still set a connection class name to +allow efficient use of the pool's resources. + **Acquiring a DRCP Connection** Once DRCP has been enabled and the driver connection pool has been created with @@ -3132,19 +3159,23 @@ other users: . . . 
connection.close() -Setting the DRCP Connection Class and Purity in the Connection String ---------------------------------------------------------------------- +Setting DRCP Parameters in Connection Strings +--------------------------------------------- -Although setting the DRCP connection class and purity in the application is -preferred, sometimes it is not possible to modify an existing code base. For -these applications, you can specify the class and purity along with the pooled -server option in the connection string. +Setting the DRCP connection class, purity, and pool name as function parameters +in the application is preferred, but sometimes it is not possible to modify an +existing code base. For these applications, you can specify the values along +with the pooled server option in the connection string. + +You can specify the class and purity options in connection strings when using +Oracle Database 21c, or later. You can specify the pool name when using Oracle +Database 23ai, or later. For example with the :ref:`Easy Connect ` syntax:: - dbhost.example.com/orclpdb:pooled?pool_connection_class=MYAPP&pool_purity=self + dbhost.example.com/orclpdb:pooled?pool_connection_class=MYAPP&pool_purity=self&pool_name=MYPOOL -or by using a :ref:`TNS Alias ` in a +Or by using a :ref:`TNS Alias ` in a :ref:`tnsnames.ora ` file:: customerpool = (DESCRIPTION=(ADDRESS=(PROTOCOL=tcp) @@ -3152,19 +3183,16 @@ or by using a :ref:`TNS Alias ` in a (PORT=1521))(CONNECT_DATA=(SERVICE_NAME=orclpdb) (SERVER=POOLED) (POOL_CONNECTION_CLASS=MYAPP) - (POOL_PURITY=SELF))) - -You can specify the class and purity options in connection strings when using -python-oracledb Thin mode with Oracle Database 21c, or later. - -For python-oracledb Thick mode, setting these options in the connection string -is supported if you are using Oracle Database 21c (or later) and Oracle Client -19c (or later). However, explicitly specifying the purity as *SELF* in this way -may cause some unusable connections in a python-oracledb Thick mode connection -pool to not be terminated. In summary, if you cannot programmatically set the -class name and purity, or cannot use python-oracledb Thin mode, then avoid -explicitly setting the purity as a connection string parameter when using a -local python-oracledb connection pool in Thick mode. + (POOL_PURITY=SELF) + (POOL_NAME=MYPOOL))) + +Explicitly specifying the purity as *SELF* in a connection string may cause +some unusable connections in a python-oracledb Thick mode connection pool to +not be terminated, potentially eventually rendering all connections in the pool +to be unusable. If you cannot programmatically set the class name and purity, +or cannot use python-oracledb Thin mode, then avoid explicitly setting the +purity as a connection string parameter when using a local python-oracledb +Thick mode connection pool. .. 
_monitoringdrcp: diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 4334eb9d..c685495f 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -503,6 +503,7 @@ cdef class Description(ConnectParamsNode): public str cclass public str connection_id_prefix public str pool_boundary + public str pool_name public uint32_t purity public bint ssl_server_dn_match public bint use_tcp_fast_open diff --git a/src/oracledb/connect_params.py b/src/oracledb/connect_params.py index 8e5bd5eb..c4b4db90 100644 --- a/src/oracledb/connect_params.py +++ b/src/oracledb/connect_params.py @@ -107,6 +107,7 @@ def __init__( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ): """ @@ -316,6 +317,9 @@ def __init__( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher (default: None) + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with extreme caution (default: 0) @@ -371,7 +375,8 @@ def __repr__(self): f"driver_name={self.driver_name!r}, " f"use_sni={self.use_sni!r}, " f"thick_mode_dsn_passthrough={self.thick_mode_dsn_passthrough!r}, " - f"extra_auth_params={self.extra_auth_params!r}" + f"extra_auth_params={self.extra_auth_params!r}, " + f"pool_name={self.pool_name!r}" ")" ) @@ -568,6 +573,15 @@ def pool_boundary(self) -> Union[list, str]: """ return [d.pool_boundary for d in self._impl.description_list.children] + @property + @_flatten_value + def pool_name(self) -> Union[list, str]: + """ + The name of the DRCP pool when using multi-pool DRCP with Oracle + Database 23.4 or higher. + """ + return [d.pool_name for d in self._impl.description_list.children] + @property @_flatten_value def port(self) -> Union[list, int]: @@ -914,6 +928,7 @@ def set( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ): """ @@ -1109,6 +1124,9 @@ def set( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with extreme caution diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 01c31edb..bd696a18 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1337,6 +1337,7 @@ def connect( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ) -> Connection: """ @@ -1564,6 +1565,9 @@ def connect( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher (default: None) + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. 
It should be used with extreme caution (default: 0) @@ -2186,6 +2190,7 @@ def connect_async( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ) -> AsyncConnection: """ @@ -2413,6 +2418,9 @@ def connect_async( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher (default: None) + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with extreme caution (default: 0) diff --git a/src/oracledb/impl/base/connect_params.pyx b/src/oracledb/impl/base/connect_params.pyx index 1ad6b32f..b8868c74 100644 --- a/src/oracledb/impl/base/connect_params.pyx +++ b/src/oracledb/impl/base/connect_params.pyx @@ -907,6 +907,8 @@ cdef class Description(ConnectParamsNode): else: if self.cclass is not None: temp_parts.append(f"(POOL_CONNECTION_CLASS={self.cclass})") + if self.pool_name is not None: + temp_parts.append(f"(POOL_NAME={self.pool_name})") if self.purity == PURITY_SELF: temp_parts.append(f"(POOL_PURITY=SELF)") elif self.purity == PURITY_NEW: @@ -961,6 +963,7 @@ cdef class Description(ConnectParamsNode): description.cclass = self.cclass description.connection_id_prefix = self.connection_id_prefix description.pool_boundary = self.pool_boundary + description.pool_name = self.pool_name description.purity = self.purity description.ssl_server_dn_match = self.ssl_server_dn_match description.use_tcp_fast_open = self.use_tcp_fast_open @@ -995,6 +998,7 @@ cdef class Description(ConnectParamsNode): _set_str_param(args, "cclass", self) _set_enum_param(args, "purity", ENUM_PURITY, &self.purity) _set_str_param(args, "pool_boundary", self) + _set_str_param(args, "pool_name", self) _set_str_param(args, "connection_id_prefix", self) _set_bool_param(args, "use_tcp_fast_open", &self.use_tcp_fast_open) extra_args = args.get("extra_connect_data_args") diff --git a/src/oracledb/impl/base/parsers.pyx b/src/oracledb/impl/base/parsers.pyx index 62b161a4..19fa04eb 100644 --- a/src/oracledb/impl/base/parsers.pyx +++ b/src/oracledb/impl/base/parsers.pyx @@ -83,6 +83,7 @@ CONNECT_DATA_PARAM_NAMES = set([ "connection_id_prefix", "instance_name", "pool_boundary", + "pool_name", "purity", "server_type", "service_name", @@ -108,6 +109,7 @@ COMMON_PARAM_NAMES = set([ "https_proxy_port", "load_balance", "pool_boundary", + "pool_name", "pool_connection_class", "pool_purity", "retry_count", diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index baba8834..77601581 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -677,6 +677,7 @@ def create_pool( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ) -> ConnectionPool: """ @@ -961,6 +962,9 @@ def create_pool( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher (default: None) + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. 
It should be used with extreme caution (default: 0) @@ -1209,6 +1213,7 @@ def create_pool_async( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ) -> AsyncConnectionPool: """ @@ -1494,6 +1499,9 @@ def create_pool_async( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher (default: None) + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with extreme caution (default: 0) diff --git a/src/oracledb/pool_params.py b/src/oracledb/pool_params.py index fd9e9b2d..a73323b9 100644 --- a/src/oracledb/pool_params.py +++ b/src/oracledb/pool_params.py @@ -120,6 +120,7 @@ def __init__( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ): """ @@ -385,6 +386,9 @@ def __init__( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher (default: None) + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with extreme caution (default: 0) @@ -454,7 +458,8 @@ def __repr__(self): f"driver_name={self.driver_name!r}, " f"use_sni={self.use_sni!r}, " f"thick_mode_dsn_passthrough={self.thick_mode_dsn_passthrough!r}, " - f"extra_auth_params={self.extra_auth_params!r}" + f"extra_auth_params={self.extra_auth_params!r}, " + f"pool_name={self.pool_name!r}" ")" ) @@ -655,6 +660,7 @@ def set( use_sni: Optional[bool] = None, thick_mode_dsn_passthrough: Optional[bool] = None, extra_auth_params: Optional[dict] = None, + pool_name: Optional[str] = None, handle: Optional[int] = None, ): """ @@ -901,6 +907,9 @@ def set( necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins + - pool_name: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher + - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. 
It should be used with extreme caution diff --git a/tests/test_4500_connect_params.py b/tests/test_4500_connect_params.py index 42f511a6..b0e798dd 100644 --- a/tests/test_4500_connect_params.py +++ b/tests/test_4500_connect_params.py @@ -692,6 +692,7 @@ def test_4539(self): ("use_sni", True), ("thick_mode_dsn_passthrough", True), ("extra_auth_params", dict(extra1="A", extra2="B")), + ("pool_name", "my_pool"), ] params = oracledb.ConnectParams(**dict(values)) parts = [f"{name}={value!r}" for name, value in values] @@ -747,6 +748,7 @@ def test_4539(self): ("use_sni", False), ("thick_mode_dsn_passthrough", False), ("extra_auth_params", dict(extra1="X", extra2="Y")), + ("pool_name", "my_second_pool"), ] params.set(**dict(new_values)) parts = [f"{name}={value!r}" for name, value in new_values] @@ -1489,6 +1491,7 @@ def test_4579(self): "(FAILOVER_MODE=(TYPE=select)(METHOD=basic)(RETRIES=2)(DELAY=15))", "(HS=ok)", "(TUNNEL_SERVICE_NAME=south)", + "(POOL_NAME=pool_name_4579)", ] service_name = "service_4577" diff --git a/tests/test_4700_pool_params.py b/tests/test_4700_pool_params.py index 21dab38c..62b06362 100644 --- a/tests/test_4700_pool_params.py +++ b/tests/test_4700_pool_params.py @@ -134,6 +134,7 @@ def test_4701(self): ("use_sni", True), ("thick_mode_dsn_passthrough", True), ("extra_auth_params", dict(extra1="A", extra2="B")), + ("pool_name", "my_pool"), ] params = oracledb.PoolParams(**dict(values)) parts = [f"{name}={value!r}" for name, value in values] diff --git a/utils/fields.cfg b/utils/fields.cfg index 30ea95a3..8ef1a4f1 100644 --- a/utils/fields.cfg +++ b/utils/fields.cfg @@ -519,6 +519,13 @@ description = Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins +[pool_name] +type = str +source = description +description = + the name of the DRCP pool when using multi-pool DRCP with Oracle Database + 23.4 or higher + [handle] type = int default = 0 From ab4f6e982602d1ba57fd87e881edd8f2a936e2b2 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 9 Jun 2025 19:30:42 -0600 Subject: [PATCH 096/239] Fixed a bug resulting in a ``ValueError`` exception when getting attribute "MessageProperties.enqtime" if the value is not available or None. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thick/queue.pyx | 15 +++++++++------ tests/test_7800_aq_raw.py | 1 + 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 9f3da8e1..2f2ce3e8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -43,6 +43,8 @@ Thick Mode Changes :attr:`DeqOptions.msgid`, :attr:`DeqOptions.transformation`, :attr:`EnqOptions.transformation`, :attr:`MessageProperties.correlation`, or :attr:`MessageProperties.exceptionq` are set to ``None``. +#) Fixed a bug resulting in a ``ValueError`` exception when getting attribute + :attr:`MessageProperties.enqtime` if the value is not available or None. 
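As a minimal sketch of the fixed behavior (assuming a RAW queue named DEMO_RAW_QUEUE
already exists and ``connection`` is an open connection): ``enqtime`` is assigned by
the database, so it is only available on messages returned by a dequeue; on a message
properties object that has not been dequeued it is now *None* (previously Thick mode
raised ``ValueError``).

.. code-block:: python

    queue = connection.queue("DEMO_RAW_QUEUE")

    props = connection.msgproperties(payload=b"some data")
    print(props.enqtime)    # None: the message has not been dequeued

    queue.enqone(props)
    connection.commit()

    msg = queue.deqone()
    print(msg.enqtime)      # datetime assigned by the database at enqueue
    connection.commit()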
Common Changes ++++++++++++++ diff --git a/src/oracledb/impl/thick/queue.pyx b/src/oracledb/impl/thick/queue.pyx index dd34e431..8fd8504b 100644 --- a/src/oracledb/impl/thick/queue.pyx +++ b/src/oracledb/impl/thick/queue.pyx @@ -400,6 +400,7 @@ cdef class ThickMsgPropsImpl(BaseMsgPropsImpl): cdef: dpiMsgProps* _handle ThickConnImpl _conn_impl + bint _has_been_dequeued def __dealloc__(self): if self._handle != NULL: @@ -414,6 +415,7 @@ cdef class ThickMsgPropsImpl(BaseMsgPropsImpl): dpiJsonNode *node dpiJson *json + self._has_been_dequeued = True self._conn_impl = queue_impl._conn_impl if queue_impl.is_json: if dpiMsgProps_getPayloadJson(self._handle, &json) < 0: @@ -480,12 +482,13 @@ cdef class ThickMsgPropsImpl(BaseMsgPropsImpl): Internal method for getting the enqueue time. """ cdef dpiTimestamp timestamp - if dpiMsgProps_getEnqTime(self._handle, ×tamp) < 0: - _raise_from_odpi() - return cydatetime.datetime_new(timestamp.year, timestamp.month, - timestamp.day, timestamp.hour, - timestamp.minute, timestamp.second, - timestamp.fsecond // 1000, None) + if self._has_been_dequeued: + if dpiMsgProps_getEnqTime(self._handle, ×tamp) < 0: + _raise_from_odpi() + return cydatetime.datetime_new(timestamp.year, timestamp.month, + timestamp.day, timestamp.hour, + timestamp.minute, timestamp.second, + timestamp.fsecond // 1000, None) def get_exception_queue(self): """ diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index 2795a703..c10ee281 100644 --- a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -140,6 +140,7 @@ def test_7806(self): self.__verify_attr(props, "priority", 1) self.assertEqual(props.state, oracledb.MSG_READY) self.assertEqual(props.deliverymode, 0) + self.assertIsNone(props.enqtime) def test_7807(self): "7807 - test enqueue visibility option - ENQ_ON_COMMIT" From 6c866637fd3172d0bf2185e8bb248688ed2154d3 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 9 Jun 2025 19:31:25 -0600 Subject: [PATCH 097/239] Fixed bug resulting in "TypeError" when using "DeqOptions.correlation" for buffered delivery mode. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/messages/aq_base.pyx | 2 +- tests/test_7800_aq_raw.py | 17 +++++++++++++++++ tests/test_7900_aq_raw_async.py | 17 +++++++++++++++++ 4 files changed, 37 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2f2ce3e8..11814e2e 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -33,6 +33,8 @@ Thin Mode Changes #) Pooled connections that are no longer needed are now closed normally if possible instead of simply having the socket disconnected (`issue 393 `__). +#) Fixed bug resulting in ``TypeError`` when using + :attr:`DeqOptions.correlation`` for buffered delivery mode. 
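As a minimal sketch of the flow that previously failed (assuming an existing RAW
queue and an open connection; the new tests below exercise the same pattern):

.. code-block:: python

    queue = connection.queue("TEST_RAW_QUEUE")

    props = connection.msgproperties(payload=b"some data", correlation="sample")
    queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE
    queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED
    queue.enqone(props)

    queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE
    queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED
    queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
    queue.deqoptions.correlation = "sample"
    msg = queue.deqone()    # this buffered dequeue previously raised TypeError in Thin mode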
Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/messages/aq_base.pyx b/src/oracledb/impl/thin/messages/aq_base.pyx index 4a432e8e..f1a31cf5 100644 --- a/src/oracledb/impl/thin/messages/aq_base.pyx +++ b/src/oracledb/impl/thin/messages/aq_base.pyx @@ -88,7 +88,7 @@ cdef class AqBaseMessage(Message): elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_ADDRESS: props_impl.sender_agent_address = value elif keyword == TNS_AQ_EXT_KEYWORD_AGENT_PROTOCOL: - props_impl.sender_agent_protocol = value + props_impl.sender_agent_protocol = value[0] elif keyword == TNS_AQ_EXT_KEYWORD_ORIGINAL_MSGID: props_impl.original_msg_id = value diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index c10ee281..30fce278 100644 --- a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -463,6 +463,23 @@ def test_7828(self): setattr(props, name, None) self.assertIsNone(getattr(props, name)) + def test_7829(self): + "7829 - test deq options correlation with buffered messages" + queue = self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + props = self.conn.msgproperties(payload=value, correlation="sample") + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqone(props) + self.conn.commit() + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = "sample" + msg = queue.deqone() + self.conn.commit() + self.assertEqual(msg.payload, value) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_7900_aq_raw_async.py b/tests/test_7900_aq_raw_async.py index fd403a79..4b3fe450 100644 --- a/tests/test_7900_aq_raw_async.py +++ b/tests/test_7900_aq_raw_async.py @@ -402,6 +402,23 @@ async def test_7924(self): with self.assertRaisesFullCode("DPY-2062"): await queue.enqone(props) + async def test_7925(self): + "7925 - test deq options correlation with buffered messages" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + value = self.raw_data[0] + props = self.conn.msgproperties(payload=value, correlation="sample") + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + await queue.enqone(props) + await self.conn.commit() + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = "sample" + msg = await queue.deqone() + await self.conn.commit() + self.assertEqual(msg.payload, value) + if __name__ == "__main__": test_env.run_test_cases() From 3fcd58048d9841e011b61de549d7eccfee2302c9 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 9 Jun 2025 19:32:13 -0600 Subject: [PATCH 098/239] Doc tweaks. 
--- doc/src/api_manual/async_connection.rst | 4 +- doc/src/api_manual/connection.rst | 4 +- doc/src/api_manual/dataframe.rst | 2 +- doc/src/release_notes.rst | 29 +++--- doc/src/user_guide/bind.rst | 16 ++-- doc/src/user_guide/connection_handling.rst | 76 +++++++++------ doc/src/user_guide/dataframes.rst | 104 ++++++++++++++------- doc/src/user_guide/vector_data_type.rst | 14 +-- 8 files changed, 151 insertions(+), 98 deletions(-) diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 96354c25..1bf156dc 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -147,7 +147,7 @@ AsyncConnection Methods .. note:: - The data frame support in python-oracledb 3.1 is a pre-release and may + The data frame support in python-oracledb 3.2 is a pre-release and may change in a future version. .. versionadded:: 3.0.0 @@ -175,7 +175,7 @@ AsyncConnection Methods .. note:: - The data frame support in python-oracledb 3.1 is a pre-release and may + The data frame support in python-oracledb 3.2 is a pre-release and may change in a future version. .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 057e7755..2d70fef2 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -140,7 +140,7 @@ Connection Methods .. note:: - The data frame support in python-oracledb 3.1 is a pre-release and may + The data frame support in python-oracledb 3.2 is a pre-release and may change in a future version. .. dbapimethodextension:: @@ -172,7 +172,7 @@ Connection Methods .. note:: - The data frame support in python-oracledb 3.1 is a pre-release and may + The data frame support in python-oracledb 3.2 is a pre-release and may change in a future version. .. dbapimethodextension:: diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index ca45f7d1..8cd01dcf 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -13,7 +13,7 @@ from Oracle Database types to Arrow data types. .. note:: - The data frame support in python-oracledb 3.1 is a pre-release and may + The data frame support in python-oracledb 3.2 is a pre-release and may change in a future version. .. _oracledataframeobj: diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 11814e2e..2326e2d8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -55,6 +55,13 @@ Common Changes support Oracle Database 23ai multi-pool :ref:`drcp`. #) Added Instance Principal authentication support when using :ref:`OCI Cloud Native Authentication `. +#) Improvements to :ref:`data frames `: + + - Fixed date handling to match PyArrow's and avoid localization issues + (`issue 499 `__). + - Fixed bug on Windows when fetching dates prior to 1970 and after 2038 + (`issue 483 `__). + #) Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai (`PR 496 `__). #) Fix bug with GitHub build action merge artifacts step @@ -66,14 +73,6 @@ Common Changes parameter "min". Previously python-oracledb Thin mode did not raise an error and python-oracledb Thick mode raised the exception ``ORA-24413: Invalid number of sessions specified``. -#) Improvements to :ref:`data frames `: - - - Fixed date handling to match PyArrow's and avoid localization issues - (`issue 499 `__). - - - Fixed bug on Windows when fetching dates prior to 1970 and after 2038 - (`issue 483 `__). - #) Improved the test suite and documentation. 
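As a minimal sketch of the ``DPY-2064`` validation noted above (connection details
are placeholders):

.. code-block:: python

    # max < min is now rejected consistently in both modes
    pool = oracledb.create_pool(
        user="hr", password="hr_password",
        dsn="dbhost.example.com/orclpdb",
        min=5, max=2, increment=1,
    )   # raises DPY-2064: parameter 'max' should be greater than or
        # equal to parameter 'min'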
@@ -980,14 +979,12 @@ Common Changes `__). #) Added properties :data:`FetchInfo.domain_schema`, :data:`FetchInfo.domain_name` and :data:`FetchInfo.annotations` for the - `SQL domain `__ - and `annotations `__ - associated with columns that are being fetched. SQL domains and annotations - require Oracle Database 23ai. If using python-oracledb Thick mode, Oracle - Client 23ai is also required. + `SQL domain `__ and `annotations + `__ associated with columns that are being + fetched. SQL domains and annotations require Oracle Database 23ai. If using + python-oracledb Thick mode, Oracle Client 23ai is also required. #) Added parameter ``data`` to :meth:`Connection.createlob()` to allow data to be written at LOB creation time. #) Added type :data:`~oracledb.DB_TYPE_XMLTYPE` to represent data of type diff --git a/doc/src/user_guide/bind.rst b/doc/src/user_guide/bind.rst index 0abc88a5..fe5394d6 100644 --- a/doc/src/user_guide/bind.rst +++ b/doc/src/user_guide/bind.rst @@ -823,16 +823,16 @@ will accept them but there will be no processing benefit. It is not uncommon for SQL statements to have low hundreds of versions. Sometimes this is expected and not a result of any issue. To determine the reason, find the SQL identifier of the statement and then query -the Oracle Database view `V$SQL_SHARED_CURSOR `__. +the Oracle Database view `V$SQL_SHARED_CURSOR `__. The SQL identifier of a statement can be found in Oracle Database views like -`V$SQLAREA `__ after you have run a statement, or you can find it -*before* you execute the statement by using the `DBMS_SQL_TRANSLATOR.SQL_ID() -`__ function. Make sure to pass in exactly the same SQL -text, including the same whitespace: +`V$SQLAREA `__ after you have run a statement, or you +can find it *before* you execute the statement by using the +`DBMS_SQL_TRANSLATOR.SQL_ID() `__ function. Make sure +to pass in exactly the same SQL text, including the same whitespace: .. code-block:: python diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 9cabf5d7..06c37dad 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -32,32 +32,28 @@ Oracle Client and Oracle Database communicate. There are two ways to create a connection to Oracle Database using python-oracledb: -* **Standalone connections**: :ref:`Standalone connections ` - are useful when the application needs a single connection to a database. - Connections are created by calling :meth:`oracledb.connect()`. - -* **Pooled connections**: :ref:`Connection pooling ` is important for - performance when applications frequently connect and disconnect from the database. - Pools support Oracle's :ref:`high availability ` features and are - recommended for applications that must be reliable. Small pools can also be - useful for applications that want a few connections available for infrequent - use. Pools are created with :meth:`oracledb.create_pool()` at application - initialization time, and then :meth:`ConnectionPool.acquire()` can be called to - obtain a connection from a pool. +* **Standalone connections**: :ref:`Standalone connections + ` are useful when the application needs a single + connection to a database. Connections are created by calling + :meth:`oracledb.connect()`. For :ref:`asyncio `, use + :meth:`oracledb.connect_async()` instead, see :ref:`connasync`. 
+ +* **Pooled connections**: :ref:`Connection pooling ` is important + for performance when applications frequently connect and disconnect from the + database. Pools support Oracle's :ref:`high availability ` + features and are recommended for applications that must be reliable. Small + pools can also be useful for applications that want a few connections + available for infrequent use. Pools are created with + :meth:`oracledb.create_pool()` at application initialization time, and then + :meth:`ConnectionPool.acquire()` can be called to obtain a connection from a + pool. For :ref:`asyncio `, use :meth:`oracledb.create_pool_async()` + and :meth:`AsyncConnectionPool.acquire()` instead, see :ref:`asyncconnpool`. Many connection behaviors can be controlled by python-oracledb connection options. Other settings can be configured in :ref:`optnetfiles` or in :ref:`optclientfiles`. These include limiting the amount of time that opening a connection can take, or enabling :ref:`network encryption `. -.. note:: - - Creating a connection in python-oracledb Thin mode always requires a - connection string, or the database host name and service name, to be - specified. The Thin mode cannot use "bequeath" connections and does not - reference Oracle environment variables ``ORACLE_SID``, ``TWO_TASK``, - or ``LOCAL``. - .. _standaloneconnection: Standalone Connections @@ -287,6 +283,14 @@ For more information about naming methods, see the `Database Net Services Administrator's Guide `__. +.. note:: + + Creating a connection in python-oracledb Thin mode always requires a + connection string, or the database host name and service name, to be + specified. The Thin mode cannot use "bequeath" connections and does not + reference Oracle environment variables ``ORACLE_SID``, ``TWO_TASK``, + or ``LOCAL``. + .. _easyconnect: Easy Connect Syntax for Connection Strings @@ -1883,6 +1887,10 @@ creation calls. If you call :meth:`ConnectParams.parse_connect_string()`, the registered protocol hook method will be called but the parameter hook will not be. +.. + Note to doc writers: do not change the following heading because it is used + for a link emitted by ldap_hook() in src/oracledb/builtin_hooks.py + .. _ldapconnections: LDAP Directory Naming @@ -2061,8 +2069,17 @@ Connection Pooling ================== Connection pooling can significantly improve application performance and -scalability, allows resource sharing, and lets applications use advanced Oracle -High Availability features. +scalability by allowing resource sharing. Pools also let applications use +optional advanced Oracle High Availability features. + +Opening a connection to a database can be expensive: the connection string must +be parsed, a network connection must be established, the Oracle Database +network listener needs to be invoked, user authentication must be performed, a +database server process must be created, and session memory must be allocated +(and then the process is destroyed when the connection is closed). Connection +pools remove the overhead of repeatedly opening and closing :ref:`standalone +connections ` by establishing a pool of open connections +that can be reused throughout the life of an application process. The pooling solutions available to python-oracledb applications are: @@ -2092,12 +2109,12 @@ The pooling solutions available to python-oracledb applications are: - `Proxy Resident Connection Pooling (PRCP) `__: This is connection pooling handled by a dedicated - mid-tier connection proxy, `CMAN-TDM `__. 
+ 4F14-AF9B-BCC87C982DA8>`__: This is connection pooling handled by Oracle's + mid-tier connection proxy solution, `CMAN-TDM `__. - This is useful for applications taking advantage of CMAN-TDM. + PRCP is useful for applications taking advantage of CMAN-TDM. - :ref:`implicitconnpool`: This can add pooling benefits to applications that connect when they start, and only close the connection when the application @@ -2242,6 +2259,11 @@ server process to be released, use :meth:`ConnectionPool.drop()`: pool.drop(connection) +Avoid doing this unnecessarily because it shrinks the pool. A future +:meth:`~ConnectionPool.acquire()` call may suffer the overhead of establishing +a new connection to the database, instead of being able to reuse a connection +already available in the pool. + Closing a Connection Pool +++++++++++++++++++++++++ diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index 95987f45..64dcbb4e 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -4,18 +4,21 @@ Working with Data Frames ************************ -Python-oracledb can fetch directly to data frames that expose an Apache Arrow -PyCapsule Interface. This can reduce application memory requirements and allow -zero-copy data interchanges between Python data frame libraries. It is an -efficient way to work with data using Python libraries such as `Apache PyArrow +Python-oracledb queries can fetch directly to data frames. This can improve +performance and reduce memory requirements when your application uses Python +data frame libraries such as `Apache PyArrow `__, `Pandas `__, `Polars `__, `NumPy -`__, `PyTorch `__, or to write files -in `Apache Parquet `__ format. +`__, `Dask `__, `PyTorch +`__, or writes files in `Apache Parquet +`__ format. The :ref:`OracleDataFrame +` objects fetched expose an Apache Arrow PyCapsule +Interface which, in some cases, allow zero-copy data interchanges to the data +frame objects of other libraries. .. note:: - The data frame support in python-oracledb 3.1 is a pre-release and may + The data frame support in python-oracledb 3.2 is a pre-release and may change in a future version. **Fetching Data Frames** @@ -44,6 +47,34 @@ With Oracle Database's standard DEPARTMENTS table, this would display:: 4 columns 27 rows +To fetch in batches, use an iterator: + +.. code-block:: python + + sql = "select * from departments where department_id < 80" + # Adjust "size" to tune the query fetch performance + # Here it is small to show iteration + for odf in connection.fetch_df_batches(statement=sql, size=4): + df = pyarrow.Table.from_arrays( + odf.column_arrays(), names=odf.column_names() + ).to_pandas() + print(df) + +With Oracle Database's standard DEPARTMENTS table, this would display:: + + DEPARTMENT_ID DEPARTMENT_NAME MANAGER_ID LOCATION_ID + 0 10 Administration 200 1700 + 1 20 Marketing 201 1800 + 2 30 Purchasing 114 1700 + 3 40 Human Resources 203 2400 + DEPARTMENT_ID DEPARTMENT_NAME MANAGER_ID LOCATION_ID + 0 50 Shipping 121 1500 + 1 60 IT 103 1400 + 2 70 Public Relations 204 2700 + +Converting to other data frame formats is :ref:`shown later ` in +this chapter. + **Inserting OracleDataFrames into Oracle Database** To insert data currently in :ref:`OracleDataFrame ` format @@ -67,7 +98,7 @@ types used in OracleDataFrame objects. Querying any other data types from Oracle Database will result in an exception. :ref:`Output type handlers ` cannot be used to map data types. -.. list-table-with-summary:: +.. 
list-table-with-summary:: Mapping from Oracle Database to Arrow data types :header-rows: 1 :class: wy-table-responsive :widths: 1 1 @@ -123,6 +154,8 @@ When converting Oracle Database CLOBs and BLOBs: When converting Oracle Database DATEs and TIMESTAMPs: +- Arrow TIMESTAMPs will not have timezone data. + - For Oracle Database DATE types, the Arrow TIMESTAMP will have a time unit of "seconds". @@ -143,47 +176,47 @@ When converting Oracle Database DATEs and TIMESTAMPs: * - 1 - 3 - milliseconds * - 4 - 6 - - microconds + - microseconds * - 7 - 9 - nanoseconds -Arrow TIMESTAMPs will not have timezone data. +.. _convertingodf: Converting OracleDataFrame to Other Data Frames ----------------------------------------------- -To do more extensive operations, :ref:`OracleDataFrames ` -can be converted to your chosen library data frame, and then methods of that -library can be used. - -Some examples are shown in the following sections. Other libraries will have -similar methods. +To use data frames in your chosen analysis library, :ref:`OracleDataFrame +objects ` can be converted. Examples for some libraries are +shown in the following sections. Other libraries will have similar methods. **Conversion Overview** -To convert :ref:`OracleDataFrame ` to a `PyArrow Table -`__, use -`pyarrow.Table.from_arrays() -`__ -which leverages the Arrow PyCapsule interface. +The guidelines for converting :ref:`OracleDataFrame objects +` to data frames for other libraries are: + +- To convert to a `PyArrow Table `__, use `pyarrow.Table.from_arrays() + `__ which leverages the Arrow PyCapsule interface. -To convert :ref:`OracleDataFrame ` to a `Pandas DataFrame -`__, -use `pyarrow.Table.to_pandas() -`__. +- To convert to a `Pandas DataFrame `__, use + `pyarrow.Table.to_pandas() `__. -If you want to use a data frame library other than Pandas or PyArrow, use the -library's ``from_arrow()`` method to convert a PyArrow Table to the applicable -data frame, if your library supports this. For example, with `Polars -`__ use `polars.from_arrow() -`__. +- If you want to use a library other than Pandas or PyArrow, use the library's + ``from_arrow()`` method to convert a PyArrow Table to the applicable data + frame, if your library supports this. For example, with `Polars + `__ use `polars.from_arrow() `__. -Lastly, if your data frame library does not support ``from_arrow()``, then use -``from_dataframe()`` if the library supports it. This can be slower, depending -on the implementation. +- If your library does not support ``from_arrow()``, then use + ``from_dataframe()`` if the library supports it. This can be slower, + depending on the implementation. -The general recommendation is to use Apache Arrow as much as possible but if -there are no options, then use ``from_dataframe()``. +Overall, the general recommendation is to use Apache Arrow as much as possible +but if there are no options, then use ``from_dataframe()``. You should test +and benchmark to find the best option for your applications. Creating PyArrow Tables +++++++++++++++++++++++ @@ -352,7 +385,6 @@ For example, to convert to `NumPy `__ ``ndarray`` format: print(numpy.sum(np)) print(numpy.log10(np)) - See `samples/dataframe_numpy.py `__ for a runnable example. 
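As a minimal sketch that ties the conversion guidelines above together (placeholder
query; pandas and Polars are assumed to be installed):

.. code-block:: python

    import pyarrow
    import polars

    odf = connection.fetch_df_all(
        "select department_id, department_name from departments"
    )

    # build a PyArrow Table from the fetched OracleDataFrame
    tab = pyarrow.Table.from_arrays(
        odf.column_arrays(), names=odf.column_names()
    )

    pdf = tab.to_pandas()            # Pandas DataFrame
    pldf = polars.from_arrow(tab)    # Polars DataFrame via from_arrow()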
diff --git a/doc/src/user_guide/vector_data_type.rst b/doc/src/user_guide/vector_data_type.rst index 90cea8c9..9f2d1397 100644 --- a/doc/src/user_guide/vector_data_type.rst +++ b/doc/src/user_guide/vector_data_type.rst @@ -4,12 +4,14 @@ Using VECTOR Data ***************** -Oracle Database 23ai introduced a new data type `VECTOR `__ for artificial intelligence and machine learning search operations. -The VECTOR data type is a homogeneous array of 8-bit signed integers, 8-bit -unsigned integers, 32-bit floating-point numbers, or 64-bit floating-point -numbers. +Oracle Database 23ai introduced a new data type `VECTOR `__ for artificial intelligence and machine learning search +operations. The VECTOR data type is a homogeneous array of 8-bit signed +integers, 8-bit unsigned integers, 32-bit floating-point numbers, or 64-bit +floating-point numbers. For more information about using vectors in Oracle +Database, see the `Oracle AI Vector Search User's Guide +`__. With the VECTOR data type, you can define the number of dimensions for the data and the storage format for each dimension value in the vector. The From 500187f041ca869465595aed4fdd53d55a007451 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Mon, 9 Jun 2025 19:32:36 -0600 Subject: [PATCH 099/239] Correct fetching of LOBs with asyncio prior to Oracle Database 23ai (#500). --- doc/src/release_notes.rst | 3 + src/oracledb/impl/thin/connection.pyx | 4 +- src/oracledb/impl/thin/messages/base.pyx | 158 ++++++++++++++++------- src/oracledb/impl/thin/packet.pyx | 10 +- src/oracledb/lob.py | 4 +- 5 files changed, 124 insertions(+), 55 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2326e2d8..b82af338 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -26,6 +26,9 @@ Thin Mode Changes closed connection (`issue 482 `__). #) Fixed bug when connecting with asyncio using the parameter ``https_proxy``. +#) Fixed bug when fetching LOBs with asyncio from databases prior to Oracle + Database 23ai + (`issue 500 `__). #) Fixed regression when connecting where only the host specified by the ``https_proxy`` parameter can successfully perform name resolution. 
#) Fixed bug resulting in explicit request boundaries to aid planned database diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index ad3ce333..9b333a8d 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -596,11 +596,13 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): cursor_impl = message_with_data.cursor_impl if message.resend: await protocol._process_message(message) + await message.postprocess_async() if op_type in ( PIPELINE_OP_TYPE_FETCH_ONE, PIPELINE_OP_TYPE_FETCH_MANY, PIPELINE_OP_TYPE_FETCH_ALL, ): + result_impl.rows = [] while cursor_impl._buffer_rowcount > 0: result_impl.rows.append(cursor_impl._create_row()) result_impl.fetch_metadata = cursor_impl.fetch_metadata @@ -870,7 +872,7 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): Message message for message in messages: result_impl = message.pipeline_result_impl - if result_impl.error is not None: + if result_impl.error is not None or message.resend: continue try: self._populate_pipeline_op_result(message) diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 5853b210..f9040fff 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -42,6 +42,32 @@ cdef class _OracleErrorInfo: list batcherrors +@cython.freelist(20) +cdef class _PostProcessFn: + cdef: + object fn + bint convert_nulls + bint check_awaitable + uint32_t num_elements + list values + + @staticmethod + cdef _PostProcessFn from_info(object fn, uint32_t num_elements, + list values, bint convert_nulls=False, + bint check_awaitable=False): + """ + Create a post process function object and return it. + """ + cdef _PostProcessFn fn_obj + fn_obj = _PostProcessFn.__new__(_PostProcessFn) + fn_obj.fn = fn + fn_obj.convert_nulls = convert_nulls + fn_obj.check_awaitable = check_awaitable + fn_obj.num_elements = num_elements + fn_obj.values = values + return fn_obj + + cdef class Message: cdef: BaseThinConnImpl conn_impl @@ -736,6 +762,70 @@ cdef class MessageWithData(Message): self.bit_vector = self.bit_vector_buf.data.as_chars memcpy( self.bit_vector, ptr, num_bytes) + cdef list _get_post_process_fns(self): + """ + Returns a list of functions that need to be run after the database + response has been completely received. These functions can be + internally defined (for wrapping implementation objects with user + facing objects) or user defined (out converters). This prevents + multiple executions of functions (reparsing of database responses for + older databases without the end of response indicator) or interference + with any ongoing database response. Returning a list allows this + process to be determined commonly across sync and async in order to + avoid duplicating code. 
+ """ + cdef: + OracleMetadata metadata + uint32_t num_elements + uint8_t ora_type_num + ThinVarImpl var_impl + _PostProcessFn fn + list fns = [] + bint is_async + object cls + is_async = self.conn_impl._protocol._transport._is_async + if self.out_var_impls is not None: + for var_impl in self.out_var_impls: + if var_impl is None: + continue + + # retain last raw value when not fetching Arrow (for handling + # duplicate rows) + if not self.cursor_impl.fetching_arrow: + var_impl._last_raw_value = \ + var_impl._values[self.cursor_impl._last_row_index] + + # determine the number of elements to process, if needed + if var_impl.is_array: + num_elements = var_impl.num_elements_in_array + else: + num_elements = self.row_index + + # perform post conversion to user-facing objects, if applicable + if self.in_fetch: + metadata = var_impl._fetch_metadata + else: + metadata = var_impl.metadata + ora_type_num = metadata.dbtype._ora_type_num + if ora_type_num in (ORA_TYPE_NUM_CLOB, + ORA_TYPE_NUM_BLOB, + ORA_TYPE_NUM_BFILE): + cls = PY_TYPE_ASYNC_LOB if is_async else PY_TYPE_LOB + fn = _PostProcessFn.from_info(cls._from_impl, num_elements, + var_impl._values) + fns.append(fn) + + # perform post conversion via user out converter, if applicable + if var_impl.outconverter is None: + continue + fn = _PostProcessFn.from_info(var_impl.outconverter, + num_elements, var_impl._values, + var_impl.convert_nulls, + check_awaitable=True) + fns.append(fn) + + return fns + cdef bint _is_duplicate_data(self, uint32_t column_num): """ Returns a boolean indicating if the given column contains data @@ -1366,32 +1456,21 @@ cdef class MessageWithData(Message): database round-trip. """ cdef: - uint32_t i, j, num_elements object value, element_value - ThinVarImpl var_impl - if self.out_var_impls is None: - return 0 - for var_impl in self.out_var_impls: - if var_impl is None or var_impl.outconverter is None: - continue - if not self.cursor_impl.fetching_arrow: - var_impl._last_raw_value = \ - var_impl._values[self.cursor_impl._last_row_index] - if var_impl.is_array: - num_elements = var_impl.num_elements_in_array - else: - num_elements = self.row_index - for i in range(num_elements): - value = var_impl._values[i] - if value is None and not var_impl.convert_nulls: + _PostProcessFn fn + uint32_t i, j + for fn in self._get_post_process_fns(): + for i in range(fn.num_elements): + value = fn.values[i] + if value is None and not fn.convert_nulls: continue if isinstance(value, list): for j, element_value in enumerate(value): - if element_value is None: + if element_value is None and not fn.convert_nulls: continue - value[j] = var_impl.outconverter(element_value) + value[j] = fn.fn(element_value) else: - var_impl._values[i] = var_impl.outconverter(value) + fn.values[i] = fn.fn(value) async def postprocess_async(self): """ @@ -1401,39 +1480,28 @@ cdef class MessageWithData(Message): database round-trip. 
""" cdef: - object value, element_value, fn - uint32_t i, j, num_elements - ThinVarImpl var_impl - if self.out_var_impls is None: - return 0 - for var_impl in self.out_var_impls: - if var_impl is None or var_impl.outconverter is None: - continue - if not self.cursor_impl.fetching_arrow: - var_impl._last_raw_value = \ - var_impl._values[self.cursor_impl._last_row_index] - if var_impl.is_array: - num_elements = var_impl.num_elements_in_array - else: - num_elements = self.row_index - fn = var_impl.outconverter - for i in range(num_elements): - value = var_impl._values[i] - if value is None and not var_impl.convert_nulls: + object value, element_value + _PostProcessFn fn + uint32_t i, j + for fn in self._get_post_process_fns(): + for i in range(fn.num_elements): + value = fn.values[i] + if value is None and not fn.convert_nulls: continue if isinstance(value, list): for j, element_value in enumerate(value): - if element_value is None: + if element_value is None and not fn.convert_nulls: continue - element_value = fn(element_value) - if inspect.isawaitable(element_value): + element_value = fn.fn(element_value) + if fn.check_awaitable \ + and inspect.isawaitable(element_value): element_value = await element_value value[j] = element_value else: - value = fn(value) - if inspect.isawaitable(value): + value = fn.fn(value) + if fn.check_awaitable and inspect.isawaitable(value): value = await value - var_impl._values[i] = value + fn.values[i] = value cdef int preprocess(self) except -1: cdef: diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index 03ff915c..77fff14e 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -487,7 +487,6 @@ cdef class ReadBuffer(Buffer): BaseThinLobImpl lob_impl uint64_t size bytes locator - type cls self.read_ub4(&num_bytes) if num_bytes > 0: if dbtype._ora_type_num == ORA_TYPE_NUM_BFILE: @@ -497,18 +496,13 @@ cdef class ReadBuffer(Buffer): self.read_ub4(&chunk_size) locator = self.read_bytes() if lob is None: - lob_impl = conn_impl._create_lob_impl(dbtype, locator) - cls = PY_TYPE_ASYNC_LOB \ - if conn_impl._protocol._transport._is_async \ - else PY_TYPE_LOB - lob = cls._from_impl(lob_impl) + lob = lob_impl = conn_impl._create_lob_impl(dbtype, locator) else: lob_impl = lob._impl lob_impl._locator = locator lob_impl._size = size lob_impl._chunk_size = chunk_size - lob_impl._has_metadata = \ - dbtype._ora_type_num != ORA_TYPE_NUM_BFILE + lob_impl._has_metadata = dbtype._ora_type_num != ORA_TYPE_NUM_BFILE return lob cdef const char_type* read_raw_bytes(self, ssize_t num_bytes) except NULL: diff --git a/src/oracledb/lob.py b/src/oracledb/lob.py index 29bf6c16..01dbd5a1 100644 --- a/src/oracledb/lob.py +++ b/src/oracledb/lob.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -70,6 +70,8 @@ def _check_value_to_write(self, value): @classmethod def _from_impl(cls, impl): + if isinstance(impl, BaseLOB): + return impl lob = cls.__new__(cls) lob._impl = impl return lob From 242946b38f9cd3be751b8fb5448c6f47364bdeee Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 11 Jun 2025 12:13:56 -0600 Subject: [PATCH 100/239] Simplify code. 
--- src/oracledb/interchange/nanoarrow_bridge.pyx | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index 34fc6c0d..e7f80024 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -88,12 +88,8 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowErrorCode ArrowArrayReserve(ArrowArray* array, int64_t additional_size_elements) ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) - ArrowErrorCode ArrowArrayViewInitFromSchema(ArrowArrayView* array_view, - const ArrowSchema* schema, - ArrowError* error) - ArrowErrorCode ArrowArrayViewSetArray(ArrowArrayView* array_view, - const ArrowArray* array, - ArrowError* error) + ArrowErrorCode ArrowArrayViewInitFromArray(ArrowArrayView* array_view, + ArrowArray* array) int8_t ArrowBitGet(const uint8_t* bits, int64_t i) ArrowBufferAllocator ArrowBufferDeallocator(ArrowBufferDeallocatorCallback, void *private_data) @@ -420,9 +416,7 @@ cdef class OracleArrowArray: int64_t n_buffers = self.arrow_array.n_buffers ArrowBufferView *buffer ArrowArrayView view - _check_nanoarrow(ArrowArrayViewInitFromSchema(&view, self.arrow_schema, - NULL)) - _check_nanoarrow(ArrowArrayViewSetArray(&view, self.arrow_array, NULL)) + _check_nanoarrow(ArrowArrayViewInitFromArray(&view, self.arrow_array)) # initialize all buffers to None to begin with buffers = { From 9ff772cd233117dd35b459202237661f363c8270 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 11 Jun 2025 12:14:21 -0600 Subject: [PATCH 101/239] Doc tweaks. --- doc/src/release_notes.rst | 18 +++++++++--------- doc/src/user_guide/connection_handling.rst | 3 ++- tests/README.md | 14 +++++++++----- 3 files changed, 20 insertions(+), 15 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index b82af338..3dd85217 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -22,6 +22,9 @@ Thin Mode Changes #) Emulate support for :meth:`Queue.deqmany()` with JSON payloads when using Oracle Database 21c by internally calling :meth:`Queue.deqone()` as many times as needed. +#) Pooled connections that are no longer needed are now closed normally if + possible instead of simply having the socket disconnected + (`issue 393 `__). #) Fixed bug when a connection pool internally makes an attempt to ping a closed connection (`issue 482 `__). @@ -33,11 +36,8 @@ Thin Mode Changes ``https_proxy`` parameter can successfully perform name resolution. #) Fixed bug resulting in explicit request boundaries to aid planned database maintenance not being sent when using connection pools with asyncio. -#) Pooled connections that are no longer needed are now closed normally if - possible instead of simply having the socket disconnected - (`issue 393 `__). #) Fixed bug resulting in ``TypeError`` when using - :attr:`DeqOptions.correlation`` for buffered delivery mode. + :attr:`DeqOptions.correlation` for buffered delivery mode. Thick Mode Changes ++++++++++++++++++ @@ -54,8 +54,6 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Added parameter ``pool_name`` to connection and pool creation methods to - support Oracle Database 23ai multi-pool :ref:`drcp`. #) Added Instance Principal authentication support when using :ref:`OCI Cloud Native Authentication `. #) Improvements to :ref:`data frames `: @@ -65,6 +63,8 @@ Common Changes - Fixed bug on Windows when fetching dates prior to 1970 and after 2038 (`issue 483 `__). 
+#) Added parameter ``pool_name`` to connection and pool creation methods to + support Oracle Database 23ai multi-pool :ref:`drcp`. #) Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai (`PR 496 `__). #) Fix bug with GitHub build action merge artifacts step @@ -72,9 +72,9 @@ Common Changes #) Error ``DPY-2064: parameter 'max' should be greater than or equal to parameter 'min'`` is now raised when a call to :meth:`oracledb.create_pool()`, :meth:`oracledb.create_pool_async()` - or :meth:`oracledb.PoolParams()` is made with parameter "max" less than the - parameter "min". Previously python-oracledb Thin mode did not raise an - error and python-oracledb Thick mode raised the exception + or :meth:`oracledb.PoolParams()` is made with parameter ``max`` less than + the parameter ``min``. Previously python-oracledb Thin mode did not raise + an error and python-oracledb Thick mode raised the exception ``ORA-24413: Invalid number of sessions specified``. #) Improved the test suite and documentation. diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 06c37dad..4d2bc68a 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -3084,7 +3084,8 @@ specifying a class name, you can call: cclass="MYAPP") If ``cclass`` is not set, then the pooled server sessions will not be reused -optimally, and the DRCP statistic views may record large values for NUM_MISSES. +optimally, and the :ref:`DRCP statistic views ` may record +large values for NUM_MISSES. **DRCP Connection Purity** diff --git a/tests/README.md b/tests/README.md index 90ec5b99..be963101 100644 --- a/tests/README.md +++ b/tests/README.md @@ -9,10 +9,14 @@ This directory contains the test suite for python-oracledb. python create_schema.py -2. Run the test suite by issuing the following command in the top-level +2. Install tox: + + python -m pip install tox + +3. Run the test suite by issuing the following command in the top-level directory of your oracledb installation: - tox + python -m tox This will build the module in an independent environment and run the test suite using the module that was just built in that environment. @@ -25,7 +29,7 @@ This directory contains the test suite for python-oracledb. python test_1000_module.py -3. After running the test suite, the schemas can be dropped by running the +4. After running the test suite, the schemas can be dropped by running the Python script [drop_schema.py][3]. The script requires administrative privileges and will prompt for these credentials as well as the names of the schemas that will be dropped, unless a number of environment variables @@ -34,12 +38,12 @@ This directory contains the test suite for python-oracledb. python drop_schema.py -4. Enable tests that require extra configuration +5. Enable tests that require extra configuration The following test(s) are automatically skipped if their required environment variable(s) and setup is not available. - 4.1 test_5000_externalauth.py + 5.1 test_5000_externalauth.py This test aims to test the usage of external authentication. From 0b754430ad7247dcb253f062a1e6011e751432ed Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 11 Jun 2025 12:14:38 -0600 Subject: [PATCH 102/239] Fixed bug when fetching multiple consecutive null values into a data frame. 
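As an aside to the DRCP ``cclass`` tweak above: a hedged sketch of a standalone connection that names a connection class and purity so DRCP pooled servers are reused and NUM_MISSES stays low (user, password and DSN are placeholders; ``:pooled`` in the Easy Connect string requests a DRCP server)::

    import oracledb

    connection = oracledb.connect(
        user="hr",
        password="hr_password",
        dsn="dbhost.example.com/orclpdb1:pooled",  # DRCP pooled server
        cclass="MYAPP",                            # connection class for session reuse
        purity=oracledb.PURITY_SELF,
    )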
--- doc/src/release_notes.rst | 1 + src/oracledb/interchange/nanoarrow_bridge.pyx | 11 ++++++ tests/test_8000_dataframe.py | 39 +++++++++++++++++++ tests/test_8100_dataframe_async.py | 39 +++++++++++++++++++ 4 files changed, 90 insertions(+) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 3dd85217..213da136 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -38,6 +38,7 @@ Thin Mode Changes maintenance not being sent when using connection pools with asyncio. #) Fixed bug resulting in ``TypeError`` when using :attr:`DeqOptions.correlation` for buffered delivery mode. +#) Fixed bug when fetching multiple consecutive null values into a data frame. Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index e7f80024..c09b3074 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -55,6 +55,9 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowBufferViewData data int64_t size_bytes + cdef struct ArrowBitmap: + ArrowBuffer buffer + cdef struct ArrowArrayView: ArrowBufferView *buffer_views @@ -88,6 +91,7 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowErrorCode ArrowArrayReserve(ArrowArray* array, int64_t additional_size_elements) ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) + ArrowBitmap* ArrowArrayValidityBitmap(ArrowArray* array) ArrowErrorCode ArrowArrayViewInitFromArray(ArrowArrayView* array_view, ArrowArray* array) int8_t ArrowBitGet(const uint8_t* bits, int64_t i) @@ -335,8 +339,15 @@ cdef class OracleArrowArray: int64_t index uint8_t *ptr void* temp + ArrowBitmap *bitamp if array is None: array = self + bitmap = ArrowArrayValidityBitmap(array.arrow_array) + if bitmap != NULL and bitmap.buffer.data != NULL: + as_bool = ArrowBitGet(bitmap.buffer.data, index) + if not as_bool: + self.append_null() + return 0 index = array.arrow_array.length - 1 if array.arrow_type in (NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP): data_buffer = ArrowArrayBuffer(array.arrow_array, 1) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index ccf35ec2..f5aa96dc 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -633,6 +633,45 @@ def test_8026(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + def test_8027(self): + "8027 - fetch data with multiple rows containing null values" + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date('2025-06-11', 'YYYY-MM-DD') as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + """ + ) + data = [ + (None,), + (None,), + (None,), + (datetime.datetime(2025, 6, 11),), + (None,), + (None,), + (None,), + (None,), + ] + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 007e24d6..04acbdb0 100644 --- 
a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -586,6 +586,45 @@ async def test_8122(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + async def test_8123(self): + "8123 - fetch data with multiple rows containing null values" + self.__check_interop() + ora_df = await self.conn.fetch_df_all( + """ + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date('2025-06-11', 'YYYY-MM-DD') as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + """ + ) + data = [ + (None,), + (None,), + (None,), + (datetime.datetime(2025, 6, 11),), + (None,), + (None,), + (None,), + (None,), + ] + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + if __name__ == "__main__": test_env.run_test_cases() From df5c280bdb3c51d9cf871146eb9a5bfa9cb6ccc8 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 11 Jun 2025 12:15:16 -0600 Subject: [PATCH 103/239] Fixed bug populating attribute MessageProperties.deliverymode after dequeue, which is set using attribute DeqOptions.deliverymode. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/messages/aq_array.pyx | 3 +++ src/oracledb/impl/thin/queue.pyx | 1 + tests/test_7800_aq_raw.py | 2 ++ tests/test_7900_aq_raw_async.py | 2 ++ 5 files changed, 10 insertions(+) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 213da136..31003a1c 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -36,6 +36,8 @@ Thin Mode Changes ``https_proxy`` parameter can successfully perform name resolution. #) Fixed bug resulting in explicit request boundaries to aid planned database maintenance not being sent when using connection pools with asyncio. +#) Fixed bug populating :attr:`MessageProperties.deliverymode` after dequeue, + which is set using :attr:`DeqOptions.deliverymode`. #) Fixed bug resulting in ``TypeError`` when using :attr:`DeqOptions.correlation` for buffered delivery mode. #) Fixed bug when fetching multiple consecutive null values into a data frame. 
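The delivery mode fix recorded above can be pictured with a short, hypothetical buffered-messaging round trip (credentials, queue name and payload are placeholders; the RAW queue must already exist). With this change the dequeued properties report the mode requested in ``DeqOptions.deliverymode``::

    import oracledb

    conn = oracledb.connect(user="hr", password="hr_password",
                            dsn="localhost/orclpdb1")
    queue = conn.queue("DEMO_RAW_QUEUE")

    # buffered messages require immediate visibility
    queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED
    queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE
    queue.enqone(conn.msgproperties(payload=b"some data"))

    queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED
    queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE
    queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
    props = queue.deqone()
    if props is not None:
        print(props.deliverymode == oracledb.MSG_BUFFERED)   # True with this fix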
diff --git a/src/oracledb/impl/thin/messages/aq_array.pyx b/src/oracledb/impl/thin/messages/aq_array.pyx index ebad7f69..8158b3f0 100644 --- a/src/oracledb/impl/thin/messages/aq_array.pyx +++ b/src/oracledb/impl/thin/messages/aq_array.pyx @@ -69,6 +69,9 @@ cdef class AqArrayMessage(AqBaseMessage): props_impl.msgid = msgid[j * 16:(j + 1) * 16] else: props_impl.msgid = msgid + props_impl.delivery_mode = ( + self.deq_options_impl.delivery_mode + ) buf.read_ub2(&temp16) # extensions len if temp16 > 0: errors._raise_err(errors.ERR_NOT_IMPLEMENTED) diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index 0a24b4fb..4fcb6747 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -74,6 +74,7 @@ cdef class BaseThinQueueImpl(BaseQueueImpl): message = self._conn_impl._create_message(AqDeqMessage) message.queue_impl = self message.deq_options_impl = self.deq_options_impl + props_impl.delivery_mode = message.deq_options_impl.delivery_mode message.props_impl = props_impl return message diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index 30fce278..b1343904 100644 --- a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -199,6 +199,7 @@ def test_7809(self): results = value other_conn.commit() self.assertEqual(results, self.raw_data[0]) + self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) def test_7810(self): "7810 - test enqueue/dequeue delivery modes identical - persistent" @@ -220,6 +221,7 @@ def test_7810(self): results = value other_conn.commit() self.assertEqual(results, self.raw_data[0]) + self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) def test_7811(self): "7811 - test enqueue/dequeue delivery modes the same" diff --git a/tests/test_7900_aq_raw_async.py b/tests/test_7900_aq_raw_async.py index 4b3fe450..af7808bb 100644 --- a/tests/test_7900_aq_raw_async.py +++ b/tests/test_7900_aq_raw_async.py @@ -187,6 +187,7 @@ async def test_7909(self): results = value await other_conn.commit() self.assertEqual(results, self.raw_data[0]) + self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) async def test_7910(self): "7910 - test enqueue/dequeue delivery modes identical - persistent" @@ -208,6 +209,7 @@ async def test_7910(self): results = value await other_conn.commit() self.assertEqual(results, self.raw_data[0]) + self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) async def test_7911(self): "7911 - test enqueue/dequeue delivery modes the same" From 36300f72e7ecdff1e276dc9682841306542e87c1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 11 Jun 2025 12:16:00 -0600 Subject: [PATCH 104/239] Allow building of specified packages. 
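The asyncio tests above (test_7909 and test_7910) have an application-level counterpart; a hypothetical sketch of the same persistent-delivery round trip using the async AQ support (placeholders throughout, and the queue must already exist)::

    import asyncio
    import oracledb

    async def main():
        conn = await oracledb.connect_async(user="hr", password="hr_password",
                                            dsn="localhost/orclpdb1")
        queue = conn.queue("DEMO_RAW_QUEUE")
        queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT
        await queue.enqone(conn.msgproperties(payload=b"persistent data"))
        await conn.commit()

        queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
        props = await queue.deqone()
        if props is not None:
            print(props.deliverymode == oracledb.MSG_PERSISTENT)
        await conn.close()

    asyncio.run(main())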
--- .github/workflows/build.yaml | 51 ++++++++++++++++++++++++++--- doc/src/release_notes.rst | 12 ++++--- doc/src/user_guide/installation.rst | 15 +++++---- 3 files changed, 63 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index a0db8524..c63161e0 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -1,15 +1,21 @@ -name: build +name: Build python-oracledb packages on: push: tags: - v* workflow_dispatch: + inputs: + tgt: + description: List of package targets to build + default: 'Linux, macOS, Windows, Source' + required: false jobs: build_source_package: name: Build source package + if: contains(inputs.tgt, 'Source') || inputs.tgt == '' runs-on: ubuntu-latest steps: @@ -36,6 +42,7 @@ jobs: build_linux_wheels: name: Build wheels for Linux + if: contains(inputs.tgt, 'Linux') || inputs.tgt == '' runs-on: ${{ matrix.os }} strategy: matrix: @@ -82,12 +89,13 @@ jobs: name: Linux_${{ matrix.platform }}_wheels path: dist/*.whl - build_non_linux_wheels: + build_macos_wheels: name: Build wheels for ${{ matrix.os }} Python-${{ matrix.python-version}}-${{ matrix.arch }} + if: contains(inputs.tgt, 'macOS') || inputs.tgt == '' runs-on: ${{ matrix.os }} strategy: matrix: - os: [windows-latest, macos-latest] + os: [macos-latest] python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] arch: ['x86', ''] exclude: @@ -118,9 +126,44 @@ jobs: name: python-oracledb-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.arch }} path: dist/*.whl + build_windows_wheels: + name: Build wheels for ${{ matrix.os }} Python-${{ matrix.python-version}}-${{ matrix.arch }} + if: contains(inputs.tgt, 'Windows') || inputs.tgt == '' + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [windows-latest] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + arch: ['x86', ''] + + steps: + - uses: actions/checkout@v4 + with: + submodules: true + + - uses: actions/setup-python@v5 + with: + cache: 'pip' + python-version: ${{ matrix.python-version }} + architecture: ${{ matrix.arch }} + allow-prereleases: true + + - name: Ensure build package is present + run: python -m pip install build + + - name: Build wheel for Python ${{ matrix.python-version }} + run: python -m build + + - name: Upload the artifact + uses: actions/upload-artifact@v4 + with: + name: python-oracledb-${{ matrix.os }}-${{ matrix.python-version }}-${{ matrix.arch }} + path: dist/*.whl + combine_artifacts: name: Combine artifacts into single artifact - needs: [build_source_package, build_linux_wheels, build_non_linux_wheels] + if: ${{ always() }} + needs: [build_source_package, build_linux_wheels, build_macos_wheels, build_windows_wheels] runs-on: ubuntu-latest steps: diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 31003a1c..dcc4c593 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -68,10 +68,14 @@ Common Changes #) Added parameter ``pool_name`` to connection and pool creation methods to support Oracle Database 23ai multi-pool :ref:`drcp`. -#) Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai - (`PR 496 `__). -#) Fix bug with GitHub build action merge artifacts step - (`issue 495 `__). +#) :ref:`GitHub Action ` workflow updates: + + - Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai + (`PR 496 `__). + - Allow the GitHub build action to build a user-chosen subset of packages. + - Fix bug with GitHub build action merge artifacts step + (`issue 495 `__). 
+ #) Error ``DPY-2064: parameter 'max' should be greater than or equal to parameter 'min'`` is now raised when a call to :meth:`oracledb.create_pool()`, :meth:`oracledb.create_pool_async()` diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index a053270f..9325d1e7 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -1010,19 +1010,20 @@ Python versions. `ODPI-C repository `__, keeping the default name. -2. Optionally edit ``.github/workflows/build.yaml`` and remove platforms and - versions that you are not interested in. Building all packages can take some - time. - -3. In your python-oracledb fork, go to the Actions tab +2. In your python-oracledb fork, go to the Actions tab ``https://github.com//python-oracledb/actions/``. If this is your first time using Actions, confirm enabling them. -4. In the "All workflows" list on the left-hand side, select the "build" entry. +3. In the "All workflows" list on the left-hand side, select the "Build + python-oracledb packages" entry. -5. Navigate to the "Run workflow" drop-down, select the branch to build from +4. Navigate to the "Run workflow" drop-down, select the branch to build from (for example, "main"), and run the workflow. +5. Optionally edit the input field list of package targets and remove targets + you do not want to build. For example, remove "Linux" if you do not want + Linux packages. + 6. When the build has completed, download the "python-oracledb-wheels" artifact, unzip it, and install the one for your architecture and Python version. For example, when using Python 3.12 on macOS, install:: From bfdaddc1a04baaee2122e790da9d0811fa1f4650 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 12 Jun 2025 20:07:06 -0600 Subject: [PATCH 105/239] Tweaked relesae notes again. --- doc/src/release_notes.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index dcc4c593..f983b510 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -40,7 +40,8 @@ Thin Mode Changes which is set using :attr:`DeqOptions.deliverymode`. #) Fixed bug resulting in ``TypeError`` when using :attr:`DeqOptions.correlation` for buffered delivery mode. -#) Fixed bug when fetching multiple consecutive null values into a data frame. +#) Fixed bug when fetching multiple consecutive null values into a :ref:`data + frame `. Thick Mode Changes ++++++++++++++++++ @@ -52,7 +53,8 @@ Thick Mode Changes :attr:`EnqOptions.transformation`, :attr:`MessageProperties.correlation`, or :attr:`MessageProperties.exceptionq` are set to ``None``. #) Fixed a bug resulting in a ``ValueError`` exception when getting attribute - :attr:`MessageProperties.enqtime` if the value is not available or None. + :attr:`MessageProperties.enqtime` if the value is not available or + ``None``. Common Changes ++++++++++++++ From 5d2b053c7651c1c02bc6e08c04bf0a3e32e8c666 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 12 Jun 2025 20:07:24 -0600 Subject: [PATCH 106/239] Fixed a memory leak when enqueuing to a JSON queue with AQ. 
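Illustrative only: the kind of JSON-payload enqueue that triggered the Thick mode leak fixed in this patch (queue name, credentials and payload are placeholders; the JSON queue must already exist and Thick mode must be initialised)::

    import oracledb

    oracledb.init_oracle_client()   # Thick mode
    conn = oracledb.connect(user="hr", password="hr_password",
                            dsn="localhost/orclpdb1")
    queue = conn.queue("DEMO_JSON_QUEUE", "JSON")
    queue.enqone(conn.msgproperties(payload={"name": "Alice", "age": 42}))
    conn.commit()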
--- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thick/queue.pyx | 13 ++++++++----- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index f983b510..89323bdd 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -55,6 +55,8 @@ Thick Mode Changes #) Fixed a bug resulting in a ``ValueError`` exception when getting attribute :attr:`MessageProperties.enqtime` if the value is not available or ``None``. +#) Fixed a memory leak when enqueuing to JSON queues with + :ref:`Oracle Advanced Queuing `. Common Changes ++++++++++++++ diff --git a/src/oracledb/impl/thick/queue.pyx b/src/oracledb/impl/thick/queue.pyx index 8fd8504b..77694a0d 100644 --- a/src/oracledb/impl/thick/queue.pyx +++ b/src/oracledb/impl/thick/queue.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2020, 2022, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -600,10 +600,13 @@ cdef class ThickMsgPropsImpl(BaseMsgPropsImpl): json_buf.from_object(json_val) if dpiConn_newJson(self._conn_impl._handle, &json) < 0: _raise_from_odpi() - if dpiJson_setValue(json, &json_buf._top_node) < 0: - _raise_from_odpi() - if dpiMsgProps_setPayloadJson(self._handle, json) < 0: - _raise_from_odpi() + try: + if dpiJson_setValue(json, &json_buf._top_node) < 0: + _raise_from_odpi() + if dpiMsgProps_setPayloadJson(self._handle, json) < 0: + _raise_from_odpi() + finally: + dpiJson_release(json) def set_priority(self, int32_t value): """ From 3c38e1ccde983551022046c1d84b802fc5fb940a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 12 Jun 2025 20:07:43 -0600 Subject: [PATCH 107/239] Added more tests. 
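A condensed sketch of what the new tests below verify through the DataFrame Interchange Protocol: each column of a fetched data frame exposes a dtype tuple of (kind, bit width, format string, endianness). Connection details are placeholders::

    import oracledb

    conn = oracledb.connect(user="hr", password="hr_password",
                            dsn="localhost/orclpdb1")
    ora_df = conn.fetch_df_all(
        "select cast(1 as number(10)) as col_int64 from dual"
    )
    col = ora_df.get_column(0)
    print(ora_df.column_names())   # ['COL_INT64']
    print(col.dtype)               # e.g. (<DtypeKind.INT: 0>, 64, 'l', '=')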
--- tests/test_8000_dataframe.py | 65 ++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index f5aa96dc..64f0a33d 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -672,6 +672,71 @@ def test_8027(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + def test_8028(self): + "8028 - verify dtype for all Arrow types" + query = """ + select + cast(1 as number(10)) as col_int64, + cast(1.23 as binary_double) as col_double, + cast(7.14 as binary_float) as col_float, + cast('abcd' as varchar2(10)) as col_string, + cast(systimestamp as timestamp(0)) as col_ts_sec, + cast(systimestamp as timestamp(3)) as col_ts_ms, + cast(systimestamp as timestamp(6)) as col_ts_us, + cast(systimestamp as timestamp(9)) as col_ts_ns, + to_clob('abc') as col_large_string, + utl_raw.cast_to_raw('abc2') as col_binary, + to_blob(utl_raw.cast_to_raw('abc3')) as col_large_binary + from dual + """ + decimal_query = ( + "select cast(123.45 as decimal(10, 2)) as col_decimal128" + ) + + # determine dtype kind enumeration + ora_df = self.conn.fetch_df_all("select user from dual") + col = ora_df.get_column(0) + dtype_kind = type(col.dtype[0]) + + expected_dtypes = { + "COL_INT64": (dtype_kind.INT, 64, "l", "="), + "COL_DOUBLE": (dtype_kind.FLOAT, 64, "g", "="), + "COL_FLOAT": (dtype_kind.FLOAT, 64, "g", "="), + "COL_STRING": (dtype_kind.STRING, 8, "u", "="), + "COL_TS_SEC": (dtype_kind.DATETIME, 64, "tss:", "="), + "COL_TS_MS": (dtype_kind.DATETIME, 64, "tsm:", "="), + "COL_TS_US": (dtype_kind.DATETIME, 64, "tsu:", "="), + "COL_TS_NS": (dtype_kind.DATETIME, 64, "tsn:", "="), + "COL_LARGE_STRING": (dtype_kind.STRING, 8, "U", "="), + "COL_BINARY": (dtype_kind.STRING, 8, "z", "="), + "COL_LARGE_BINARY": (dtype_kind.STRING, 8, "Z", "="), + "COL_DECIMAL128": (dtype_kind.DECIMAL, 128, "d:10.2", "="), + } + + # check query without fetch_decimals enabled + ora_df = self.conn.fetch_df_all(query) + for i, name in enumerate(ora_df.column_names()): + col = ora_df.get_column(i) + self.assertEqual(col.dtype, expected_dtypes[name]) + + # check query with fetch_decimals enabled + with test_env.DefaultsContextManager("fetch_decimals", True): + ora_df = self.conn.fetch_df_all(decimal_query) + col = ora_df.get_column(0) + self.assertEqual(col.dtype, expected_dtypes["COL_DECIMAL128"]) + + def test_8029(self): + "8029 - verify get_buffers() with data frames containing null values" + self.__populate_table(DATASET_2) + statement = "select * from TestDataFrame order by Id" + ora_df = self.conn.fetch_df_all(statement) + country_col = ora_df.get_column_by_name("COUNTRY") + buffers = country_col.get_buffers() + self.assertEqual(len(buffers), 3) + self.assertIsNotNone(buffers["data"]) + self.assertIsNotNone(buffers["offsets"]) + self.assertIsNotNone(buffers["validity"]) + if __name__ == "__main__": test_env.run_test_cases() From aab7f11fbe0cc70df74d99909aee0c3aa9ae1977 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 12 Jun 2025 20:08:05 -0600 Subject: [PATCH 108/239] Tweak to bug fix for fetching multiple consecutive nulls in a data frame. 
--- src/oracledb/interchange/nanoarrow_bridge.pyx | 2 +- tests/test_8000_dataframe.py | 3 +++ tests/test_8100_dataframe_async.py | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index c09b3074..9e756819 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -342,13 +342,13 @@ cdef class OracleArrowArray: ArrowBitmap *bitamp if array is None: array = self + index = array.arrow_array.length - 1 bitmap = ArrowArrayValidityBitmap(array.arrow_array) if bitmap != NULL and bitmap.buffer.data != NULL: as_bool = ArrowBitGet(bitmap.buffer.data, index) if not as_bool: self.append_null() return 0 - index = array.arrow_array.length - 1 if array.arrow_type in (NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP): data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_int64 = data_buffer.data diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 64f0a33d..d384cab2 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -638,6 +638,8 @@ def test_8027(self): self.__check_interop() ora_df = self.conn.fetch_df_all( """ + select to_date('2025-06-12', 'YYYY-MM-DD') as data from dual + union all select to_date(null) as data from dual union all select to_date(null) as data from dual @@ -656,6 +658,7 @@ def test_8027(self): """ ) data = [ + (datetime.datetime(2025, 6, 12),), (None,), (None,), (None,), diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 04acbdb0..d38d516a 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -591,6 +591,8 @@ async def test_8123(self): self.__check_interop() ora_df = await self.conn.fetch_df_all( """ + select to_date('2025-06-12', 'YYYY-MM-DD') as data from dual + union all select to_date(null) as data from dual union all select to_date(null) as data from dual @@ -609,6 +611,7 @@ async def test_8123(self): """ ) data = [ + (datetime.datetime(2025, 6, 12),), (None,), (None,), (None,), From 3cc4270a4d91a6e1d40f75d75e6663d1f7ff2fd0 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 12 Jun 2025 20:21:59 -0600 Subject: [PATCH 109/239] Force use of Python 3.13.5 on Windows to avoid bug with Python 3.13.4. --- .github/workflows/build.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index c63161e0..4052c596 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -133,7 +133,7 @@ jobs: strategy: matrix: os: [windows-latest] - python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13.5'] arch: ['x86', ''] steps: From db97c39bf205f6973e8558d2d2449f9790a2b501 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 12 Jun 2025 21:16:20 -0600 Subject: [PATCH 110/239] Added support for the NCHAR, NVARCHAR and NCLOB data types in data frames (#505). --- doc/src/release_notes.rst | 2 + src/oracledb/impl/base/metadata.pyx | 6 ++- src/oracledb/impl/thick/var.pyx | 3 ++ tests/test_8000_dataframe.py | 8 ++++ tests/test_8100_dataframe_async.py | 61 +++++++++++++++++++++++++++++ 5 files changed, 78 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 89323bdd..ee1d5199 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -69,6 +69,8 @@ Common Changes (`issue 499 `__). 
- Fixed bug on Windows when fetching dates prior to 1970 and after 2038 (`issue 483 `__). + - Added support for the NCHAR, NVARCHAR and NCLOB data types + (`issue 505 `__). #) Added parameter ``pool_name`` to connection and pool creation methods to support Oracle Database 23ai multi-pool :ref:`drcp`. diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index a253c91a..076e478b 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -71,7 +71,8 @@ cdef class OracleMetadata: self._arrow_type = NANOARROW_TYPE_INT64 else: self._arrow_type = NANOARROW_TYPE_DOUBLE - elif db_type_num in (DB_TYPE_NUM_CHAR, DB_TYPE_NUM_VARCHAR): + elif db_type_num in (DB_TYPE_NUM_CHAR, DB_TYPE_NUM_VARCHAR, + DB_TYPE_NUM_NCHAR, DB_TYPE_NUM_NVARCHAR): self._arrow_type = NANOARROW_TYPE_STRING elif db_type_num == DB_TYPE_NUM_BINARY_FLOAT: self._arrow_type = NANOARROW_TYPE_FLOAT @@ -86,7 +87,8 @@ cdef class OracleMetadata: self._arrow_type = NANOARROW_TYPE_TIMESTAMP elif db_type_num == DB_TYPE_NUM_LONG_RAW: self._arrow_type = NANOARROW_TYPE_LARGE_BINARY - elif db_type_num == DB_TYPE_NUM_LONG_VARCHAR: + elif db_type_num in (DB_TYPE_NUM_LONG_VARCHAR, + DB_TYPE_NUM_LONG_NVARCHAR): self._arrow_type = NANOARROW_TYPE_LARGE_STRING elif db_type_num == DB_TYPE_NUM_RAW: self._arrow_type = NANOARROW_TYPE_BINARY diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 407fe2f9..2d2d0183 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -369,8 +369,11 @@ cdef class ThickVarImpl(BaseVarImpl): ora_data.buffer.as_bool = data.value.asBoolean elif ora_type_num in ( DPI_ORACLE_TYPE_CHAR, + DPI_ORACLE_TYPE_LONG_NVARCHAR, DPI_ORACLE_TYPE_LONG_VARCHAR, DPI_ORACLE_TYPE_LONG_RAW, + DPI_ORACLE_TYPE_NCHAR, + DPI_ORACLE_TYPE_NVARCHAR, DPI_ORACLE_TYPE_RAW, DPI_ORACLE_TYPE_VARCHAR, ): diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index d384cab2..d1f10497 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -683,11 +683,15 @@ def test_8028(self): cast(1.23 as binary_double) as col_double, cast(7.14 as binary_float) as col_float, cast('abcd' as varchar2(10)) as col_string, + cast('efgh' as nvarchar2(6)) as col_nstring, + cast('ijkl' as char(4)) as col_char, + cast('mnop' as nchar(4)) as col_nchar, cast(systimestamp as timestamp(0)) as col_ts_sec, cast(systimestamp as timestamp(3)) as col_ts_ms, cast(systimestamp as timestamp(6)) as col_ts_us, cast(systimestamp as timestamp(9)) as col_ts_ns, to_clob('abc') as col_large_string, + to_nclob('def') as col_large_nstring, utl_raw.cast_to_raw('abc2') as col_binary, to_blob(utl_raw.cast_to_raw('abc3')) as col_large_binary from dual @@ -706,11 +710,15 @@ def test_8028(self): "COL_DOUBLE": (dtype_kind.FLOAT, 64, "g", "="), "COL_FLOAT": (dtype_kind.FLOAT, 64, "g", "="), "COL_STRING": (dtype_kind.STRING, 8, "u", "="), + "COL_NSTRING": (dtype_kind.STRING, 8, "u", "="), + "COL_CHAR": (dtype_kind.STRING, 8, "u", "="), + "COL_NCHAR": (dtype_kind.STRING, 8, "u", "="), "COL_TS_SEC": (dtype_kind.DATETIME, 64, "tss:", "="), "COL_TS_MS": (dtype_kind.DATETIME, 64, "tsm:", "="), "COL_TS_US": (dtype_kind.DATETIME, 64, "tsu:", "="), "COL_TS_NS": (dtype_kind.DATETIME, 64, "tsn:", "="), "COL_LARGE_STRING": (dtype_kind.STRING, 8, "U", "="), + "COL_LARGE_NSTRING": (dtype_kind.STRING, 8, "U", "="), "COL_BINARY": (dtype_kind.STRING, 8, "z", "="), "COL_LARGE_BINARY": (dtype_kind.STRING, 8, "Z", "="), "COL_DECIMAL128": (dtype_kind.DECIMAL, 128, 
"d:10.2", "="), diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index d38d516a..a5959bd8 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -628,6 +628,67 @@ async def test_8123(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + async def test_8124(self): + "8124 - verify dtype for all Arrow types" + query = """ + select + cast(1 as number(10)) as col_int64, + cast(1.23 as binary_double) as col_double, + cast(7.14 as binary_float) as col_float, + cast('abcd' as varchar2(10)) as col_string, + cast('efgh' as nvarchar2(6)) as col_nstring, + cast('ijkl' as char(4)) as col_char, + cast('mnop' as nchar(4)) as col_nchar, + cast(systimestamp as timestamp(0)) as col_ts_sec, + cast(systimestamp as timestamp(3)) as col_ts_ms, + cast(systimestamp as timestamp(6)) as col_ts_us, + cast(systimestamp as timestamp(9)) as col_ts_ns, + to_clob('abc') as col_large_string, + to_nclob('def') as col_large_nstring, + utl_raw.cast_to_raw('abc2') as col_binary, + to_blob(utl_raw.cast_to_raw('abc3')) as col_large_binary + from dual + """ + decimal_query = ( + "select cast(123.45 as decimal(10, 2)) as col_decimal128" + ) + + # determine dtype kind enumeration + ora_df = await self.conn.fetch_df_all("select user from dual") + col = ora_df.get_column(0) + dtype_kind = type(col.dtype[0]) + + expected_dtypes = { + "COL_INT64": (dtype_kind.INT, 64, "l", "="), + "COL_DOUBLE": (dtype_kind.FLOAT, 64, "g", "="), + "COL_FLOAT": (dtype_kind.FLOAT, 64, "g", "="), + "COL_STRING": (dtype_kind.STRING, 8, "u", "="), + "COL_NSTRING": (dtype_kind.STRING, 8, "u", "="), + "COL_CHAR": (dtype_kind.STRING, 8, "u", "="), + "COL_NCHAR": (dtype_kind.STRING, 8, "u", "="), + "COL_TS_SEC": (dtype_kind.DATETIME, 64, "tss:", "="), + "COL_TS_MS": (dtype_kind.DATETIME, 64, "tsm:", "="), + "COL_TS_US": (dtype_kind.DATETIME, 64, "tsu:", "="), + "COL_TS_NS": (dtype_kind.DATETIME, 64, "tsn:", "="), + "COL_LARGE_STRING": (dtype_kind.STRING, 8, "U", "="), + "COL_LARGE_NSTRING": (dtype_kind.STRING, 8, "U", "="), + "COL_BINARY": (dtype_kind.STRING, 8, "z", "="), + "COL_LARGE_BINARY": (dtype_kind.STRING, 8, "Z", "="), + "COL_DECIMAL128": (dtype_kind.DECIMAL, 128, "d:10.2", "="), + } + + # check query without fetch_decimals enabled + ora_df = await self.conn.fetch_df_all(query) + for i, name in enumerate(ora_df.column_names()): + col = ora_df.get_column(i) + self.assertEqual(col.dtype, expected_dtypes[name]) + + # check query with fetch_decimals enabled + with test_env.DefaultsContextManager("fetch_decimals", True): + ora_df = await self.conn.fetch_df_all(decimal_query) + col = ora_df.get_column(0) + self.assertEqual(col.dtype, expected_dtypes["COL_DECIMAL128"]) + if __name__ == "__main__": test_env.run_test_cases() From 916d98207b65189732c4616a39604ffddf23b8ae Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:33:49 -0600 Subject: [PATCH 111/239] Update ODPI-C to released 5.6.0. 
--- src/oracledb/impl/thick/odpi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index 18fa0ef0..c4a65f6f 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit 18fa0ef0815b0256a8f5a1540ed4b2efc535298d +Subproject commit c4a65f6f92a15222543260d68c90d8769e2f2d81 From 765d929ff8694e059c23b5b7faa6ece59d909367 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:36:11 -0600 Subject: [PATCH 112/239] Doc updates. --- .github/ISSUE_TEMPLATE/bug_report.md | 8 +- doc/src/release_notes.rst | 258 ++++++++++----------- doc/src/user_guide/appendix_b.rst | 10 + doc/src/user_guide/appendix_c.rst | 4 + doc/src/user_guide/batch_statement.rst | 49 +++- doc/src/user_guide/bind.rst | 34 ++- doc/src/user_guide/connection_handling.rst | 2 +- doc/src/user_guide/dataframes.rst | 42 ++-- doc/src/user_guide/extending.rst | 28 ++- doc/src/user_guide/initialization.rst | 10 +- doc/src/user_guide/tracing.rst | 10 +- doc/src/user_guide/troubleshooting.rst | 21 +- doc/src/user_guide/tuning.rst | 14 +- 13 files changed, 309 insertions(+), 181 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 28be75c9..c5dddef3 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -23,8 +23,13 @@ Use Markdown syntax, see https://docs.github.com/github/writing-on-github/gettin diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ee1d5199..d710c6b5 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,8 +11,8 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. -oracledb 3.2.0 (TBD) --------------------- +oracledb `3.2.0 `__ (TBD) +-------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -51,10 +51,10 @@ Thick Mode Changes :attr:`DeqOptions.consumername`, :attr:`DeqOptions.correlation`, :attr:`DeqOptions.msgid`, :attr:`DeqOptions.transformation`, :attr:`EnqOptions.transformation`, :attr:`MessageProperties.correlation`, - or :attr:`MessageProperties.exceptionq` are set to ``None``. + or :attr:`MessageProperties.exceptionq` are set to *None*. #) Fixed a bug resulting in a ``ValueError`` exception when getting attribute :attr:`MessageProperties.enqtime` if the value is not available or - ``None``. + *None*. #) Fixed a memory leak when enqueuing to JSON queues with :ref:`Oracle Advanced Queuing `. @@ -92,8 +92,8 @@ Common Changes #) Improved the test suite and documentation. -oracledb 3.1.1 (May 2025) -------------------------- +oracledb `3.1.1 `__ (May 2025) +------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -128,8 +128,8 @@ Common Changes (`PR 479 `__). -oracledb 3.1.0 (April 2025) ---------------------------- +oracledb `3.1.0 `__ (April 2025) +--------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -212,8 +212,8 @@ Common Changes #) Improved the test suite and documentation. 
-oracledb 3.0.0 (March 2025) ---------------------------- +oracledb `3.0.0 `__ (March 2025) +--------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -373,8 +373,8 @@ Common Changes #) Improved test suite and documentation. -oracledb 2.5.1 (December 2024) ------------------------------- +oracledb `2.5.1 `__ (December 2024) +------------------------------------------------------------------------------------------------------ Thin Mode Changes +++++++++++++++++ @@ -399,7 +399,7 @@ Thick Mode Changes #) Fixed bug calculating property :data:`Connection.max_identifier_length` when using Oracle Client libraries 12.1, or older. The returned value may - now be ``None`` when the size cannot be reliably determined by + now be *None* when the size cannot be reliably determined by python-oracledb, which occurs when using Oracle Client libraries 12.1 (or older) to connect to Oracle Database 12.2, or later. (`ODPI-C `__ dependency update). @@ -414,8 +414,8 @@ Common Changes (`issue 429 `__). -oracledb 2.5.0 (November 2024) ------------------------------- +oracledb `2.5.0 `__ (November 2024) +------------------------------------------------------------------------------------------------------ Thin Mode Changes +++++++++++++++++ @@ -504,7 +504,7 @@ Common Changes #) The variables saved with :meth:`Cursor.setinputsizes()` are now forgotten when an exception is raised (`issue 411 `__). -#) Fixed bug when calling :meth:`ConnectParams.set()` with a value of ``None`` +#) Fixed bug when calling :meth:`ConnectParams.set()` with a value of *None* for the ``connectiontype`` and ``session_callback`` parameters. Previously, any values set earlier would be improperly cleared and now they are retained @@ -512,8 +512,8 @@ Common Changes #) Improved test suite and documentation. -oracledb 2.4.1 (August 2024) ----------------------------- +oracledb `2.4.1 `__ (August 2024) +---------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -523,8 +523,8 @@ Thin Mode Changes (`issue 383 `__). -oracledb 2.4.0 (August 2024) ----------------------------- +oracledb `2.4.0 `__ (August 2024) +---------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -579,8 +579,8 @@ Common Changes segfault and Thick mode would result in unusual errors. -oracledb 2.3.0 (July 2024) --------------------------- +oracledb `2.3.0 `__ (July 2024) +-------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -675,8 +675,8 @@ Common Changes #) Internal changes to ensure that no circular imports occur. -oracledb 2.2.1 (May 2024) -------------------------- +oracledb `2.2.1 `__ (May 2024) +------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -697,8 +697,8 @@ Thin Mode Changes potential hangs in some configurations under some circumstances. -oracledb 2.2.0 (May 2024) -------------------------- +oracledb `2.2.0 `__ (May 2024) +------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -739,8 +739,8 @@ Common Changes ``DPY-4030: invalid DRCP pool boundary {boundary}``. 
-oracledb 2.1.2 (April 2024) ---------------------------- +oracledb `2.1.2 `__ (April 2024) +--------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -757,8 +757,8 @@ Common Changes containing the ``/`` character. -oracledb 2.1.1 (March 2024) ---------------------------- +oracledb `2.1.1 `__ (March 2024) +--------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -780,8 +780,8 @@ Thin Mode Changes losing output due to buffering when multiple threads are running. -oracledb 2.1.0 (March 2024) ---------------------------- +oracledb `2.1.0 `__ (March 2024) +--------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -890,11 +890,11 @@ Common Changes #) Fixed bug in the calculation of :data:`Cursor.rowcount` under some circumstances. #) Connection parameters that are strings now treat an empty string in the - same way as the value ``None``. + same way as the value *None*. -oracledb 2.0.1 (January 2024) ------------------------------ +oracledb `2.0.1 `__ (January 2024) +----------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -935,8 +935,8 @@ Common Changes #) Bumped minimum requirement of Cython to 3.0. -oracledb 2.0.0 (December 2023) ------------------------------- +oracledb `2.0.0 `__ (December 2023) +------------------------------------------------------------------------------------------------------ Thin Mode Changes +++++++++++++++++ @@ -1034,8 +1034,8 @@ Common Changes #) Black is now used to format Python code and ruff to lint Python code. -oracledb 1.4.2 (October 2023) ------------------------------ +oracledb `1.4.2 `__ (October 2023) +----------------------------------------------------------------------------------------------------- Thick Changes +++++++++++++ @@ -1050,8 +1050,8 @@ Common Changes (`issue 237 `__). -oracledb 1.4.1 (September 2023) -------------------------------- +oracledb `1.4.1 `__ (September 2023) +------------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1074,7 +1074,7 @@ Common Changes #) Fixed bug when calling :meth:`Cursor.execute()` or :meth:`Cursor.executemany()` with missing bind data after calling :meth:`Cursor.setinputsizes()` with at least one of the values supplied as - ``None`` + *None* (`issue 217 `__). #) SQL statement parsing now raises ``DPY-2041: missing ending quote (') in string`` or ``DPY-2042: missing ending quote (") in identifier`` for @@ -1084,8 +1084,8 @@ Common Changes #) Added missing ">" to ``repr()`` of :ref:`sodadb`. -oracledb 1.4.0 (August 2023) ----------------------------- +oracledb `1.4.0 `__ (August 2023) +---------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1141,7 +1141,7 @@ Thick Mode Changes :meth:`~SodaOperation.lock()` was added which requires Oracle Client 21.3 or higher (or Oracle Client 19 from 19.11). #) Relaxed restriction for end-to-end tracing string connection - attributes. These values can now be set to the value ``None`` which will be + attributes. These values can now be set to the value *None* which will be treated the same as an empty string. 
#) Fixed bug when using external authentication with an Easy Connect connection string. @@ -1197,8 +1197,8 @@ Common Changes (`issue 204 `__). #) Improved test suite and documentation. -oracledb 1.3.2 (June 2023) --------------------------- +oracledb `1.3.2 `__ (June 2023) +-------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1235,8 +1235,8 @@ Common Changes consistency between Thin and Thick modes. -oracledb 1.3.1 (April 2023) ---------------------------- +oracledb `1.3.1 `__ (April 2023) +--------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1245,7 +1245,7 @@ Thin Mode Changes (`issue 172 `__). #) Fixed bug with Oracle Database 23ai when SQL is executed after first being parsed. -#) Fixed bug when :data:`ConnectionPool.timeout` is not `None` when creating a +#) Fixed bug when :data:`ConnectionPool.timeout` is not *None* when creating a connection pool (`issue 166 `__). #) Fixed bug when a query is re-executed after an underlying table is dropped @@ -1260,8 +1260,8 @@ Common Changes #) Improved test suite and samples. -oracledb 1.3.0 (March 2023) ---------------------------- +oracledb `1.3.0 `__ (March 2023) +--------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1365,8 +1365,8 @@ Common Changes exceptions are handled more gracefully. -oracledb 1.2.2 (January 2023) ------------------------------ +oracledb `1.2.2 `__ (January 2023) +----------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1396,8 +1396,8 @@ Common Changes elements. -oracledb 1.2.1 (December 2022) ------------------------------- +oracledb `1.2.1 `__ (December 2022) +------------------------------------------------------------------------------------------------------ Thin Mode Changes +++++++++++++++++ @@ -1418,7 +1418,7 @@ Thick Mode Changes allowing it to be closed automatically when it goes out of scope). #) Fixed bug when calling :meth:`Subscription.registerquery()` with bind values. -#) Fixed bug that caused :data:`Message.dbname` to always be the value `None`. +#) Fixed bug that caused :data:`Message.dbname` to always be the value *None*. Common Changes ++++++++++++++ @@ -1427,8 +1427,8 @@ Common Changes instead of a hard-coded ``oracledb``. -oracledb 1.2.0 (November 2022) ------------------------------- +oracledb `1.2.0 `__ (November 2022) +------------------------------------------------------------------------------------------------------ Thin Mode Changes +++++++++++++++++ @@ -1474,13 +1474,13 @@ Common Changes #) Added support for Python 3.11. #) Added attribute :attr:`DbObjectType.package_name` which contains the name - of the package if the type is a PL/SQL type (otherwise, it will be `None`). + of the package if the type is a PL/SQL type (otherwise, it will be *None*). #) Added sample for loading data from a CSV file. #) Improved test suite and documentation. -oracledb 1.1.1 (September 2022) -------------------------------- +oracledb `1.1.1 `__ (September 2022) +------------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1513,8 +1513,8 @@ Common Changes #) Improved test suite and documentation. 
-oracledb 1.1.0 (September 2022) -------------------------------- +oracledb `1.1.0 `__ (September 2022) +------------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1567,8 +1567,8 @@ Common Changes #) Improved samples and documentation. -oracledb 1.0.3 (August 2022) ----------------------------- +oracledb `1.0.3 `__ (August 2022) +---------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1594,8 +1594,8 @@ Common Changes (`issue 35 `__). -oracledb 1.0.2 (July 2022) --------------------------- +oracledb `1.0.2 `__ (July 2022) +-------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1629,8 +1629,8 @@ Common Changes #) Improved samples and documentation. -oracledb 1.0.1 (June 2022) --------------------------- +oracledb `1.0.1 `__ (June 2022) +-------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ @@ -1891,7 +1891,7 @@ cx_Oracle 8.0 (June 2020) API. - The variable attribute :data:`~Variable.type` now refers to one of the new database type constants if the variable does not contain objects - (previously it was None in that case). + (previously it was *None* in that case). - The attribute :data:`~LOB.type` was added to LOB values. - The attribute ``type`` was added to attributes of object types. - The attribute ``element_type`` was added to object types. @@ -1954,7 +1954,7 @@ cx_Oracle 7.3 (December 2019) to the database (as opposed to the default server initiated connections) created by calling :meth:`Connection.subscribe()`. #) Added :attr:`support ` for returning the rowid of the - last row modified by an operation on a cursor (or None if no row was + last row modified by an operation on a cursor (or *None* if no row was modified). #) Added support for setting the ``maxSessionsPerShard`` attribute when creating connection pools. @@ -1988,7 +1988,7 @@ cx_Oracle 7.3 (December 2019) ``DPI-1012: proxy authentication is not possible with homogeneous pools`` instead of a ``ProgrammingError`` exception with the message ``pool is homogeneous. Proxy authentication is not possible.`` since this - check is done by ODPI-C. An empty string (or None) for the user name will + check is done by ODPI-C. An empty string (or *None*) for the user name will no longer generate an exception. #) Exception ``InterfaceError: not connected`` is now always raised when an operation is attempted with a closed connection. Previously, a number of @@ -2176,7 +2176,7 @@ cx_Oracle 7.1 (February 2019) Note that this support is limited to the size of VARCHAR2 columns in the database (either 4000 or 32767 bytes). #) Added support for allowing the typename parameter in method - :meth:`Cursor.var()` to be None or a valid object type created by the + :meth:`Cursor.var()` to be *None* or a valid object type created by the method :meth:`Connection.gettype()`, as requested (`issue 231 `__). #) Added support for getting and setting attributes of type RAW on Oracle @@ -2194,8 +2194,8 @@ cx_Oracle 7.1 (February 2019) decimal value is automatically returned instead. 
#) Corrected handling of multiple calls to method :meth:`Cursor.executemany()` where all of the values in one of the columns - passed to the first call are all None and a subsequent call has a value - other than None in the same column + passed to the first call are all *None* and a subsequent call has a value + other than *None* in the same column (`issue 236 `__). #) Added additional check for calling :meth:`Cursor.setinputsizes()` with an empty dictionary in order to avoid the error "cx_Oracle.ProgrammingError: @@ -2660,8 +2660,8 @@ cx_Oracle 6.0 (August 2017) - On platforms other than Windows, if the regular method for loading the Oracle Client libraries fails, try using $ORACLE_HOME/lib/libclntsh.so (`ODPI-C issue 20 `__). - - Use the environment variable ``DPI_DEBUG_LEVEL`` at runtime, not compile - time. + - Use the environment variable ``DPI_DEBUG_LEVEL`` at runtime, not + compile time. - Added support for DPI_DEBUG_LEVEL_ERRORS (reports errors and has the value 8) and DPI_DEBUG_LEVEL_SQL (reports prepared SQL statement text and has the value 16) in order to further improve the ability to debug @@ -2670,9 +2670,9 @@ cx_Oracle 6.0 (August 2017) #) Delay initialization of the ODPI-C library until the first standalone connection or session pool is created so that manipulation of the - environment variable ``NLS_LANG`` can be performed after the module has been - imported; this also has the added benefit of reducing the number of errors - that can take place when the module is imported. + environment variable ``NLS_LANG`` can be performed after the module has + been imported; this also has the added benefit of reducing the number of + errors that can take place when the module is imported. #) Prevent binding of null values from generating the exception "ORA-24816: Expanded non LONG bind data supplied after actual LONG or LOB column" in certain circumstances @@ -2804,7 +2804,7 @@ cx_Oracle 6.0 beta 1 (April 2017) #) Added support for smallint and float data types in Oracle objects, as `requested `__. #) An exception is no longer raised when a collection is empty for methods - :meth:`Object.first()` and :meth:`Object.last()`. Instead, the value None + :meth:`Object.first()` and :meth:`Object.last()`. Instead, the value *None* is returned to be consistent with the methods :meth:`Object.next()` and :meth:`Object.prev()`. #) If the environment variables NLS_LANG and NLS_NCHAR are being used, they @@ -2868,7 +2868,7 @@ cx_Oracle 5.3 (March 2017) :meth:`creating a connection ` and added support for setting these when creating a session pool. These can now be used instead of setting the environment variables ``NLS_LANG`` and ``NLS_NCHAR``. -#) Use None instead of 0 for items in the :attr:`Cursor.description` attribute +#) Use *None* instead of *0* for items in the :attr:`Cursor.description` attribute that do not have any validity. #) Changed driver name to match informal driver name standard used by Oracle for other drivers. @@ -3475,13 +3475,13 @@ cx_Oracle 4.1 beta 1 (September 2004) #) The cursor method arrayvar() will now accept the actual list so that it is not necessary to call cursor.arrayvar() followed immediately by var.setvalue(). -#) Fixed bug where attempts to execute the statement "None" with bind +#) Fixed bug where attempts to execute the statement *None* with bind variables would cause a segmentation fault. #) Added support for binding by position (paramstyle = "numeric"). 
#) Removed memory leak created by calls to OCIParamGet() which were not mirrored by calls to OCIDescriptorFree(). Thanks to Mihai Ibanescu for pointing this out and providing a patch. -#) Added support for calling cursor.executemany() with statement None +#) Added support for calling cursor.executemany() with statement *None* implying that the previously prepared statement ought to be executed. Thanks to Mihai Ibanescu for providing a patch. #) Added support for rebinding variables when a subsequent call to @@ -3599,18 +3599,18 @@ cx_Oracle 3.1 (August 2003) cx_Oracle 3.0a (June 2003) -------------------------- -#) Fixed bug where zero length PL/SQL arrays were being mishandled +#) Fixed bug where zero length PL/SQL arrays were being mishandled. #) Fixed support for the data type "float" in Oracle; added one to the display size to allow for the sign of the number, if necessary; changed the display size of unconstrained numbers to 127, which is the largest - number that Oracle can handle + number that Oracle can handle. #) Added support for retrieving the description of a bound cursor before - fetching it -#) Fixed a couple of build issues on Mac OS X, AIX and Solaris (64-bit) -#) Modified documentation slightly based on comments from several people -#) Included files in MANIFEST that are needed to generate the binaries + fetching it. +#) Fixed a couple of build issues on Mac OS X, AIX and Solaris (64-bit). +#) Modified documentation slightly based on comments from several people. +#) Included files in MANIFEST that are needed to generate the binaries. #) Modified test suite to work within the test environment at Computronix - as well as within the packages that are distributed + as well as within the packages that are distributed. cx_Oracle 3.0 (March 2003) @@ -3618,44 +3618,44 @@ cx_Oracle 3.0 (March 2003) #) Removed support for connection to Oracle7 databases; it is entirely possible that it will still work but I no longer have any way of testing - and Oracle has dropped any meaningful support for Oracle7 anyway + and Oracle has dropped any meaningful support for Oracle7 anyway. #) Fetching of strings is now done with predefined memory areas rather than dynamic memory areas; dynamic fetching of strings was causing problems with Oracle 9i in some instances and databases using a different character - set other than US ASCII + set other than US ASCII. #) Fixed bug where segfault would occur if the '/' character preceded the '@' - character in a connect string + character in a connect string. #) Added two new cursor methods var() and arrayvar() in order to eliminate the need for setinputsizes() when defining PL/SQL arrays and as a generic - method of acquiring bind variables directly when needed + method of acquiring bind variables directly when needed. #) Fixed support for binding cursors and added support for fetching cursors (these are known as ref cursors in PL/SQL). #) Eliminated discrepancy between the array size used internally and the array size specified by the interface user; this was done earlier to avoid bus errors on 64-bit platforms but another way has been found to get around that issue and a number of people were getting confused because of - the discrepancy + the discrepancy. #) Added support for the attribute "connection" on cursors, an optional - DB API extension + DB API extension. 
#) Added support for passing a dictionary as the second parameter for the cursor.execute() method in order to comply with the DB API more closely; the method of passing parameters with keyword parameters is still supported - and is in fact preferred + and is in fact preferred. #) Added support for the attribute "statement" on cursors which is a - reference to the last SQL statement prepared or executed + reference to the last SQL statement prepared or executed. #) Added support for passing any sequence to callproc() rather than just - lists as before + lists as before. #) Fixed bug where segfault would occur if the array size was changed after - the cursor was executed but before it was fetched + the cursor was executed but before it was fetched. #) Ignore array size when performing executemany() and use the length of the - list of parameters instead + list of parameters instead. #) Rollback when connection is closed or destroyed to follow DB API rather - than use the Oracle default (which is commit) -#) Added check for array size too large causing an integer overflow -#) Added support for iterators for Python 2.2 and above -#) Added test suite based on PyUnitTest + than use the Oracle default (which is commit). +#) Added check for array size too large causing an integer overflow. +#) Added support for iterators for Python 2.2 and above. +#) Added test suite based on PyUnitTest. #) Added documentation in HTML format similar to the documentation for the - core Python library + core Python library. cx_Oracle 2.5a (August 2002) @@ -3664,7 +3664,7 @@ cx_Oracle 2.5a (August 2002) #) Fix problem with Oracle 9i and retrieving strings; it seems that Oracle 9i uses the correct method for dynamic callback but Oracle 8i will not work with that method so an #ifdef was added to check for the existence of an - Oracle 9i feature; thanks to Paul Denize for discovering this problem + Oracle 9i feature; thanks to Paul Denize for discovering this problem. cx_Oracle 2.5 (July 2002) @@ -3672,19 +3672,19 @@ cx_Oracle 2.5 (July 2002) #) Added flag OPT_NoOracle7 which, if set, assumes that connections are being made to Oracle8 or higher databases; this allows for eliminating the - overhead in performing this check at connect time + overhead in performing this check at connect time. #) Added flag OPT_NumbersAsStrings which, if set, returns all numbers as strings rather than integers or floats; this flag is used when defined - variables are created (during select statements only) + variables are created (during select statements only). #) Added flag OPT_Threading which, if set, uses OCI threading mode; there is a significant performance degradation in this mode (about 15-20%) but it does allow threads to share connections (threadsafety level 2 according to the Python Database API 2.0); note that in order to support this, Oracle 8i or - higher is now required + higher is now required. #) Added Py_BEGIN_ALLOW_THREADS and Py_END_ALLOW_THREADS pairs where - applicable to support threading during blocking OCI calls + applicable to support threading during blocking OCI calls. #) Added global method attach() to cx_Oracle to support attaching to an - existing database handle (as provided by PowerBuilder, for example) + existing database handle (as provided by PowerBuilder, for example). 
#) Eliminated the cursor method fetchbinds() which was used for returning the list of bind variables after execution to get the values of out variables; the cursor method setinputsizes() was modified to return the list of bind @@ -3693,30 +3693,30 @@ cx_Oracle 2.5 (July 2002) these variables have three methods available to them: getvalue([]) to get the value of a variable, setvalue(, ) to set its value and copy(, , ) to copy the value from a variable in a - more efficient manner than setvalue(getvalue()) + more efficient manner than setvalue(getvalue()). #) Implemented cursor method executemany() which expects a list of - dictionaries for the parameters -#) Implemented cursor method callproc() + dictionaries for the parameters. +#) Implemented cursor method callproc(). #) Added cursor method prepare() which parses (prepares) the statement for - execution; subsequent execute() or executemany() calls can pass None as the - statement which will imply use of the previously prepared statement; used - for high performance only + execution; subsequent execute() or executemany() calls can pass *None* as + the statement which will imply use of the previously prepared statement; + used for high performance only. #) Added cursor method fetchraw() which will perform a raw fetch of the cursor returning the number of rows thus fetched; this is used to avoid the - overhead of generating result sets; used for high performance only + overhead of generating result sets; used for high performance only. #) Added cursor method executemanyprepared() which is identical to the method executemany() except that it takes a single parameter which is the number of times to execute a previously prepared statement and it assumes that the bind variables already have their values set; used for high performance - only -#) Added support for rowid being returned in a select statement -#) Added support for comparing dates returned by cx_Oracle + only. +#) Added support for rowid being returned in a select statement. +#) Added support for comparing dates returned by cx_Oracle. #) Integrated patch from Andre Reitz to set the null ok flag in the - description attribute of the cursor + description attribute of the cursor. #) Integrated patch from Andre Reitz to setup.py to support compilation with - Python 1.5 + Python 1.5. #) Integrated patch from Benjamin Kearns to setup.py to support compilation - on Cygwin + on Cygwin. cx_Oracle 2.4 (January 2002) @@ -3746,7 +3746,7 @@ cx_Oracle 2.3 (October 2001) ---------------------------- #) Incremental performance enhancements (dealing with reusing cursors and - bind handles) + bind handles). #) Ensured that arrays of integers with a single float in them are all treated as floats, as suggested by Martin Koch. #) Fixed code dealing with scale and precision for both defining a numeric @@ -3765,7 +3765,7 @@ cx_Oracle 2.2 (July 2001) Brad Powell. #) Added function write(Value, [Offset]) to LOB variables as requested by Matthias Kirst. -#) Procedure execute() on Cursor objects now permits a value None for the +#) Procedure execute() on Cursor objects now permits a value *None* for the statement which means that the previously prepared statement will be executed and any input sizes set earlier will be retained. This was done to improve the performance of scripts that execute one statement many times. 
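The statement re-execution behaviour described in the note above is still
available in current python-oracledb: after :meth:`Cursor.prepare()` has been
called, the statement can be passed as *None* to re-execute the prepared
statement. A minimal sketch (assuming an existing connection; the table,
column, and bind variable names are illustrative only) is:

.. code-block:: python

    cursor = connection.cursor()

    # Parse the statement once
    cursor.prepare("select dname from dept where deptno = :deptno")

    # Passing None for the statement re-executes the prepared statement
    for deptno in (10, 20, 30):
        cursor.execute(None, deptno=deptno)
        print(cursor.fetchall())
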
diff --git a/doc/src/user_guide/appendix_b.rst b/doc/src/user_guide/appendix_b.rst index f2725fea..68242bdc 100644 --- a/doc/src/user_guide/appendix_b.rst +++ b/doc/src/user_guide/appendix_b.rst @@ -238,6 +238,16 @@ not supported in the Oracle Client libraries that are used in python-oracledb Thick mode. Note changing the type of bind variables for the same SQL text is inappropriate and gives indeterminate results in both modes. +Duplicate SQL Bind Variable Placeholders in Thin and Thick Modes +================================================================ + +To use python-oracledb Thin mode when you have duplicate bind variable +placeholder names in a SQL statement and are :ref:`binding by position +`, then supply a value for each use of the placeholders, see +:ref:`dupbindplaceholders`. + +This does not apply to PL/SQL blocks. + Error Handling in Thin and Thick Modes ====================================== diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index 0d8fa3d8..a0a74455 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -424,6 +424,10 @@ need to be made in addition to the common :ref:`commonupgrade`: environment variable. Thick mode does use these variables. See :ref:`globalization`. +9. If SQL statements contain repeated bind variable placeholder names, and you + are :ref:`binding by position `, then make sure that a value + is passed for each use of the placeholder, see :ref:`dupbindplaceholders`. + .. _upgradethick: Additional Upgrade Steps to use python-oracledb Thick Mode diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index 17a2831a..2248e4a2 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -4,6 +4,16 @@ Batch Statement and Bulk Copy Operations **************************************** +Python-oracledb is perfect for large ETL ("Extract, Transform, Load") data +operations. + +This chapter focuses on efficient data ingestion. Python-oracledb lets you +easily optimize batch insertion, and also allows "noisy" data (values not in a +suitable format) to be filtered for review while other, correct, values are +inserted. + +Related topics include :ref:`tuning` and :ref:`dataframeformat`. + Batch Statement Execution ========================= @@ -12,7 +22,7 @@ Inserting, updating or deleting multiple rows can be performed efficiently with python-oracledb. This method can significantly outperform repeated calls to :meth:`Cursor.execute()` by reducing network transfer costs and database overheads. The :meth:`~Cursor.executemany()` method can also be used to -execute PL/SQL statements multiple times at once. +execute a PL/SQL statement multiple times in one call. There are examples in the `GitHub examples `__ @@ -92,6 +102,8 @@ the bind variable placeholder names: cursor.executemany("insert into ParentTable values :pid, :pdesc)", data) +.. _predefmemory: + Predefining Memory Areas ------------------------ @@ -505,7 +517,8 @@ Bulk Copying Data between Databases ----------------------------------- The :meth:`Cursor.executemany()` function is useful for copying data from one -database to another: +database to another, for example in an ETL ("Extract, Transform, Load") +workflow: .. 
code-block:: python @@ -523,21 +536,37 @@ database to another: # Perform bulk fetch and insertion source_cursor.execute("select c1, c2 from MySrcTable") while True: + + # Extract the records rows = source_cursor.fetchmany() if not rows: break + + # Optionally transform the records here + # ... + + # Load the records into the target database target_cursor.executemany("insert into MyDestTable values (:1, :2)", rows) target_connection.commit() -Tune the :attr:`~Cursor.arraysize` value according to notes in -:ref:`tuningfetch`. Use ``setinputsizes()`` according to `Predefining Memory -Areas`_. +The :attr:`~Cursor.arraysize` value alters how many rows each +:meth:`Cursor.fetchmany()` call returns, see :ref:`tuningfetch`. The +:meth:`~Cursor.setinputsizes()` call is used to optimize memory allocation when +inserting with :meth:`~Cursor.executemany()`, see :ref:`predefmemory`. You +may also want to tune the SDU setting for best nework performance, see +:ref:`tuning`. + +If you are inserting back into the same database that the records originally +came from, you do not need to open a second connection. Instead, both cursors +can be obtained from one connection. -Note that when copying data to another table in the same database, it may be -preferable to use INSERT INTO SELECT or CREATE AS SELECT to avoid the overhead -of copying data to, and back from, the Python process. This also avoids any -data type changes. For example to create a complete copy of a table: +**Avoiding Copying Data Over the Network** + +When copying data to another table in the same database, it may be preferable +to use INSERT INTO SELECT or CREATE AS SELECT to avoid the overhead of copying +data to, and back from, the Python process. This also avoids any data type +changes. For example to create a complete copy of a table: .. code-block:: python @@ -547,3 +576,5 @@ Similarly, when copying to a different database, consider creating a `database link `__ between the databases and using INSERT INTO SELECT or CREATE AS SELECT. + +You can control the data transfer by changing your SELECT statement. diff --git a/doc/src/user_guide/bind.rst b/doc/src/user_guide/bind.rst index fe5394d6..a5531022 100644 --- a/doc/src/user_guide/bind.rst +++ b/doc/src/user_guide/bind.rst @@ -146,14 +146,36 @@ Python tuples can also be used for binding by position: If only a single bind placeholder is used in the SQL or PL/SQL statement, the data can be a list like ``[280]`` or a single element tuple like ``(280,)``. -When using bind by position for SQL statements, the order of the bind values -must exactly match the order of each bind variable and duplicated names must -have their values repeated. For PL/SQL statements, however, the order of the -bind values must exactly match the order of each **unique** bind variable found -in the PL/SQL block and values should not be repeated. In order to avoid this -difference, binding by name is recommended when bind variable names are +.. _dupbindplaceholders: + +Duplicate Bind Variable Placeholders +==================================== + +:ref:`Binding by name ` is recommended when bind variable +placeholder names are repeated in statements. + +In python-oracledb Thin mode, when :ref:`binding by position ` +for SQL statements, the order of the bind values must exactly match the order +of each bind variable placeholder and duplicated names must have their values +repeated: + +.. 
code-block:: python + + cursor.execute(""" + select dname from dept1 where deptno = :1 + union all + select dname from dept2 where deptno = :1 = """, [30, 30]) + +In some cases python-oracledb Thick mode may allow non-duplicated values for +SQL statements, but this usage is not consistent and is not recommended. It +will result in an error in python-oracledb Thin mode. + +When binding by position for PL/SQL calls in python-oracledb Thin or Thick +modes, the order of the bind values must exactly match the order of each +**unique** placeholder found in the PL/SQL block and values should not be repeated. +Binding by name does not have these issues. Bind Direction ============== diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 4d2bc68a..9d2f9860 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -1968,7 +1968,7 @@ For example: oracledb.register_protocol("ldap", ldap_hook) - connection = oracledb.connect(user="hr" password=userpwd, + connection = oracledb.connect(user="hr", password=userpwd, dsn="ldap://ldapserver/dbname,cn=OracleContext,dc=dom,dc=com") You can modify or extend this as needed, for example to use an LDAP module that diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index 64dcbb4e..a33f1b85 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -108,32 +108,42 @@ Oracle Database will result in an exception. :ref:`Output type handlers * - Oracle Database Type - Arrow Data Type - * - DB_TYPE_NUMBER - - DECIMAL128, INT64, or DOUBLE - * - DB_TYPE_CHAR - - STRING - * - DB_TYPE_VARCHAR - - STRING - * - DB_TYPE_BINARY_FLOAT - - FLOAT * - DB_TYPE_BINARY_DOUBLE - DOUBLE + * - DB_TYPE_BINARY_FLOAT + - FLOAT + * - DB_TYPE_BLOB + - LARGE_BINARY * - DB_TYPE_BOOLEAN - BOOLEAN + * - DB_TYPE_CHAR + - STRING + * - DB_TYPE_CLOB + - LARGE_STRING * - DB_TYPE_DATE - TIMESTAMP + * - DB_TYPE_LONG + - LARGE_STRING + * - DB_TYPE_LONG_RAW + - LARGE_BINARY + * - DB_TYPE_NCHAR + - STRING + * - DB_TYPE_NCLOB + - LARGE_STRING + * - DB_TYPE_NUMBER + - DECIMAL128, INT64, or DOUBLE + * - DB_TYPE_NVARCHAR + - STRING + * - DB_TYPE_RAW + - BINARY * - DB_TYPE_TIMESTAMP - TIMESTAMP * - DB_TYPE_TIMESTAMP_LTZ - TIMESTAMP * - DB_TYPE_TIMESTAMP_TZ - TIMESTAMP - * - DB_TYPE_CLOB - - LARGE_STRING - * - DB_TYPE_BLOB - - LARGE_BINARY - * - DB_TYPE_RAW - - BINARY + * - DB_TYPE_VARCHAR + - STRING When converting Oracle Database NUMBERs: @@ -156,8 +166,8 @@ When converting Oracle Database DATEs and TIMESTAMPs: - Arrow TIMESTAMPs will not have timezone data. -- For Oracle Database DATE types, the Arrow TIMESTAMP will have a time unit of - "seconds". +- For Oracle Database DATE columns, the Arrow TIMESTAMP will have a time unit + of "seconds". - For Oracle Database TIMESTAMP types, the Arrow TIMESTAMP time unit depends on the Oracle type's fractional precision as shown in the table below: diff --git a/doc/src/user_guide/extending.rst b/doc/src/user_guide/extending.rst index 3cba483c..5d324884 100644 --- a/doc/src/user_guide/extending.rst +++ b/doc/src/user_guide/extending.rst @@ -70,7 +70,7 @@ instead:: select department_name from departments where department_id = :id - ORA-00942: table or view does not exist + ORA-00942: table or view "HR"."DEPARTMENTS" does not exist In production applications, be careful not to log sensitive information. @@ -174,6 +174,7 @@ strings prefixed with "myprefix://". 
zip_safe = False package_dir = =src + install_requires = oracledb [options.packages.find] where = src @@ -224,6 +225,31 @@ strings prefixed with "myprefix://". python -m pip uninstall myplugin +Another sample plugin shows how all connection creations can be logged, +regardless of the connection string. If the plugin +``myplugin/src/oracledb/plugins/myplugin.py`` registers a :ref:`connection +parameter hook `: + +.. code-block:: python + + import oracledb + + def my_params_hook(params: oracledb.ConnectParams): + print(f"Connecting to the database as {params.user}") + + oracledb.register_params_hook(my_params_hook) + +Then running an application that contains: + +.. code-block:: python + + connection = oracledb.connect(user="hr", password=userpwd, + dsn="dbhost.example.com/orclpdb") + +will print the trace output:: + + Connecting to the database as hr + .. _connectionhooks: Connection Hooks diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index bc860224..1521adca 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -312,9 +312,10 @@ terminal. Explicitly Enabling python-oracledb Thin Mode ============================================= -Python-oracledb defaults to Thin mode after determining that Thick mode is not -going to be used. In one special case, you may wish to explicitly enable Thin -mode to prevent Thick mode from being enabled later. +Python-oracledb defaults to Thin mode but can be changed to use Thick mode. In +one special case, you may wish to explicitly enable Thin mode by calling +:meth:`oracledb.enable_thin_mode()` which will prevent Thick mode from ever +being used. Most applications will not need to call this method. To allow application portability, the driver's internal logic allows applications to initially attempt :ref:`standalone connection @@ -325,7 +326,8 @@ version that requires Thick mode. This heuristic means Thin mode is not enforced until the initial connection is successful. Since all connections must be the same mode, any second and subsequent concurrent Thin mode connection attempt will wait for the initial standalone connection to succeed, -meaning the driver mode is no longer potentially changeable to Thick mode. +meaning the driver mode is no longer potentially changeable to Thick mode, thus +letting those additional connections be established in Thin mode. 
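A minimal sketch of this pattern is shown below; the credentials and connect
string are placeholders for your own values:

.. code-block:: python

    import oracledb

    # Ensure Thick mode can never be enabled in this process
    oracledb.enable_thin_mode()

    # All subsequent connections use Thin mode
    connection = oracledb.connect(user="hr", password=userpwd,
                                  dsn="dbhost.example.com/orclpdb")
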
If you have multiple threads concurrently creating standalone Thin mode connections, you may wish to call :meth:`oracledb.enable_thin_mode()` as part diff --git a/doc/src/user_guide/tracing.rst b/doc/src/user_guide/tracing.rst index 9f7c5bc4..1aa9c145 100644 --- a/doc/src/user_guide/tracing.rst +++ b/doc/src/user_guide/tracing.rst @@ -187,13 +187,9 @@ For an application that does a single query, the log file might contain a tracing line consisting of the prefix 'ODPI', a thread identifier, a timestamp, and the SQL statement executed:: - ODPI [26188] 2019-03-26 09:09:03.909: ODPI-C 3.1.1 - ODPI [26188] 2019-03-26 09:09:03.909: debugging messages initialized at level 16 - ODPI [26188] 2019-03-26 09:09:09.917: SQL SELECT * FROM jobss - Traceback (most recent call last): - File "end-to-endtracing.py", line 14, in - cursor.execute("select * from jobss") - oracledb.DatabaseError: ORA-00942: table or view does not exist + ODPI [23389068] 2025-06-25 12:07:55.405: ODPI-C 5.5.1 + ODPI [23389068] 2025-06-25 12:07:55.405: debugging messages initialized at level 16 + ODPI [23389068] 2025-06-25 12:08:01.363: SQL select name from jobs See `ODPI-C Debugging `__ for diff --git a/doc/src/user_guide/troubleshooting.rst b/doc/src/user_guide/troubleshooting.rst index ec67ee59..f9b1cd95 100644 --- a/doc/src/user_guide/troubleshooting.rst +++ b/doc/src/user_guide/troubleshooting.rst @@ -359,7 +359,7 @@ verifiers 11G and later. show parameter sec_case_sensitive_logon - Note this parameter has been `removed in Oracle Database 21c + Note this parameter was `removed in Oracle Database 21c `__ so only step 2 is required for this, or subsequent, database versions. @@ -473,7 +473,7 @@ syntax. Perform one of the following: ORA Error Messages ------------------ -A common ORA error message is discussed below. +Some common ORA error messages are discussed below. ORA-00933 +++++++++ @@ -504,3 +504,20 @@ Note with Oracle Database 23ai this incorrect usage gives the message For other causes and solutions see `Database Error Messages ORA-00933 `__ + +ORA-28009 ++++++++++ + +**Message:** ``ORA-28009: connection as SYS should be as SYSDBA or SYSOPER`` + +**Cause:** Commonly this error occurs when you try to create a connection pool +using :data:`oracledb.AUTH_MODE_SYSDBA` in python-oracledb Thick mode. + +**Action:** Use a :ref:`standalone connection `. +Alternatively, use python-oracledb Thin mode by removing all calls to +:func:`oracledb.init_oracle_client()`. + +.. seealso:: + + For other causes and solutions see `Database Error Messages ORA-28009 + `__ diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index 84d20f7f..70d36ed1 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -61,14 +61,18 @@ Some general tuning tips are: :ref:`optnetfiles`. In python-oracledb Thin mode, the SDU size can be passed as a connection or pool creation parameter. In both modes it may optionally be set in the connection :ref:`Easy Connect string ` or - :ref:`connect descriptor `. + :ref:`connect descriptor `. The SDU size that will actually + be used is negotiated down to the lower of application-side value and the + database network SDU configuration value. * Do not commit or rollback unnecessarily. Use :attr:`Connection.autocommit` on the last of a sequence of DML statements. -* If Python's Global Interpreter Lock (GIL) is limiting - :ref:`concurrent program performance `, then explore using parallel - Python processes. 
+* Consider using :ref:`concurrent programming ` or + :ref:`pipelining `. + +* If Python's Global Interpreter Lock (GIL) is limiting concurrent program + performance, then explore using parallel Python processes. .. _tuningfetch: @@ -216,6 +220,8 @@ numbers of rows with different ``prefetchrows`` and ``arraysize`` values. - 20 - 1 +The number of round-trips will be the same regardless of which +:ref:`python-oracledb method ` is used to fetch query results. Application Default Prefetchrows and Arraysize Values +++++++++++++++++++++++++++++++++++++++++++++++++++++ From 44d64c56d5bf4da4c4ce99b35870d820030ea566 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:54:23 -0600 Subject: [PATCH 113/239] Simplify code. --- src/oracledb/impl/thin/messages/base.pyx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index f9040fff..90a7f048 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -1415,12 +1415,12 @@ cdef class MessageWithData(Message): first followed by any LONG values. """ cdef: - uint32_t i, num_elements, offset = self.offset + uint32_t num_elements, offset = self.offset bint found_long = False OracleMetadata metadata ThinVarImpl var_impl BindInfo bind_info - for i, bind_info in enumerate(params): + for bind_info in params: if bind_info._is_return_bind: continue var_impl = bind_info._bind_var_impl @@ -1438,7 +1438,7 @@ cdef class MessageWithData(Message): self._write_bind_params_column(buf, metadata, var_impl._values[pos + offset]) if found_long: - for i, bind_info in enumerate(params): + for bind_info in params: if bind_info._is_return_bind: continue var_impl = bind_info._bind_var_impl From d27d4a422a814c61b21981ddebf3f5d3f0c41405 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:54:55 -0600 Subject: [PATCH 114/239] Added support for fetching VECTORs in Arrow arrays. --- doc/src/release_notes.rst | 3 +- doc/src/user_guide/dataframes.rst | 200 +++++++++- doc/src/user_guide/vector_data_type.rst | 3 + samples/dataframe_numpy.py | 63 ++- samples/dataframe_pandas.py | 89 ++++- samples/dataframe_pandas_async.py | 81 ++++ samples/sql/create_schema_23.sql | 1 + src/oracledb/base_impl.pxd | 2 + src/oracledb/base_impl.pyx | 7 + src/oracledb/errors.py | 5 + src/oracledb/impl/base/converters.pyx | 15 + src/oracledb/impl/base/metadata.pyx | 5 + src/oracledb/impl/base/var.pyx | 22 +- src/oracledb/impl/thick/var.pyx | 4 + src/oracledb/impl/thin/messages/base.pyx | 4 + src/oracledb/interchange/nanoarrow_bridge.pxd | 13 + src/oracledb/interchange/nanoarrow_bridge.pyx | 240 ++++++++++- src/oracledb/thick_impl.pyx | 1 + src/oracledb/thin_impl.pyx | 1 + tests/test_8000_dataframe.py | 375 +++++++++++++++++- tests/test_8100_dataframe_async.py | 116 +++++- 21 files changed, 1217 insertions(+), 33 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index d710c6b5..5a758832 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -63,8 +63,9 @@ Common Changes #) Added Instance Principal authentication support when using :ref:`OCI Cloud Native Authentication `. -#) Improvements to :ref:`data frames `: +#) Improvements to :ref:`data frame ` support: + - Added support for VECTOR columns when fetching data frames. - Fixed date handling to match PyArrow's and avoid localization issues (`issue 499 `__). 
- Fixed bug on Windows when fetching dates prior to 1970 and after 2038 diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index a33f1b85..2a6f11b3 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -51,6 +51,8 @@ To fetch in batches, use an iterator: .. code-block:: python + import pyarrow + sql = "select * from departments where department_id < 80" # Adjust "size" to tune the query fetch performance # Here it is small to show iteration @@ -144,6 +146,10 @@ Oracle Database will result in an exception. :ref:`Output type handlers - TIMESTAMP * - DB_TYPE_VARCHAR - STRING + * - DB_TYPE_VECTOR + - List or struct with DOUBLE, FLOAT, INT8, or UINT8 values + +**Numbers** When converting Oracle Database NUMBERs: @@ -158,10 +164,51 @@ When converting Oracle Database NUMBERs: - In all other cases, the Arrow data type is DOUBLE. +**Vectors** + +When converting Oracle Database VECTORs: + +- Dense vectors are fetched as lists. + +- Sparse vectors are fetched as structs with fields ``num_dimensions``, + ``indices`` and ``values`` similar to :ref:`SparseVector objects + `. + +- VECTOR columns with flexible dimensions are supported. + +- VECTOR columns with flexible formats are not supported. Each vector value + must have the same storage format data type. + +- Vector values are fetched as the following types: + + .. list-table-with-summary:: + :header-rows: 1 + :class: wy-table-responsive + :widths: 1 1 + :align: left + :summary: The first column is the Oracle Database VECTOR format. The second column is the resulting Arrow data type in the list. + + * - Oracle Database VECTOR format + - Arrow data type + * - FLOAT64 + - DOUBLE + * - FLOAT32 + - FLOAT + * - INT8 + - INT8 + * - BINARY + - UINT8 + +See :ref:`dfvector` for more information. + +**LOBs** + When converting Oracle Database CLOBs and BLOBs: - The LOBs must be no more than 1 GB in length. +**Dates and Timestamps** + When converting Oracle Database DATEs and TIMESTAMPs: - Arrow TIMESTAMPs will not have timezone data. @@ -236,6 +283,8 @@ An example that creates and uses a `PyArrow Table .. code-block:: python + import pyarrow + # Get an OracleDataFrame # Adjust arraysize to tune the query fetch performance sql = "select id, name from SampleQueryTab order by id" @@ -303,8 +352,8 @@ An example that creates and uses a `Polars DataFrame .. code-block:: python - import pyarrow import polars + import pyarrow # Get an OracleDataFrame # Adjust arraysize to tune the query fetch performance @@ -377,8 +426,8 @@ For example, to convert to `NumPy `__ ``ndarray`` format: .. code-block:: python - import pyarrow import numpy + import pyarrow SQL = "select id from SampleQueryTab order by id" @@ -426,3 +475,150 @@ An example of working with data as a `Torch tensor See `samples/dataframe_torch.py `__ for a runnable example. + +.. _dfvector: + +Using VECTOR data with Data Frames +---------------------------------- + +Columns of the `VECTOR `__ data type can be fetched with +the methods :meth:`Connection.fetch_df_all()` and +:meth:`Connection.fetch_df_batches()`. VECTOR columns can have flexible +dimensions, but flexible storage formats are not supported: each vector value +must have the same format data type. Vectors can be dense or sparse. + +See :ref:`dftypemapping` for the type mapping for VECTORs. + +**Dense Vectors** + +By default, Oracle Database vectors are "dense". These are fetched in +python-oracledb as Arrow lists. 
For example, if the table:: + + create table myvec (v64 vector(3, float64)); + +contains these two vectors:: + + [4.1, 5.2, 6.3] + [7.1, 8.2, 9.3] + +then the code: + +.. code-block:: python + + odf = connection.fetch_df_all("select v64 from myvec") + pyarrow_table = pyarrow.Table.from_arrays( + odf.column_arrays(), names=odf.column_names() + ) + +will result in a PyArrow table containing lists of doubles. The table can be +converted to a data frame of your chosen library using functionality described +earlier in this chapter. For example, to convert to Pandas: + +.. code-block:: python + + pdf = pyarrow_table.to_pandas() + print(pdf) + +The output will be:: + + V64 + 0 [4.1, 5.2, 6.3] + 1 [7.1, 8.2, 9.3] + +**Sparse Vectors** + +Sparse vectors (where many of the values are 0) are fetched as structs with +fields ``num_dimensions``, ``indices``, and ``values`` similar to +:ref:`SparseVector objects ` which are discussed in a +non-data frame context in :ref:`sparsevectors`. + +If the table:: + + create table myvec (v64 vector(3, float64, sparse)); + +contains these two vectors:: + + [3, [1,2], [4.1, 5.2]] + [3, [0], [9.3]] + +then the code to fetch as data frames: + +.. code-block:: python + + import pyarrow + + odf = connection.fetch_df_all("select v64 from myvec") + pdf = pyarrow.Table.from_arrays( + odf.column_arrays(), names=odf.column_names() + ).to_pandas() + + print(pdf) + + print("First row:") + + num_dimensions = pdf.iloc[0].V64['num_dimensions'] + print(f"num_dimensions={num_dimensions}") + + indices = pdf.iloc[0].V64['indices'] + print(f"indices={indices}") + + values = pdf.iloc[0].V64['values'] + print(f"values={values}") + +will display:: + + V64 + 0 {'num_dimensions': 3, 'indices': [1, 2], 'valu... + 1 {'num_dimensions': 3, 'indices': [0], 'values'... + + First row: + num_dimensions=3 + indices=[1 2] + values=[4.1 5.2] + +You can convert each struct as needed. One way to convert into `Pandas +dataframes with sparse values +`__ is via a `SciPy +coordinate format matrix `__. The Pandas method +`from_spmatrix() `__ can then be used to create the +final sparse dataframe: + +.. code-block:: python + + import numpy + import pandas + import pyarrow + import scipy + + def convert_to_sparse_array(val): + dimensions = val["num_dimensions"] + col_indices = val["indices"] + row_indices = numpy.zeros(len(col_indices)) + values = val["values"] + sparse_matrix = scipy.sparse.coo_matrix( + (values, (col_indices, row_indices)), shape=(dimensions, 1)) + return pandas.arrays.SparseArray.from_spmatrix(sparse_matrix) + + odf = connection.fetch_df_all("select v64 from myvec") + pdf = pyarrow.Table.from_arrays( + odf.column_arrays(), odf.column_names() + ).to_pandas() + + pdf["SPARSE_ARRAY_V64"] = pdf["V64"].apply(convert_to_sparse_array) + + print(pdf.SPARSE_ARRAY_V64) + +The code will print:: + + 0 [0.0, 4.1, 5.2] + Fill: 0.0 + IntIndex + Indices: ar... + 1 [9.3, 0.0, 0.0] + Fill: 0.0 + IntIndex + Indices: ar... + Name: SPARSE_ARRAY_V64, dtype: object diff --git a/doc/src/user_guide/vector_data_type.rst b/doc/src/user_guide/vector_data_type.rst index 9f2d1397..1f070c45 100644 --- a/doc/src/user_guide/vector_data_type.rst +++ b/doc/src/user_guide/vector_data_type.rst @@ -32,6 +32,9 @@ various storage formats mentioned above. For example: vec_data vector ) +If you are interested in using VECTOR data with data frames, see +:ref:`dfvector`. + .. 
_intfloatformat: Using FLOAT32, FLOAT64, and INT8 Vectors diff --git a/samples/dataframe_numpy.py b/samples/dataframe_numpy.py index 8bc7a476..4285720c 100644 --- a/samples/dataframe_numpy.py +++ b/samples/dataframe_numpy.py @@ -25,12 +25,14 @@ # ----------------------------------------------------------------------------- # dataframe_numpy.py # -# Shows how to use connection.fetch_df_all() to efficiently put data into a -# NumPy ndarray via the DLPack standard memory layout. +# Shows how to use connection.fetch_df_all() to put data into a NumPy ndarray # ----------------------------------------------------------------------------- -import pyarrow +import array +import sys + import numpy +import pyarrow import oracledb import sample_env @@ -46,11 +48,14 @@ params=sample_env.get_connect_params(), ) -SQL = "select id from SampleQueryTab order by id" +# ----------------------------------------------------------------------------- +# +# Fetching all records # Get an OracleDataFrame # Adjust arraysize to tune the query fetch performance -odf = connection.fetch_df_all(statement=SQL, arraysize=100) +sql = "select id from SampleQueryTab order by id" +odf = connection.fetch_df_all(statement=sql, arraysize=100) # Convert to an ndarray via the Python DLPack specification pyarrow_array = pyarrow.array(odf.get_column_by_name("ID")) @@ -62,10 +67,56 @@ print("Type:") print(type(np)) # -# Perform various numpy operations on the ndarray +print("Values:") +print(np) + +# Perform various NumPy operations on the ndarray print("\nSum:") print(numpy.sum(np)) print("\nLog10:") print(numpy.log10(np)) + +# ----------------------------------------------------------------------------- +# +# Fetching VECTORs + +# The VECTOR example only works with Oracle Database 23.4 or later +if sample_env.get_server_version() < (23, 4): + sys.exit("This example requires Oracle Database 23.4 or later.") + +# The VECTOR example works with thin mode, or with thick mode using Oracle +# Client 23.4 or later +if not connection.thin and oracledb.clientversion()[:2] < (23, 4): + sys.exit( + "This example requires python-oracledb thin mode, or Oracle Client" + " 23.4 or later" + ) + +# Insert sample data +rows = [ + (array.array("d", [11.25, 11.75, 11.5]),), + (array.array("d", [12.25, 12.75, 12.5]),), +] + +with connection.cursor() as cursor: + cursor.executemany("insert into SampleVectorTab (v64) values (:1)", rows) + +# Get an OracleDataFrame +# Adjust arraysize to tune the query fetch performance +sql = "select v64 from SampleVectorTab order by id" +odf = connection.fetch_df_all(statement=sql, arraysize=100) + +# Convert to a NumPy ndarray +pyarrow_array = pyarrow.array(odf.get_column_by_name("V64")) +np = pyarrow_array.to_numpy(zero_copy_only=False) + +print("Type:") +print(type(np)) # + +print("Values:") +print(np) + +print("\nSum:") +print(numpy.sum(np)) diff --git a/samples/dataframe_pandas.py b/samples/dataframe_pandas.py index f233703f..229fbf2f 100644 --- a/samples/dataframe_pandas.py +++ b/samples/dataframe_pandas.py @@ -29,6 +29,10 @@ # to create Pandas dataframes. # ----------------------------------------------------------------------------- +import array +import sys + +import numpy import pandas import pyarrow @@ -46,15 +50,14 @@ params=sample_env.get_connect_params(), ) -SQL = "select id, name from SampleQueryTab order by id" - # ----------------------------------------------------------------------------- # # Fetching all records # Get an OracleDataFrame. 
# Adjust arraysize to tune the query fetch performance -odf = connection.fetch_df_all(statement=SQL, arraysize=100) +sql = "select id, name from SampleQueryTab order by id" +odf = connection.fetch_df_all(statement=sql, arraysize=100) # Get a Pandas DataFrame from the data df = pyarrow.Table.from_arrays( @@ -88,7 +91,8 @@ # Tune 'size' for your data set. Here it is small to show the batch fetch # behavior on the sample table. -for odf in connection.fetch_df_batches(statement=SQL, size=10): +sql = "select id, name from SampleQueryTab order by id" +for odf in connection.fetch_df_batches(statement=sql, size=10): df_b = pyarrow.Table.from_arrays( odf.column_arrays(), names=odf.column_names() ).to_pandas() @@ -100,3 +104,80 @@ print("\nLast three rows:") print(df.tail(3)) + +# ----------------------------------------------------------------------------- +# +# Fetching VECTORs + +# The VECTOR example only works with Oracle Database 23.4 or later +if sample_env.get_server_version() < (23, 4): + sys.exit("This example requires Oracle Database 23.4 or later.") + +# The VECTOR example works with thin mode, or with thick mode using Oracle +# Client 23.4 or later +if not connection.thin and oracledb.clientversion()[:2] < (23, 4): + sys.exit( + "This example requires python-oracledb thin mode, or Oracle Client" + " 23.4 or later" + ) + +# Insert sample data +rows = [ + (array.array("d", [11.25, 11.75, 11.5]),), + (array.array("d", [12.25, 12.75, 12.5]),), +] + +with connection.cursor() as cursor: + cursor.executemany("insert into SampleVectorTab (v64) values (:1)", rows) + + +# Get an OracleDataFrame. +# Adjust arraysize to tune the query fetch performance +sql = "select id, v64 from SampleVectorTab order by id" +odf = connection.fetch_df_all(statement=sql, arraysize=100) + +# Get a Pandas DataFrame from the data +df = pyarrow.Table.from_arrays( + odf.column_arrays(), names=odf.column_names() +).to_pandas() + +# Perform various Pandas operations on the DataFrame + +print("\nDataFrame:") +print(df) + +print("\nMean:") +print(pandas.DataFrame(df["V64"].tolist()).mean()) + +print("\nList:") +df2 = pandas.DataFrame(df["V64"].tolist()).T +print(df2) +print(df2.sum()) + +# You can manipulate vectors using Pandas's apply or list comprehension with +# NumPy for efficient array operations. + +# Scaling all vectors by a factor of two +print("\nScaled:") +df["SCALED_V64_COL"] = df["V64"].apply(lambda x: numpy.array(x) * 2) +print(df) + +# Calculating vector norms +# +# L2_NORM = Straight line distance from the origin to vector's endpoint +# L1_NORM = Sum of absolute values of the vector's components +# Linf_NORM = Largest absolute component of the vector; useful in scenarios +# where maximum deviation matters +print("\nNorms:") +df["L2_NORM"] = df["V64"].apply(lambda x: numpy.linalg.norm(x, ord=2)) +df["L1_NORM"] = df["V64"].apply(lambda x: numpy.linalg.norm(x, ord=1)) +df["Linf_NORM"] = df["V64"].apply( + lambda x: numpy.linalg.norm(x, ord=numpy.inf) +) +print(df) + +# Calculating the vector dot product with a reference vector +print("\nDot product:") +ref_vector = numpy.array([1, 10, 10]) +df["DOT_PRODUCT"] = df["V64"].apply(lambda x: numpy.dot(x, ref_vector)) +print(df) diff --git a/samples/dataframe_pandas_async.py b/samples/dataframe_pandas_async.py index 796da794..b7e49b8e 100644 --- a/samples/dataframe_pandas_async.py +++ b/samples/dataframe_pandas_async.py @@ -33,8 +33,11 @@ # other, synchronous, data frame samples. 
# ----------------------------------------------------------------------------- +import array import asyncio +import sys +import numpy import pandas import pyarrow @@ -105,5 +108,83 @@ async def main(): print("\nLast three rows:") print(df.tail(3)) + # ------------------------------------------------------------------------- + # + # Fetching VECTORs + + # The VECTOR example only works with Oracle Database 23.4 or later + if sample_env.get_server_version() < (23, 4): + sys.exit("This example requires Oracle Database 23.4 or later.") + + # The VECTOR example works with thin mode, or with thick mode using Oracle + # Client 23.4 or later + if not connection.thin and oracledb.clientversion()[:2] < (23, 4): + sys.exit( + "This example requires python-oracledb thin mode, or Oracle Client" + " 23.4 or later" + ) + + # Insert sample data + rows = [ + (array.array("d", [11.25, 11.75, 11.5]),), + (array.array("d", [12.25, 12.75, 12.5]),), + ] + + with connection.cursor() as cursor: + await cursor.executemany( + "insert into SampleVectorTab (v64) values (:1)", rows + ) + + # Get an OracleDataFrame. + # Adjust arraysize to tune the query fetch performance + sql = "select id, v64 from SampleVectorTab order by id" + odf = await connection.fetch_df_all(statement=sql, arraysize=100) + + # Get a Pandas DataFrame from the data + df = pyarrow.Table.from_arrays( + odf.column_arrays(), names=odf.column_names() + ).to_pandas() + + # Perform various Pandas operations on the DataFrame + + print("\nDataFrame:") + print(df) + + print("\nMean:") + print(pandas.DataFrame(df["V64"].tolist()).mean()) + + print("\nList:") + df2 = pandas.DataFrame(df["V64"].tolist()).T + print(df2) + print(df2.sum()) + + # You can manipulate vectors using Pandas's apply or list comprehension + # with NumPy for efficient array operations. 
+ + # Scaling all vectors by a factor of two + print("\nScaled:") + df["SCALED_V64_COL"] = df["V64"].apply(lambda x: numpy.array(x) * 2) + print(df) + + # Calculating vector norms + # + # L2_NORM = Straight line distance from the origin to vector's endpoint + # L1_NORM = Sum of absolute values of the vector's components + # Linf_NORM = Largest absolute component of the vector; useful in scenarios + # where maximum deviation matters + print("\nNorms:") + df["L2_NORM"] = df["V64"].apply(lambda x: numpy.linalg.norm(x, ord=2)) + df["L1_NORM"] = df["V64"].apply(lambda x: numpy.linalg.norm(x, ord=1)) + df["Linf_NORM"] = df["V64"].apply( + lambda x: numpy.linalg.norm(x, ord=numpy.inf) + ) + print(df) + + # Calculating the vector dot product with a reference vector + print("\nDot product:") + ref_vector = numpy.array([1, 10, 10]) + df["DOT_PRODUCT"] = df["V64"].apply(lambda x: numpy.dot(x, ref_vector)) + print(df) + asyncio.run(main()) diff --git a/samples/sql/create_schema_23.sql b/samples/sql/create_schema_23.sql index 30daef5f..53226f02 100644 --- a/samples/sql/create_schema_23.sql +++ b/samples/sql/create_schema_23.sql @@ -32,6 +32,7 @@ *---------------------------------------------------------------------------*/ create table &main_user..SampleVectorTab ( + id number generated by default on null as identity primary key, v32 vector(3, float32), v64 vector(3, float64), v8 vector(3, int8), diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index c685495f..f00dc07a 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -978,6 +978,8 @@ cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, OracleData* data, const char* encoding_errors, bint from_dbobject) +cdef int convert_vector_to_arrow(OracleArrowArray arrow_array, + object vector) except -1 cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer) cdef uint16_t decode_uint16be(const char_type *buf) cdef uint32_t decode_uint32be(const char_type *buf) diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index 3ed00048..3a83c94d 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -42,21 +42,28 @@ from libc.stdlib cimport atoi, atof from libc.string cimport memcpy from cpython cimport array +from .constants import VECTOR_META_FLAG_SPARSE_VECTOR + from .interchange.nanoarrow_bridge cimport ( NANOARROW_TIME_UNIT_SECOND, NANOARROW_TIME_UNIT_MILLI, NANOARROW_TIME_UNIT_MICRO, NANOARROW_TIME_UNIT_NANO, + NANOARROW_TYPE_NA, NANOARROW_TYPE_BOOL, NANOARROW_TYPE_BINARY, NANOARROW_TYPE_DECIMAL128, NANOARROW_TYPE_DOUBLE, NANOARROW_TYPE_FLOAT, + NANOARROW_TYPE_INT8, NANOARROW_TYPE_INT64, + NANOARROW_TYPE_LIST, NANOARROW_TYPE_LARGE_BINARY, NANOARROW_TYPE_LARGE_STRING, NANOARROW_TYPE_STRING, + NANOARROW_TYPE_STRUCT, NANOARROW_TYPE_TIMESTAMP, + NANOARROW_TYPE_UINT8, ) import array diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 3c434c13..df6ca6b4 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -320,6 +320,7 @@ def _raise_not_supported(feature: str) -> None: ERR_UNSUPPORTED_PIPELINE_OPERATION = 3028 ERR_INVALID_NETWORK_NAME = 3029 ERR_ARROW_UNSUPPORTED_DATA_TYPE = 3030 +ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT = 3031 # error numbers that result in DatabaseError ERR_TNS_ENTRY_NOT_FOUND = 4000 @@ -899,4 +900,8 @@ def _raise_not_supported(feature: str) -> None: ERR_ARROW_C_API_ERROR: ( "Arrow C Data Interface operation failed with error code {code}" ), + ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT: ( + "flexible vector formats are 
not supported. Only fixed 'FLOAT32', " + "'FLOAT64', 'INT8' or 'BINARY' formats are supported" + ), } diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 0843b711..81809457 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -438,3 +438,18 @@ cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, errors._raise_err(errors.ERR_INCONSISTENT_DATATYPES, input_type=from_metadata.dbtype.name, output_type=to_metadata.dbtype.name) + + +cdef int convert_vector_to_arrow(OracleArrowArray arrow_array, + object vector) except -1: + """ + Converts the vector to the format required by the Arrow array. + """ + if vector is None: + arrow_array.append_null() + elif isinstance(vector, PY_TYPE_SPARSE_VECTOR): + arrow_array.append_sparse_vector(vector.num_dimensions, + vector.indices, + vector.values) + else: + arrow_array.append_vector( vector) diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 076e478b..f19ea81c 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -92,6 +92,11 @@ cdef class OracleMetadata: self._arrow_type = NANOARROW_TYPE_LARGE_STRING elif db_type_num == DB_TYPE_NUM_RAW: self._arrow_type = NANOARROW_TYPE_BINARY + elif db_type_num == DB_TYPE_NUM_VECTOR: + if self.vector_flags & VECTOR_META_FLAG_SPARSE_VECTOR: + self._arrow_type = NANOARROW_TYPE_STRUCT + else: + self._arrow_type = NANOARROW_TYPE_LIST else: errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_TYPE, db_type_name=self.dbtype.name) diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index 5565972f..1fb79e64 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -252,7 +252,10 @@ cdef class BaseVarImpl: Creates an Arrow array based on the type information selected by the user. 
""" - cdef ArrowTimeUnit time_unit = NANOARROW_TIME_UNIT_SECOND + cdef: + ArrowTimeUnit time_unit = NANOARROW_TIME_UNIT_SECOND + ArrowType child_arrow_type = NANOARROW_TYPE_NA + self.metadata._set_arrow_type() if self.metadata._arrow_type == NANOARROW_TYPE_TIMESTAMP: if self.metadata.scale > 0 and self.metadata.scale <= 3: @@ -261,12 +264,29 @@ cdef class BaseVarImpl: time_unit = NANOARROW_TIME_UNIT_MICRO elif self.metadata.scale > 6 and self.metadata.scale <= 9: time_unit = NANOARROW_TIME_UNIT_NANO + + if self.metadata._arrow_type in ( + NANOARROW_TYPE_LIST, + NANOARROW_TYPE_STRUCT + ): + if self.metadata.vector_format == VECTOR_FORMAT_FLOAT32: + child_arrow_type = NANOARROW_TYPE_FLOAT + elif self.metadata.vector_format == VECTOR_FORMAT_FLOAT64: + child_arrow_type = NANOARROW_TYPE_DOUBLE + elif self.metadata.vector_format == VECTOR_FORMAT_INT8: + child_arrow_type = NANOARROW_TYPE_INT8 + elif self.metadata.vector_format == VECTOR_FORMAT_BINARY: + child_arrow_type = NANOARROW_TYPE_UINT8 + else: + errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT) + self._arrow_array = OracleArrowArray( arrow_type=self.metadata._arrow_type, name=self.metadata.name, precision=self.metadata.precision, scale=self.metadata.scale, time_unit=time_unit, + child_arrow_type=child_arrow_type, ) cdef int _finalize_init(self) except -1: diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 2d2d0183..ab0475b9 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -358,6 +358,7 @@ cdef class ThickVarImpl(BaseVarImpl): uint32_t ora_type_num OracleData ora_data dpiBytes *as_bytes + object vector ora_data.is_null = data.isNull if not data.isNull: ora_type_num = self._fetch_metadata.dbtype.num @@ -423,6 +424,9 @@ cdef class ThickVarImpl(BaseVarImpl): memcpy(ora_data.buffer.as_number.chars, as_bytes.ptr, as_bytes.length); ora_data.buffer.as_number.num_chars = as_bytes.length; + elif ora_type_num == DPI_ORACLE_TYPE_VECTOR: + vector = _convert_vector_to_python(data.value.asVector) + return convert_vector_to_arrow(self._arrow_array, vector) else: errors._raise_err(errors.ERR_DB_TYPE_NOT_SUPPORTED, name=self._fetch_metadata.dbtype.name) diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 90a7f048..4bb98aa4 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -971,6 +971,10 @@ cdef class MessageWithData(Message): column_value = buf.read_oson() elif ora_type_num == ORA_TYPE_NUM_VECTOR: column_value = buf.read_vector() + if self.cursor_impl.fetching_arrow: + convert_vector_to_arrow( + var_impl._arrow_array, column_value + ) elif ora_type_num == ORA_TYPE_NUM_OBJECT: typ_impl = metadata.objtype if typ_impl is None: diff --git a/src/oracledb/interchange/nanoarrow_bridge.pxd b/src/oracledb/interchange/nanoarrow_bridge.pxd index 5c413f22..4a03523d 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pxd +++ b/src/oracledb/interchange/nanoarrow_bridge.pxd @@ -33,6 +33,7 @@ from libc.stdint cimport int8_t, uint8_t, int16_t, uint16_t from libc.stdint cimport int32_t, uint32_t, int64_t, uint64_t +from cpython cimport array cdef extern from "nanoarrow.h": @@ -47,6 +48,7 @@ cdef extern from "nanoarrow.h": void (*release)(ArrowArray*) cdef struct ArrowSchema: + ArrowSchema** children void (*release)(ArrowSchema*) cpdef enum ArrowType: @@ -55,11 +57,17 @@ cdef extern from "nanoarrow.h": NANOARROW_TYPE_DECIMAL128 NANOARROW_TYPE_DOUBLE NANOARROW_TYPE_FLOAT + 
NANOARROW_TYPE_INT8 NANOARROW_TYPE_INT64 NANOARROW_TYPE_LARGE_BINARY NANOARROW_TYPE_LARGE_STRING + NANOARROW_TYPE_LIST + NANOARROW_TYPE_NA NANOARROW_TYPE_STRING + NANOARROW_TYPE_STRUCT NANOARROW_TYPE_TIMESTAMP + NANOARROW_TYPE_UINT8 + NANOARROW_TYPE_UINT32 NANOARROW_TYPE_UNINITIALIZED cpdef enum ArrowTimeUnit: @@ -91,6 +99,7 @@ cdef class OracleArrowArray: double factor ArrowArray *arrow_array ArrowSchema *arrow_schema + ArrowType child_arrow_type cdef str _schema_to_string(self) cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 @@ -100,4 +109,8 @@ cdef class OracleArrowArray: cdef int append_int64(self, int64_t value) except -1 cdef int append_last_value(self, OracleArrowArray array) except -1 cdef int append_null(self) except -1 + cdef int append_sparse_vector(self, int64_t num_dimensions, + array.array indices, + array.array values) except -1 + cdef int append_vector(self, array.array value) except -1 cdef int finish_building(self) except -1 diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index 9e756819..e259d0cd 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -85,6 +85,10 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowBuffer* ArrowArrayBuffer(ArrowArray* array, int64_t i) ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* array, ArrowError* error) + ArrowErrorCode ArrowArrayFinishElement(ArrowArray *array) + ArrowErrorCode ArrowArrayInitFromSchema(ArrowArray *array, + ArrowSchema *schema, + ArrowError *error) ArrowErrorCode ArrowArrayInitFromType(ArrowArray* array, ArrowType storage_type) void ArrowArrayRelease(ArrowArray *array) @@ -108,10 +112,13 @@ cdef extern from "nanoarrow/nanoarrow.c": ArrowErrorCode ArrowSchemaInitFromType(ArrowSchema* schema, ArrowType type) void ArrowSchemaRelease(ArrowSchema *schema) ArrowErrorCode ArrowSchemaSetName(ArrowSchema* schema, const char* name) + ArrowErrorCode ArrowSchemaSetType(ArrowSchema * schema, ArrowType type) ArrowErrorCode ArrowSchemaSetTypeDateTime(ArrowSchema* schema, ArrowType arrow_type, ArrowTimeUnit time_unit, const char* timezone) + ArrowErrorCode ArrowSchemaSetTypeStruct(ArrowSchema *schema, + int64_t n_children) ArrowErrorCode ArrowSchemaSetTypeDecimal(ArrowSchema* schema, ArrowType type, int32_t decimal_precision, @@ -146,6 +153,79 @@ cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: cpython.PyMem_Free(schema) +cdef int append_double_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of doubles to the Arrow array. + """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + double *double_buf = value.data.as_doubles + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendDouble(child_array, double_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_float_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of floats to the Arrow array. + """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + float *float_buf = value.data.as_floats + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendDouble(child_array, float_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_int8_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of signed one-byte integers to the Arrow array. 
+ """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + int8_t *int8_buf = value.data.as_schars + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendInt(child_array, int8_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_uint8_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of unsigned one-byte integers to the Arrow array. + """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + uint8_t *uint8_buf = value.data.as_uchars + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendInt(child_array, uint8_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_uint32_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of unsigned four-byte integers to the Arrow array. Note + that Python's array.array doesn't natively support uint32_t but an upper + layer has verified that the data in the buffer consists of only four byte + integers. + """ + cdef: + uint32_t *uint32_buf = value.data.as_voidptr + ArrowArray *child_array = arrow_array.children[0] + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendInt(child_array, uint32_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + cdef void arrow_buffer_dealloc_callback(ArrowBufferAllocator *allocator, uint8_t *ptr, int64_t size): """ @@ -203,13 +283,59 @@ cdef int copy_arrow_array(OracleArrowArray oracle_arrow_array, oracle_arrow_array, src.children[i], dest.children[i] ) +cdef int build_arrow_schema_for_sparse_vector( + ArrowSchema *schema, + ArrowType vector_value_type +) except -1: + + # Initialize struct with 3 fields - num_dimensions, indices, values + ArrowSchemaInit(schema) + _check_nanoarrow(ArrowSchemaSetTypeStruct(schema, 3)) + + # first child: "num_dimensions" + _check_nanoarrow( + ArrowSchemaSetType(schema.children[0], NANOARROW_TYPE_INT64) + ) + _check_nanoarrow(ArrowSchemaSetName(schema.children[0], "num_dimensions")) + + # second child: "indices" + _check_nanoarrow(ArrowSchemaSetType( + schema.children[1], + NANOARROW_TYPE_LIST + ) + ) + _check_nanoarrow( + ArrowSchemaSetType( + schema.children[1].children[0], + NANOARROW_TYPE_UINT32 + ) + ) + _check_nanoarrow(ArrowSchemaSetName(schema.children[1], "indices")) + + # third child: "values" + _check_nanoarrow( + ArrowSchemaSetType( + schema.children[2], + NANOARROW_TYPE_LIST + ) + ) + _check_nanoarrow( + ArrowSchemaSetType( + schema.children[2].children[0], + vector_value_type + ) + ) + _check_nanoarrow(ArrowSchemaSetName(schema.children[2], "values")) + cdef class OracleArrowArray: def __cinit__(self, ArrowType arrow_type, str name, int8_t precision, - int8_t scale, ArrowTimeUnit time_unit): + int8_t scale, ArrowTimeUnit time_unit, + ArrowType child_arrow_type): cdef ArrowType storage_type = arrow_type self.arrow_type = arrow_type + self.child_arrow_type = child_arrow_type self.time_unit = time_unit self.name = name self.arrow_array = \ @@ -225,25 +351,71 @@ cdef class OracleArrowArray: else: self.factor = 1 - _check_nanoarrow(ArrowArrayInitFromType(self.arrow_array, - storage_type)) self.arrow_schema = \ cpython.PyMem_Malloc(sizeof(ArrowSchema)) - _check_nanoarrow(ArrowArrayStartAppending(self.arrow_array)) if arrow_type == NANOARROW_TYPE_DECIMAL128: self.precision = precision self.scale = scale ArrowSchemaInit(self.arrow_schema) - _check_nanoarrow(ArrowSchemaSetTypeDecimal(self.arrow_schema, - arrow_type, - precision, scale)) + 
_check_nanoarrow( + ArrowSchemaSetTypeDecimal( + self.arrow_schema, + arrow_type, + precision, + scale + ) + ) + elif arrow_type == NANOARROW_TYPE_STRUCT: + # Currently struct is used for Sparse vector only + build_arrow_schema_for_sparse_vector(self.arrow_schema, + child_arrow_type) else: - _check_nanoarrow(ArrowSchemaInitFromType(self.arrow_schema, - storage_type)) + _check_nanoarrow( + ArrowSchemaInitFromType( + self.arrow_schema, + storage_type + ) + ) if arrow_type == NANOARROW_TYPE_TIMESTAMP: - _check_nanoarrow(ArrowSchemaSetTypeDateTime(self.arrow_schema, - arrow_type, - time_unit, NULL)) + _check_nanoarrow( + ArrowSchemaSetTypeDateTime( + self.arrow_schema, + arrow_type, + time_unit, + NULL + ) + ) + if arrow_type == NANOARROW_TYPE_LIST: + # Set the schema for child using child_arrow_type + _check_nanoarrow( + ArrowSchemaSetType( + self.arrow_schema.children[0], + child_arrow_type + ) + ) + _check_nanoarrow( + ArrowArrayInitFromSchema( + self.arrow_array, + self.arrow_schema, + NULL + ) + ) + elif arrow_type == NANOARROW_TYPE_STRUCT: + _check_nanoarrow( + ArrowArrayInitFromSchema( + self.arrow_array, + self.arrow_schema, + NULL + ) + ) + else: # primitive type array init + _check_nanoarrow( + ArrowArrayInitFromType( + self.arrow_array, + storage_type + ) + ) + _check_nanoarrow(ArrowArrayStartAppending(self.arrow_array)) _check_nanoarrow(ArrowSchemaSetName(self.arrow_schema, name.encode())) def __dealloc__(self): @@ -412,6 +584,50 @@ cdef class OracleArrowArray: """ _check_nanoarrow(ArrowArrayAppendNull(self.arrow_array, 1)) + cdef int append_vector(self, array.array value) except -1: + """ + Append a vector to the array. + """ + if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + append_float_array(self.arrow_array, value) + elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + append_double_array(self.arrow_array, value) + elif self.child_arrow_type == NANOARROW_TYPE_INT8: + append_int8_array(self.arrow_array, value) + elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + append_uint8_array(self.arrow_array, value) + + cdef int append_sparse_vector(self, + int64_t num_dims, + array.array indices, + array.array values) except -1: + """ + Append a sparse vector to the array. + """ + cdef: + ArrowArray *num_dims_array = self.arrow_array.children[0] + ArrowArray *indices_array = self.arrow_array.children[1] + ArrowArray *values_array = self.arrow_array.children[2] + + # append number of dimensions + _check_nanoarrow(ArrowArrayAppendInt(num_dims_array, num_dims)) + + # append indices array + append_uint32_array(indices_array, indices) + + # append values array + if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + append_float_array(values_array, values) + elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + append_double_array(values_array, values) + elif self.child_arrow_type == NANOARROW_TYPE_INT8: + append_int8_array(values_array, values) + elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + append_uint8_array(values_array, values) + + # indicate structure is completed + _check_nanoarrow(ArrowArrayFinishElement(self.arrow_array)) + cdef int finish_building(self) except -1: """ Finish building the array. No more data will be added to it. 
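For illustration only, the Arrow layout that the bridge code above produces for VECTOR columns can be sketched with pyarrow. pyarrow itself is not used by the bridge; only the field names below mirror those set in build_arrow_schema_for_sparse_vector(), and everything else in this snippet is an assumption made for the example.

    import pyarrow as pa

    # dense VECTOR columns map to a list whose child type matches the vector
    # storage format (FLOAT32 -> float32, FLOAT64 -> float64, INT8 -> int8,
    # BINARY -> uint8)
    dense_type = pa.list_(pa.float32())

    # sparse VECTOR columns map to a struct with the three children built by
    # build_arrow_schema_for_sparse_vector()
    sparse_type = pa.struct(
        [
            pa.field("num_dimensions", pa.int64()),
            pa.field("indices", pa.list_(pa.uint32())),
            pa.field("values", pa.list_(pa.float64())),
        ]
    )

    # one sparse row as it later appears in a pandas dataframe cell
    row = {"num_dimensions": 8, "indices": [0, 7], "values": [34.6, 77.8]}
    print(pa.array([row], type=sparse_type))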
diff --git a/src/oracledb/thick_impl.pyx b/src/oracledb/thick_impl.pyx index 8dfe11f1..4c763d7d 100644 --- a/src/oracledb/thick_impl.pyx +++ b/src/oracledb/thick_impl.pyx @@ -67,6 +67,7 @@ from .base_impl cimport ( char_type, ConnectParamsImpl, convert_oracle_data_to_arrow, + convert_vector_to_arrow, DbType, DB_TYPE_NUM_CURSOR, DRIVER_NAME, diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index bb03a29d..157a22e7 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -111,6 +111,7 @@ from .base_impl cimport ( ConnectParamsImpl, convert_oracle_data_to_python, convert_oracle_data_to_arrow, + convert_vector_to_arrow, convert_date_to_python, CS_FORM_IMPLICIT, CS_FORM_NCHAR, diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index d1f10497..95c8b549 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -26,6 +26,7 @@ Module for testing dataframes """ +import array import datetime import decimal import unittest @@ -33,8 +34,9 @@ import oracledb try: - import pyarrow + import numpy import pandas + import pyarrow HAS_INTEROP = True except ImportError: @@ -308,6 +310,23 @@ def __convert_to_df(self, data): pa_tab.validate(full=True) return pa_tab.to_pandas() + def __convert_df_value(self, df_val): + """ + This method converts a dataframe cell value to use with assertEqual() + For e.g. NaN and np.array cannot be compared directly. Values are + converted according to the following rules: + - NaN -> None + - np.array -> np.array.tolist() (Python list) + """ + if isinstance(df_val, numpy.ndarray): + return df_val.tolist() + elif pandas.isna(df_val): + return None + elif isinstance(df_val, dict): + return {k: self.__convert_df_value(v) for k, v in df_val.items()} + else: + return df_val + def __get_data_from_df(self, df): """ Returns data from the data frame in a normalized fashion suitable for @@ -315,7 +334,7 @@ def __get_data_from_df(self, df): so they are converted to the value None for comparison purposes. """ return [ - tuple(None if pandas.isna(v) else v for v in row) + tuple(self.__convert_df_value(v) for v in row) for row in df.itertuples(index=False, name=None) ] @@ -748,6 +767,358 @@ def test_8029(self): self.assertIsNotNone(buffers["offsets"]) self.assertIsNotNone(buffers["validity"]) + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) + def test_8030(self): + "8030 - fetch float32 vector" + + # float32 is a special case while comparing dataframe values + # Converting Dataframe cell value of type numpy.ndarray[float32] + # using .tolist() converts each value to Python float. Python + # float uses 64-bit precision causing mismatches in assertEqual. 
+ # As a workaround we use array.array('f', src).tolist() on the + # source data + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + # number of children for a nested list = 1 + self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) + def test_8031(self): + "8031 - fetch float64 vector" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8, 55.9],), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT64) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) + def test_8032(self): + "8032 - fetch int8 vector" + data = [ + ([34, -77],), + ([34, 77, 55],), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[34, -77]', 2, INT8) + union all + SELECT TO_VECTOR('[34, 77, 55]', 3, INT8) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) + def test_8033(self): + "8033 - fetch binary vector" + data = [ + ([3, 2, 3],), + ([3, 2],), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[3, 2, 3]', 24, BINARY) + union all + SELECT TO_VECTOR('[3, 2]', 16, BINARY) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + 
test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) + def test_8034(self): + "8034 - fetch float32 vectors with None" + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + (None,), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) + union all + select NULL + """ + ) + self.assertEqual(ora_df.num_rows(), 3) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 1) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) + def test_8035(self): + "8035 - fetch duplicate float64 vectors" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + """ + ) + self.assertEqual(ora_df.num_rows(), 12) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 7), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 7), "unsupported server" + ) + def test_8036(self): + "8036 - fetch float32 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 77.8]).tolist(), + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 9.1]).tolist(), + }, + ), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT32), + 8, + FLOAT32, + SPARSE + ) + union all + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT32), + 8, + FLOAT32, + SPARSE + ) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = 
ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + # number of children for a struct = 3 (num_dimensions, indices, values) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 3) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 7), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 7), "unsupported server" + ) + def test_8037(self): + "8037 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + self.__check_interop() + ora_df = self.conn.fetch_df_all( + """ + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + union all + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + # number of children for a struct = 3 (num_dimensions, indices, values) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 3) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) + def test_8038(self): + "8038 - DPY-3031 - Unsupported flexible vector formats" + with self.assertRaisesFullCode("DPY-3031"): + self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[44, 55, 89]', 3, INT8) as flex_col + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) as flex_col + """ + ) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index a5959bd8..6ce27933 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -26,6 +26,7 @@ Module for testing dataframes using asyncio. """ +import array import datetime import decimal import unittest @@ -33,8 +34,9 @@ import oracledb try: - import pyarrow + import numpy import pandas + import pyarrow HAS_INTEROP = True except ImportError: @@ -311,6 +313,23 @@ def __convert_to_df(self, data): pa_tab.validate(full=True) return pa_tab.to_pandas() + def __convert_df_value(self, df_val): + """ + This method converts a dataframe cell value to use with assertEqual() + For e.g. NaN and np.array cannot be compared directly. Values are + converted according to the following rules: + - NaN -> None + - np.array -> np.array.tolist() (Python list) + """ + if isinstance(df_val, numpy.ndarray): + return df_val.tolist() + elif pandas.isna(df_val): + return None + elif isinstance(df_val, dict): + return {k: self.__convert_df_value(v) for k, v in df_val.items()} + else: + return df_val + def __get_data_from_df(self, df): """ Returns data from the data frame in a normalized fashion suitable for @@ -318,7 +337,7 @@ def __get_data_from_df(self, df): so they are converted to the value None for comparison purposes. 
""" return [ - tuple(None if pandas.isna(v) else v for v in row) + tuple(self.__convert_df_value(v) for v in row) for row in df.itertuples(index=False, name=None) ] @@ -586,8 +605,95 @@ async def test_8122(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + @unittest.skipUnless( + test_env.has_client_version(23, 4), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 4), "unsupported server" + ) async def test_8123(self): - "8123 - fetch data with multiple rows containing null values" + "8123 - fetch float32 vector" + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + self.__check_interop() + ora_df = await self.conn.fetch_df_all( + """ + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + + # number of children for a nested list = 1 + self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @unittest.skipUnless( + test_env.has_client_version(23, 7), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 7), "unsupported server" + ) + async def test_8124(self): + "8124 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + self.__check_interop() + ora_df = await self.conn.fetch_df_all( + """ + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + union all + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + """ + ) + self.assertEqual(ora_df.num_rows(), 2) + self.assertEqual(ora_df.num_columns(), 1) + ora_col = ora_df.get_column(0) + self.assertEqual(ora_col.null_count, 0) + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + # number of children for a struct = 3 (num_dimensions, indices, values) + self.assertEqual(fetched_tab.schema.types[0].num_fields, 3) + fetched_df = fetched_tab.to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + async def test_8125(self): + "8125 - fetch data with multiple rows containing null values" self.__check_interop() ora_df = await self.conn.fetch_df_all( """ @@ -628,8 +734,8 @@ async def test_8123(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - async def test_8124(self): - "8124 - verify dtype for all Arrow types" + async def test_8126(self): + "8126 - verify dtype for all Arrow types" query = """ select cast(1 as number(10)) as col_int64, From 883c14859c286d6f1070ffd35e58f51906b70a5b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:55:43 -0600 Subject: [PATCH 115/239] Removed dead code. 
--- src/oracledb/impl/thin/queue.pyx | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index 4fcb6747..61cb3b38 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -506,8 +506,6 @@ cdef class ThinMsgPropsImpl(BaseMsgPropsImpl): """ Internal method for setting the payload from an object. """ - if not isinstance(value, ThinDbObjectImpl): - raise TypeError("Expected ThinDbObjectImpl instance.") self.payload_obj = value def set_payload_json(self, object json_val): From c8678fa68da5478486bd9d68748c05009fbd4f7a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:56:39 -0600 Subject: [PATCH 116/239] Fixed bug using AQ when attempting to dequeue with an invalid message id. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/messages/aq_deq.pyx | 6 +++++- tests/test_7800_aq_raw.py | 16 ++++++++++++++++ tests/test_7900_aq_raw_async.py | 16 ++++++++++++++++ 4 files changed, 39 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 5a758832..4a903736 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -42,6 +42,8 @@ Thin Mode Changes :attr:`DeqOptions.correlation` for buffered delivery mode. #) Fixed bug when fetching multiple consecutive null values into a :ref:`data frame `. +#) Fixed bug using :ref:`Oracle Advanced Queuing ` when + attempting to dequeue using an invalid :attr:`DeqOptions.msgid`. Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/messages/aq_deq.pyx b/src/oracledb/impl/thin/messages/aq_deq.pyx index 83428ffc..36ade017 100644 --- a/src/oracledb/impl/thin/messages/aq_deq.pyx +++ b/src/oracledb/impl/thin/messages/aq_deq.pyx @@ -67,6 +67,7 @@ cdef class AqDeqMessage(AqBaseMessage): bytes consumer_name_bytes bytes correlation_bytes bytes condition_bytes + bytes msgid_bytes uint16_t delivery_mode int deq_flags self._write_function_code(buf) @@ -135,7 +136,10 @@ cdef class AqDeqMessage(AqBaseMessage): if consumer_name_bytes is not None: buf.write_bytes_with_length(consumer_name_bytes) if self.deq_options_impl.msgid: - buf.write_bytes(self.deq_options_impl.msgid) + msgid_bytes = self.deq_options_impl.msgid[:16] + if len(msgid_bytes) < 16: + msgid_bytes += bytes(16 - len(msgid_bytes)) + buf.write_bytes(msgid_bytes) if correlation_bytes is not None: buf.write_bytes_with_length(correlation_bytes) buf.write_bytes(self.queue_impl.payload_toid) diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index b1343904..be18d1bf 100644 --- a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -482,6 +482,22 @@ def test_7829(self): self.conn.commit() self.assertEqual(msg.payload, value) + def test_7830(self): + "7830 - test deq options with msgid > 16 bytes" + queue = self.get_and_clear_queue("TEST_RAW_QUEUE") + queue.deqoptions.msgid = b"invalid_msgid_123456789" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with self.assertRaisesFullCode("ORA-25263"): + queue.deqone() + + def test_7831(self): + "7831 - test deq options with msgid < 16 bytes" + queue = self.get_and_clear_queue("TEST_RAW_QUEUE") + queue.deqoptions.msgid = b"short_msgid" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with self.assertRaisesFullCode("ORA-25263"): + queue.deqone() + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_7900_aq_raw_async.py b/tests/test_7900_aq_raw_async.py index af7808bb..90621c51 100644 --- a/tests/test_7900_aq_raw_async.py +++ 
b/tests/test_7900_aq_raw_async.py @@ -421,6 +421,22 @@ async def test_7925(self): await self.conn.commit() self.assertEqual(msg.payload, value) + async def test_7926(self): + "7926 - test deq options with msgid > 16 bytes" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + queue.deqoptions.msgid = b"invalid_msgid_123456789" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with self.assertRaisesFullCode("ORA-25263"): + await queue.deqone() + + async def test_7927(self): + "7927 - test deq options with msgid < 16 bytes" + queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + queue.deqoptions.msgid = b"short_msgid" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with self.assertRaisesFullCode("ORA-25263"): + await queue.deqone() + if __name__ == "__main__": test_env.run_test_cases() From 4bbc3203be88c1eb74b3ad42023ef8673b090cc6 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:57:11 -0600 Subject: [PATCH 117/239] Fixed bug resulting in an exception when Cursor.executemany() is called with bind variables and an empty parameter set (#508). --- doc/src/release_notes.rst | 3 ++ src/oracledb/cursor.py | 14 ++++++---- src/oracledb/impl/thick/cursor.pyx | 31 ++++++++++----------- tests/test_4000_cursor_executemany.py | 4 +++ tests/test_6100_cursor_executemany_async.py | 5 ++++ 5 files changed, 35 insertions(+), 22 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 4a903736..16ee99be 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -44,6 +44,9 @@ Thin Mode Changes frame `. #) Fixed bug using :ref:`Oracle Advanced Queuing ` when attempting to dequeue using an invalid :attr:`DeqOptions.msgid`. +#) Fixed bug resulting in an exception when :meth:`Cursor.executemany()` is + called with SQL containing bind variables and an empty parameter set + (`issue 508 `__). 
Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index 08fa1a51..596b2e20 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -755,9 +755,10 @@ def executemany( num_execs = self._impl._prepare_for_executemany( self, statement, parameters ) - self._impl.executemany( - self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) - ) + if num_execs > 0: + self._impl.executemany( + self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) + ) def fetchall(self) -> list: """ @@ -1007,9 +1008,10 @@ async def executemany( num_execs = self._impl._prepare_for_executemany( self, statement, parameters ) - await self._impl.executemany( - self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) - ) + if num_execs > 0: + await self._impl.executemany( + self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) + ) async def fetchall(self) -> list: """ diff --git a/src/oracledb/impl/thick/cursor.pyx b/src/oracledb/impl/thick/cursor.pyx index 2373dace..600fd591 100644 --- a/src/oracledb/impl/thick/cursor.pyx +++ b/src/oracledb/impl/thick/cursor.pyx @@ -346,22 +346,21 @@ cdef class ThickCursorImpl(BaseCursorImpl): if self.bind_vars is not None: self._perform_binds(cursor.connection, num_execs_int) - if num_execs_int > 0: - with nogil: - status = dpiStmt_executeMany(self._handle, mode, num_execs_int) - dpiContext_getError(driver_info.context, &error_info) - dpiStmt_getRowCount(self._handle, &rowcount) - if not self._stmt_info.isPLSQL: - self.rowcount = rowcount - if status < 0: - error = _create_new_from_info(&error_info) - if self._stmt_info.isPLSQL and error_info.offset == 0: - error.offset = rowcount - raise error.exc_type(error) - elif error_info.isWarning: - self.warning = _create_new_from_info(&error_info) - if self._stmt_info.isReturning or self._stmt_info.isPLSQL: - self._transform_binds() + with nogil: + status = dpiStmt_executeMany(self._handle, mode, num_execs_int) + dpiContext_getError(driver_info.context, &error_info) + dpiStmt_getRowCount(self._handle, &rowcount) + if not self._stmt_info.isPLSQL: + self.rowcount = rowcount + if status < 0: + error = _create_new_from_info(&error_info) + if self._stmt_info.isPLSQL and error_info.offset == 0: + error.offset = rowcount + raise error.exc_type(error) + elif error_info.isWarning: + self.warning = _create_new_from_info(&error_info) + if self._stmt_info.isReturning or self._stmt_info.isPLSQL: + self._transform_binds() def get_array_dml_row_counts(self): """ diff --git a/tests/test_4000_cursor_executemany.py b/tests/test_4000_cursor_executemany.py index d5df07e1..dba50e41 100644 --- a/tests/test_4000_cursor_executemany.py +++ b/tests/test_4000_cursor_executemany.py @@ -437,6 +437,10 @@ def test_4027(self): with self.assertRaisesFullCode("DPY-2016"): self.cursor.executemany(None, 4) + def test_4028(self): + "4028 - test executemany with empty parameter set" + self.cursor.executemany("insert into TestTempTable values (:1)", []) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_6100_cursor_executemany_async.py b/tests/test_6100_cursor_executemany_async.py index 28c9b7b0..8b4d35f5 100644 --- a/tests/test_6100_cursor_executemany_async.py +++ b/tests/test_6100_cursor_executemany_async.py @@ -378,6 +378,11 @@ async def test_6123(self): with self.assertRaisesFullCode("DPY-2001"): await cursor.executemany(None, [1, 2]) + async def test_6124(self): + "6124 - test executemany with empty parameter set" + sql = "insert into TestTempTable values (:1)" + await 
self.cursor.executemany(sql, []) + if __name__ == "__main__": test_env.run_test_cases() From 6e86db901c6998775d757a85357374a47b211788 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:58:14 -0600 Subject: [PATCH 118/239] Avoid segfault when fetching sparse vectors with flexible dimensions. --- doc/src/user_guide/dataframes.rst | 3 +- src/oracledb/errors.py | 5 ++++ src/oracledb/interchange/nanoarrow_bridge.pyx | 24 +++++++++------- tests/test_8000_dataframe.py | 28 +++++++++++++++++++ 4 files changed, 49 insertions(+), 11 deletions(-) diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index 2a6f11b3..3c1b35ff 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -174,7 +174,8 @@ When converting Oracle Database VECTORs: ``indices`` and ``values`` similar to :ref:`SparseVector objects `. -- VECTOR columns with flexible dimensions are supported. +- Fixed and flexible dimensions are supported for dense VECTOR columns. For + sparse VECTOR columns, the dimension of each vector must be the same. - VECTOR columns with flexible formats are not supported. Each vector value must have the same storage format data type. diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index df6ca6b4..1f066201 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -288,6 +288,7 @@ def _raise_not_supported(feature: str) -> None: ERR_PAYLOAD_CANNOT_BE_ENQUEUED = 2062 ERR_SCROLL_OUT_OF_RESULT_SET = 2063 ERR_POOL_MAX_LESS_THAN_MIN = 2064 +ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED = 2065 # error numbers that result in NotSupportedError ERR_TIME_NOT_SUPPORTED = 3000 @@ -893,6 +894,10 @@ def _raise_not_supported(feature: str) -> None: ERR_INVALID_NETWORK_NAME: ( '"{name}" includes characters that are not allowed' ), + ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED: ( + "Apache Arrow format does not support sparse vectors with flexible " + "dimensions" + ), ERR_ARROW_UNSUPPORTED_DATA_TYPE: ( "conversion from Oracle Database type {db_type_name} to Apache " "Arrow format is not supported" diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx index e259d0cd..461d7113 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ b/src/oracledb/interchange/nanoarrow_bridge.pyx @@ -604,26 +604,30 @@ cdef class OracleArrowArray: """ Append a sparse vector to the array. 
""" - cdef: - ArrowArray *num_dims_array = self.arrow_array.children[0] - ArrowArray *indices_array = self.arrow_array.children[1] - ArrowArray *values_array = self.arrow_array.children[2] + cdef ArrowArray *array + + # validate that the array supports sparse vectors + if self.arrow_type != NANOARROW_TYPE_STRUCT: + errors._raise_err(errors.ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED) # append number of dimensions - _check_nanoarrow(ArrowArrayAppendInt(num_dims_array, num_dims)) + array = self.arrow_array.children[0] + _check_nanoarrow(ArrowArrayAppendInt(array, num_dims)) # append indices array - append_uint32_array(indices_array, indices) + array = self.arrow_array.children[1] + append_uint32_array(array, indices) # append values array + array = self.arrow_array.children[2] if self.child_arrow_type == NANOARROW_TYPE_FLOAT: - append_float_array(values_array, values) + append_float_array(array, values) elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: - append_double_array(values_array, values) + append_double_array(array, values) elif self.child_arrow_type == NANOARROW_TYPE_INT8: - append_int8_array(values_array, values) + append_int8_array(array, values) elif self.child_arrow_type == NANOARROW_TYPE_UINT8: - append_uint8_array(values_array, values) + append_uint8_array(array, values) # indicate structure is completed _check_nanoarrow(ArrowArrayFinishElement(self.arrow_array)) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 95c8b549..d48e1fd4 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -1119,6 +1119,34 @@ def test_8038(self): """ ) + @unittest.skipUnless( + test_env.has_client_version(23, 7), "unsupported client" + ) + @unittest.skipUnless( + test_env.has_server_version(23, 7), "unsupported server" + ) + def test_8039(self): + "8039 - DPY-4007 -fetch sparse vectors with flexible dimensions" + self.__check_interop() + with self.assertRaisesFullCode("DPY-2065"): + self.conn.fetch_df_all( + """ + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 77.8]', 7, FLOAT64), + 7, + FLOAT64, + SPARSE + ) + union all + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + """ + ) + if __name__ == "__main__": test_env.run_test_cases() From d66fba83211a675e1afe72a38e6a3fc3205b4fc9 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:59:28 -0600 Subject: [PATCH 119/239] Test updates. 
--- tests/test_3000_subscription.py | 1 + tests/test_8000_dataframe.py | 2 +- tests/test_8100_dataframe_async.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/tests/test_3000_subscription.py b/tests/test_3000_subscription.py index 1b33f869..88c275cd 100644 --- a/tests/test_3000_subscription.py +++ b/tests/test_3000_subscription.py @@ -222,6 +222,7 @@ def test_3002(self): ) def test_3003(self): "3003 - test verifying what registerquery returns" + self.skipTest("fails intermittently") data = DMLSubscriptionData(5) qos_constants = [ oracledb.SUBSCR_QOS_QUERY, diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index d48e1fd4..369b7ce3 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -716,7 +716,7 @@ def test_8028(self): from dual """ decimal_query = ( - "select cast(123.45 as decimal(10, 2)) as col_decimal128" + "select cast(123.45 as decimal(10, 2)) as col_decimal128 from dual" ) # determine dtype kind enumeration diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 6ce27933..2737d9d2 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -756,7 +756,7 @@ async def test_8126(self): from dual """ decimal_query = ( - "select cast(123.45 as decimal(10, 2)) as col_decimal128" + "select cast(123.45 as decimal(10, 2)) as col_decimal128 from dual" ) # determine dtype kind enumeration From e2323c0c264266c05eb3e8355a9d6173636b1886 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 10:59:52 -0600 Subject: [PATCH 120/239] Avoid exception due to timing issue on interpreter shutdown. --- src/oracledb/impl/thin/pool.pyx | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/src/oracledb/impl/thin/pool.pyx b/src/oracledb/impl/thin/pool.pyx index 77c1bbc6..c93f8507 100644 --- a/src/oracledb/impl/thin/pool.pyx +++ b/src/oracledb/impl/thin/pool.pyx @@ -429,12 +429,13 @@ cdef class BaseThinPoolImpl(BasePoolImpl): """ cdef Protocol protocol = conn_impl._protocol with self._condition: - try: - protocol._end_request(conn_impl) - except: - if not in_del: - raise - self._return_connection_helper(conn_impl) + if self._open: + try: + protocol._end_request(conn_impl) + except: + if not in_del: + raise + self._return_connection_helper(conn_impl) def set_getmode(self, uint32_t value): """ From 2325059859ca9b4a517b8bfae7fa3c4f5b536ab2 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 26 Jun 2025 15:28:57 -0600 Subject: [PATCH 121/239] Preparing to release python-oracledb 3.2.0. --- doc/src/release_notes.rst | 4 ++-- src/oracledb/version.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 16ee99be..64e55048 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,8 +11,8 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. 
-oracledb `3.2.0 `__ (TBD) --------------------------------------------------------------------------------------------- +oracledb `3.2.0 `__ (June 2025) +-------------------------------------------------------------------------------------------------- Thin Mode Changes +++++++++++++++++ diff --git a/src/oracledb/version.py b/src/oracledb/version.py index 0aa11aed..5a4521a9 100644 --- a/src/oracledb/version.py +++ b/src/oracledb/version.py @@ -30,4 +30,4 @@ # file doc/src/conf.py both reference this file directly. # ----------------------------------------------------------------------------- -__version__ = "3.2.0b1" +__version__ = "3.2.0" From afa5ba66c0a9ee84f9bd9268e5079f9a916e3333 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:38:39 -0600 Subject: [PATCH 122/239] Bump version in preparation for new changes. --- doc/src/release_notes.rst | 13 +++++++++++++ src/oracledb/version.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 64e55048..6df9e972 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,6 +11,19 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. +oracledb `3.3.0 `__ (TBD) +-------------------------------------------------------------------------------------------------- + +Thin Mode Changes ++++++++++++++++++ + +Thick Mode Changes +++++++++++++++++++ + +Common Changes +++++++++++++++ + + oracledb `3.2.0 `__ (June 2025) -------------------------------------------------------------------------------------------------- diff --git a/src/oracledb/version.py b/src/oracledb/version.py index 5a4521a9..9a33e6af 100644 --- a/src/oracledb/version.py +++ b/src/oracledb/version.py @@ -30,4 +30,4 @@ # file doc/src/conf.py both reference this file directly. # ----------------------------------------------------------------------------- -__version__ = "3.2.0" +__version__ = "3.3.0b1" From 916ca5dcb0babba4718fde8c712dbf16213ed95a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:39:26 -0600 Subject: [PATCH 123/239] Improve thin mode error message for connection pool tagging. --- doc/src/release_notes.rst | 5 +++++ src/oracledb/impl/thin/pool.pyx | 4 ++-- tests/test_7300_unsupported_features_thin.py | 6 ++++++ 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 6df9e972..e2fa0dcd 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -17,6 +17,11 @@ oracledb `3.3.0 Date: Fri, 11 Jul 2025 11:40:01 -0600 Subject: [PATCH 124/239] Simplify code. --- src/oracledb/impl/thin/messages/base.pyx | 46 +++++++++++++----------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 4bb98aa4..581c74e4 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -216,6 +216,28 @@ cdef class Message: if not buf._caps.supports_end_of_response: self.end_of_response = True + cdef int _process_keyword_value_pairs(self, ReadBuffer buf, + uint16_t num_pairs) except -1: + """ + Processes the keyword/value pairs returned by the server. 
+ """ + cdef: + uint16_t i, num_bytes, keyword_num + bytes text_value, binary_value + for i in range(num_pairs): + text_value = binary_value = None + buf.read_ub2(&num_bytes) # text value + if num_bytes > 0: + text_value = buf.read_bytes() + buf.read_ub2(&num_bytes) # binary value + if num_bytes > 0: + binary_value = buf.read_bytes() + buf.read_ub2(&keyword_num) # keyword num + if keyword_num == TNS_KEYWORD_NUM_CURRENT_SCHEMA: + self.conn_impl._current_schema = text_value.decode() + elif keyword_num == TNS_KEYWORD_NUM_EDITION: + self.conn_impl._edition = text_value.decode() + cdef int _process_message(self, ReadBuffer buf, uint8_t message_type) except -1: cdef uint64_t token_num @@ -342,14 +364,7 @@ cdef class Message: buf.skip_ub1() # skip length of DTYs buf.read_ub2(&num_elements) buf.skip_ub1() # skip length - for i in range(num_elements): - buf.read_ub2(&temp16) - if temp16 > 0: # skip key - buf.skip_raw_bytes_chunked() - buf.read_ub2(&temp16) - if temp16 > 0: # skip value - buf.skip_raw_bytes_chunked() - buf.skip_ub2() # skip flags + self._process_keyword_value_pairs(buf, num_elements) buf.skip_ub4() # skip overall flags elif opcode == TNS_SERVER_PIGGYBACK_EXT_SYNC: buf.skip_ub2() # skip number of DTYs @@ -1172,7 +1187,7 @@ cdef class MessageWithData(Message): cdef int _process_return_parameters(self, ReadBuffer buf) except -1: cdef: - uint16_t keyword_num, num_params, num_bytes + uint16_t num_params, num_bytes uint32_t num_rows, i uint64_t rowcount bytes key_value @@ -1184,18 +1199,7 @@ cdef class MessageWithData(Message): if num_bytes > 0: buf.skip_raw_bytes(num_bytes) buf.read_ub2(&num_params) # num key/value pairs - for i in range(num_params): - buf.read_ub2(&num_bytes) # key - if num_bytes > 0: - key_value = buf.read_bytes() - buf.read_ub2(&num_bytes) # value - if num_bytes > 0: - buf.skip_raw_bytes_chunked() - buf.read_ub2(&keyword_num) # keyword num - if keyword_num == TNS_KEYWORD_NUM_CURRENT_SCHEMA: - self.conn_impl._current_schema = key_value.decode() - elif keyword_num == TNS_KEYWORD_NUM_EDITION: - self.conn_impl._edition = key_value.decode() + self._process_keyword_value_pairs(buf, num_params) buf.read_ub2(&num_bytes) # registration if num_bytes > 0: buf.skip_raw_bytes(num_bytes) From ab59c5f48714bc7fb6fc1b49087ae0d2af2be708 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:41:48 -0600 Subject: [PATCH 125/239] Test improvements. 
--- tests/test_1000_module.py | 5 +- tests/test_1100_connection.py | 81 +++++------------ tests/test_1600_dml_returning.py | 2 +- tests/test_1700_error.py | 2 +- tests/test_2200_number_var.py | 5 +- tests/test_2300_object_var.py | 9 +- tests/test_2400_pool.py | 86 +++++-------------- tests/test_2500_string_var.py | 5 +- tests/test_3000_subscription.py | 4 +- tests/test_3100_boolean_var.py | 14 +-- tests/test_3500_json.py | 4 +- tests/test_3700_var.py | 8 +- tests/test_3800_typehandler.py | 3 +- tests/test_4300_cursor_other.py | 23 +++-- tests/test_5300_connection_async.py | 22 ++--- tests/test_5400_cursor_execute_async.py | 7 +- tests/test_5500_pool_async.py | 17 ++-- tests/test_5600_dbobject_async.py | 5 +- tests/test_5700_lob_var_async.py | 13 +-- tests/test_5800_cursor_var_async.py | 8 +- tests/test_5900_dml_returning_async.py | 5 +- tests/test_6000_typehandler_async.py | 7 +- tests/test_6100_cursor_executemany_async.py | 7 +- tests/test_6200_cursor_callproc_async.py | 6 +- tests/test_6300_cursor_other_async.py | 20 ++--- tests/test_6400_vector_var.py | 3 +- tests/test_6600_defaults.py | 8 +- tests/test_6700_json_23.py | 4 +- tests/test_6800_error_async.py | 7 +- tests/test_6900_oson.py | 5 +- ..._7000_connection_async_shortcut_methods.py | 8 +- tests/test_7300_unsupported_features_thin.py | 4 +- tests/test_7400_tpc_async.py | 8 +- tests/test_7500_binary_vector.py | 4 +- tests/test_7600_pipelining_async.py | 7 +- tests/test_7700_sparse_vector.py | 4 +- tests/test_7900_aq_raw_async.py | 6 +- tests/test_8000_dataframe.py | 74 +++------------- tests/test_8100_dataframe_async.py | 21 +---- tests/test_8200_aq_bulk_async.py | 78 +++-------------- tests/test_8400_aq_dbobject_async.py | 5 +- tests/test_8500_aq_json_async.py | 4 +- tests/test_8600_cursor_scrollable_async.py | 6 +- tests/test_env.py | 78 +++++++++++++++++ 44 files changed, 235 insertions(+), 467 deletions(-) diff --git a/tests/test_1000_module.py b/tests/test_1000_module.py index 617234a9..7ab55473 100644 --- a/tests/test_1000_module.py +++ b/tests/test_1000_module.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,6 @@ """ import datetime -import unittest import oracledb import test_env @@ -169,7 +168,7 @@ def test_1006(self): ) self.assertIs(oracledb.version, oracledb.__version__) - @unittest.skipUnless(test_env.get_is_thin(), "not relevant for thick mode") + @test_env.skip_unless_thin_mode() def test_1007(self): "1007 - test clientversion() fails without init_oracle_client()" with self.assertRaisesFullCode("DPY-2021"): diff --git a/tests/test_1100_connection.py b/tests/test_1100_connection.py index bbca631a..acf13bb7 100644 --- a/tests/test_1100_connection.py +++ b/tests/test_1100_connection.py @@ -30,7 +30,6 @@ import string import threading import time -import unittest import oracledb import test_env @@ -84,7 +83,7 @@ def test_1100(self): ) self.assertEqual(conn.thin, test_env.get_is_thin()) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_1101(self): "1101 - test use of application context" namespace = "CLIENTCONTEXT" @@ -175,7 +174,7 @@ def test_1106(self): password=test_env.get_main_password() + "X" ) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_1107(self): "1107 - test changing password" conn = test_env.get_connection() @@ -189,7 +188,7 @@ def test_1107(self): conn = test_env.get_connection(password=new_password) conn.changepassword(new_password, test_env.get_main_password()) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_1108(self): "1108 - test changing password to an invalid value" conn = test_env.get_connection() @@ -201,7 +200,7 @@ def test_1108(self): with self.assertRaisesFullCode("ORA-01017", "ORA-00988", "ORA-28008"): conn.changepassword("incorrect old password", new_password) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_1109(self): "1109 - test connecting with password containing / and @ symbols" conn = test_env.get_connection() @@ -227,7 +226,7 @@ def test_1110(self): with self.assertRaisesFullCode("DPY-1001"): conn.rollback() - @unittest.skipIf(test_env.get_is_thin(), "not relevant for thin mode") + @test_env.skip_unless_thick_mode() def test_1111(self): "1111 - test creating a connection using a handle" conn = test_env.get_connection() @@ -370,10 +369,7 @@ def test_1120(self): self.conn.ping() self.assertRoundTrips(1) - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support two-phase commit yet", - ) + @test_env.skip_unless_thick_mode() def test_1121(self): "1121 - test begin, prepare, cancel transaction" conn = test_env.get_connection() @@ -395,10 +391,7 @@ def test_1121(self): (count,) = cursor.fetchone() self.assertEqual(count, 0) - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support two-phase commit yet", - ) + @test_env.skip_unless_thick_mode() def test_1122(self): "1122 - test multiple transactions on the same connection" conn = test_env.get_connection() @@ -438,10 +431,7 @@ def test_1122(self): cursor.execute("select IntCol, StringCol1 from TestTempTable") self.assertEqual(cursor.fetchall(), expected_rows) - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support two-phase commit yet", - ) + @test_env.skip_unless_thick_mode() def test_1123(self): "1123 - test multiple global transactions on the same connection" 
conn = test_env.get_connection() @@ -499,10 +489,7 @@ def test_1123(self): cursor.execute("select IntCol, StringCol1 from TestTempTable") self.assertEqual(cursor.fetchall(), expected_rows) - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support two-phase commit yet", - ) + @test_env.skip_unless_thick_mode() def test_1124(self): "1124 - test creating global txn after a local txn" conn = test_env.get_connection() @@ -562,7 +549,7 @@ def perform_cancel(): (user,) = cursor.fetchone() self.assertEqual(user, test_env.get_main_user().upper()) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_1127(self): "1127 - test changing password during connect" conn = test_env.get_connection() @@ -625,7 +612,7 @@ def test_1130(self): cursor.callproc("dbms_output.get_line", (string_var, number_var)) self.assertEqual(string_var.getvalue(), test_string) - @unittest.skipUnless(test_env.has_client_version(18), "unsupported client") + @test_env.skip_unless_call_timeout_supported() def test_1131(self): "1131 - test connection call_timeout" conn = test_env.get_connection() @@ -694,18 +681,15 @@ def test_1135(self): (instance_name,) = cursor.fetchone() self.assertEqual(conn.instance_name.upper(), instance_name) - @unittest.skipUnless( - test_env.has_client_version(18), "not supported on this client" - ) + @test_env.skip_unless_call_timeout_supported() def test_1136(self): "1136 - test deprecated attributes" conn = test_env.get_connection() conn.callTimeout = 500 self.assertEqual(conn.callTimeout, 500) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") - @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") - @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") + @test_env.skip_if_drcp() + @test_env.skip_unless_long_passwords_supported() def test_1137(self): "1137 - test maximum allowed length for password" conn = test_env.get_connection() @@ -784,9 +768,7 @@ def test_1143(self): self.assertEqual(conn.username, test_env.get_main_user()) self.assertEqual(conn.proxy_user, proxy_user) - @unittest.skipIf( - not test_env.get_is_thin(), "thick mode doesn't support SDU yet" - ) + @test_env.skip_unless_thin_mode() def test_1144(self): "1144 - test connection.sdu" conn = test_env.get_connection() @@ -799,10 +781,7 @@ def test_1145(self): with self.assertRaisesFullCode("DPY-2023"): test_env.get_connection(conn_class=oracledb.ConnectionPool) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support program yet", - ) + @test_env.skip_unless_thin_mode() def test_1146(self): "1146 - test passing program when creating a connection" sql = ( @@ -811,10 +790,7 @@ def test_1146(self): ) self.__verify_connect_arg("program", "newprogram", sql) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support machine yet", - ) + @test_env.skip_unless_thin_mode() def test_1147(self): "1147 - test passing machine when creating a connection" sql = ( @@ -823,10 +799,7 @@ def test_1147(self): ) self.__verify_connect_arg("machine", "newmachine", sql) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support terminal yet", - ) + @test_env.skip_unless_thin_mode() def test_1148(self): "1148 - test passing terminal when creating a connection" sql = ( @@ -835,10 +808,7 @@ def test_1148(self): ) self.__verify_connect_arg("terminal", "newterminal", sql) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support osuser yet", - ) + 
@test_env.skip_unless_thin_mode() def test_1149(self): "1149 - test passing osuser when creating a connection" sql = ( @@ -855,9 +825,7 @@ def test_1150(self): ) self.__verify_connect_arg("driver_name", "newdriver", sql) - @unittest.skipUnless( - test_env.get_is_thin(), "thick mode doesn't support session_id yet" - ) + @test_env.skip_unless_thin_mode() def test_1151(self): "1151 - test getting session id" conn = test_env.get_connection() @@ -866,9 +834,7 @@ def test_1151(self): (fetched_value,) = cursor.fetchone() self.assertEqual(conn.session_id, fetched_value) - @unittest.skipUnless( - test_env.get_is_thin(), "thick mode doesn't support serial_num yet" - ) + @test_env.skip_unless_thin_mode() def test_1152(self): "1152 - test getting session serial number" conn = test_env.get_connection() @@ -879,10 +845,7 @@ def test_1152(self): (fetched_value,) = cursor.fetchone() self.assertEqual(conn.serial_num, fetched_value) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support registered protocols", - ) + @test_env.skip_unless_thin_mode() def test_1153(self): "1153 - test passed params in hook with standalone connection" sdu = 4096 diff --git a/tests/test_1600_dml_returning.py b/tests/test_1600_dml_returning.py index fd7787a2..f9bd20c2 100644 --- a/tests/test_1600_dml_returning.py +++ b/tests/test_1600_dml_returning.py @@ -526,7 +526,7 @@ def test_1622(self): self.cursor.execute(sql, in_val=25, out_val=out_val) self.assertEqual(out_val.getvalue(), [25]) - @unittest.skipUnless(test_env.get_is_thin(), "cannot be checked") + @test_env.skip_unless_thin_mode() def test_1623(self): "1623 - execute DML returning with duplicated binds" self.cursor.execute("truncate table TestTempTable") diff --git a/tests/test_1700_error.py b/tests/test_1700_error.py index 201ca334..4791aa72 100644 --- a/tests/test_1700_error.py +++ b/tests/test_1700_error.py @@ -197,7 +197,7 @@ def test_1708(self): self.assertEqual(error_obj.full_code, f"ORA-{code}") self.assertTrue("Help:" not in error_obj.message) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_1709(self): "1709 - error from killed connection is deemed recoverable" admin_conn = test_env.get_admin_connection() diff --git a/tests/test_2200_number_var.py b/tests/test_2200_number_var.py index f98af3fd..0c0438c4 100644 --- a/tests/test_2200_number_var.py +++ b/tests/test_2200_number_var.py @@ -27,7 +27,6 @@ """ import decimal -import unittest import oracledb import test_env @@ -74,9 +73,7 @@ def setUp(self): self.raw_data.append(data_tuple) self.data_by_key[i] = data_tuple - @unittest.skipUnless( - test_env.has_client_version(12, 1), "not supported on this client" - ) + @test_env.skip_unless_plsql_boolean_supported() def test_2200(self): "2200 - test binding in a boolean" result = self.cursor.callfunc( diff --git a/tests/test_2300_object_var.py b/tests/test_2300_object_var.py index 87d8ce32..919fe3bf 100644 --- a/tests/test_2300_object_var.py +++ b/tests/test_2300_object_var.py @@ -28,7 +28,6 @@ import datetime import decimal -import unittest import oracledb import test_env @@ -662,13 +661,13 @@ def test_2327(self): ) self.assertEqual(result, 7146445847327) - @unittest.skipIf(test_env.get_is_thin(), "thin mode supports xmltype") + @test_env.skip_unless_thick_mode() def test_2328(self): "2328 - test object with unknown type in one of its attributes" typ = self.conn.gettype("UDT_OBJECTWITHXMLTYPE") self.assertEqual(typ.attributes[1].type, oracledb.DB_TYPE_UNKNOWN) - 
@unittest.skipIf(test_env.get_is_thin(), "thin mode supports xmltype") + @test_env.skip_unless_thick_mode() def test_2329(self): "2329 - test object with unknown type as the element type" typ = self.conn.gettype("UDT_XMLTYPEARRAY") @@ -813,9 +812,7 @@ def test_2338(self): result = [i for i in obj] self.assertEqual(result, [5, 10, 15]) - @unittest.skipUnless( - test_env.get_is_thin(), "thick mode does not support xmltype" - ) + @test_env.skip_unless_thin_mode() def test_2339(self): "2339 - test fetching an object containing an XmlType instance" num_val = 2339 diff --git a/tests/test_2400_pool.py b/tests/test_2400_pool.py index aab8e5f8..2496fd95 100644 --- a/tests/test_2400_pool.py +++ b/tests/test_2400_pool.py @@ -196,9 +196,7 @@ def test_2400(self): self.assertEqual(pool.timeout, 0) self.assertEqual(pool.username, test_env.get_main_user()) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support proxy users yet" - ) + @test_env.skip_unless_thick_mode() def test_2401(self): "2401 - test that proxy authentication is possible" pool = test_env.get_pool( @@ -299,7 +297,7 @@ def test_2405(self): for thread in threads: thread.join() - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_2406(self): "2406 - test session pool with various types of purity" pool = test_env.get_pool( @@ -335,10 +333,8 @@ def test_2406(self): cursor.close() pool.release(conn) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support proxy users yet" - ) + @test_env.skip_if_drcp() + @test_env.skip_unless_thick_mode() def test_2407(self): "2407 - test heterogeneous pool with user and password specified" pool = test_env.get_pool( @@ -371,10 +367,8 @@ def test_2407(self): ) conn.close() - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support proxy users yet" - ) + @test_env.skip_if_drcp() + @test_env.skip_unless_thick_mode() def test_2408(self): "2408 - test heterogeneous pool without user and password specified" pool = test_env.get_pool( @@ -402,9 +396,7 @@ def test_2408(self): conn, test_env.get_proxy_user(), test_env.get_main_user() ) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support proxy users yet" - ) + @test_env.skip_unless_thick_mode() def test_2409(self): "2409 - test heterogeneous pool with wrong password specified" pool = test_env.get_pool( @@ -419,9 +411,7 @@ def test_2409(self): test_env.get_proxy_user(), "this is the wrong password" ) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support tagging yet" - ) + @test_env.skip_unless_thick_mode() def test_2410(self): "2410 - test tagging a session" pool = test_env.get_pool( @@ -447,10 +437,7 @@ def test_2410(self): self.assertEqual(conn.tag, tag_utc) conn.close() - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support session callbacks yet", - ) + @test_env.skip_unless_thick_mode() def test_2411(self): "2411 - test PL/SQL session callbacks" if not test_env.has_client_version(12, 2): @@ -500,9 +487,7 @@ def test_2411(self): self.assertEqual(results, expected_results) conn.close() - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support tagging yet" - ) + @test_env.skip_unless_thick_mode() def test_2412(self): "2412 - testTagging with Invalid key" pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_NOWAIT) @@ -544,10 +529,7 @@ def test_2414(self): 
self.assertEqual(pool.opened, 2, "opened (2)") pool.release(conn3) - @unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support all the pool params yet", - ) + @test_env.skip_unless_thick_mode() def test_2415(self): "2415 - test the reconfigure values are changed and rest unchanged" self.__perform_reconfigure_test("min", 5) @@ -568,9 +550,7 @@ def test_2415(self): if test_env.has_client_version(19, 11): self.__perform_reconfigure_test("soda_metadata_cache", True) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support tagging yet" - ) + @test_env.skip_unless_thick_mode() def test_2417(self): "2417 - test that session callbacks are being called correctly" pool = test_env.get_pool( @@ -693,7 +673,7 @@ def session_callback(cls, conn, requested_tag): pass self.assertEqual(Counter.num_calls, 2) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_2424(self): "2424 - drop the pooled connection on receiving dead connection error" admin_conn = test_env.get_admin_connection() @@ -753,7 +733,7 @@ def test_2427(self): pool.acquire(), test_env.get_proxy_user(), test_env.get_main_user() ) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_2428(self): "2428 - test acquiring conn from pool in LIFO order" pool = test_env.get_pool( @@ -844,12 +824,7 @@ def test_2437(self): with self.assertRaisesFullCode("DPY-2023"): test_env.get_pool(connectiontype=int) - @unittest.skipUnless( - test_env.has_server_version(12, 2), "not supported on this server" - ) - @unittest.skipUnless( - test_env.has_client_version(19), "not supported on this client" - ) + @test_env.skip_unless_pool_timed_wait_supported() def test_2438(self): "2438 - ensure that timed wait times out with appropriate exception" pool = test_env.get_pool( @@ -858,9 +833,7 @@ def test_2438(self): with self.assertRaisesFullCode("DPY-4005"): pool.acquire() - @unittest.skipUnless( - test_env.has_client_version(18), "not supported on this client" - ) + @test_env.skip_unless_call_timeout_supported() def test_2439(self): "2439 - ensure call timeout is reset on connections returned by pool" pool = test_env.get_pool(ping_timeout=1000, ping_interval=0) @@ -888,10 +861,7 @@ def test_2441(self): self.assertEqual(pool.busy, num_conns) self.assertEqual(len(active_sessions), num_conns) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support program yet", - ) + @test_env.skip_unless_thin_mode() def test_2442(self): "2442 - test passing program when creating a pool" sql = ( @@ -900,10 +870,7 @@ def test_2442(self): ) self.__verify_create_arg("program", "newprogram", sql) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support machine yet", - ) + @test_env.skip_unless_thin_mode() def test_2443(self): "2443 - test passing machine when creating a pool" sql = ( @@ -912,10 +879,7 @@ def test_2443(self): ) self.__verify_create_arg("machine", "newmachine", sql) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support terminal yet", - ) + @test_env.skip_unless_thin_mode() def test_2444(self): "2444 - test passing terminal when creating a pool" sql = ( @@ -924,10 +888,7 @@ def test_2444(self): ) self.__verify_create_arg("terminal", "newterminal", sql) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support osuser yet", - ) + @test_env.skip_unless_thin_mode() def test_2445(self): "2445 - test passing osuser when creating a pool" sql = ( @@ -944,10 
+905,7 @@ def test_2446(self): ) self.__verify_create_arg("driver_name", "newdriver", sql) - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support registered protocols", - ) + @test_env.skip_unless_thin_mode() def test_2447(self): "2447 - test register_parameter with pooled connection" sdu = 4096 @@ -1066,7 +1024,7 @@ def test_2456(self): with self.assertRaisesFullCode("DPY-2064"): test_env.get_pool(min=3, max=2) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_2457(self): "2457 - ping pooled connection on receiving dead connection error" admin_conn = test_env.get_admin_connection() diff --git a/tests/test_2500_string_var.py b/tests/test_2500_string_var.py index c9b363a1..3dff96c6 100644 --- a/tests/test_2500_string_var.py +++ b/tests/test_2500_string_var.py @@ -536,10 +536,7 @@ def test_2533(self): cursor.execute("select IntCol, StringCol1 from TestTempTable") self.assertEqual(cursor.fetchone(), (1, string_val)) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support fetching XMLType > VARCHAR2", - ) + @test_env.skip_unless_thin_mode() def test_2534(self): "2534 - test inserting and fetching XMLType (32K) as a string" self.cursor.execute("truncate table TestTempXML") diff --git a/tests/test_3000_subscription.py b/tests/test_3000_subscription.py index 88c275cd..d34b89c6 100644 --- a/tests/test_3000_subscription.py +++ b/tests/test_3000_subscription.py @@ -78,9 +78,7 @@ def _process_message(self, message): self.rowids.append(row.rowid) -@unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support subscriptions" -) +@test_env.skip_unless_thick_mode() class TestCase(test_env.BaseTestCase): @unittest.skipUnless( test_env.has_client_version(23), "crashes in older clients" diff --git a/tests/test_3100_boolean_var.py b/tests/test_3100_boolean_var.py index 41bb8a83..c9ee0998 100644 --- a/tests/test_3100_boolean_var.py +++ b/tests/test_3100_boolean_var.py @@ -26,14 +26,11 @@ 3100 - Module for testing boolean variables """ -import unittest - import oracledb import test_env -@unittest.skipUnless(test_env.has_client_version(12, 1), "unsupported client") -@unittest.skipUnless(test_env.has_server_version(12, 1), "unsupported server") +@test_env.skip_unless_plsql_boolean_supported() class TestCase(test_env.BaseTestCase): def __test_bind_value_as_boolean(self, value): expected_result = str(bool(value)).upper() @@ -102,8 +99,7 @@ def test_3108(self): ) self.assertIsNone(result) - @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") - @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") + @test_env.skip_unless_native_boolean_supported() def test_3109(self): "3109 - test binding and fetching boolean with 23ai" for value in (True, False): @@ -113,8 +109,7 @@ def test_3109(self): self.assertIsInstance(fetched_value, bool) self.assertEqual(fetched_value, not value) - @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") - @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") + @test_env.skip_unless_native_boolean_supported() def test_3110(self): "3110 - test binding and fetching string literals that represent True" self.cursor.execute("truncate table TestBooleans") @@ -129,8 +124,7 @@ def test_3110(self): expected_values = [(True, True) for _ in true_values] self.assertEqual(self.cursor.fetchall(), expected_values) - @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") - 
@unittest.skipUnless(test_env.has_server_version(23), "unsupported server") + @test_env.skip_unless_native_boolean_supported() def test_3111(self): "3111 - test binding and fetching string literals that represent False" self.cursor.execute("truncate table TestBooleans") diff --git a/tests/test_3500_json.py b/tests/test_3500_json.py index b706ab18..c5a6a11a 100644 --- a/tests/test_3500_json.py +++ b/tests/test_3500_json.py @@ -28,14 +28,12 @@ import datetime import decimal -import unittest import oracledb import test_env -@unittest.skipUnless(test_env.has_client_version(21), "unsupported client") -@unittest.skipUnless(test_env.has_server_version(21), "unsupported server") +@test_env.skip_unless_native_json_supported() class TestCase(test_env.BaseTestCase): json_data = [ True, diff --git a/tests/test_3700_var.py b/tests/test_3700_var.py index f5fa17a7..91737697 100644 --- a/tests/test_3700_var.py +++ b/tests/test_3700_var.py @@ -28,7 +28,6 @@ import datetime import decimal -import unittest import oracledb import test_env @@ -361,8 +360,7 @@ def test_3721(self): wrong_obj_type = self.conn.gettype("UDT_OBJECTARRAY") self._test_negative_set_and_get(wrong_obj_type, obj) - @unittest.skipUnless(test_env.has_client_version(21), "unsupported client") - @unittest.skipUnless(test_env.has_server_version(21), "unsupported server") + @test_env.skip_unless_native_json_supported() def test_3722(self): "3722 - setting values on variables of type DB_TYPE_JSON" json_data = [ @@ -425,9 +423,7 @@ def test_3724(self): [(None, None, None, None, None, None, None)], ) - @unittest.skipIf( - not test_env.get_is_thin(), "thick mode doesn't support DB_TYPE_UROWID" - ) + @test_env.skip_unless_thin_mode() def test_3725(self): "3725 - setting values on variables of type DB_TYPE_UROWID" self._test_negative_set_and_get(oracledb.DB_TYPE_UROWID, 12345) diff --git a/tests/test_3800_typehandler.py b/tests/test_3800_typehandler.py index 032b6e74..d6faf521 100644 --- a/tests/test_3800_typehandler.py +++ b/tests/test_3800_typehandler.py @@ -28,7 +28,6 @@ import datetime import json -import unittest import oracledb import test_env @@ -217,7 +216,7 @@ def output_type_handler(cursor, metadata): expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] self.assertEqual(self.cursor.fetchall(), expected_data) - @unittest.skipUnless(test_env.has_server_version(21), "unsupported server") + @test_env.skip_unless_native_json_supported() def test_3806(self): "3806 - output type handler for fetching 21c JSON" diff --git a/tests/test_4300_cursor_other.py b/tests/test_4300_cursor_other.py index 6f1f1a9d..71a9faaa 100644 --- a/tests/test_4300_cursor_other.py +++ b/tests/test_4300_cursor_other.py @@ -27,7 +27,6 @@ """ import decimal -import unittest import oracledb import test_env @@ -648,7 +647,7 @@ def test_4346(self): self.assertIsNone(self.cursor.bindvars.get("a")) self.assertIsInstance(self.cursor.bindvars["b"], oracledb.Var) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_4347(self): "4547 - kill connection with open cursor" admin_conn = test_env.get_admin_connection() @@ -663,7 +662,7 @@ def test_4347(self): cursor.execute("select user from dual") self.assertFalse(conn.is_healthy()) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() def test_4348(self): "4348 - kill connection in cursor context manager" admin_conn = test_env.get_admin_connection() @@ -879,12 +878,7 @@ def test_4359(self): fetched_data = [(n, c.read()) for n, c in 
self.cursor] self.assertEqual(fetched_data, data) - @unittest.skipUnless( - test_env.has_server_version(12, 2), "unsupported database" - ) - @unittest.skipUnless( - test_env.has_client_version(12, 2), "unsupported database" - ) + @test_env.skip_unless_json_supported() def test_4360(self): "4360 - fetch JSON columns as Python objects" expected_data = [ @@ -894,10 +888,7 @@ def test_4360(self): self.cursor.execute("select * from TestJsonCols order by IntCol") self.assertEqual(self.cursor.fetchall(), expected_data) - @unittest.skipUnless( - test_env.has_server_version(23), "unsupported database" - ) - @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") + @test_env.skip_unless_domains_supported() def test_4361(self): "4361 - fetch table with domain and annotations" self.cursor.execute("select * from TableWithDomainAndAnnotations") @@ -1000,6 +991,12 @@ def test_4368(self): (fetched_value,) = self.cursor.fetchone() self.assertEqual(fetched_value, value) + def test_4369(self): + "4369 - access cursor.rowcount after closing connection" + with test_env.get_connection() as conn: + cursor = conn.cursor() + self.assertEqual(cursor.rowcount, -1) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_5300_connection_async.py b/tests/test_5300_connection_async.py index 19c69356..c8703400 100644 --- a/tests/test_5300_connection_async.py +++ b/tests/test_5300_connection_async.py @@ -29,15 +29,12 @@ import asyncio import random import string -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): requires_connection = False @@ -162,7 +159,7 @@ async def test_5306(self): password=test_env.get_main_password() + "X", ) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5307(self): "5307 - test changing password" async with test_env.get_connection_async() as conn: @@ -182,7 +179,7 @@ async def test_5307(self): new_password, test_env.get_main_password() ) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5308(self): "5308 - test changing password to an invalid value" async with test_env.get_connection_async() as conn: @@ -202,7 +199,7 @@ async def test_5308(self): "incorrect old password", new_password ) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5309(self): "5309 - test connecting with password containing / and @ symbols" async with test_env.get_connection_async() as conn: @@ -341,10 +338,7 @@ async def test_5325(self): coroutines = [self.__verify_fetched_data(conn) for i in range(3)] await asyncio.gather(*coroutines) - @unittest.skipIf( - test_env.get_is_implicit_pooling(), - "sessions can change with implicit pooling", - ) + @test_env.skip_if_implicit_pooling() async def test_5326(self): "5326 - test connection cancel" async with test_env.get_connection_async() as conn: @@ -366,7 +360,7 @@ async def perform_work(): (user,) = await cursor.fetchone() self.assertEqual(user, test_env.get_main_user().upper()) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5327(self): "5327 - test changing password during connect" async with test_env.get_connection_async() as conn: @@ -511,8 +505,8 @@ async def test_5335(self): (instance_name,) = await cursor.fetchone() 
self.assertEqual(conn.instance_name.upper(), instance_name) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") - @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") + @test_env.skip_if_drcp() + @test_env.skip_unless_long_passwords_supported() async def test_5337(self): "5337 - test maximum allowed length for password" async with test_env.get_connection_async() as conn: diff --git a/tests/test_5400_cursor_execute_async.py b/tests/test_5400_cursor_execute_async.py index 3026af78..556ed5bf 100644 --- a/tests/test_5400_cursor_execute_async.py +++ b/tests/test_5400_cursor_execute_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,15 +27,12 @@ """ import collections -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_5400(self): "5400 - test executing a statement without any arguments" diff --git a/tests/test_5500_pool_async.py b/tests/test_5500_pool_async.py index 520c5ed0..4e8aa82a 100644 --- a/tests/test_5500_pool_async.py +++ b/tests/test_5500_pool_async.py @@ -27,15 +27,12 @@ """ import asyncio -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): require_connection = False @@ -162,7 +159,7 @@ async def test_5504(self): finally: await pool.close(force=True) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5505(self): "5505 - test session pool with various types of purity" pool = test_env.get_pool_async(min=1, max=8, increment=1) @@ -305,7 +302,7 @@ async def session_callback(cls, conn, requested_tag): finally: await pool.close(force=True) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5514(self): "5514 - drop the pooled connection on receiving dead connection error" admin_conn = await test_env.get_admin_connection_async() @@ -379,7 +376,7 @@ async def test_5517(self): finally: await pool.close(force=True) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5518(self): "5518 - test acquiring conn from pool in LIFO order" pool = test_env.get_pool_async(min=5, max=10, increment=1) @@ -494,9 +491,7 @@ async def test_5527(self): with self.assertRaisesFullCode("DPY-2023"): test_env.get_pool_async(connectiontype=int) - @unittest.skipUnless( - test_env.has_server_version(12, 2), "not supported on this server" - ) + @test_env.skip_unless_pool_timed_wait_supported() async def test_5528(self): "5528 - ensure that timed wait times out with appropriate exception" pool = test_env.get_pool_async( @@ -634,7 +629,7 @@ async def test_5542(self): with self.assertRaisesFullCode("DPY-2064"): test_env.get_pool_async(min=3, max=2) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_5543(self): "5543 - ping pooled connection on receiving dead connection 
error" admin_conn = await test_env.get_admin_connection_async() diff --git a/tests/test_5600_dbobject_async.py b/tests/test_5600_dbobject_async.py index b376f727..0a4edcad 100644 --- a/tests/test_5600_dbobject_async.py +++ b/tests/test_5600_dbobject_async.py @@ -28,15 +28,12 @@ import datetime import decimal -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): maxDiff = None diff --git a/tests/test_5700_lob_var_async.py b/tests/test_5700_lob_var_async.py index b09b6beb..6757d728 100644 --- a/tests/test_5700_lob_var_async.py +++ b/tests/test_5700_lob_var_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,15 +26,11 @@ 5700 - Module for testing LOB (CLOB and BLOB) variables with asyncio """ -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def __get_temp_lobs(self, sid): cursor = self.conn.cursor() @@ -363,10 +359,7 @@ async def test_5714(self): "5714 - test operations on NCLOBs" await self.__test_lob_operations("NCLOB") - @unittest.skipIf( - test_env.get_is_implicit_pooling(), - "sessions can change with implicit pooling", - ) + @test_env.skip_if_implicit_pooling() async def test_5715(self): "5715 - test temporary LOBs" await self.cursor.execute( diff --git a/tests/test_5800_cursor_var_async.py b/tests/test_5800_cursor_var_async.py index 79bbfe6c..4047b077 100644 --- a/tests/test_5800_cursor_var_async.py +++ b/tests/test_5800_cursor_var_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,15 +26,11 @@ 5800 - Module for testing cursor variables with asyncio """ -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_5800(self): "5800 - test binding in a cursor" diff --git a/tests/test_5900_dml_returning_async.py b/tests/test_5900_dml_returning_async.py index 6603b5ca..96e171cd 100644 --- a/tests/test_5900_dml_returning_async.py +++ b/tests/test_5900_dml_returning_async.py @@ -27,15 +27,12 @@ """ import datetime -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_5900(self): "5900 - test insert (single row) with DML returning" diff --git a/tests/test_6000_typehandler_async.py b/tests/test_6000_typehandler_async.py index 9a423d27..b13ba872 100644 --- a/tests/test_6000_typehandler_async.py +++ b/tests/test_6000_typehandler_async.py @@ -28,7 +28,6 @@ import datetime import json -import unittest import oracledb import test_env @@ -61,9 +60,7 @@ def from_json(cls, value): return cls(**result) -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): def building_in_converter(self, value): return value.to_json() @@ -232,7 +229,7 @@ def output_type_handler(cursor, metadata): expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] self.assertEqual(await self.cursor.fetchall(), expected_data) - @unittest.skipUnless(test_env.has_server_version(21), "unsupported server") + @test_env.skip_unless_native_json_supported() async def test_6006(self): "6006 - output type handler for fetching 21c JSON" diff --git a/tests/test_6100_cursor_executemany_async.py b/tests/test_6100_cursor_executemany_async.py index 8b4d35f5..1fbea095 100644 --- a/tests/test_6100_cursor_executemany_async.py +++ b/tests/test_6100_cursor_executemany_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,15 +27,12 @@ """ import decimal -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_6100(self): "6100 - test executing a statement multiple times (named args)" diff --git a/tests/test_6200_cursor_callproc_async.py b/tests/test_6200_cursor_callproc_async.py index 025f3884..f539a456 100644 --- a/tests/test_6200_cursor_callproc_async.py +++ b/tests/test_6200_cursor_callproc_async.py @@ -27,15 +27,11 @@ functions (callproc() and callfunc()) with asyncio """ -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_6200(self): "6200 - test executing a stored procedure" diff --git a/tests/test_6300_cursor_other_async.py b/tests/test_6300_cursor_other_async.py index 64937149..d2667f20 100644 --- a/tests/test_6300_cursor_other_async.py +++ b/tests/test_6300_cursor_other_async.py @@ -26,15 +26,11 @@ 6300 - Module for testing other cursor methods and attributes with asyncio. """ -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_6300(self): "6300 - test preparing a statement and executing it multiple times" @@ -573,7 +569,7 @@ def type_handler(cursor, metadata): expected_data = [("A", 2, 3)] * 3 self.assertEqual(await self.cursor.fetchall(), expected_data) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_6335(self): "6335 - kill connection with open cursor" admin_conn = await test_env.get_admin_connection_async() @@ -588,7 +584,7 @@ async def test_6335(self): await cursor.execute("select user from dual") self.assertFalse(conn.is_healthy()) - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_6336(self): "6336 - kill connection in cursor context manager" admin_conn = await test_env.get_admin_connection_async() @@ -803,9 +799,7 @@ async def test_6346(self): fetched_data = [(n, await c.read()) async for n, c in self.cursor] self.assertEqual(fetched_data, data) - @unittest.skipUnless( - test_env.has_server_version(23), "unsupported database" - ) + @test_env.skip_unless_domains_supported() async def test_6347(self): "6347 - fetch table with domain and annotations" await self.cursor.execute( @@ -913,6 +907,12 @@ async def test_6352(self): nested_rows, [("Nested String for Top Level String 2",)] ) + async def test_6353(self): + "6353 - access cursor.rowcount after closing connection" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + self.assertEqual(cursor.rowcount, -1) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_6400_vector_var.py b/tests/test_6400_vector_var.py index 4bdbc1be..f1e0bcc7 100644 --- a/tests/test_6400_vector_var.py +++ b/tests/test_6400_vector_var.py @@ -33,8 +33,7 @@ import test_env -@unittest.skipUnless(test_env.has_client_version(23, 4), "unsupported client") 
-@unittest.skipUnless(test_env.has_server_version(23, 4), "unsupported server") +@test_env.skip_unless_vectors_supported() class TestCase(test_env.BaseTestCase): def __test_insert_and_fetch(self, value, column_name, expected_typecode): """ diff --git a/tests/test_6600_defaults.py b/tests/test_6600_defaults.py index 2a408a35..e9078a19 100644 --- a/tests/test_6600_defaults.py +++ b/tests/test_6600_defaults.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,7 +32,6 @@ import oracledb import test_env -import unittest class TestCase(test_env.BaseTestCase): @@ -198,10 +197,7 @@ def test_6614(self): "6614 - test setting defaults.osuser attribute" self.__verify_network_name_attr("osuser") - @unittest.skipUnless( - test_env.get_is_thin(), - "thick mode doesn't support program yet", - ) + @test_env.skip_unless_thin_mode() def test_6615(self): "6615 - test program with two pools" default_value = "defaultprogram" diff --git a/tests/test_6700_json_23.py b/tests/test_6700_json_23.py index 58614a51..ef3365d7 100644 --- a/tests/test_6700_json_23.py +++ b/tests/test_6700_json_23.py @@ -27,14 +27,12 @@ """ import json -import unittest import oracledb import test_env -@unittest.skipUnless(test_env.has_client_version(23), "unsupported client") -@unittest.skipUnless(test_env.has_server_version(23), "unsupported server") +@test_env.skip_unless_native_json_extensions_supported() class TestCase(test_env.BaseTestCase): def __test_fetch_json(self, value, table_name="TestJson"): """ diff --git a/tests/test_6800_error_async.py b/tests/test_6800_error_async.py index dda09e96..79a765c1 100644 --- a/tests/test_6800_error_async.py +++ b/tests/test_6800_error_async.py @@ -27,15 +27,12 @@ """ import pickle -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_6800(self): "6800 - test parse error returns offset correctly" @@ -207,7 +204,7 @@ async def test_6808(self): self.assertEqual(result.warning.full_code, "DPY-7000") await self.cursor.execute(f"drop procedure {proc_name}") - @unittest.skipIf(test_env.get_is_drcp(), "not supported with DRCP") + @test_env.skip_if_drcp() async def test_6809(self): "6809 - error from killed connection is deemed recoverable" admin_conn = await test_env.get_admin_connection_async() diff --git a/tests/test_6900_oson.py b/tests/test_6900_oson.py index 4120af9a..767e3197 100644 --- a/tests/test_6900_oson.py +++ b/tests/test_6900_oson.py @@ -26,14 +26,11 @@ 6900 - Module for testing OSON encoding and decoding. 
""" -import unittest - import oracledb import test_env -@unittest.skipUnless(test_env.has_client_version(21), "unsupported client") -@unittest.skipUnless(test_env.has_server_version(21), "unsupported server") +@test_env.skip_unless_native_json_supported() class TestCase(test_env.BaseTestCase): def test_6900(self): "6900 - test OSON metadata" diff --git a/tests/test_7000_connection_async_shortcut_methods.py b/tests/test_7000_connection_async_shortcut_methods.py index 1e87e228..b8c7ce33 100644 --- a/tests/test_7000_connection_async_shortcut_methods.py +++ b/tests/test_7000_connection_async_shortcut_methods.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,15 +26,11 @@ 7000 - Module for testing async connections shortcut methods """ -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_7000(self): "7000 - test execute() and fetchall()" diff --git a/tests/test_7300_unsupported_features_thin.py b/tests/test_7300_unsupported_features_thin.py index d421362f..d7c4b6bd 100644 --- a/tests/test_7300_unsupported_features_thin.py +++ b/tests/test_7300_unsupported_features_thin.py @@ -26,13 +26,11 @@ 7300 - Module for testing unsupported features in Thin mode """ -import unittest - import oracledb import test_env -@unittest.skipUnless(test_env.get_is_thin(), "only relevant in thin mode") +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseTestCase): def test_7300(self): diff --git a/tests/test_7400_tpc_async.py b/tests/test_7400_tpc_async.py index 0cd707cf..d2b50c70 100644 --- a/tests/test_7400_tpc_async.py +++ b/tests/test_7400_tpc_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,15 +26,11 @@ 7400 - Module for testing TPC (two-phase commit) transactions with asyncio. 
""" -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_7400(self): "7400 - test begin, prepare, roll back global transaction" diff --git a/tests/test_7500_binary_vector.py b/tests/test_7500_binary_vector.py index 9be1a80c..625a093d 100644 --- a/tests/test_7500_binary_vector.py +++ b/tests/test_7500_binary_vector.py @@ -28,14 +28,12 @@ """ import array -import unittest import oracledb import test_env -@unittest.skipUnless(test_env.has_client_version(23, 5), "unsupported client") -@unittest.skipUnless(test_env.has_server_version(23, 5), "unsupported server") +@test_env.skip_unless_binary_vectors_supported() class TestCase(test_env.BaseTestCase): def test_7500(self): diff --git a/tests/test_7600_pipelining_async.py b/tests/test_7600_pipelining_async.py index 9d776864..ddddffc1 100644 --- a/tests/test_7600_pipelining_async.py +++ b/tests/test_7600_pipelining_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,7 +26,6 @@ 7600 - Module for testing async pipelining. """ -import unittest import datetime import decimal @@ -34,9 +33,7 @@ import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_7600(self): diff --git a/tests/test_7700_sparse_vector.py b/tests/test_7700_sparse_vector.py index bc52d3d6..5b706d8c 100644 --- a/tests/test_7700_sparse_vector.py +++ b/tests/test_7700_sparse_vector.py @@ -29,13 +29,11 @@ import array import json -import unittest import oracledb import test_env -@unittest.skipUnless(test_env.has_client_version(23, 7), "unsupported client") -@unittest.skipUnless(test_env.has_server_version(23, 7), "unsupported client") +@test_env.skip_unless_sparse_vectors_supported() class TestCase(test_env.BaseTestCase): def __test_insert_and_fetch(self, vector, column_name, expected_typecode): """ diff --git a/tests/test_7900_aq_raw_async.py b/tests/test_7900_aq_raw_async.py index 90621c51..0772a80c 100644 --- a/tests/test_7900_aq_raw_async.py +++ b/tests/test_7900_aq_raw_async.py @@ -26,15 +26,11 @@ 7900 - Module for testing AQ with raw queues with asyncio """ -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): raw_data = [ b"sample raw data 1", diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 369b7ce3..0054f54d 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -29,7 +29,6 @@ import array import datetime import decimal -import unittest import oracledb @@ -626,8 +625,7 @@ def test_8025(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") - @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") + @test_env.skip_unless_native_boolean_supported() def test_8026(self): "8026 - fetch 
boolean" data = [(True,), (False,), (False,), (True,), (True,)] @@ -767,12 +765,7 @@ def test_8029(self): self.assertIsNotNone(buffers["offsets"]) self.assertIsNotNone(buffers["validity"]) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() def test_8030(self): "8030 - fetch float32 vector" @@ -806,12 +799,7 @@ def test_8030(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() def test_8031(self): "8031 - fetch float64 vector" data = [ @@ -837,12 +825,7 @@ def test_8031(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() def test_8032(self): "8032 - fetch int8 vector" data = [ @@ -868,12 +851,7 @@ def test_8032(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() def test_8033(self): "8033 - fetch binary vector" data = [ @@ -899,12 +877,7 @@ def test_8033(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() def test_8034(self): "8034 - fetch float32 vectors with None" data = [ @@ -933,12 +906,7 @@ def test_8034(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() def test_8035(self): "8035 - fetch duplicate float64 vectors" data = [ @@ -994,12 +962,7 @@ def test_8035(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 7), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 7), "unsupported server" - ) + @test_env.skip_unless_sparse_vectors_supported() def test_8036(self): "8036 - fetch float32 sparse vectors" data = [ @@ -1048,12 +1011,7 @@ def test_8036(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 7), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 7), "unsupported server" - ) + @test_env.skip_unless_sparse_vectors_supported() def test_8037(self): "8037 - fetch float64 sparse vectors" data = [ @@ -1102,12 +1060,7 @@ def test_8037(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, 
self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() def test_8038(self): "8038 - DPY-3031 - Unsupported flexible vector formats" with self.assertRaisesFullCode("DPY-3031"): @@ -1119,12 +1072,7 @@ def test_8038(self): """ ) - @unittest.skipUnless( - test_env.has_client_version(23, 7), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 7), "unsupported server" - ) + @test_env.skip_unless_sparse_vectors_supported() def test_8039(self): "8039 - DPY-4007 -fetch sparse vectors with flexible dimensions" self.__check_interop() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 2737d9d2..964a5d5a 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -29,7 +29,6 @@ import array import datetime import decimal -import unittest import oracledb @@ -237,9 +236,7 @@ ] -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): def __check_interop(self): @@ -580,7 +577,7 @@ async def test_8121(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - @unittest.skipUnless(test_env.has_server_version(23), "unsupported server") + @test_env.skip_unless_native_boolean_supported() async def test_8122(self): "8122 - fetch boolean" data = [(True,), (False,), (False,), (True,), (True,)] @@ -605,12 +602,7 @@ async def test_8122(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 4), "unsupported server" - ) + @test_env.skip_unless_vectors_supported() async def test_8123(self): "8123 - fetch float32 vector" data = [ @@ -638,12 +630,7 @@ async def test_8123(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - @unittest.skipUnless( - test_env.has_client_version(23, 7), "unsupported client" - ) - @unittest.skipUnless( - test_env.has_server_version(23, 7), "unsupported server" - ) + @test_env.skip_unless_sparse_vectors_supported() async def test_8124(self): "8124 - fetch float64 sparse vectors" data = [ diff --git a/tests/test_8200_aq_bulk_async.py b/tests/test_8200_aq_bulk_async.py index 3b47ff5f..14b8f78d 100644 --- a/tests/test_8200_aq_bulk_async.py +++ b/tests/test_8200_aq_bulk_async.py @@ -27,8 +27,6 @@ """ import datetime -import threading -import unittest import oracledb import test_env @@ -63,9 +61,7 @@ ] -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def __deq_in_thread(self, results): async with test_env.get_connection_async() as conn: @@ -100,25 +96,8 @@ async def test_8201(self): await self.conn.commit() self.assertEqual(messages, []) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" - ) async def test_8202(self): - "8202 - test bulk dequeue with wait" - queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) - results = [] - thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) - thread.start() - messages = [ - 
self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA - ] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - await queue.enqmany(messages) - thread.join() - self.assertEqual(results, RAW_PAYLOAD_DATA) - - async def test_8203(self): - "8203 - test enqueue and dequeue multiple times" + "8202 - test enqueue and dequeue multiple times" queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) data_to_enqueue = RAW_PAYLOAD_DATA for num in (2, 6, 4): @@ -136,48 +115,15 @@ async def test_8203(self): await self.conn.commit() self.assertEqual(all_data, RAW_PAYLOAD_DATA) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" - ) - async def test_8204(self): - "8204 - test visibility option for enqueue and dequeue" - queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) - - # first test with ENQ_ON_COMMIT (commit required) - queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT - props1 = self.conn.msgproperties(payload="A first message") - props2 = self.conn.msgproperties(payload="A second message") - await queue.enqmany([props1, props2]) - async with test_env.get_connection_async() as other_conn: - other_queue = other_conn.queue(RAW_QUEUE_NAME) - other_queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - other_queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - messages = await other_queue.deqmany(5) - self.assertEqual(len(messages), 0) - await self.conn.commit() - messages = await other_queue.deqmany(5) - self.assertEqual(len(messages), 2) - await other_conn.rollback() - - # second test with ENQ_IMMEDIATE (no commit required) - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - other_queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.enqmany([props1, props2]) - messages = await other_queue.deqmany(5) - self.assertEqual(len(messages), 4) - await other_conn.rollback() - messages = await other_queue.deqmany(5) - self.assertEqual(len(messages), 0) - - async def test_8205(self): - "8205 - test error for messages with no payload" + async def test_8203(self): + "8203 - test error for messages with no payload" queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) messages = [self.conn.msgproperties() for _ in RAW_PAYLOAD_DATA] with self.assertRaisesFullCode("DPY-2000"): await queue.enqmany(messages) - async def test_8206(self): - "8206 - verify that the msgid property is returned correctly" + async def test_8204(self): + "8204 - verify that the msgid property is returned correctly" queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) messages = [ self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA @@ -191,8 +137,8 @@ async def test_8206(self): msgids = set(message.msgid for message in messages) self.assertEqual(msgids, actual_msgids) - async def test_8207(self): - "4800 - test enqueuing and dequeuing JSON message" + async def test_8205(self): + "8205 - test enqueuing and dequeuing JSON message" queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") props = [ self.conn.msgproperties(payload=data) for data in JSON_DATA_PAYLOAD @@ -204,15 +150,15 @@ async def test_8207(self): actual_data = [message.payload for message in messages] self.assertEqual(actual_data, JSON_DATA_PAYLOAD) - async def test_8208(self): - "8208 - test enqueuing to a JSON queue without a JSON payload" + async def test_8206(self): + "8206 - test enqueuing to a JSON queue without a JSON payload" queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") props = self.conn.msgproperties(payload="string message") with 
self.assertRaisesFullCode("DPY-2062"): await queue.enqmany([props, props]) - async def test_8209(self): - "8209 - test errors for invalid values for enqmany and deqmany" + async def test_8207(self): + "8207 - test errors for invalid values for enqmany and deqmany" queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") props = self.conn.msgproperties(payload="string message") with self.assertRaises(TypeError): diff --git a/tests/test_8400_aq_dbobject_async.py b/tests/test_8400_aq_dbobject_async.py index 018c58d8..3d46f8fc 100644 --- a/tests/test_8400_aq_dbobject_async.py +++ b/tests/test_8400_aq_dbobject_async.py @@ -27,15 +27,12 @@ """ import decimal -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): book_type_name = "UDT_BOOK" book_queue_name = "TEST_BOOK_QUEUE" diff --git a/tests/test_8500_aq_json_async.py b/tests/test_8500_aq_json_async.py index 9375d4bb..0d640be0 100644 --- a/tests/test_8500_aq_json_async.py +++ b/tests/test_8500_aq_json_async.py @@ -35,9 +35,7 @@ import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): json_queue_name = "TEST_JSON_QUEUE" json_data = [ diff --git a/tests/test_8600_cursor_scrollable_async.py b/tests/test_8600_cursor_scrollable_async.py index f99d8249..fbb9c592 100644 --- a/tests/test_8600_cursor_scrollable_async.py +++ b/tests/test_8600_cursor_scrollable_async.py @@ -26,14 +26,10 @@ 8600 - Module for testing scrollable cursors with asyncio """ -import unittest - import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): async def test_8600(self): "8600 - test creating a scrollable cursor" diff --git a/tests/test_env.py b/tests/test_env.py index f6243dc2..9ccda834 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -490,6 +490,84 @@ def skip_soda_tests(): return False +def skip_if_drcp(): + return unittest.skipIf(get_is_drcp(), "not supported with DRCP") + + +def skip_if_implicit_pooling(): + return unittest.skipIf( + get_is_implicit_pooling(), "not supported with implicit pooling" + ) + + +def skip_unless_binary_vectors_supported(): + supported = has_client_version(23, 5) and has_server_version(23, 5) + return unittest.skipUnless(supported, "no binary vector support") + + +def skip_unless_call_timeout_supported(): + supported = has_client_version(18) + return unittest.skipUnless(supported, "no call timeout support") + + +def skip_unless_domains_supported(): + supported = has_server_version(23) + return unittest.skipUnless(supported, "no domain support") + + +def skip_unless_json_supported(): + supported = has_client_version(12, 2) and has_server_version(12, 2) + return unittest.skipUnless(supported, "no JSON support") + + +def skip_unless_long_passwords_supported(): + supported = has_server_version(23) + return unittest.skipUnless(supported, "no long password support") + + +def skip_unless_native_boolean_supported(): + supported = has_client_version(23) and has_server_version(23) + return unittest.skipUnless(supported, "no native boolean support") + + +def skip_unless_native_json_extensions_supported(): + supported = has_client_version(23) and has_server_version(23) + return unittest.skipUnless(supported, "no native JSON 
extensions support") + + +def skip_unless_native_json_supported(): + supported = has_client_version(21) and has_server_version(21) + return unittest.skipUnless(supported, "no native JSON support") + + +def skip_unless_plsql_boolean_supported(): + supported = has_client_version(12, 1) and has_server_version(12, 1) + return unittest.skipUnless(supported, "no PL/SQL boolean support") + + +def skip_unless_pool_timed_wait_supported(): + supported = has_client_version(12, 2) and has_server_version(12, 2) + return unittest.skipUnless(supported, "no pool timed wait support") + + +def skip_unless_sparse_vectors_supported(): + supported = has_client_version(23, 7) and has_server_version(23, 7) + return unittest.skipUnless(supported, "no sparse vector support") + + +def skip_unless_thick_mode(): + return unittest.skipIf(get_is_thin(), "requires thick mode") + + +def skip_unless_thin_mode(): + return unittest.skipUnless(get_is_thin(), "requires thin mode") + + +def skip_unless_vectors_supported(): + supported = has_client_version(23, 4) and has_server_version(23, 4) + return unittest.skipUnless(supported, "no vector support") + + class DefaultsContextManager: def __init__(self, attribute, desired_value): self.attribute = attribute From b63b71c1225a7d2d8418b1f74ba52c3c04f66a86 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:42:23 -0600 Subject: [PATCH 126/239] Fixed bug in the calculation of attribute MessageProperties.deliverymode. Previously it was being set to the value of the attribute DeqOptions.deliverymode. --- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/thin/messages/aq_array.pyx | 3 --- src/oracledb/impl/thin/messages/aq_base.pyx | 6 +++++- src/oracledb/impl/thin/queue.pyx | 1 - tests/test_2700_aq_dbobject.py | 3 +++ tests/test_8400_aq_dbobject_async.py | 3 +++ 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e2fa0dcd..f0a19298 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -21,6 +21,9 @@ Thin Mode Changes thick mode`` is now raised when attempting to use session tagging with a connection pool. Previously a ``NotImplementedError`` exception was raised instead. +#) Fixed bug in the calculation of attribute + :attr:`MessageProperties.deliverymode`. Previously it was being set to the + value of the attribute :attr:`DeqOptions.deliverymode`. 
Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/messages/aq_array.pyx b/src/oracledb/impl/thin/messages/aq_array.pyx index 8158b3f0..ebad7f69 100644 --- a/src/oracledb/impl/thin/messages/aq_array.pyx +++ b/src/oracledb/impl/thin/messages/aq_array.pyx @@ -69,9 +69,6 @@ cdef class AqArrayMessage(AqBaseMessage): props_impl.msgid = msgid[j * 16:(j + 1) * 16] else: props_impl.msgid = msgid - props_impl.delivery_mode = ( - self.deq_options_impl.delivery_mode - ) buf.read_ub2(&temp16) # extensions len if temp16 > 0: errors._raise_err(errors.ERR_NOT_IMPLEMENTED) diff --git a/src/oracledb/impl/thin/messages/aq_base.pyx b/src/oracledb/impl/thin/messages/aq_base.pyx index f1a31cf5..dd5ff2b5 100644 --- a/src/oracledb/impl/thin/messages/aq_base.pyx +++ b/src/oracledb/impl/thin/messages/aq_base.pyx @@ -121,7 +121,11 @@ cdef class AqBaseMessage(Message): errors._raise_err(errors.ERR_NOT_IMPLEMENTED) buf.skip_ub4() # csn buf.skip_ub4() # dsn - buf.skip_ub4() # flags + buf.read_ub4(&temp32) # flags + if temp32 == TNS_KPD_AQ_BUFMSG: + props_impl.delivery_mode = TNS_AQ_MSG_BUFFERED + else: + props_impl.delivery_mode = TNS_AQ_MSG_PERSISTENT if buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_21_1: buf.skip_ub4() # shard number diff --git a/src/oracledb/impl/thin/queue.pyx b/src/oracledb/impl/thin/queue.pyx index 61cb3b38..904b2d0e 100644 --- a/src/oracledb/impl/thin/queue.pyx +++ b/src/oracledb/impl/thin/queue.pyx @@ -74,7 +74,6 @@ cdef class BaseThinQueueImpl(BaseQueueImpl): message = self._conn_impl._create_message(AqDeqMessage) message.queue_impl = self message.deq_options_impl = self.deq_options_impl - props_impl.delivery_mode = message.deq_options_impl.delivery_mode message.props_impl = props_impl return message diff --git a/tests/test_2700_aq_dbobject.py b/tests/test_2700_aq_dbobject.py index 23a0bd3b..f2899327 100644 --- a/tests/test_2700_aq_dbobject.py +++ b/tests/test_2700_aq_dbobject.py @@ -239,6 +239,7 @@ def test_2710(self): queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = queue.deqone() + self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) book = props.payload results = (book.TITLE, book.AUTHORS, book.PRICE) other_conn.commit() @@ -264,6 +265,7 @@ def test_2711(self): queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = queue.deqone() + self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) book = props.payload results = (book.TITLE, book.AUTHORS, book.PRICE) other_conn.commit() @@ -289,6 +291,7 @@ def test_2712(self): queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = queue.deqone() + self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) book = props.payload results = (book.TITLE, book.AUTHORS, book.PRICE) other_conn.commit() diff --git a/tests/test_8400_aq_dbobject_async.py b/tests/test_8400_aq_dbobject_async.py index 3d46f8fc..4a338b1c 100644 --- a/tests/test_8400_aq_dbobject_async.py +++ b/tests/test_8400_aq_dbobject_async.py @@ -209,6 +209,7 @@ async def test_8409(self): queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = await queue.deqone() + self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) book = props.payload results = (book.TITLE, book.AUTHORS, book.PRICE) await other_conn.commit() @@ -234,6 +235,7 @@ async def test_8410(self): queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = 
oracledb.DEQ_NO_WAIT props = await queue.deqone() + self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) book = props.payload results = (book.TITLE, book.AUTHORS, book.PRICE) await other_conn.commit() @@ -259,6 +261,7 @@ async def test_8411(self): queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = await queue.deqone() + self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) book = props.payload results = (book.TITLE, book.AUTHORS, book.PRICE) await other_conn.commit() From 28ff772c151c13adc9c7c0ef9d0003757af27464 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:44:41 -0600 Subject: [PATCH 127/239] Update token parameter requirement wording. --- doc/src/user_guide/connection_handling.rst | 89 ++++++++++++---------- 1 file changed, 49 insertions(+), 40 deletions(-) diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 9d2f9860..542c8240 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -3727,8 +3727,8 @@ Standalone connection example: # PROXY_USER: MYUSER # SESSION_USER: MYSESSIONUSER -You can also explicitly set the ``externalauth`` parameter to True in standalone -connections as shown below. The ``externalauth`` parameter is optional. +You can also set the ``externalauth`` parameter to *True* in standalone +connections: .. code-block:: python @@ -3738,7 +3738,7 @@ connections as shown below. The ``externalauth`` parameter is optional. # PROXY_USER: MYUSER # SESSION_USER: MYSESSIONUSER -Pooled connection example: +A connection pool example is: .. code-block:: python @@ -3949,8 +3949,8 @@ connect to Oracle Autonomous Database with mutual TLS (mTLS). See When using a class such as the :ref:`TokenHandlerOAuth class ` to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode, -you need to explicitly set the ``access_token``, ``config_dir``, -``wallet_location``, and ``wallet_password`` parameters of +you need to explicitly set the ``access_token``, and also any desired +``config_dir``, ``wallet_location``, and ``wallet_password`` parameters of :func:`~oracledb.connect`. For example: .. code:: python @@ -3966,9 +3966,10 @@ you need to explicitly set the ``access_token``, ``config_dir``, When using a class such as the :ref:`TokenHandlerOAuth class ` to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode, -you need to explicitly set the ``access_token``, ``homogeneous``, -``config_dir``, ``wallet_location``, and ``wallet_password`` parameters of -:func:`~oracledb.create_pool`. For example: +you need to explicitly set the ``access_token`` parameter of +:func:`~oracledb.create_pool`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous`` +parameter must be *True* (its default value). For example: .. code:: python @@ -4004,15 +4005,16 @@ parameters of :func:`~oracledb.connect`. For example: When using a class such as the :ref:`TokenHandlerOAuth class ` to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thick mode, -you need to explicitly set the ``access_token``, ``externalauth``, and -``homogeneous`` parameters of :func:`~oracledb.create_pool`. For example: +you need to explicitly set the ``access_token`` and ``externalauth`` parameters +of :func:`~oracledb.create_pool`. The ``homogeneous`` parameter must be *True* +(which is its default value). For example: .. 
code:: python pool = oracledb.create_pool( access_token=TokenHandlerOAuth(), externalauth=True, # must always be True in Thick mode - homogeneous=True, # must always be True in connection pools + homogeneous=True, # must always be True for connection pools dsn=mydb_low, min=1, max=5, increment=2) Note that the ``access_token`` parameter should be set to a callable. This is @@ -4200,9 +4202,9 @@ Oracle Autonomous Database with mutual TLS (mTLS). See :ref:`autonomousdb`. When using the :ref:`azure_tokens ` plugin to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode, -you need to explicitly set the ``extra_auth_params``, ``config_dir``, -``wallet_location``, and ``wallet_password`` parameter of -:func:`~oracledb.connect`. For example: +you need to explicitly set the ``extra_auth_params`` parameter, and also any +required ``config_dir``, ``wallet_location``, and ``wallet_password`` +parameters of :func:`~oracledb.connect`. For example: .. code:: python @@ -4227,9 +4229,10 @@ you need to explicitly set the ``extra_auth_params``, ``config_dir``, When using the :ref:`azure_tokens ` plugin to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode, -you need to explicitly set the ``homogeneous``, ``extra_auth_params``, -``config_dir``, ``wallet_location``, and ``wallet_password`` parameters of -:func:`~oracledb.create_pool`. For example: +you need to explicitly set the ``extra_auth_params`` parameter of +:func:`~oracledb.create_pool`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous`` +parameter must be *True* (its default value). For example: .. code:: python @@ -4256,7 +4259,7 @@ you need to explicitly set the ``homogeneous``, ``extra_auth_params``, When using the :ref:`azure_tokens ` plugin to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thick mode, you need to explicitly set the ``extra_auth_params`` and ``externalauth`` -parameter of :func:`~oracledb.connect`. For example: +parameters of :func:`~oracledb.connect`. For example: .. code:: python @@ -4279,8 +4282,9 @@ parameter of :func:`~oracledb.connect`. For example: When using the :ref:`azure_tokens ` plugin to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thick mode, -you need to explicitly set the ``extra_auth_params``, ``externalauth``, and -``homogeneous`` parameters of :func:`~oracledb.create_pool`. +you need to explicitly set the ``extra_auth_params`` and ``externalauth`` +parameters of :func:`~oracledb.create_pool`. The ``homogeneous`` parameter must +be *True* (its default value). For example: .. code:: python @@ -4312,8 +4316,8 @@ issued by OCI IAM to authenticate to the Oracle Autonomous Database. Both Thin and Thick modes of the python-oracledb driver support OCI IAM token-based authentication. -When using python-oracledb in Thick mode, Oracle Client libraries 19.14 (or later), -or 21.5 (or later) are needed. +When using python-oracledb in Thick mode, Oracle Client libraries 19.14 (or +later), or 21.5 (or later) are needed. Standalone connections and pooled connections can be created in python-oracledb Thick and Thin modes using OCI IAM token-based authentication. This can be done @@ -4414,9 +4418,9 @@ to Oracle Autonomous Database with mutual TLS (mTLS). See :ref:`autonomousdb`. 
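The surrounding text notes that the ``access_token`` value is normally a callable so that
long-running applications can supply a fresh token whenever python-oracledb needs one. The
following is a minimal sketch only: the ``read_token_from_wallet()`` helper is hypothetical,
and ``mydb_low``, the wallet paths, and ``wp`` stand in for the same placeholders used in the
surrounding examples. For OCI IAM the callable returns a 2-tuple of the token and private key
strings.

.. code:: python

    # Minimal sketch of an access_token callable for OCI IAM authentication.
    # read_token_from_wallet() is a hypothetical helper standing in for
    # whatever mechanism the application uses to obtain a db-token and its
    # matching private key.
    def token_callback(refresh):
        # "refresh" is True when python-oracledb requires a new token, for
        # example because the previously supplied one has expired
        token, private_key = read_token_from_wallet(force_new=refresh)
        return (token, private_key)  # IAM tokens are passed as a 2-tuple

    connection = oracledb.connect(
        access_token=token_callback,
        dsn=mydb_low,
        config_dir="path_to_unzipped_wallet",
        wallet_location="location_of_pem_file",
        wallet_password=wp,
    )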
When using a class such as the :ref:`TokenHandlerIAM class ` to generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, -you need to explicitly set the ``access_token``, ``config_dir``, -``wallet_location``, and ``wallet_password`` parameters of -:func:`~oracledb.connect`. For example: +you need to explicitly set the ``access_token`` parameter of +:func:`~oracledb.connect`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. For example: .. code:: python @@ -4431,15 +4435,16 @@ you need to explicitly set the ``access_token``, ``config_dir``, When using a class such as :ref:`TokenHandlerIAM class ` to generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, -you need to explicitly set the ``access_token``, ``homogeneous``, -``config_dir``, ``wallet_location``, and ``wallet_password`` parameters of -:func:`~oracledb.create_pool`. For example: +you need to explicitly set the ``access_token`` parameter of +:func:`~oracledb.create_pool`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous`` +parameter must be *True* (its default value). For example: .. code:: python connection = oracledb.create_pool( access_token=TokenHandlerIAM(), - homogeneous=True, # must always be set to True for connection pools + homogeneous=True, # must always be True for connection pools dsn=mydb_low, config_dir="path_to_unzipped_wallet", wallet_location="location_of_pem_file", @@ -4469,15 +4474,16 @@ of :func:`~oracledb.connect`. For example: When using a class such as :ref:`TokenHandlerIAM class ` to generate OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, -you need to explicitly set the ``access_token``, ``externalauth``, and -``homogeneous`` parameters of :func:`oracledb.create_pool`. For example: +you need to explicitly set the ``access_token`` and ``externalauth`` parameters +of :func:`oracledb.create_pool`. The ``homogeneous`` parameter must be *True* +(its default value). For example: .. code:: python pool = oracledb.create_pool( access_token=TokenHandlerIAM(), externalauth=True, # must always be True in Thick mode - homogeneous=True, # must always be True in connection pools + homogeneous=True, # must always be True for connection pools dsn=mydb_low, min=1, max=5, increment=2) Note that the ``access_token`` parameter should be set to a callable. This is @@ -4683,8 +4689,9 @@ Oracle Autonomous Database with mutual TLS (mTLS). See :ref:`autonomousdb`. When using the :ref:`oci_tokens ` plugin to generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, you need -to explicitly set the ``config_dir``, ``wallet_location``, ``wallet_password`` -and ``extra_auth_params`` parameters of :func:`~oracledb.connect`. For example: +to explicitly set the ``extra_auth_params`` parameter of +:func:`~oracledb.connect`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. For example: .. code:: python @@ -4707,9 +4714,10 @@ and ``extra_auth_params`` parameters of :func:`~oracledb.connect`. For example: When using the :ref:`oci_tokens ` plugin to generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, you need -to explicitly set the ``config_dir``, ``homogeneous``, ``wallet_location``, -``wallet_password``, and ``extra_auth_params`` parameters of -:func:`~oracledb.create_pool`. 
For example: +to explicitly set the ``extra_auth_params`` parameter of +:func:`~oracledb.create_pool`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous`` +parameter must be *True* (its default value). For example: .. code:: python @@ -4761,9 +4769,10 @@ to explicitly set the ``externalauth`` and ``extra_auth_params`` parameters of **Connection Pools in Thick Mode Using OCI IAM Tokens** When using the :ref:`oci_tokens ` plugin to generate -OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, you -need to explicitly set the ``externalauth``, ``homogeneous``, and -``extra_auth_params`` parameters of :func:`~oracledb.create_pool`. For example: +OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, you need +to explicitly set the ``extra_auth_params`` and ``externalauth`` parameters of +:func:`~oracledb.create_pool`. The ``homogeneous`` parameter must be *True* +(its default value). For example: .. code:: python From fc8618a0f3c2127cfa946991d0b328b99deffd0a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:45:43 -0600 Subject: [PATCH 128/239] Fixed bug with detection of when a connection has been closed by the database without notification. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/packet.pyx | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index f0a19298..0fdf397e 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -24,6 +24,8 @@ Thin Mode Changes #) Fixed bug in the calculation of attribute :attr:`MessageProperties.deliverymode`. Previously it was being set to the value of the attribute :attr:`DeqOptions.deliverymode`. +#) Fixed bug with detection of when a connection has been closed by the + database without notification. Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index 77fff14e..a97b2437 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -230,7 +230,7 @@ cdef class ReadBuffer(Buffer): else: errors._raise_err(errors.ERR_UNSUPPORTED_INBAND_NOTIFICATION, err_num=self._pending_error_num) - elif self._transport is None: + elif self._transport is None or self._transport._transport is None: if self._pending_error_num == TNS_ERR_SESSION_SHUTDOWN: errors._raise_err(errors.ERR_CONNECTION_CLOSED) errors._raise_err(errors.ERR_NOT_CONNECTED) From 8229dd2b8bba0d0cabcdc7a83ee19b95f542ca6c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:48:09 -0600 Subject: [PATCH 129/239] Remove use of the DataFrame interchange protocol in favor of the Arrow PyCapsule interface; add top-level objects "DataFrame" and "ArrowArray" for consistency with the rest of the package. 
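Since fetched data frames now expose their data through the Arrow PyCapsule interface, a short
illustration of how they are typically consumed may help put the commit above in context. This
is a sketch only: it assumes an open ``connection``, an installed recent ``pyarrow`` package,
and a placeholder query; the ``fetch_df_all()`` call and ``Table.from_arrays()`` pattern follow
the data frame examples in the user guide.

.. code:: python

    # Sketch: convert an oracledb.DataFrame into a PyArrow Table. The
    # column_arrays() values are oracledb.ArrowArray objects implementing
    # __arrow_c_array__(), which recent pyarrow versions can consume.
    import pyarrow

    odf = connection.fetch_df_all("select id, name from mytable")

    table = pyarrow.Table.from_arrays(
        odf.column_arrays(), names=odf.column_names()
    )
    print(table.num_rows, table.num_columns)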
--- doc/src/api_manual/async_connection.rst | 4 +- doc/src/api_manual/connection.rst | 4 +- doc/src/api_manual/dataframe.rst | 143 +--- doc/src/release_notes.rst | 10 + doc/src/user_guide/dataframes.rst | 2 +- setup.cfg | 1 - setup.py | 34 +- src/oracledb/__init__.py | 8 +- src/oracledb/arrow_array.py | 103 +++ .../nanoarrow_bridge.pxd => arrow_impl.pxd} | 38 +- src/oracledb/arrow_impl.pyx | 42 + src/oracledb/base_impl.pxd | 12 +- src/oracledb/base_impl.pyx | 6 +- src/oracledb/dataframe.py | 99 +++ src/oracledb/errors.py | 2 + src/oracledb/impl/arrow/array.pyx | 403 ++++++++++ src/oracledb/impl/arrow/dataframe.pyx | 38 + .../arrow}/nanoarrow/nanoarrow.c | 0 .../arrow}/nanoarrow/nanoarrow.h | 0 src/oracledb/impl/arrow/utils.pyx | 304 ++++++++ src/oracledb/impl/base/converters.pyx | 12 +- src/oracledb/impl/base/cursor.pyx | 12 +- src/oracledb/impl/base/utils.pyx | 4 +- src/oracledb/impl/base/var.pyx | 6 +- src/oracledb/impl/thin/cursor.pyx | 4 +- src/oracledb/impl/thin/messages/base.pyx | 2 +- src/oracledb/impl/thin/var.pyx | 4 +- src/oracledb/interchange/__init__.py | 0 src/oracledb/interchange/buffer.py | 84 -- src/oracledb/interchange/column.py | 217 ------ src/oracledb/interchange/dataframe.py | 163 ---- src/oracledb/interchange/nanoarrow_bridge.pyx | 736 ------------------ src/oracledb/interchange/protocol.py | 538 ------------- src/oracledb/thin_impl.pyx | 4 +- tests/test_8000_dataframe.py | 226 +----- tests/test_8100_dataframe_async.py | 127 +-- 36 files changed, 1143 insertions(+), 2249 deletions(-) create mode 100644 src/oracledb/arrow_array.py rename src/oracledb/{interchange/nanoarrow_bridge.pxd => arrow_impl.pxd} (79%) create mode 100644 src/oracledb/arrow_impl.pyx create mode 100644 src/oracledb/dataframe.py create mode 100644 src/oracledb/impl/arrow/array.pyx create mode 100644 src/oracledb/impl/arrow/dataframe.pyx rename src/oracledb/{interchange => impl/arrow}/nanoarrow/nanoarrow.c (100%) rename src/oracledb/{interchange => impl/arrow}/nanoarrow/nanoarrow.h (100%) create mode 100644 src/oracledb/impl/arrow/utils.pyx delete mode 100644 src/oracledb/interchange/__init__.py delete mode 100644 src/oracledb/interchange/buffer.py delete mode 100644 src/oracledb/interchange/column.py delete mode 100644 src/oracledb/interchange/dataframe.py delete mode 100644 src/oracledb/interchange/nanoarrow_bridge.pyx delete mode 100644 src/oracledb/interchange/protocol.py diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 1bf156dc..cdcd4204 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -147,7 +147,7 @@ AsyncConnection Methods .. note:: - The data frame support in python-oracledb 3.2 is a pre-release and may + The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. .. versionadded:: 3.0.0 @@ -175,7 +175,7 @@ AsyncConnection Methods .. note:: - The data frame support in python-oracledb 3.2 is a pre-release and may + The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 2d70fef2..3164b937 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -140,7 +140,7 @@ Connection Methods .. note:: - The data frame support in python-oracledb 3.2 is a pre-release and may + The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. .. 
dbapimethodextension:: @@ -172,7 +172,7 @@ Connection Methods .. note:: - The data frame support in python-oracledb 3.2 is a pre-release and may + The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. .. dbapimethodextension:: diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index 8cd01dcf..90e22d20 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -13,7 +13,7 @@ from Oracle Database types to Arrow data types. .. note:: - The data frame support in python-oracledb 3.2 is a pre-release and may + The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. .. _oracledataframeobj: @@ -37,45 +37,25 @@ interface, giving access to the underlying Arrow array. OracleDataFrame Methods ----------------------- -The object implements the Python DataFrame Interchange Protocol `DataFrame API -Interface `__ - .. method:: OracleDataFrame.column_arrays() Returns a list of :ref:`OracleArrowArray ` objects, each containing a select list column. - This is an extension to the DataFrame Interchange Protocol. - .. method:: OracleDataFrame.column_names() Returns a list of the column names in the data frame. -.. method:: OracleDataFrame.get_chunks(n_chunks) - - Returns itself, since python-oracledb only uses one chunk. - .. method:: OracleDataFrame.get_column(i) - Returns an :ref:`OracleColumn ` object for the column + Returns an :ref:`OracleArrowArray ` object for the column at the given index ``i``. .. method:: OracleDataFrame.get_column_by_name(name) - Returns an :ref:`OracleColumn ` object for the column + Returns an :ref:`OracleArrowArray ` object for the column with the given name ``name``. -.. method:: OracleDataFrame.get_columns() - - Returns a list of :ref:`OracleColumn ` objects, one - object for each column in the data frame. - -.. method:: OracleDataFrame.num_chunks() - - Return the number of chunks the data frame consists of. - - This always returns 1. - .. method:: OracleDataFrame.num_columns() Returns the number of columns in the data frame. @@ -109,120 +89,3 @@ These are used for conversion to `PyArrow Tables :ref:`dataframeformat`. .. versionadded:: 3.0.0 - -.. _oraclecolumnobj: - -OracleColumn Objects -==================== - -OracleColumn objects are returned by :meth:`OracleDataFrame.get_column()`, -:meth:`OracleDataFrame.get_column_by_name()`, and -:meth:`OracleDataFrame.get_columns()`. - -.. versionadded:: 3.0.0 - -.. _oraclecolumnmeth: - -OracleColumn Methods --------------------- - -.. method:: OracleColumn.get_buffers() - - Returns a dictionary containing the underlying buffers. - - The returned dictionary contains the ``data``, ``validity``, and ``offset`` - keys. - - The ``data`` attribute is a two-element tuple whose first element is a - buffer containing the data and whose second element is the data buffer's - associated dtype. - - The ``validity`` attribute is a a two-element tuple whose first element - is a buffer containing mask values indicating missing data and whose - second element is the mask value buffer's associated dtype. The value of - this attribute is *None* if the null representation is not a bit or byte - mask. - - The ``offset`` attribute is a two-element tuple whose first element is a - buffer containing the offset values for variable-size binary data (for - example, variable-length strings) and whose second element is the offsets - buffer's associated dtype. 
The value of this attribute is *None* if the - data buffer does not have an associated offsets buffer. - -.. method:: OracleColumn.get_chunks(n_chunks) - - Returns itself, since python-oracledb only uses one chunk. - -.. method:: OracleColumn.num_chunks() - - Returns the number of chunks the column consists of. - - This always returns 1. - -.. method:: OracleColumn.size() - - Returns the number of rows in the column. - -.. _oraclecolumnattr: - -OracleColumn Attributes ------------------------ - -.. attribute:: OracleColumn.describe_null - - This read-only property returns the description of the null representation - that the column uses. - -.. attribute:: OracleColumn.dtype - - This read-only attribute returns the Dtype description as a tuple - containing the values for the attributes ``kind``, ``bit-width``, - ``format string``, and ``endianess``. - - The ``kind`` attribute specifies the type of the data. - - The ``bit-width`` attribute specifies the number of bits as an integer. - - The ``format string`` attribute specifies the data type description format - string in Apache Arrow C Data Interface format. - - The ``endianess`` attribute specifies the byte order of the data type. - Currently, only native endianess is supported. - -.. attribute:: OracleColumn.metadata - - This read-only attribute returns the metadata for the column as a - dictionary with string keys. - -.. attribute:: OracleColumn.null_count - - This read-only attribute returns the number of null row values, if known. - -.. attribute:: OracleColumn.offset - - This read-only attribute specifies the offset of the first row. - -.. _oraclecolumnbufferobj: - -OracleColumnBuffer Objects -========================== - -A buffer object backed by an ArrowArray consisting of a single chunk. - -This is an internal class used for conversion to third party data frames. - -.. versionadded:: 3.0.0 - -.. _oraclecolumnbufferattr: - -OracleColumnBuffer Attributes ------------------------------ - -.. attribute:: OracleColumnBuffer.bufsize - - This read-only property returns the buffer size in bytes. - -.. attribute:: OracleColumnBuffer.ptr - - This read-only attribute specifies the pointer to the start of the buffer - as an integer. diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 0fdf397e..44eb4d57 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -33,6 +33,16 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Changes to :ref:`data frame ` support: + + - Remove use of the DataFrame Interchange Protocol in + :ref:`OracleDataFrames `. + - Documentation on methods and attributes on the ``DataFrame`` and + ``ArrowArray`` objects are now available in Python plugins such as those + found in VS Code + + Note the data frame support in python-oracledb 3.3 is a pre-release, and + may change in a future version oracledb `3.2.0 `__ (June 2025) -------------------------------------------------------------------------------------------------- diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index 3c1b35ff..b500b51c 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -18,7 +18,7 @@ frame objects of other libraries. .. note:: - The data frame support in python-oracledb 3.2 is a pre-release and may + The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. 
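The reworked ``DataFrame`` object can also be navigated directly without converting it to
another library's format. A minimal sketch follows, reusing the ``odf`` data frame from the
earlier conversion example; the column name ``"NAME"`` is a placeholder. As the ``dataframe.py``
code added later in this patch shows, ``get_column_by_name()`` raises ``KeyError`` for an
unknown name and ``get_column()`` raises ``IndexError`` for an out-of-range index.

.. code:: python

    # Sketch: inspect individual columns of a fetched oracledb.DataFrame.
    print(odf.num_columns(), odf.num_rows())

    col = odf.get_column_by_name("NAME")      # placeholder column name
    print(col.name, col.num_rows, col.null_count, col.dtype)

    first = odf.get_column(0)                 # IndexError if out of range
    print(len(first))                         # __len__ returns num_rows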
**Fetching Data Frames** diff --git a/setup.cfg b/setup.cfg index feddd7f1..39864d29 100644 --- a/setup.cfg +++ b/setup.cfg @@ -44,7 +44,6 @@ test_suite = tests packages = oracledb oracledb.plugins - oracledb.interchange package_dir = =src diff --git a/setup.py b/setup.py index 9e753737..3718bf38 100644 --- a/setup.py +++ b/setup.py @@ -31,13 +31,18 @@ # base source directory source_dir = os.path.join("src", "oracledb") -# determine the nanoarrow bridge dependent source files (included) -base_dir = os.path.join(source_dir, "interchange") -nanoarrow_bridge_depends = [ - os.path.join(base_dir, "nanoarrow", "nanoarrow.c"), - os.path.join(base_dir, "nanoarrow", "nanoarrow.h"), +# determine the Arrow dependent source files (included) +impl_dir = os.path.join(source_dir, "impl", "arrow") +nanoarrow_include_dir = os.path.join(impl_dir, "nanoarrow") +arrow_depends = [ + os.path.join(impl_dir, n) + for n in sorted(os.listdir(impl_dir)) + if n.endswith(".pyx") or n.endswith(".pxi") or n.endswith(".pxd") ] -nanoarrow_bridge_pxd = os.path.join(base_dir, "nanoarrow_bridge.pxd") +arrow_depends.append(os.path.join(nanoarrow_include_dir, "nanoarrow.c")) +arrow_depends.append(os.path.join(nanoarrow_include_dir, "nanoarrow.h")) +arrow_pxd = os.path.join(source_dir, "arrow_impl.pxd") +arrow_depends.append(arrow_pxd) # determine the base implementation dependent source files (included) impl_dir = os.path.join(source_dir, "impl", "base") @@ -47,7 +52,7 @@ if n.endswith(".pyx") ] base_pxd = os.path.join(source_dir, "base_impl.pxd") -base_depends.extend([base_pxd, nanoarrow_bridge_pxd]) +base_depends.extend([base_pxd, arrow_pxd]) # determine the thick mode dependent source files (included) impl_dir = os.path.join(source_dir, "impl", "thick") @@ -77,6 +82,7 @@ ] thin_depends.append(base_pxd) + # if the platform is macOS: # - target the minimum OS version that current Python packages work with. # (Use 'otool -l /path/to/python' and look for 'version' in the @@ -99,14 +105,14 @@ Extension( "oracledb.base_impl", sources=["src/oracledb/base_impl.pyx"], - include_dirs=["src/oracledb/interchange/nanoarrow"], + include_dirs=[nanoarrow_include_dir], depends=base_depends, extra_compile_args=extra_compile_args, ), Extension( "oracledb.thin_impl", sources=["src/oracledb/thin_impl.pyx"], - include_dirs=["src/oracledb/interchange/nanoarrow"], + include_dirs=[nanoarrow_include_dir], depends=thin_depends, extra_compile_args=extra_compile_args, ), @@ -115,16 +121,16 @@ sources=["src/oracledb/thick_impl.pyx"], include_dirs=[ "src/oracledb/impl/thick/odpi/include", - "src/oracledb/interchange/nanoarrow", + nanoarrow_include_dir, ], depends=thick_depends, extra_compile_args=extra_compile_args, ), Extension( - "oracledb.interchange.nanoarrow_bridge", - sources=["src/oracledb/interchange/nanoarrow_bridge.pyx"], - include_dirs=["src/oracledb/interchange/nanoarrow"], - depends=nanoarrow_bridge_depends, + "oracledb.arrow_impl", + sources=["src/oracledb/arrow_impl.pyx"], + include_dirs=[nanoarrow_include_dir], + depends=arrow_depends, extra_compile_args=extra_compile_args, ), ] diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index f15a5a79..ab3608ff 100644 --- a/src/oracledb/__init__.py +++ b/src/oracledb/__init__.py @@ -316,8 +316,12 @@ SparseVector as SparseVector, ) -from .interchange.dataframe import ( - OracleDataFrame as OracleDataFrame, +from .arrow_array import ( + ArrowArray as ArrowArray, +) + +from .dataframe import ( + DataFrame as DataFrame, ) from . 
import builtin_hooks diff --git a/src/oracledb/arrow_array.py b/src/oracledb/arrow_array.py new file mode 100644 index 00000000..d1afeece --- /dev/null +++ b/src/oracledb/arrow_array.py @@ -0,0 +1,103 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# arrow_array.py +# +# Implement an ArrowArray that is used for efficiently transferring Arrow +# array data to other data frame libraries. +# ----------------------------------------------------------------------------- + +from . import errors + + +class ArrowArray: + _impl = None + + def __init__(self): + errors._raise_err(errors.ERR_INTERNAL_CREATION_REQUIRED) + + def __len__(self): + return self.num_rows + + def __repr__(self): + return ( + f"ArrowArray(name={self.name}, " + f"len={self.num_rows}, " + f"type={self.dtype})" + ) + + def __str__(self): + return self.__repr__() + + @classmethod + def _from_impl(cls, impl): + array = cls.__new__(cls) + array._impl = impl + return array + + def __arrow_c_array__(self, requested_schema=None): + """ + Returns a tuple containing an ArrowSchema and ArrowArray PyCapsules. + """ + if requested_schema is not None: + raise NotImplementedError("requested_schema") + return ( + self._impl.get_schema_capsule(), + self._impl.get_array_capsule(), + ) + + def __arrow_c_schema__(self): + """ + Returns an ArrowSchema PyCapsule. + """ + return self._impl.get_schema_capsule() + + @property + def dtype(self) -> str: + """ + Returns the data type associated with the array. + """ + return self._impl.get_data_type() + + @property + def name(self) -> str: + """ + Returns the name associated with the array. + """ + return self._impl.get_name() + + @property + def null_count(self) -> int: + """ + Returns the number of rows that contain null values. + """ + return self._impl.get_null_count() + + @property + def num_rows(self) -> int: + """ + Returns the number of rows in the array. 
+ """ + return self._impl.get_num_rows() diff --git a/src/oracledb/interchange/nanoarrow_bridge.pxd b/src/oracledb/arrow_impl.pxd similarity index 79% rename from src/oracledb/interchange/nanoarrow_bridge.pxd rename to src/oracledb/arrow_impl.pxd index 4a03523d..bbb84cce 100644 --- a/src/oracledb/interchange/nanoarrow_bridge.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -23,10 +23,10 @@ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ -# nanoarrow_bridge.pxd +# arrow_impl.pxd # -# Cython definition file declaring the classes used for bridging between the -# nanoarrow C interface and Python. +# Cython definition file declaring the classes used for implementing the Arrow +# interface. #------------------------------------------------------------------------------ # cython: language_level = 3 @@ -77,40 +77,32 @@ cdef extern from "nanoarrow.h": NANOARROW_TIME_UNIT_NANO -cdef class OracleArrowArray: - """ - OracleArrowArray corresponds to a Column in the Relational model - - It uses functions defined in the Arrow C Data Interface - to work with Arrow buffers and incrementally append values - - The only user-facing API in this object will be __arrow_c_array__() - which is documented in the Arrow PyCapsule Interface. Arrow-backed - DataFrame libraries will use __arrow_c_array__() to directly access - the underlying arrow data - - """ +cdef class ArrowArrayImpl: cdef: - public int32_t precision - public int32_t scale - public str name - public ArrowType arrow_type - public ArrowTimeUnit time_unit + int32_t precision + int32_t scale + str name + ArrowType arrow_type + ArrowTimeUnit time_unit double factor ArrowArray *arrow_array ArrowSchema *arrow_schema ArrowType child_arrow_type - cdef str _schema_to_string(self) cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1 cdef int append_double(self, double value) except -1 cdef int append_float(self, float value) except -1 cdef int append_int64(self, int64_t value) except -1 - cdef int append_last_value(self, OracleArrowArray array) except -1 + cdef int append_last_value(self, ArrowArrayImpl array) except -1 cdef int append_null(self) except -1 cdef int append_sparse_vector(self, int64_t num_dimensions, array.array indices, array.array values) except -1 cdef int append_vector(self, array.array value) except -1 cdef int finish_building(self) except -1 + + +cdef class DataFrameImpl: + cdef: + list arrays diff --git a/src/oracledb/arrow_impl.pyx b/src/oracledb/arrow_impl.pyx new file mode 100644 index 00000000..5983aa0c --- /dev/null +++ b/src/oracledb/arrow_impl.pyx @@ -0,0 +1,42 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# arrow_impl.pyx +# +# Cython file for the Arrow implementation. +#------------------------------------------------------------------------------ + +# cython: language_level=3 + +cimport cpython + +from libc.stdint cimport uintptr_t +from libc.string cimport memcpy, strlen, strchr + +from . import errors + +include "impl/arrow/utils.pyx" +include "impl/arrow/array.pyx" +include "impl/arrow/dataframe.pyx" diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index f00dc07a..d2e97929 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -39,10 +39,10 @@ cimport cpython.datetime as cydatetime ctypedef unsigned char char_type -from .interchange.nanoarrow_bridge cimport ( +from .arrow_impl cimport ( ArrowTimeUnit, ArrowType, - OracleArrowArray, + ArrowArrayImpl, ) cdef enum: @@ -723,7 +723,7 @@ cdef class BaseVarImpl: BaseConnImpl _conn_impl OracleMetadata _fetch_metadata list _values - OracleArrowArray _arrow_array + ArrowArrayImpl _arrow_array bint _has_returned_data bint _is_value_set @@ -736,7 +736,7 @@ cdef class BaseVarImpl: cdef DbType _check_fetch_conversion(self) cdef int _create_arrow_array(self) except -1 cdef int _finalize_init(self) except -1 - cdef OracleArrowArray _finish_building_arrow_array(self) + cdef ArrowArrayImpl _finish_building_arrow_array(self) cdef DbType _get_adjusted_type(self, uint8_t ora_type_num) cdef list _get_array_value(self) cdef object _get_scalar_value(self, uint32_t pos) @@ -972,13 +972,13 @@ cdef struct OracleData: cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, OracleMetadata to_metadatda, OracleData* data, - OracleArrowArray arrow_array) except -1 + ArrowArrayImpl arrow_array) except -1 cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, OracleMetadata to_metadatda, OracleData* data, const char* encoding_errors, bint from_dbobject) -cdef int convert_vector_to_arrow(OracleArrowArray arrow_array, +cdef int convert_vector_to_arrow(ArrowArrayImpl arrow_array, object vector) except -1 cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer) cdef uint16_t decode_uint16be(const char_type *buf) diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index 3a83c94d..1004b891 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -44,7 +44,8 @@ from cpython cimport array from .constants import VECTOR_META_FLAG_SPARSE_VECTOR -from .interchange.nanoarrow_bridge cimport ( +from .arrow_impl cimport ( + DataFrameImpl, NANOARROW_TIME_UNIT_SECOND, NANOARROW_TIME_UNIT_MILLI, NANOARROW_TIME_UNIT_MICRO, @@ -88,11 +89,12 @@ import warnings cydatetime.import_datetime() # Python types used by the driver +cdef type PY_TYPE_ARROW_ARRAY cdef type PY_TYPE_ASYNC_CURSOR cdef type PY_TYPE_ASYNC_LOB cdef type PY_TYPE_BOOL = bool cdef type PY_TYPE_CURSOR -cdef object PY_TYPE_DATAFRAME +cdef type PY_TYPE_DATAFRAME cdef type PY_TYPE_DATE = datetime.date cdef type PY_TYPE_DATETIME = 
datetime.datetime cdef type PY_TYPE_DB_OBJECT diff --git a/src/oracledb/dataframe.py b/src/oracledb/dataframe.py new file mode 100644 index 00000000..90fea896 --- /dev/null +++ b/src/oracledb/dataframe.py @@ -0,0 +1,99 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# dataframe.py +# +# Implement a data frame that can be used for efficiently transferring Arrow +# array data to other data frame libraries. +# ----------------------------------------------------------------------------- + +from typing import List + +from .arrow_array import ArrowArray +from . import errors + + +class DataFrame: + _impl = None + + def __init__(self): + errors._raise_err(errors.ERR_INTERNAL_CREATION_REQUIRED) + + @classmethod + def _from_impl(cls, impl): + df = cls.__new__(cls) + df._impl = impl + df._arrays = [ArrowArray._from_impl(a) for a in impl.get_arrays()] + df._arrays_by_name = {} + for array in df._arrays: + df._arrays_by_name[array.name] = array + return df + + def column_arrays(self) -> List: + """ + Returns a list of the Arrow arrays corresponding to each column in the + data frame. + """ + return self._arrays + + def column_names(self) -> List[str]: + """ + Returns a list of the names of the columns in the data frame. + """ + return [a.name for a in self._arrays] + + def get_column(self, i: int) -> ArrowArray: + """ + Returns a column from the data frame given its zero-based index. If the + index is out of range, an IndexError exception is raised. + """ + if i < 0 or i >= self.num_columns(): + raise IndexError( + f"Column index {i} is out of bounds for " + f"DataFrame with {self.num_columns()} columns" + ) + return self._arrays[i] + + def get_column_by_name(self, name: str) -> ArrowArray: + """ + Returns a column from the data frame given the name of the column. If + the column name is not found, a KeyError exception is raised. + """ + try: + return self._arrays_by_name[name] + except KeyError: + raise KeyError(f"Column {name} not found in DataFrame") + + def num_columns(self) -> int: + """ + Returns the number of columns in the data frame. + """ + return len(self._arrays) + + def num_rows(self) -> int: + """ + Returns the number of rows in the data frame. 
+ """ + return len(self._arrays[0]) diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 1f066201..3f1f15ba 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -371,6 +371,7 @@ def _raise_not_supported(feature: str) -> None: ERR_UNKNOWN_TRANSACTION_STATE = 5010 ERR_UNEXPECTED_PIPELINE_FAILURE = 5011 ERR_NOT_IMPLEMENTED = 5012 +ERR_INTERNAL_CREATION_REQUIRED = 5013 # error numbers that result in OperationalError ERR_LISTENER_REFUSED_CONNECTION = 6000 @@ -618,6 +619,7 @@ def _raise_not_supported(feature: str) -> None: "internal error: read integer of length {length} when expecting " "integer of no more than length {max_length}" ), + ERR_INTERNAL_CREATION_REQUIRED: "object may not be created directly", ERR_INVALID_ACCESS_TOKEN_PARAM: ( "invalid access token: value must be a string (for OAuth), a " "2-tuple containing the token and private key strings (for IAM), " diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx new file mode 100644 index 00000000..311d6ecf --- /dev/null +++ b/src/oracledb/impl/arrow/array.pyx @@ -0,0 +1,403 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# array.pyx +# +# Cython implementation of the ArrowArrayImpl class. 
+#------------------------------------------------------------------------------ + +cdef class ArrowArrayImpl: + + def __cinit__(self, ArrowType arrow_type, str name, int8_t precision, + int8_t scale, ArrowTimeUnit time_unit, + ArrowType child_arrow_type): + cdef ArrowType storage_type = arrow_type + self.arrow_type = arrow_type + self.child_arrow_type = child_arrow_type + self.time_unit = time_unit + self.name = name + self.arrow_array = \ + cpython.PyMem_Malloc(sizeof(ArrowArray)) + if arrow_type == NANOARROW_TYPE_TIMESTAMP: + storage_type = NANOARROW_TYPE_INT64 + if time_unit == NANOARROW_TIME_UNIT_MILLI: + self.factor = 1e3 + elif time_unit == NANOARROW_TIME_UNIT_MICRO: + self.factor = 1e6 + elif time_unit == NANOARROW_TIME_UNIT_NANO: + self.factor = 1e9 + else: + self.factor = 1 + + self.arrow_schema = \ + cpython.PyMem_Malloc(sizeof(ArrowSchema)) + if arrow_type == NANOARROW_TYPE_DECIMAL128: + self.precision = precision + self.scale = scale + ArrowSchemaInit(self.arrow_schema) + _check_nanoarrow( + ArrowSchemaSetTypeDecimal( + self.arrow_schema, + arrow_type, + precision, + scale + ) + ) + elif arrow_type == NANOARROW_TYPE_STRUCT: + # Currently struct is used for Sparse vector only + build_arrow_schema_for_sparse_vector(self.arrow_schema, + child_arrow_type) + else: + _check_nanoarrow( + ArrowSchemaInitFromType( + self.arrow_schema, + storage_type + ) + ) + if arrow_type == NANOARROW_TYPE_TIMESTAMP: + _check_nanoarrow( + ArrowSchemaSetTypeDateTime( + self.arrow_schema, + arrow_type, + time_unit, + NULL + ) + ) + if arrow_type == NANOARROW_TYPE_LIST: + # Set the schema for child using child_arrow_type + _check_nanoarrow( + ArrowSchemaSetType( + self.arrow_schema.children[0], + child_arrow_type + ) + ) + _check_nanoarrow( + ArrowArrayInitFromSchema( + self.arrow_array, + self.arrow_schema, + NULL + ) + ) + elif arrow_type == NANOARROW_TYPE_STRUCT: + _check_nanoarrow( + ArrowArrayInitFromSchema( + self.arrow_array, + self.arrow_schema, + NULL + ) + ) + else: # primitive type array init + _check_nanoarrow( + ArrowArrayInitFromType( + self.arrow_array, + storage_type + ) + ) + _check_nanoarrow(ArrowArrayStartAppending(self.arrow_array)) + _check_nanoarrow(ArrowSchemaSetName(self.arrow_schema, name.encode())) + + def __dealloc__(self): + if self.arrow_array != NULL: + if self.arrow_array.release != NULL: + ArrowArrayRelease(self.arrow_array) + cpython.PyMem_Free(self.arrow_array) + if self.arrow_schema != NULL: + if self.arrow_schema.release != NULL: + ArrowSchemaRelease(self.arrow_schema) + cpython.PyMem_Free(self.arrow_schema) + + cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1: + """ + Append a value of type bytes to the array. + """ + cdef ArrowBufferView data + data.data.data = ptr + data.size_bytes = num_bytes + _check_nanoarrow(ArrowArrayAppendBytes(self.arrow_array, data)) + + cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1: + """ + Append a value of type ArrowDecimal to the array + + Arrow decimals are fixed-point decimal numbers encoded as a + scaled integer. decimal128(7, 3) can exactly represent the numbers + 1234.567 and -1234.567 encoded internally as the 128-bit integers + 1234567 and -1234567, respectively. 
+ """ + cdef: + ArrowStringView decimal_view + ArrowDecimal decimal + decimal_view.data = ptr + decimal_view.size_bytes = num_bytes + ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + _check_nanoarrow(ArrowDecimalSetDigits(&decimal, decimal_view)) + _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, &decimal)) + + cdef int append_double(self, double value) except -1: + """ + Append a value of type double to the array. + """ + _check_nanoarrow(ArrowArrayAppendDouble(self.arrow_array, value)) + + cdef int append_float(self, float value) except -1: + """ + Append a value of type float to the array. + """ + self.append_double(value) + + cdef int append_int64(self, int64_t value) except -1: + """ + Append a value of type int64_t to the array. + """ + _check_nanoarrow(ArrowArrayAppendInt(self.arrow_array, value)) + + cdef int append_last_value(self, ArrowArrayImpl array) except -1: + """ + Appends the last value of the given array to this array. + """ + cdef: + int32_t start_offset, end_offset + ArrowBuffer *offsets_buffer + ArrowBuffer *data_buffer + ArrowDecimal decimal + int64_t *as_int64 + int32_t *as_int32 + double *as_double + float *as_float + int8_t as_bool + int64_t index + uint8_t *ptr + void* temp + ArrowBitmap *bitamp + if array is None: + array = self + index = array.arrow_array.length - 1 + bitmap = ArrowArrayValidityBitmap(array.arrow_array) + if bitmap != NULL and bitmap.buffer.data != NULL: + as_bool = ArrowBitGet(bitmap.buffer.data, index) + if not as_bool: + self.append_null() + return 0 + if array.arrow_type in (NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP): + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_int64 = data_buffer.data + self.append_int64(as_int64[index]) + elif array.arrow_type == NANOARROW_TYPE_DOUBLE: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_double = data_buffer.data + self.append_double(as_double[index]) + elif array.arrow_type == NANOARROW_TYPE_FLOAT: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_float = data_buffer.data + self.append_double(as_float[index]) + elif array.arrow_type == NANOARROW_TYPE_BOOL: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + as_bool = ArrowBitGet(data_buffer.data, index) + self.append_int64(as_bool) + elif array.arrow_type == NANOARROW_TYPE_DECIMAL128: + data_buffer = ArrowArrayBuffer(array.arrow_array, 1) + ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + ptr = data_buffer.data + index * 16 + ArrowDecimalSetBytes(&decimal, ptr) + _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, + &decimal)) + elif array.arrow_type in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_STRING + ): + offsets_buffer = ArrowArrayBuffer(array.arrow_array, 1) + data_buffer = ArrowArrayBuffer(array.arrow_array, 2) + as_int32 = offsets_buffer.data + start_offset = as_int32[index] + end_offset = as_int32[index + 1] + temp = cpython.PyMem_Malloc(end_offset - start_offset) + memcpy(temp, &data_buffer.data[start_offset], + end_offset - start_offset) + try: + self.append_bytes(temp, end_offset - start_offset) + finally: + cpython.PyMem_Free(temp) + + elif array.arrow_type in ( + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING + ): + offsets_buffer = ArrowArrayBuffer(array.arrow_array, 1) + data_buffer = ArrowArrayBuffer(array.arrow_array, 2) + as_int64 = offsets_buffer.data + start_offset = as_int64[index] + end_offset = as_int64[index + 1] + temp = cpython.PyMem_Malloc(end_offset - start_offset) + memcpy(temp, &data_buffer.data[start_offset], + end_offset - 
start_offset) + try: + self.append_bytes(temp, end_offset - start_offset) + finally: + cpython.PyMem_Free(temp) + + cdef int append_null(self) except -1: + """ + Append a null value to the array. + """ + _check_nanoarrow(ArrowArrayAppendNull(self.arrow_array, 1)) + + cdef int append_vector(self, array.array value) except -1: + """ + Append a vector to the array. + """ + if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + append_float_array(self.arrow_array, value) + elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + append_double_array(self.arrow_array, value) + elif self.child_arrow_type == NANOARROW_TYPE_INT8: + append_int8_array(self.arrow_array, value) + elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + append_uint8_array(self.arrow_array, value) + + cdef int append_sparse_vector(self, + int64_t num_dims, + array.array indices, + array.array values) except -1: + """ + Append a sparse vector to the array. + """ + cdef ArrowArray *array + + # validate that the array supports sparse vectors + if self.arrow_type != NANOARROW_TYPE_STRUCT: + errors._raise_err(errors.ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED) + + # append number of dimensions + array = self.arrow_array.children[0] + _check_nanoarrow(ArrowArrayAppendInt(array, num_dims)) + + # append indices array + array = self.arrow_array.children[1] + append_uint32_array(array, indices) + + # append values array + array = self.arrow_array.children[2] + if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + append_float_array(array, values) + elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + append_double_array(array, values) + elif self.child_arrow_type == NANOARROW_TYPE_INT8: + append_int8_array(array, values) + elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + append_uint8_array(array, values) + + # indicate structure is completed + _check_nanoarrow(ArrowArrayFinishElement(self.arrow_array)) + + cdef int finish_building(self) except -1: + """ + Finish building the array. No more data will be added to it. + """ + _check_nanoarrow(ArrowArrayFinishBuildingDefault(self.arrow_array, + NULL)) + + def get_array_capsule(self): + """ + Internal method for getting a PyCapsule pointer to the array. + """ + cdef ArrowArray *array + array = cpython.PyMem_Malloc(sizeof(ArrowArray)) + try: + copy_arrow_array(self, self.arrow_array, array) + except: + cpython.PyMem_Free(array) + raise + return cpython.PyCapsule_New( + array, 'arrow_array', &pycapsule_array_deleter + ) + + def get_data_type(self): + """ + Internal method for getting the data type associated with the array. + """ + cdef char buffer[81] + ArrowSchemaToString(self.arrow_schema, buffer, sizeof(buffer), 0) + return buffer.decode() + + def get_name(self): + """ + Internal method for getting the name associated with the array. + """ + return self.name + + def get_null_count(self): + """ + Internal method for getting the number of rows containing null values. + """ + return self.arrow_array.null_count + + def get_num_rows(self): + """ + Internal method for getting the number of rows in the array. + """ + return self.arrow_array.length + + def get_schema_capsule(self): + """ + Internal method for getting a PyCapsule pointer to the schema. 
+ """ + cdef ArrowSchema *schema + schema = cpython.PyMem_Malloc(sizeof(ArrowSchema)) + try: + _check_nanoarrow(ArrowSchemaDeepCopy(self.arrow_schema, schema)) + except: + cpython.PyMem_Free(schema) + raise + return cpython.PyCapsule_New( + schema, 'arrow_schema', &pycapsule_schema_deleter + ) + + +cdef void pycapsule_array_deleter(object array_capsule) noexcept: + """ + Called when the PyCapsule pointer is no longer required and performs the + necessary cleanup. + """ + cdef ArrowArray* array + array = cpython.PyCapsule_GetPointer( + array_capsule, "arrow_array" + ) + if array.release != NULL: + ArrowArrayRelease(array) + cpython.PyMem_Free(array) + + +cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: + """ + Called when the PyCapsule pointer is no longer required and performs the + necessary cleanup. + """ + cdef ArrowSchema* schema + schema = cpython.PyCapsule_GetPointer( + schema_capsule, "arrow_schema" + ) + if schema.release != NULL: + ArrowSchemaRelease(schema) + cpython.PyMem_Free(schema) diff --git a/src/oracledb/impl/arrow/dataframe.pyx b/src/oracledb/impl/arrow/dataframe.pyx new file mode 100644 index 00000000..80746179 --- /dev/null +++ b/src/oracledb/impl/arrow/dataframe.pyx @@ -0,0 +1,38 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# dataframe.pyx +# +# Cython implementation of the DataFrameImpl class. +#------------------------------------------------------------------------------ + +cdef class DataFrameImpl: + + def get_arrays(self): + """ + Internal method for getting the list of arrays associated with the data + frame. 
+ """ + return self.arrays diff --git a/src/oracledb/interchange/nanoarrow/nanoarrow.c b/src/oracledb/impl/arrow/nanoarrow/nanoarrow.c similarity index 100% rename from src/oracledb/interchange/nanoarrow/nanoarrow.c rename to src/oracledb/impl/arrow/nanoarrow/nanoarrow.c diff --git a/src/oracledb/interchange/nanoarrow/nanoarrow.h b/src/oracledb/impl/arrow/nanoarrow/nanoarrow.h similarity index 100% rename from src/oracledb/interchange/nanoarrow/nanoarrow.h rename to src/oracledb/impl/arrow/nanoarrow/nanoarrow.h diff --git a/src/oracledb/impl/arrow/utils.pyx b/src/oracledb/impl/arrow/utils.pyx new file mode 100644 index 00000000..bb3cbf7a --- /dev/null +++ b/src/oracledb/impl/arrow/utils.pyx @@ -0,0 +1,304 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# utils.pyx +# +# Contains utilities and definitions used by the other files in this module. 
+#------------------------------------------------------------------------------ + +cdef extern from "nanoarrow.c": + + ctypedef int ArrowErrorCode + + ctypedef void (*ArrowBufferDeallocatorCallback) + + cdef struct ArrowBufferAllocator: + void *private_data + + cdef struct ArrowBuffer: + uint8_t *data + int64_t size_bytes + ArrowBufferAllocator allocator + + cdef union ArrowBufferViewData: + const void* data + + cdef struct ArrowBufferView: + ArrowBufferViewData data + int64_t size_bytes + + cdef struct ArrowBitmap: + ArrowBuffer buffer + + cdef struct ArrowArrayView: + ArrowBufferView *buffer_views + + cdef struct ArrowDecimal: + pass + + cdef struct ArrowError: + pass + + cdef struct ArrowStringView: + const char* data + int64_t size_bytes + + cdef ArrowErrorCode NANOARROW_OK + + ArrowErrorCode ArrowArrayAllocateChildren(ArrowArray *array, + int64_t n_children) + ArrowErrorCode ArrowArrayAppendBytes(ArrowArray* array, + ArrowBufferView value) + ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* array, + const ArrowDecimal* value) + ArrowErrorCode ArrowArrayAppendDouble(ArrowArray* array, double value) + ArrowErrorCode ArrowArrayAppendInt(ArrowArray* array, int64_t value) + ArrowErrorCode ArrowArrayAppendNull(ArrowArray* array, int64_t n) + ArrowBuffer* ArrowArrayBuffer(ArrowArray* array, int64_t i) + ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* array, + ArrowError* error) + ArrowErrorCode ArrowArrayFinishElement(ArrowArray *array) + ArrowErrorCode ArrowArrayInitFromSchema(ArrowArray *array, + ArrowSchema *schema, + ArrowError *error) + ArrowErrorCode ArrowArrayInitFromType(ArrowArray* array, + ArrowType storage_type) + void ArrowArrayRelease(ArrowArray *array) + ArrowErrorCode ArrowArrayReserve(ArrowArray* array, + int64_t additional_size_elements) + ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) + ArrowBitmap* ArrowArrayValidityBitmap(ArrowArray* array) + ArrowErrorCode ArrowArrayViewInitFromArray(ArrowArrayView* array_view, + ArrowArray* array) + int8_t ArrowBitGet(const uint8_t* bits, int64_t i) + ArrowBufferAllocator ArrowBufferDeallocator(ArrowBufferDeallocatorCallback, + void *private_data) + void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, + int32_t precision, int32_t scale) + void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) + ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal* decimal, + ArrowStringView value) + ArrowErrorCode ArrowSchemaDeepCopy(const ArrowSchema *schema, + ArrowSchema *schema_out) + void ArrowSchemaInit(ArrowSchema* schema) + ArrowErrorCode ArrowSchemaInitFromType(ArrowSchema* schema, ArrowType type) + void ArrowSchemaRelease(ArrowSchema *schema) + ArrowErrorCode ArrowSchemaSetName(ArrowSchema* schema, const char* name) + ArrowErrorCode ArrowSchemaSetType(ArrowSchema * schema, ArrowType type) + ArrowErrorCode ArrowSchemaSetTypeDateTime(ArrowSchema* schema, + ArrowType arrow_type, + ArrowTimeUnit time_unit, + const char* timezone) + ArrowErrorCode ArrowSchemaSetTypeStruct(ArrowSchema *schema, + int64_t n_children) + ArrowErrorCode ArrowSchemaSetTypeDecimal(ArrowSchema* schema, + ArrowType type, + int32_t decimal_precision, + int32_t decimal_scale) + int64_t ArrowSchemaToString(const ArrowSchema* schema, char* out, + int64_t n, char recursive) + +cdef int _check_nanoarrow(int code) except -1: + """ + Checks the return code of the nanoarrow function and raises an exception if + it is not NANOARROW_OK. 
+ """ + if code != NANOARROW_OK: + errors._raise_err(errors.ERR_ARROW_C_API_ERROR, code=code) + + + +cdef int append_double_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of doubles to the Arrow array. + """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + double *double_buf = value.data.as_doubles + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendDouble(child_array, double_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_float_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of floats to the Arrow array. + """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + float *float_buf = value.data.as_floats + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendDouble(child_array, float_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_int8_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of signed one-byte integers to the Arrow array. + """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + int8_t *int8_buf = value.data.as_schars + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendInt(child_array, int8_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_uint8_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of unsigned one-byte integers to the Arrow array. + """ + cdef: + ArrowArray *child_array = arrow_array.children[0] + uint8_t *uint8_buf = value.data.as_uchars + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendInt(child_array, uint8_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef int append_uint32_array(ArrowArray *arrow_array, + array.array value) except -1: + """ + Appends an array of unsigned four-byte integers to the Arrow array. Note + that Python's array.array doesn't natively support uint32_t but an upper + layer has verified that the data in the buffer consists of only four byte + integers. + """ + cdef: + uint32_t *uint32_buf = value.data.as_voidptr + ArrowArray *child_array = arrow_array.children[0] + Py_ssize_t i + for i in range(len(value)): + _check_nanoarrow(ArrowArrayAppendInt(child_array, uint32_buf[i])) + _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) + + +cdef void arrow_buffer_dealloc_callback(ArrowBufferAllocator *allocator, + uint8_t *ptr, int64_t size): + """ + ArrowBufferDeallocatorCallback for an ArrowBuffer borrowed from an Arrow + array. + """ + cpython.Py_DECREF( allocator.private_data) + + +cdef int copy_arrow_array(ArrowArrayImpl array_impl, + ArrowArray *src, ArrowArray *dest) except -1: + """ + Shallow copy source ArrowArray to destination ArrowArray. The source + ArrowArray belongs to the wrapper ArrowArrayImpl. The shallow copy idea + is borrowed from nanoarrow: + https://github.com/apache/arrow-nanoarrow/main/blob/python + """ + cdef: + ArrowBuffer *dest_buffer + ssize_t i + _check_nanoarrow( + ArrowArrayInitFromType( + dest, NANOARROW_TYPE_UNINITIALIZED + ) + ) + + # Copy metadata + dest.length = src.length + dest.offset = src.offset + dest.null_count = src.null_count + + # Borrow an ArrowBuffer belonging to ArrowArrayImpl. The ArrowBuffer can + # belong to an immediate ArrowArray or a child (in case of nested types). 
+ # Either way, we PY_INCREF(array_impl), so that it is not + # prematurely garbage collected. The corresponding PY_DECREF happens in the + # ArrowBufferDeAllocator callback. + for i in range(src.n_buffers): + if src.buffers[i] != NULL: + dest_buffer = ArrowArrayBuffer(dest, i) + dest_buffer.data = src.buffers[i] + dest_buffer.size_bytes = 0 + dest_buffer.allocator = ArrowBufferDeallocator( + arrow_buffer_dealloc_callback, + array_impl + ) + cpython.Py_INCREF(array_impl) + dest.buffers[i] = src.buffers[i] + dest.n_buffers = src.n_buffers + + # shallow copy of children (recursive call) + if src.n_children > 0: + _check_nanoarrow(ArrowArrayAllocateChildren(dest, src.n_children)) + for i in range(src.n_children): + copy_arrow_array(array_impl, src.children[i], dest.children[i]) + + +cdef int build_arrow_schema_for_sparse_vector( + ArrowSchema *schema, + ArrowType vector_value_type +) except -1: + + # Initialize struct with 3 fields - num_dimensions, indices, values + ArrowSchemaInit(schema) + _check_nanoarrow(ArrowSchemaSetTypeStruct(schema, 3)) + + # first child: "num_dimensions" + _check_nanoarrow( + ArrowSchemaSetType(schema.children[0], NANOARROW_TYPE_INT64) + ) + _check_nanoarrow(ArrowSchemaSetName(schema.children[0], "num_dimensions")) + + # second child: "indices" + _check_nanoarrow(ArrowSchemaSetType( + schema.children[1], + NANOARROW_TYPE_LIST + ) + ) + _check_nanoarrow( + ArrowSchemaSetType( + schema.children[1].children[0], + NANOARROW_TYPE_UINT32 + ) + ) + _check_nanoarrow(ArrowSchemaSetName(schema.children[1], "indices")) + + # third child: "values" + _check_nanoarrow( + ArrowSchemaSetType( + schema.children[2], + NANOARROW_TYPE_LIST + ) + ) + _check_nanoarrow( + ArrowSchemaSetType( + schema.children[2].children[0], + vector_value_type + ) + ) + _check_nanoarrow(ArrowSchemaSetName(schema.children[2], "values")) diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 81809457..e8382af2 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -47,7 +47,7 @@ cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer): return output -cdef int convert_date_to_arrow_timestamp(OracleArrowArray arrow_array, +cdef int convert_date_to_arrow_timestamp(ArrowArrayImpl arrow_array, OracleDataBuffer *buffer) except -1: """ Converts a DATE, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE or TIMESTAMP @@ -85,7 +85,7 @@ cdef object convert_interval_ym_to_python(OracleDataBuffer *buffer): return PY_TYPE_INTERVAL_YM(value.years, value.months) -cdef int convert_number_to_arrow_decimal(OracleArrowArray arrow_array, +cdef int convert_number_to_arrow_decimal(ArrowArrayImpl arrow_array, OracleDataBuffer *buffer) except -1: """ Converts a NUMBER value stored in the buffer to Arrow DECIMAL128. @@ -137,7 +137,7 @@ cdef int convert_number_to_arrow_decimal(OracleArrowArray arrow_array, -cdef int convert_number_to_arrow_double(OracleArrowArray arrow_array, +cdef int convert_number_to_arrow_double(ArrowArrayImpl arrow_array, OracleDataBuffer *buffer) except -1: """ Converts a NUMBER value stored in the buffer to Arrow DOUBLE. @@ -149,7 +149,7 @@ cdef int convert_number_to_arrow_double(OracleArrowArray arrow_array, arrow_array.append_double(atof(value.chars[:value.num_chars])) -cdef int convert_number_to_arrow_int64(OracleArrowArray arrow_array, +cdef int convert_number_to_arrow_int64(ArrowArrayImpl arrow_array, OracleDataBuffer *buffer) except -1: """ Converts a NUMBER value stored in the buffer to Arrow INT64. 
@@ -224,7 +224,7 @@ cdef object convert_str_to_python(OracleDataBuffer *buffer, uint8_t csfrm, cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, OracleMetadata to_metadata, OracleData* data, - OracleArrowArray arrow_array) except -1: + ArrowArrayImpl arrow_array) except -1: """ Converts the value stored in OracleData to Arrow format. """ @@ -440,7 +440,7 @@ cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, output_type=to_metadata.dbtype.name) -cdef int convert_vector_to_arrow(OracleArrowArray arrow_array, +cdef int convert_vector_to_arrow(ArrowArrayImpl arrow_array, object vector) except -1: """ Converts the vector to the format required by the Arrow array. diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index 5d6b11d5..cc827b87 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -519,11 +519,13 @@ cdef class BaseCursorImpl: Flush all buffers and return an Oracle Data frame. """ cdef: + DataFrameImpl df_impl BaseVarImpl var_impl - list columns = [] + df_impl = DataFrameImpl.__new__(DataFrameImpl) + df_impl.arrays = [] for var_impl in self.fetch_var_impls: - columns.append(var_impl._finish_building_arrow_array()) - return PY_TYPE_DATAFRAME(columns) + df_impl.arrays.append(var_impl._finish_building_arrow_array()) + return PY_TYPE_DATAFRAME._from_impl(df_impl) def close(self, bint in_del=False): """ @@ -576,7 +578,7 @@ cdef class BaseCursorImpl: def fetch_df_all(self, cursor): """ - Internal method used for fetching all data as OracleDataFrame + Internal method used for fetching all data as DataFrame """ while self._more_rows_to_fetch: self._fetch_rows(cursor) @@ -584,7 +586,7 @@ cdef class BaseCursorImpl: def fetch_df_batches(self, cursor, int batch_size): """ - Internal method used for fetching next batch as OracleDataFrame + Internal method used for fetching next batch as DataFrame cursor.arraysize = batchsize """ cdef: diff --git a/src/oracledb/impl/base/utils.pyx b/src/oracledb/impl/base/utils.pyx index 04d19022..9bdc64b9 100644 --- a/src/oracledb/impl/base/utils.pyx +++ b/src/oracledb/impl/base/utils.pyx @@ -219,6 +219,7 @@ def init_base_impl(package): ENUM_AUTH_MODE, \ ENUM_POOL_GET_MODE, \ ENUM_PURITY, \ + PY_TYPE_ARROW_ARRAY, \ PY_TYPE_ASYNC_CURSOR, \ PY_TYPE_ASYNC_LOB, \ PY_TYPE_CONNECT_PARAMS, \ @@ -245,11 +246,12 @@ def init_base_impl(package): ENUM_AUTH_MODE = package.AuthMode ENUM_PURITY = package.Purity ENUM_POOL_GET_MODE = package.PoolGetMode + PY_TYPE_ARROW_ARRAY = package.ArrowArray PY_TYPE_ASYNC_CURSOR = package.AsyncCursor PY_TYPE_ASYNC_LOB = package.AsyncLOB PY_TYPE_CONNECT_PARAMS = package.ConnectParams PY_TYPE_CURSOR = package.Cursor - PY_TYPE_DATAFRAME = package.OracleDataFrame + PY_TYPE_DATAFRAME = package.DataFrame PY_TYPE_DB_OBJECT = package.DbObject PY_TYPE_DB_OBJECT_TYPE = package.DbObjectType PY_TYPE_FETCHINFO = package.FetchInfo diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index 1fb79e64..1d398203 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -280,7 +280,7 @@ cdef class BaseVarImpl: else: errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT) - self._arrow_array = OracleArrowArray( + self._arrow_array = ArrowArrayImpl( arrow_type=self.metadata._arrow_type, name=self.metadata.name, precision=self.metadata.precision, @@ -298,13 +298,13 @@ cdef class BaseVarImpl: self.num_elements = 1 self._has_returned_data = False - cdef OracleArrowArray _finish_building_arrow_array(self): + 
cdef ArrowArrayImpl _finish_building_arrow_array(self): """ Finish building the Arrow array associated with the variable and then return that array (after clearing it in the variable so that a new array will be built if more rows are fetched). """ - cdef OracleArrowArray array = self._arrow_array + cdef ArrowArrayImpl array = self._arrow_array array.finish_building() self._arrow_array = None return array diff --git a/src/oracledb/impl/thin/cursor.pyx b/src/oracledb/impl/thin/cursor.pyx index 5133e030..205a8dab 100644 --- a/src/oracledb/impl/thin/cursor.pyx +++ b/src/oracledb/impl/thin/cursor.pyx @@ -434,7 +434,7 @@ cdef class AsyncThinCursorImpl(BaseThinCursorImpl): async def fetch_df_all(self, cursor): """ - Internal method used for fetching all data as OracleDataFrame + Internal method used for fetching all data as DataFrame """ while self._more_rows_to_fetch: await self._fetch_rows_async(cursor) @@ -442,7 +442,7 @@ cdef class AsyncThinCursorImpl(BaseThinCursorImpl): async def fetch_df_batches(self, cursor, int batch_size): """ - Internal method used for fetching next batch as OracleDataFrame. + Internal method used for fetching next batch as DataFrame. """ # Return the prefetched batch yield self._finish_building_arrow_arrays() diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 581c74e4..1b9a0746 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -910,7 +910,7 @@ cdef class MessageWithData(Message): var_impl._fetch_metadata) statement._last_output_type_handler = type_handler - # Create OracleArrowArray if fetching arrow is enabled + # create Arrow arrays if fetching arrow is enabled if cursor_impl.fetching_arrow: cursor_impl._create_arrow_arrays() diff --git a/src/oracledb/impl/thin/var.pyx b/src/oracledb/impl/thin/var.pyx index 0fa7c613..db005a97 100644 --- a/src/oracledb/impl/thin/var.pyx +++ b/src/oracledb/impl/thin/var.pyx @@ -32,7 +32,7 @@ cdef class ThinVarImpl(BaseVarImpl): cdef: object _last_raw_value - OracleArrowArray _last_arrow_array + ArrowArrayImpl _last_arrow_array list _coroutine_indexes cdef int _bind(self, object conn, BaseCursorImpl cursor_impl, @@ -113,7 +113,7 @@ cdef class ThinVarImpl(BaseVarImpl): BaseVarImpl._finalize_init(self) self._values = [None] * self.num_elements - cdef OracleArrowArray _finish_building_arrow_array(self): + cdef ArrowArrayImpl _finish_building_arrow_array(self): """ Finish building the Arrow array associated with the variable and then return that array (after clearing it in the variable so that a new diff --git a/src/oracledb/interchange/__init__.py b/src/oracledb/interchange/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/oracledb/interchange/buffer.py b/src/oracledb/interchange/buffer.py deleted file mode 100644 index 798a8dba..00000000 --- a/src/oracledb/interchange/buffer.py +++ /dev/null @@ -1,84 +0,0 @@ -# ----------------------------------------------------------------------------- -# Copyright (c) 2025, Oracle and/or its affiliates. -# -# This software is dual-licensed to you under the Universal Permissive License -# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License -# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose -# either license. 
-# -# If you elect to accept the software under the Apache License, Version 2.0, -# the following applies: -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# buffer.py -# -# Implements the Buffer class as documented in DataFrame API -# ----------------------------------------------------------------------------- - -from typing import Tuple - -from .protocol import ( - Buffer, - DlpackDeviceType, -) - - -class OracleColumnBuffer(Buffer): - """ - OracleColumnBuffer represents a contiguous memory buffer in the DataFrame - Interchange Protocol. It provides access to raw binary data that backs - various components of the data frame such as column values, validity masks - and offsets for variable-length data types. - """ - - def __init__(self, buffer_type, size_in_bytes, address) -> None: - self.buffer_type = buffer_type - self.size_in_bytes = size_in_bytes - self.address = address - - def __dlpack__(self): - """ - Represent this structure as a DLPack interface. - """ - raise NotImplementedError("__dlpack__") - - def __dlpack_device__(self) -> Tuple[DlpackDeviceType, None]: - """ - Device type and device ID for where the data - in the buffer resides - """ - return (DlpackDeviceType.CPU, None) - - def __repr__(self) -> str: - device = self.__dlpack_device__()[0].name - return ( - f"OracleColumnBuffer(bufsize={self.bufsize}, " - f"ptr={self.ptr}, type={self.buffer_type}, device={device!r})" - ) - - @property - def bufsize(self) -> int: - """ - Returns the total size of buffer in bytes. - """ - return self.size_in_bytes - - @property - def ptr(self) -> int: - """ - Returns the memory address of the buffer. - """ - return self.address diff --git a/src/oracledb/interchange/column.py b/src/oracledb/interchange/column.py deleted file mode 100644 index c44873dc..00000000 --- a/src/oracledb/interchange/column.py +++ /dev/null @@ -1,217 +0,0 @@ -# ----------------------------------------------------------------------------- -# Copyright (c) 2025, Oracle and/or its affiliates. -# -# This software is dual-licensed to you under the Universal Permissive License -# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License -# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose -# either license. -# -# If you elect to accept the software under the Apache License, Version 2.0, -# the following applies: -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# column.py -# -# Implements the Column class as documented in DataFrame API -# ----------------------------------------------------------------------------- - -from typing import Any, Dict, Iterable, Optional, Tuple - -from .buffer import OracleColumnBuffer -from .protocol import ( - CategoricalDescription, - Column, - Dtype, - ColumnBuffers, - ColumnNullType, - DtypeKind, -) - -from .nanoarrow_bridge import ( - ArrowTimeUnit, - ArrowType, -) - - -class OracleColumn(Column): - """ - OracleColumn represents a column in the DataFrame Interchange Protocol. It - provides a standardized way to expose a column's data, metadata and chunks, - allowing interoperability between data frame libraries. - """ - - def __init__(self, ora_arrow_array: object): - self.ora_arrow_array = ora_arrow_array - self._buffer_info = ora_arrow_array.get_buffer_info() - - def __arrow_c_array__(self, requested_schema=None): - return self.ora_arrow_array.__arrow_c_array__( - requested_schema=requested_schema - ) - - def _data_buffer(self): - buffer = self._buffer_info.get("data") - if buffer is None: - return None - size_bytes, address = buffer - data_buffer = OracleColumnBuffer( - size_in_bytes=size_bytes, address=address, buffer_type="data" - ) - return data_buffer, self.dtype - - def _offsets_buffer(self): - buffer = self._buffer_info.get("offsets") - if buffer is None: - return None - size_bytes, address = buffer - offsets_buffer = OracleColumnBuffer( - size_in_bytes=size_bytes, address=address, buffer_type="offsets" - ) - if self.ora_arrow_array.arrow_type in ( - ArrowType.NANOARROW_TYPE_LARGE_STRING, - ArrowType.NANOARROW_TYPE_LARGE_BINARY, - ): - dtype = (DtypeKind.INT, 64, "l", "=") - else: - dtype = (DtypeKind.INT, 32, "i", "=") - return offsets_buffer, dtype - - def _validity_buffer(self): - buffer = self._buffer_info.get("validity") - if buffer is None: - return None - size_bytes, address = buffer - validity_buffer = OracleColumnBuffer( - size_in_bytes=size_bytes, address=address, buffer_type="validity" - ) - dtype = (DtypeKind.BOOL, 1, "b", "=") - return validity_buffer, dtype - - def describe_categorical(self) -> CategoricalDescription: - """ - Returns a description of a categorical data type. - """ - raise NotImplementedError() - - @property - def describe_null(self) -> Tuple[ColumnNullType, Optional[int]]: - """ - Returns a description of the null representation used by the column. - """ - if self.null_count == 0: - return ColumnNullType.NON_NULLABLE, None - else: - return ColumnNullType.USE_BITMASK, 0 - - @property - def dtype(self) -> Dtype: - """ - Returns the data type of the column. The returned dtype provides - information on the storage format and the type of data in the column. 
- """ - arrow_type = self.ora_arrow_array.arrow_type - if arrow_type == ArrowType.NANOARROW_TYPE_INT64: - return (DtypeKind.INT, 64, "l", "=") - elif arrow_type == ArrowType.NANOARROW_TYPE_DOUBLE: - return (DtypeKind.FLOAT, 64, "g", "=") - elif arrow_type == ArrowType.NANOARROW_TYPE_FLOAT: - return (DtypeKind.FLOAT, 64, "g", "=") - elif arrow_type == ArrowType.NANOARROW_TYPE_STRING: - return (DtypeKind.STRING, 8, "u", "=") - elif arrow_type == ArrowType.NANOARROW_TYPE_TIMESTAMP: - time_unit = self.ora_arrow_array.time_unit - if time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_MICRO: - return (DtypeKind.DATETIME, 64, "tsu:", "=") - elif time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_SECOND: - return (DtypeKind.DATETIME, 64, "tss:", "=") - elif time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_MILLI: - return (DtypeKind.DATETIME, 64, "tsm:", "=") - elif time_unit == ArrowTimeUnit.NANOARROW_TIME_UNIT_NANO: - return (DtypeKind.DATETIME, 64, "tsn:", "=") - elif arrow_type == ArrowType.NANOARROW_TYPE_DECIMAL128: - array = self.ora_arrow_array - return ( - DtypeKind.DECIMAL, - 128, - f"d:{array.precision}.{array.scale}", - "=", - ) - elif arrow_type == ArrowType.NANOARROW_TYPE_BINARY: - return (DtypeKind.STRING, 8, "z", "=") - elif arrow_type == ArrowType.NANOARROW_TYPE_LARGE_BINARY: - return (DtypeKind.STRING, 8, "Z", "=") - elif arrow_type == ArrowType.NANOARROW_TYPE_LARGE_STRING: - return (DtypeKind.STRING, 8, "U", "=") - - def get_buffers(self) -> ColumnBuffers: - """ - Returns a dictionary specifying the memory buffers backing the column. - This currently consists of: - - "data": the main buffer storing column values - - "validity": a buffer containing null/missing values - - "offsets": a buffer for variable-length types like string - """ - return { - "data": self._data_buffer(), - "validity": self._validity_buffer(), - "offsets": self._offsets_buffer(), - } - - def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable[Column]: - """ - Return an iterator containing the column chunks. Currently this only - returns itself. - """ - yield self - - @property - def metadata(self) -> Dict[str, Any]: - """ - Returns metadata about the column. - """ - return { - "name": self.ora_arrow_array.name, - "size": self.size(), - "num_chunks": self.num_chunks(), - } - - @property - def null_count(self) -> int: - """ - Returns the number of null elements. - """ - return self.ora_arrow_array.null_count - - def num_chunks(self) -> int: - """ - Returns the number of chunks used by the column. This method currently - always returns the value 1, implying that the column uses contiguous - memory. - """ - return 1 - - @property - def offset(self) -> int: - """ - Returns the offset of the first element. - """ - return self.ora_arrow_array.offset - - def size(self) -> int: - """ - Returns the number of elements in the column. - """ - return len(self.ora_arrow_array) diff --git a/src/oracledb/interchange/dataframe.py b/src/oracledb/interchange/dataframe.py deleted file mode 100644 index 768145b2..00000000 --- a/src/oracledb/interchange/dataframe.py +++ /dev/null @@ -1,163 +0,0 @@ -# ----------------------------------------------------------------------------- -# Copyright (c) 2025, Oracle and/or its affiliates. -# -# This software is dual-licensed to you under the Universal Permissive License -# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License -# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose -# either license. 
-# -# If you elect to accept the software under the Apache License, Version 2.0, -# the following applies: -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# dataframe.py -# -# Implement DataFrame class as documented in the standard -# https://data-apis.org/dataframe-protocol/latest/API.html -# ----------------------------------------------------------------------------- - -from typing import Any, Dict, Iterable, List, Optional, Sequence - -from .column import OracleColumn - -from .protocol import DataFrame - - -class OracleDataFrame(DataFrame): - """ - OracleDataFrame is an implementation of the DataFrame Interchange Protocol. - It provides an interface for exchanging tabular data between different data - frame libraries (e.g. pandas, pyarrow, polars). - """ - - def __init__( - self, - oracle_arrow_arrays: List, - allow_copy: bool = True, - ): - self._cols = [] - self._cols_map = {} - self._rows = None - self._arrays = oracle_arrow_arrays - for ora_arrow_array in oracle_arrow_arrays: - column = OracleColumn(ora_arrow_array=ora_arrow_array) - self._rows = column.size() - self._cols.append(column) - self._cols_map[ora_arrow_array.name] = column - self.allow_copy = allow_copy - - def __dataframe__( - self, - nan_as_null: bool = False, # noqa: FBT001 - allow_copy: bool = True, # noqa: FBT001 - ) -> DataFrame: - """ - Returns a data frame adhering to the DataFrame Interchange protocol. - """ - return self - - def get_chunks( - self, n_chunks: Optional[int] = None - ) -> Iterable[DataFrame]: - """ - Returns an iterator for each of the chunks in the data frame. Since - there is currently only one chunk, this simply returns itself. - """ - yield self - - def column_arrays(self) -> List: - """ - Returns a list of the Arrow arrays corresponding to each column in the - data frame. - """ - return self._arrays - - def column_names(self) -> List[str]: - """ - Returns a list of the names of the columns in the data frame. - """ - return list(self._cols_map.keys()) - - def get_column(self, i: int) -> OracleColumn: - """ - Returns a column from the data frame given its zero-based index. If the - index is out of range, an IndexError exception is raised. - """ - if i < 0 or i >= self.num_columns(): - raise IndexError( - f"Column index {i} is out of bounds for " - f"DataFrame with {self.num_columns()} columns" - ) - return self._cols[i] - - def get_column_by_name(self, name: str) -> OracleColumn: - """ - Returns a column from the data frame given the name of the column. If - the column name is not found, a KeyError exception is raised. - """ - if name not in self._cols_map: - raise KeyError(f"Column {name} not found in DataFrame") - return self._cols_map[name] - - def get_columns(self) -> List[OracleColumn]: - """ - Returns a list of all of the columns in the data frame. 
- """ - return self._cols - - @property - def metadata(self) -> Dict[str, Any]: - """ - Returns metadata for the data frame. Currently this returns - information about the number of columns (num_columns), number of rows - (num_rows) and number of chunks (num_chunks). - """ - return { - "num_columns": self.num_columns(), - "num_rows": self.num_rows(), - "num_chunks": self.num_chunks(), - } - - def num_chunks(self) -> int: - """ - Returns the number of chunks (contiguous memory blocks) in the data - frame. Currently this always returns 1. - """ - return 1 - - def num_columns(self) -> int: - """ - Returns the number of columns in the data frame. - """ - return len(self._cols) - - def num_rows(self) -> int: - """ - Returns the number of rows in the data frame. - """ - return self._rows - - def select_columns(self, indices: Sequence[int]) -> "DataFrame": - """ - Create a new DataFrame by selecting a subset of columns by index. - """ - raise NotImplementedError() - - def select_columns_by_name(self, names: Sequence[str]) -> "DataFrame": - """ - Create a new DataFrame by selecting a subset of columns by name. - """ - raise NotImplementedError() diff --git a/src/oracledb/interchange/nanoarrow_bridge.pyx b/src/oracledb/interchange/nanoarrow_bridge.pyx deleted file mode 100644 index 461d7113..00000000 --- a/src/oracledb/interchange/nanoarrow_bridge.pyx +++ /dev/null @@ -1,736 +0,0 @@ -#------------------------------------------------------------------------------ -# Copyright (c) 2025, Oracle and/or its affiliates. -# -# This software is dual-licensed to you under the Universal Permissive License -# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License -# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose -# either license. -# -# If you elect to accept the software under the Apache License, Version 2.0, -# the following applies: -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -#------------------------------------------------------------------------------ -#------------------------------------------------------------------------------ -# nanoarrow_bridge.pyx -# -# Cython wrapper around the Arrow C Data interface -#------------------------------------------------------------------------------ - -cimport cpython - -from libc.stdint cimport uintptr_t -from libc.string cimport memcpy, strlen, strchr - -from .. 
import errors - -cdef extern from "nanoarrow/nanoarrow.c": - - ctypedef int ArrowErrorCode - - ctypedef void (*ArrowBufferDeallocatorCallback) - - cdef struct ArrowBufferAllocator: - void *private_data - - cdef struct ArrowBuffer: - uint8_t *data - int64_t size_bytes - ArrowBufferAllocator allocator - - cdef union ArrowBufferViewData: - const void* data - - cdef struct ArrowBufferView: - ArrowBufferViewData data - int64_t size_bytes - - cdef struct ArrowBitmap: - ArrowBuffer buffer - - cdef struct ArrowArrayView: - ArrowBufferView *buffer_views - - cdef struct ArrowDecimal: - pass - - cdef struct ArrowError: - pass - - cdef struct ArrowStringView: - const char* data - int64_t size_bytes - - cdef ArrowErrorCode NANOARROW_OK - - ArrowErrorCode ArrowArrayAllocateChildren(ArrowArray *array, - int64_t n_children) - ArrowErrorCode ArrowArrayAppendBytes(ArrowArray* array, - ArrowBufferView value) - ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* array, - const ArrowDecimal* value) - ArrowErrorCode ArrowArrayAppendDouble(ArrowArray* array, double value) - ArrowErrorCode ArrowArrayAppendInt(ArrowArray* array, int64_t value) - ArrowErrorCode ArrowArrayAppendNull(ArrowArray* array, int64_t n) - ArrowBuffer* ArrowArrayBuffer(ArrowArray* array, int64_t i) - ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* array, - ArrowError* error) - ArrowErrorCode ArrowArrayFinishElement(ArrowArray *array) - ArrowErrorCode ArrowArrayInitFromSchema(ArrowArray *array, - ArrowSchema *schema, - ArrowError *error) - ArrowErrorCode ArrowArrayInitFromType(ArrowArray* array, - ArrowType storage_type) - void ArrowArrayRelease(ArrowArray *array) - ArrowErrorCode ArrowArrayReserve(ArrowArray* array, - int64_t additional_size_elements) - ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) - ArrowBitmap* ArrowArrayValidityBitmap(ArrowArray* array) - ArrowErrorCode ArrowArrayViewInitFromArray(ArrowArrayView* array_view, - ArrowArray* array) - int8_t ArrowBitGet(const uint8_t* bits, int64_t i) - ArrowBufferAllocator ArrowBufferDeallocator(ArrowBufferDeallocatorCallback, - void *private_data) - void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, - int32_t precision, int32_t scale) - void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) - ArrowErrorCode ArrowDecimalSetDigits(ArrowDecimal* decimal, - ArrowStringView value) - ArrowErrorCode ArrowSchemaDeepCopy(const ArrowSchema *schema, - ArrowSchema *schema_out) - void ArrowSchemaInit(ArrowSchema* schema) - ArrowErrorCode ArrowSchemaInitFromType(ArrowSchema* schema, ArrowType type) - void ArrowSchemaRelease(ArrowSchema *schema) - ArrowErrorCode ArrowSchemaSetName(ArrowSchema* schema, const char* name) - ArrowErrorCode ArrowSchemaSetType(ArrowSchema * schema, ArrowType type) - ArrowErrorCode ArrowSchemaSetTypeDateTime(ArrowSchema* schema, - ArrowType arrow_type, - ArrowTimeUnit time_unit, - const char* timezone) - ArrowErrorCode ArrowSchemaSetTypeStruct(ArrowSchema *schema, - int64_t n_children) - ArrowErrorCode ArrowSchemaSetTypeDecimal(ArrowSchema* schema, - ArrowType type, - int32_t decimal_precision, - int32_t decimal_scale) - int64_t ArrowSchemaToString(const ArrowSchema* schema, char* out, - int64_t n, char recursive) - -cdef int _check_nanoarrow(int code) except -1: - """ - Checks the return code of the nanoarrow function and raises an exception if - it is not NANOARROW_OK. 
- """ - if code != NANOARROW_OK: - errors._raise_err(errors.ERR_ARROW_C_API_ERROR, code=code) - - -cdef void pycapsule_array_deleter(object array_capsule) noexcept: - cdef ArrowArray* array = cpython.PyCapsule_GetPointer( - array_capsule, "arrow_array" - ) - if array.release != NULL: - ArrowArrayRelease(array) - cpython.PyMem_Free(array) - - -cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: - cdef ArrowSchema* schema = cpython.PyCapsule_GetPointer( - schema_capsule, "arrow_schema" - ) - if schema.release != NULL: - ArrowSchemaRelease(schema) - cpython.PyMem_Free(schema) - - -cdef int append_double_array(ArrowArray *arrow_array, - array.array value) except -1: - """ - Appends an array of doubles to the Arrow array. - """ - cdef: - ArrowArray *child_array = arrow_array.children[0] - double *double_buf = value.data.as_doubles - Py_ssize_t i - for i in range(len(value)): - _check_nanoarrow(ArrowArrayAppendDouble(child_array, double_buf[i])) - _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) - - -cdef int append_float_array(ArrowArray *arrow_array, - array.array value) except -1: - """ - Appends an array of floats to the Arrow array. - """ - cdef: - ArrowArray *child_array = arrow_array.children[0] - float *float_buf = value.data.as_floats - Py_ssize_t i - for i in range(len(value)): - _check_nanoarrow(ArrowArrayAppendDouble(child_array, float_buf[i])) - _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) - - -cdef int append_int8_array(ArrowArray *arrow_array, - array.array value) except -1: - """ - Appends an array of signed one-byte integers to the Arrow array. - """ - cdef: - ArrowArray *child_array = arrow_array.children[0] - int8_t *int8_buf = value.data.as_schars - Py_ssize_t i - for i in range(len(value)): - _check_nanoarrow(ArrowArrayAppendInt(child_array, int8_buf[i])) - _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) - - -cdef int append_uint8_array(ArrowArray *arrow_array, - array.array value) except -1: - """ - Appends an array of unsigned one-byte integers to the Arrow array. - """ - cdef: - ArrowArray *child_array = arrow_array.children[0] - uint8_t *uint8_buf = value.data.as_uchars - Py_ssize_t i - for i in range(len(value)): - _check_nanoarrow(ArrowArrayAppendInt(child_array, uint8_buf[i])) - _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) - - -cdef int append_uint32_array(ArrowArray *arrow_array, - array.array value) except -1: - """ - Appends an array of unsigned four-byte integers to the Arrow array. Note - that Python's array.array doesn't natively support uint32_t but an upper - layer has verified that the data in the buffer consists of only four byte - integers. - """ - cdef: - uint32_t *uint32_buf = value.data.as_voidptr - ArrowArray *child_array = arrow_array.children[0] - Py_ssize_t i - for i in range(len(value)): - _check_nanoarrow(ArrowArrayAppendInt(child_array, uint32_buf[i])) - _check_nanoarrow(ArrowArrayFinishElement(arrow_array)) - - -cdef void arrow_buffer_dealloc_callback(ArrowBufferAllocator *allocator, - uint8_t *ptr, int64_t size): - """ - ArrowBufferDeallocatorCallback for an ArrowBuffer borrowed from - OracleArrowArray - """ - cpython.Py_DECREF( allocator.private_data) - - -cdef int copy_arrow_array(OracleArrowArray oracle_arrow_array, - ArrowArray *src, ArrowArray *dest) except -1: - """ - Shallow copy source ArrowArray to destination ArrowArray. The source - ArrowArray belongs to the wrapper OracleArrowArray. 
The shallow copy idea - is borrowed from nanoarrow: - https://github.com/apache/arrow-nanoarrow/main/blob/python - """ - cdef: - ArrowBuffer *dest_buffer - ssize_t i - _check_nanoarrow( - ArrowArrayInitFromType( - dest, NANOARROW_TYPE_UNINITIALIZED - ) - ) - - # Copy metadata - dest.length = src.length - dest.offset = src.offset - dest.null_count = src.null_count - - # Borrow an ArrowBuffer belonging to OracleArrowArray. The ArrowBuffer can - # belong to an immediate ArrowArray or a child (in case of nested types). - # Either way, we PY_INCREF(oracle_arrow_array), so that it is not - # prematurely garbage collected. The corresponding PY_DECREF happens in the - # ArrowBufferDeAllocator callback. - for i in range(src.n_buffers): - if src.buffers[i] != NULL: - dest_buffer = ArrowArrayBuffer(dest, i) - dest_buffer.data = src.buffers[i] - dest_buffer.size_bytes = 0 - dest_buffer.allocator = ArrowBufferDeallocator( - arrow_buffer_dealloc_callback, - oracle_arrow_array - ) - cpython.Py_INCREF(oracle_arrow_array) - dest.buffers[i] = src.buffers[i] - dest.n_buffers = src.n_buffers - - # shallow copy of children (recursive call) - if src.n_children > 0: - _check_nanoarrow(ArrowArrayAllocateChildren(dest, src.n_children)) - for i in range(src.n_children): - copy_arrow_array( - oracle_arrow_array, src.children[i], dest.children[i] - ) - -cdef int build_arrow_schema_for_sparse_vector( - ArrowSchema *schema, - ArrowType vector_value_type -) except -1: - - # Initialize struct with 3 fields - num_dimensions, indices, values - ArrowSchemaInit(schema) - _check_nanoarrow(ArrowSchemaSetTypeStruct(schema, 3)) - - # first child: "num_dimensions" - _check_nanoarrow( - ArrowSchemaSetType(schema.children[0], NANOARROW_TYPE_INT64) - ) - _check_nanoarrow(ArrowSchemaSetName(schema.children[0], "num_dimensions")) - - # second child: "indices" - _check_nanoarrow(ArrowSchemaSetType( - schema.children[1], - NANOARROW_TYPE_LIST - ) - ) - _check_nanoarrow( - ArrowSchemaSetType( - schema.children[1].children[0], - NANOARROW_TYPE_UINT32 - ) - ) - _check_nanoarrow(ArrowSchemaSetName(schema.children[1], "indices")) - - # third child: "values" - _check_nanoarrow( - ArrowSchemaSetType( - schema.children[2], - NANOARROW_TYPE_LIST - ) - ) - _check_nanoarrow( - ArrowSchemaSetType( - schema.children[2].children[0], - vector_value_type - ) - ) - _check_nanoarrow(ArrowSchemaSetName(schema.children[2], "values")) - - -cdef class OracleArrowArray: - - def __cinit__(self, ArrowType arrow_type, str name, int8_t precision, - int8_t scale, ArrowTimeUnit time_unit, - ArrowType child_arrow_type): - cdef ArrowType storage_type = arrow_type - self.arrow_type = arrow_type - self.child_arrow_type = child_arrow_type - self.time_unit = time_unit - self.name = name - self.arrow_array = \ - cpython.PyMem_Malloc(sizeof(ArrowArray)) - if arrow_type == NANOARROW_TYPE_TIMESTAMP: - storage_type = NANOARROW_TYPE_INT64 - if time_unit == NANOARROW_TIME_UNIT_MILLI: - self.factor = 1e3 - elif time_unit == NANOARROW_TIME_UNIT_MICRO: - self.factor = 1e6 - elif time_unit == NANOARROW_TIME_UNIT_NANO: - self.factor = 1e9 - else: - self.factor = 1 - - self.arrow_schema = \ - cpython.PyMem_Malloc(sizeof(ArrowSchema)) - if arrow_type == NANOARROW_TYPE_DECIMAL128: - self.precision = precision - self.scale = scale - ArrowSchemaInit(self.arrow_schema) - _check_nanoarrow( - ArrowSchemaSetTypeDecimal( - self.arrow_schema, - arrow_type, - precision, - scale - ) - ) - elif arrow_type == NANOARROW_TYPE_STRUCT: - # Currently struct is used for Sparse vector only 
- build_arrow_schema_for_sparse_vector(self.arrow_schema, - child_arrow_type) - else: - _check_nanoarrow( - ArrowSchemaInitFromType( - self.arrow_schema, - storage_type - ) - ) - if arrow_type == NANOARROW_TYPE_TIMESTAMP: - _check_nanoarrow( - ArrowSchemaSetTypeDateTime( - self.arrow_schema, - arrow_type, - time_unit, - NULL - ) - ) - if arrow_type == NANOARROW_TYPE_LIST: - # Set the schema for child using child_arrow_type - _check_nanoarrow( - ArrowSchemaSetType( - self.arrow_schema.children[0], - child_arrow_type - ) - ) - _check_nanoarrow( - ArrowArrayInitFromSchema( - self.arrow_array, - self.arrow_schema, - NULL - ) - ) - elif arrow_type == NANOARROW_TYPE_STRUCT: - _check_nanoarrow( - ArrowArrayInitFromSchema( - self.arrow_array, - self.arrow_schema, - NULL - ) - ) - else: # primitive type array init - _check_nanoarrow( - ArrowArrayInitFromType( - self.arrow_array, - storage_type - ) - ) - _check_nanoarrow(ArrowArrayStartAppending(self.arrow_array)) - _check_nanoarrow(ArrowSchemaSetName(self.arrow_schema, name.encode())) - - def __dealloc__(self): - if self.arrow_array != NULL: - if self.arrow_array.release != NULL: - ArrowArrayRelease(self.arrow_array) - cpython.PyMem_Free(self.arrow_array) - if self.arrow_schema != NULL: - if self.arrow_schema.release != NULL: - ArrowSchemaRelease(self.arrow_schema) - cpython.PyMem_Free(self.arrow_schema) - - def __len__(self): - return self.arrow_array.length - - def __repr__(self): - return ( - f"OracleArrowArray(name={self.name}, " - f"len={self.arrow_array.length}, " - f"type={self._schema_to_string()})" - ) - - def __str__(self): - return self.__repr__() - - cdef str _schema_to_string(self): - """ - Converts the schema to a string representation. - """ - cdef char buffer[81] - ArrowSchemaToString(self.arrow_schema, buffer, sizeof(buffer), 0) - return buffer.decode() - - cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1: - """ - Append a value of type bytes to the array. - """ - cdef ArrowBufferView data - data.data.data = ptr - data.size_bytes = num_bytes - _check_nanoarrow(ArrowArrayAppendBytes(self.arrow_array, data)) - - cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1: - """ - Append a value of type ArrowDecimal to the array - - Arrow decimals are fixed-point decimal numbers encoded as a - scaled integer. decimal128(7, 3) can exactly represent the numbers - 1234.567 and -1234.567 encoded internally as the 128-bit integers - 1234567 and -1234567, respectively. - """ - cdef: - ArrowStringView decimal_view - ArrowDecimal decimal - decimal_view.data = ptr - decimal_view.size_bytes = num_bytes - ArrowDecimalInit(&decimal, 128, self.precision, self.scale) - _check_nanoarrow(ArrowDecimalSetDigits(&decimal, decimal_view)) - _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, &decimal)) - - cdef int append_double(self, double value) except -1: - """ - Append a value of type double to the array. - """ - _check_nanoarrow(ArrowArrayAppendDouble(self.arrow_array, value)) - - cdef int append_float(self, float value) except -1: - """ - Append a value of type float to the array. - """ - self.append_double(value) - - cdef int append_int64(self, int64_t value) except -1: - """ - Append a value of type int64_t to the array. - """ - _check_nanoarrow(ArrowArrayAppendInt(self.arrow_array, value)) - - cdef int append_last_value(self, OracleArrowArray array) except -1: - """ - Appends the last value of the given array to this array. 
- """ - cdef: - int32_t start_offset, end_offset - ArrowBuffer *offsets_buffer - ArrowBuffer *data_buffer - ArrowDecimal decimal - int64_t *as_int64 - int32_t *as_int32 - double *as_double - float *as_float - int8_t as_bool - int64_t index - uint8_t *ptr - void* temp - ArrowBitmap *bitamp - if array is None: - array = self - index = array.arrow_array.length - 1 - bitmap = ArrowArrayValidityBitmap(array.arrow_array) - if bitmap != NULL and bitmap.buffer.data != NULL: - as_bool = ArrowBitGet(bitmap.buffer.data, index) - if not as_bool: - self.append_null() - return 0 - if array.arrow_type in (NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP): - data_buffer = ArrowArrayBuffer(array.arrow_array, 1) - as_int64 = data_buffer.data - self.append_int64(as_int64[index]) - elif array.arrow_type == NANOARROW_TYPE_DOUBLE: - data_buffer = ArrowArrayBuffer(array.arrow_array, 1) - as_double = data_buffer.data - self.append_double(as_double[index]) - elif array.arrow_type == NANOARROW_TYPE_FLOAT: - data_buffer = ArrowArrayBuffer(array.arrow_array, 1) - as_float = data_buffer.data - self.append_double(as_float[index]) - elif array.arrow_type == NANOARROW_TYPE_BOOL: - data_buffer = ArrowArrayBuffer(array.arrow_array, 1) - as_bool = ArrowBitGet(data_buffer.data, index) - self.append_int64(as_bool) - elif array.arrow_type == NANOARROW_TYPE_DECIMAL128: - data_buffer = ArrowArrayBuffer(array.arrow_array, 1) - ArrowDecimalInit(&decimal, 128, self.precision, self.scale) - ptr = data_buffer.data + index * 16 - ArrowDecimalSetBytes(&decimal, ptr) - _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, - &decimal)) - elif array.arrow_type in ( - NANOARROW_TYPE_BINARY, - NANOARROW_TYPE_STRING - ): - offsets_buffer = ArrowArrayBuffer(array.arrow_array, 1) - data_buffer = ArrowArrayBuffer(array.arrow_array, 2) - as_int32 = offsets_buffer.data - start_offset = as_int32[index] - end_offset = as_int32[index + 1] - temp = cpython.PyMem_Malloc(end_offset - start_offset) - memcpy(temp, &data_buffer.data[start_offset], - end_offset - start_offset) - try: - self.append_bytes(temp, end_offset - start_offset) - finally: - cpython.PyMem_Free(temp) - - elif array.arrow_type in ( - NANOARROW_TYPE_LARGE_BINARY, - NANOARROW_TYPE_LARGE_STRING - ): - offsets_buffer = ArrowArrayBuffer(array.arrow_array, 1) - data_buffer = ArrowArrayBuffer(array.arrow_array, 2) - as_int64 = offsets_buffer.data - start_offset = as_int64[index] - end_offset = as_int64[index + 1] - temp = cpython.PyMem_Malloc(end_offset - start_offset) - memcpy(temp, &data_buffer.data[start_offset], - end_offset - start_offset) - try: - self.append_bytes(temp, end_offset - start_offset) - finally: - cpython.PyMem_Free(temp) - - cdef int append_null(self) except -1: - """ - Append a null value to the array. - """ - _check_nanoarrow(ArrowArrayAppendNull(self.arrow_array, 1)) - - cdef int append_vector(self, array.array value) except -1: - """ - Append a vector to the array. - """ - if self.child_arrow_type == NANOARROW_TYPE_FLOAT: - append_float_array(self.arrow_array, value) - elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: - append_double_array(self.arrow_array, value) - elif self.child_arrow_type == NANOARROW_TYPE_INT8: - append_int8_array(self.arrow_array, value) - elif self.child_arrow_type == NANOARROW_TYPE_UINT8: - append_uint8_array(self.arrow_array, value) - - cdef int append_sparse_vector(self, - int64_t num_dims, - array.array indices, - array.array values) except -1: - """ - Append a sparse vector to the array. 
- """ - cdef ArrowArray *array - - # validate that the array supports sparse vectors - if self.arrow_type != NANOARROW_TYPE_STRUCT: - errors._raise_err(errors.ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED) - - # append number of dimensions - array = self.arrow_array.children[0] - _check_nanoarrow(ArrowArrayAppendInt(array, num_dims)) - - # append indices array - array = self.arrow_array.children[1] - append_uint32_array(array, indices) - - # append values array - array = self.arrow_array.children[2] - if self.child_arrow_type == NANOARROW_TYPE_FLOAT: - append_float_array(array, values) - elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: - append_double_array(array, values) - elif self.child_arrow_type == NANOARROW_TYPE_INT8: - append_int8_array(array, values) - elif self.child_arrow_type == NANOARROW_TYPE_UINT8: - append_uint8_array(array, values) - - # indicate structure is completed - _check_nanoarrow(ArrowArrayFinishElement(self.arrow_array)) - - cdef int finish_building(self) except -1: - """ - Finish building the array. No more data will be added to it. - """ - _check_nanoarrow(ArrowArrayFinishBuildingDefault(self.arrow_array, - NULL)) - - def get_buffer_info(self): - """ - Get buffer information required by the dataframe interchange logic. - """ - cdef: - int64_t n_buffers = self.arrow_array.n_buffers - ArrowBufferView *buffer - ArrowArrayView view - _check_nanoarrow(ArrowArrayViewInitFromArray(&view, self.arrow_array)) - - # initialize all buffers to None to begin with - buffers = { - "validity": None, - "offsets": None, - "data": None - } - - # validity buffer - if n_buffers > 0 and self.arrow_array.null_count > 0: - buffer = &view.buffer_views[0] - buffers["validity"] = ( - buffer.size_bytes, - buffer.data.data - ) - - # data / offset buffer - if n_buffers == 2: - buffer = &view.buffer_views[1] - buffers["data"] = ( - buffer.size_bytes, - buffer.data.data - ) - elif n_buffers == 3: - buffer = &view.buffer_views[1] - buffers["offsets"] = ( - buffer.size_bytes, - buffer.data.data - ) - buffer = &view.buffer_views[2] - buffers["data"] = ( - buffer.size_bytes, - buffer.data.data - ) - - return buffers - - @property - def null_count(self) -> int: - return self.arrow_array.null_count - - @property - def offset(self) -> int: - return self.arrow_array.offset - - def __arrow_c_schema__(self): - """ - Export an ArrowSchema PyCapsule - """ - cdef ArrowSchema *exported_schema = \ - cpython.PyMem_Malloc(sizeof(ArrowSchema)) - try: - _check_nanoarrow( - ArrowSchemaDeepCopy( - self.arrow_schema, - exported_schema - ) - ) - except: - cpython.PyMem_Free(exported_schema) - raise - return cpython.PyCapsule_New( - exported_schema, 'arrow_schema', &pycapsule_schema_deleter - ) - - def __arrow_c_array__(self, requested_schema=None): - """ - Returns - ------- - Tuple[PyCapsule, PyCapsule] - A pair of PyCapsules containing a C ArrowSchema and ArrowArray, - respectively. 
- """ - if requested_schema is not None: - raise NotImplementedError("requested_schema") - cdef ArrowArray *exported_array = \ - cpython.PyMem_Malloc(sizeof(ArrowArray)) - try: - copy_arrow_array(self, self.arrow_array, exported_array) - array_capsule = cpython.PyCapsule_New( - exported_array, 'arrow_array', &pycapsule_array_deleter - ) - except: - cpython.PyMem_Free(exported_array) - raise - return self.__arrow_c_schema__(), array_capsule diff --git a/src/oracledb/interchange/protocol.py b/src/oracledb/interchange/protocol.py deleted file mode 100644 index 91739c75..00000000 --- a/src/oracledb/interchange/protocol.py +++ /dev/null @@ -1,538 +0,0 @@ -# ----------------------------------------------------------------------------- -# MIT License - -# Copyright (c) 2025 Consortium for Python Data API Standards contributors - -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: - -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. - -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# protocol.py -# -# Implement DataFrame class as documented in the standard -# https://data-apis.org/dataframe-protocol/latest/API.html -# -# The DataFrame API standard has this file with the following changes: -# https://github.com/data-apis/dataframe-api/blob/main/protocol/dataframe_protocol.py -# - addition of license and this block of comments -# - addition of DtypeKind DECIMAL (24) -# - correction of typing for Column for older versions of Python -# - Black formatting -# ----------------------------------------------------------------------------- - -from abc import ( - ABC, - abstractmethod, -) -import enum -from typing import ( - Any, - Dict, - Iterable, - Optional, - Sequence, - Tuple, - TypedDict, -) - - -class DlpackDeviceType(enum.IntEnum): - """Integer enum for device type codes matching DLPack.""" - - CPU = 1 - CUDA = 2 - CPU_PINNED = 3 - OPENCL = 4 - VULKAN = 7 - METAL = 8 - VPI = 9 - ROCM = 10 - - -class DtypeKind(enum.IntEnum): - """ - Integer enum for data types. - - Attributes - ---------- - INT : int - Matches to signed integer data type. - UINT : int - Matches to unsigned integer data type. - FLOAT : int - Matches to floating point data type. - BOOL : int - Matches to boolean data type. - STRING : int - Matches to string data type (UTF-8 encoded). - DATETIME : int - Matches to datetime data type. - CATEGORICAL : int - Matches to categorical data type. 
- """ - - INT = 0 - UINT = 1 - FLOAT = 2 - BOOL = 20 - STRING = 21 # UTF-8 - DATETIME = 22 - CATEGORICAL = 23 - DECIMAL = 24 - - -Dtype = Tuple[DtypeKind, int, str, str] # see Column.dtype - - -class ColumnNullType(enum.IntEnum): - """ - Integer enum for null type representation. - - Attributes - ---------- - NON_NULLABLE : int - Non-nullable column. - USE_NAN : int - Use explicit float NaN value. - USE_SENTINEL : int - Sentinel value besides NaN. - USE_BITMASK : int - The bit is set/unset representing a null on a certain position. - USE_BYTEMASK : int - The byte is set/unset representing a null on a certain position. - """ - - NON_NULLABLE = 0 - USE_NAN = 1 - USE_SENTINEL = 2 - USE_BITMASK = 3 - USE_BYTEMASK = 4 - - -class ColumnBuffers(TypedDict): - # first element is a buffer containing the column data; - # second element is the data buffer's associated dtype - data: Tuple["Buffer", Dtype] - - # first element is a buffer containing mask values indicating missing data; - # second element is the mask value buffer's associated dtype. - # None if the null representation is not a bit or byte mask - validity: Optional[Tuple["Buffer", Dtype]] - - # first element is a buffer containing the offset values for - # variable-size binary data (e.g., variable-length strings); - # second element is the offsets buffer's associated dtype. - # None if the data buffer does not have an associated offsets buffer - offsets: Optional[Tuple["Buffer", Dtype]] - - -class CategoricalDescription(TypedDict): - # whether the ordering of dictionary indices is semantically meaningful - is_ordered: bool - # whether a dictionary-style mapping of categorical values to other objects - # exists - is_dictionary: bool - # Python-level only (e.g. ``{int: str}``). - # None if not a dictionary-style categorical. - categories: Optional["Column"] - - -class Buffer(ABC): - """ - Data in the buffer is guaranteed to be contiguous in memory. - - Note that there is no dtype attribute present, a buffer can be thought of - as simply a block of memory. However, if the column that the buffer is - attached to has a dtype that's supported by DLPack and ``__dlpack__`` is - implemented, then that dtype information will be contained in the return - value from ``__dlpack__``. - - This distinction is useful to support both data exchange via DLPack on a - buffer and (b) dtypes like variable-length strings which do not have a - fixed number of bytes per element. - """ - - @property - @abstractmethod - def bufsize(self) -> int: - """ - Buffer size in bytes. - """ - pass - - @property - @abstractmethod - def ptr(self) -> int: - """ - Pointer to start of the buffer as an integer. - """ - pass - - @abstractmethod - def __dlpack__(self): - """ - Produce DLPack capsule (see array API standard). - - Raises: - - - TypeError : if the buffer contains unsupported dtypes. - - NotImplementedError : if DLPack support is not implemented - - Useful to have to connect to array libraries. Support optional because - it's not completely trivial to implement for a Python-only library. - """ - raise NotImplementedError("__dlpack__") - - @abstractmethod - def __dlpack_device__(self) -> Tuple[DlpackDeviceType, Optional[int]]: - """ - Device type and device ID for where the data in the buffer resides. - Uses device type codes matching DLPack. - Note: must be implemented even if ``__dlpack__`` is not. - """ - pass - - -class Column(ABC): - """ - A column object, with only the methods and properties required by the - interchange protocol defined. 
- - A column can contain one or more chunks. Each chunk can contain up to three - buffers - a data buffer, a mask buffer (depending on null representation), - and an offsets buffer (if variable-size binary; e.g., variable-length - strings). - - TBD: there's also the "chunk" concept here, which is implicit in Arrow as - multiple buffers per array (= column here). Semantically it may make - sense to have both: chunks were meant for example for lazy evaluation - of data which doesn't fit in memory, while multiple buffers per column - could also come from doing a selection operation on a single - contiguous buffer. - - Given these concepts, one would expect chunks to be all of the same - size (say a 10,000 row dataframe could have 10 chunks of 1,000 rows), - while multiple buffers could have data-dependent lengths. Not an issue - in pandas if one column is backed by a single NumPy array, but in - Arrow it seems possible. - Are multiple chunks *and* multiple buffers per column necessary for - the purposes of this interchange protocol, or must producers either - reuse the chunk concept for this or copy the data? - - Note: this Column object can only be produced by ``__dataframe__``, so - doesn't need its own version or ``__column__`` protocol. - """ - - @abstractmethod - def size(self) -> int: - """ - Size of the column, in elements. - - Corresponds to DataFrame.num_rows() if column is a single chunk; - equal to size of this current chunk otherwise. - - Is a method rather than a property because it may cause a (potentially - expensive) computation for some dataframe implementations. - """ - pass - - @property - @abstractmethod - def offset(self) -> int: - """ - Offset of first element. - - May be > 0 if using chunks; for example for a column with N chunks of - equal size M (only the last chunk may be shorter), - ``offset = n * M``, ``n = 0 .. N-1``. - """ - pass - - @property - @abstractmethod - def dtype(self) -> Dtype: - """ - Dtype description as a tuple ``(kind, bit-width, format string, - endianness)``. - - Bit-width : the number of bits as an integer - Format string : data type description format string in Apache Arrow C - Data Interface format. - Endianness : current only native endianness (``=``) is supported - - Notes: - - Kind specifiers are aligned with DLPack where possible (hence the - jump to 20, leave enough room for future extension) - - Masks must be specified as boolean with either bit width 1 (for - bit masks) or 8 (for byte masks). - - Dtype width in bits was preferred over bytes - - Endianness isn't too useful, but included now in case in the - future we need to support non-native endianness - - Went with Apache Arrow format strings over NumPy format strings - because they're more complete from a dataframe perspective - - Format strings are mostly useful for datetime specification, and - for categoricals. - - For categoricals, the format string describes the type of the - categorical in the data buffer. In case of a separate encoding of - the categorical (e.g. an integer to string mapping), this can - be derived from ``self.describe_categorical``. - - Data types not included: complex, Arrow-style null, binary, - decimal, and nested (list, struct, map, union) dtypes. - """ - pass - - @property - @abstractmethod - def describe_categorical(self) -> CategoricalDescription: - """ - If the dtype is categorical, there are two options: - - There are only values in the data buffer. - - There is a separate non-categorical Column encoding categorical - values. 
- - Raises TypeError if the dtype is not categorical - - Returns the dictionary with description on how to interpret the data - buffer: - - "is_ordered" : bool, whether the ordering of dictionary indices - is semantically meaningful. - - "is_dictionary" : bool, whether a mapping of - categorical values to other objects exists - - "categories" : Column representing the (implicit) mapping of - indices to category values (e.g. an array of cat1, - cat2, ...). - None if not a dictionary-style categorical. - - TBD: are there any other in-memory representations that are needed? - """ - pass - - @property - @abstractmethod - def describe_null(self) -> Tuple[ColumnNullType, Any]: - """ - Return the missing value (or "null") representation the column dtype - uses, as a tuple ``(kind, value)``. - - Value : if kind is "sentinel value", the actual value. If kind is a bit - mask or a byte mask, the value (0 or 1) indicating a missing value. - None otherwise. - """ - pass - - @property - @abstractmethod - def null_count(self) -> Optional[int]: - """ - Number of null elements, if known. - - Note: Arrow uses -1 to indicate "unknown", but None seems cleaner. - """ - pass - - @property - @abstractmethod - def metadata(self) -> Dict[str, Any]: - """ - The metadata for the column. See `DataFrame.metadata` for more details. - """ - pass - - @abstractmethod - def num_chunks(self) -> int: - """ - Return the number of chunks the column consists of. - """ - pass - - @abstractmethod - def get_chunks(self, n_chunks: Optional[int] = None) -> Iterable["Column"]: - """ - Return an iterator yielding the chunks. - - See `DataFrame.get_chunks` for details on ``n_chunks``. - """ - pass - - @abstractmethod - def get_buffers(self) -> ColumnBuffers: - """ - Return a dictionary containing the underlying buffers. - - The returned dictionary has the following contents: - - - "data": a two-element tuple whose first element is a buffer - containing the data and whose second element is the data - buffer's associated dtype. - - "validity": a two-element tuple whose first element is a buffer - containing mask values indicating missing data and - whose second element is the mask value buffer's - associated dtype. None if the null representation is - not a bit or byte mask. - - "offsets": a two-element tuple whose first element is a buffer - containing the offset values for variable-size binary - data (e.g., variable-length strings) and whose second - element is the offsets buffer's associated dtype. None - if the data buffer does not have an associated offsets - buffer. - """ - pass - - -# def get_children(self) -> Iterable[Column]: -# """ -# Children columns underneath the column, each object in this iterator -# must adhere to the column specification. -# """ -# pass - - -class DataFrame(ABC): - """ - A data frame class, with only the methods required by the interchange - protocol defined. - - A "data frame" represents an ordered collection of named columns. - A column's "name" must be a unique string. - Columns may be accessed by name or by position. - - This could be a public data frame class, or an object with the methods and - attributes defined on this DataFrame class could be returned from the - ``__dataframe__`` method of a public data frame class in a library adhering - to the dataframe interchange protocol specification. 
- """ - - version = 0 # version of the protocol - - @abstractmethod - def __dataframe__( - self, nan_as_null: bool = False, allow_copy: bool = True - ) -> "DataFrame": - """ - Construct a new exchange object, potentially changing the parameters. - - ``nan_as_null`` is a DEPRECATED keyword that should not be used. See - warning below. - ``allow_copy`` is a keyword that defines whether or not the library is - allowed to make a copy of the data. For example, copying data would be - necessary if a library supports strided buffers, given that this - protocol specifies contiguous buffers. - - WARNING: the ``nan_as_null`` parameter will be removed from the API - protocol. Please avoid passing it as either a positional or keyword - argument. Call this method using ``.__dataframe__(allow_copy=...)``. - """ - pass - - @property - @abstractmethod - def metadata(self) -> Dict[str, Any]: - """ - The metadata for the data frame, as a dictionary with string keys. The - contents of `metadata` may be anything, they are meant for a library - to store information that it needs to, e.g., roundtrip losslessly or - for two implementations to share data that is not (yet) part of the - interchange protocol specification. For avoiding collisions with other - entries, please add name the keys with the name of the library - followed by a period and the desired name, e.g, ``pandas.indexcol``. - """ - pass - - @abstractmethod - def num_columns(self) -> int: - """ - Return the number of columns in the DataFrame. - """ - pass - - @abstractmethod - def num_rows(self) -> Optional[int]: - # TODO: not happy with Optional, but need to flag it may be expensive - # why include it if it may be None - what do we expect consumers - # to do here? - """ - Return the number of rows in the DataFrame, if available. - """ - pass - - @abstractmethod - def num_chunks(self) -> int: - """ - Return the number of chunks the DataFrame consists of. - """ - pass - - @abstractmethod - def column_names(self) -> Iterable[str]: - """ - Return an iterator yielding the column names. - """ - pass - - @abstractmethod - def get_column(self, i: int) -> Column: - """ - Return the column at the indicated position. - """ - pass - - @abstractmethod - def get_column_by_name(self, name: str) -> Column: - """ - Return the column whose name is the indicated name. - """ - pass - - @abstractmethod - def get_columns(self) -> Iterable[Column]: - """ - Return an iterator yielding the columns. - """ - pass - - @abstractmethod - def select_columns(self, indices: Sequence[int]) -> "DataFrame": - """ - Create a new DataFrame by selecting a subset of columns by index. - """ - pass - - @abstractmethod - def select_columns_by_name(self, names: Sequence[str]) -> "DataFrame": - """ - Create a new DataFrame by selecting a subset of columns by name. - """ - pass - - @abstractmethod - def get_chunks( - self, n_chunks: Optional[int] = None - ) -> Iterable["DataFrame"]: - """ - Return an iterator yielding the chunks. - - By default (None), yields the chunks that the data is stored as by the - producer. If given, ``n_chunks`` must be a multiple of - ``self.num_chunks()``, meaning the producer must subdivide each chunk - before yielding it. - - Note that the producer must ensure that all columns are chunked the - same way. 
- """ - pass diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index 157a22e7..b9c48bf7 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -193,9 +193,7 @@ from .base_impl import ( DB_TYPE_XMLTYPE, ) -from .interchange.nanoarrow_bridge cimport ( - OracleArrowArray, -) +from .arrow_impl cimport ArrowArrayImpl ctypedef unsigned char char_type diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 0054f54d..ab034b1d 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -409,12 +409,6 @@ def test_8000(self): ora_df = self.conn.fetch_df_all(statement) self.assertEqual(ora_df.num_rows(), len(DATASET_1)) self.assertEqual(ora_df.num_columns(), len(DATASET_1[0])) - metadata = dict( - num_columns=ora_df.num_columns(), - num_rows=ora_df.num_rows(), - num_chunks=1, - ) - self.assertEqual(ora_df.metadata, metadata) def test_8001(self): "8001 - test conversion to external dataframe" @@ -493,37 +487,19 @@ def test_8013(self): ora_df.get_column_by_name("missing_column") def test_8014(self): - "8014 - check size and null count with no nulls" - self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) - col = ora_df.get_column(0) - self.assertEqual(col.size(), len(DATASET_1)) - self.assertEqual(col.null_count, 0) - - def test_8015(self): - "8015 - check size and null count with nulls present" - self.__populate_table(DATASET_2) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) - col = ora_df.get_column_by_name("SALARY") - self.assertEqual(col.size(), len(DATASET_2)) - self.assertEqual(col.null_count, 2) - - def test_8016(self): - "8016 - check unsupported error" + "8014 - check unsupported error" statement = "select cursor(select user from dual) from dual" with self.assertRaisesFullCode("DPY-3030"): self.conn.fetch_df_all(statement) - def test_8017(self): - "8017 - batches with specification of size matching number of rows" + def test_8015(self): + "8015 - batches with specification of size matching number of rows" self.__test_df_batches_interop( DATASET_2, batch_size=len(DATASET_2), num_batches=1 ) - def test_8018(self): - "8018 - verify get_column() returns the correct value" + def test_8016(self): + "8016 - verify get_column() returns the correct value" self.__check_interop() self.__populate_table(DATASET_1) statement = "select * from TestDataFrame order by Id" @@ -531,47 +507,12 @@ def test_8018(self): array = pyarrow.array(ora_df.get_column(1)) self.assertEqual(array.to_pylist(), ["John", "Big"]) - def test_8019(self): - "8019 - verify OracleColumn and get_buffers" - self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) - ora_col = ora_df.get_column(1) - self.assertEqual(ora_col.num_chunks(), 1) - self.assertEqual(ora_col.size(), 2) - - buffers = ora_col.get_buffers() - self.assertEqual(len(buffers), 3) - self.assertIsNotNone(buffers["data"]) - self.assertIsNotNone(buffers["offsets"]) - self.assertIsNone(buffers["validity"]) - - def test_8020(self): - "8020 - verify OracleColumn Attributes" - self.__populate_table(DATASET_2) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) - - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.describe_null[0], 0) - self.assertEqual(ora_col.dtype[0], 0) - metadata = {"name": "ID", "size": 4, "num_chunks": 1} - 
self.assertEqual(metadata, ora_col.metadata) - self.assertEqual(ora_col.null_count, 0) - - ora_col = ora_df.get_column(4) - self.assertEqual(ora_col.describe_null[0], 3) - self.assertEqual(ora_col.dtype[0], 21) - metadata = {"name": "COUNTRY", "size": 4, "num_chunks": 1} - self.assertEqual(metadata, ora_col.metadata) - self.assertEqual(ora_col.null_count, 2) - - def test_8021(self): - "8021 - batches with size that has duplicate rows across batches" + def test_8017(self): + "8017 - batches with size that has duplicate rows across batches" self.__test_df_batches_interop(DATASET_4, batch_size=3, num_batches=2) - def test_8022(self): - "8022 - fetch_decimals without precision and scale specified" + def test_8018(self): + "8018 - fetch_decimals without precision and scale specified" data = [(1.0,)] self.__check_interop() with test_env.DefaultsContextManager("fetch_decimals", True): @@ -583,8 +524,8 @@ def test_8022(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - def test_8023(self): - "8023 - fetch clob" + def test_8019(self): + "8019 - fetch clob" data = [("test_8023",)] self.__check_interop() ora_df = self.conn.fetch_df_all( @@ -597,8 +538,8 @@ def test_8023(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - def test_8024(self): - "8024 - fetch blob" + def test_8020(self): + "8020 - fetch blob" data = [(b"test_8024",)] self.__check_interop() ora_df = self.conn.fetch_df_all( @@ -611,8 +552,8 @@ def test_8024(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - def test_8025(self): - "8025 - fetch raw" + def test_8021(self): + "8021 - fetch raw" data = [(b"test_8025",)] self.__check_interop() ora_df = self.conn.fetch_df_all( @@ -626,8 +567,8 @@ def test_8025(self): self.assertEqual(fetched_data, data) @test_env.skip_unless_native_boolean_supported() - def test_8026(self): - "8026 - fetch boolean" + def test_8022(self): + "8022 - fetch boolean" data = [(True,), (False,), (False,), (True,), (True,)] self.__check_interop() ora_df = self.conn.fetch_df_all( @@ -650,8 +591,8 @@ def test_8026(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - def test_8027(self): - "8027 - fetch data with multiple rows containing null values" + def test_8023(self): + "8023 - fetch data with multiple rows containing null values" self.__check_interop() ora_df = self.conn.fetch_df_all( """ @@ -692,82 +633,9 @@ def test_8027(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - def test_8028(self): - "8028 - verify dtype for all Arrow types" - query = """ - select - cast(1 as number(10)) as col_int64, - cast(1.23 as binary_double) as col_double, - cast(7.14 as binary_float) as col_float, - cast('abcd' as varchar2(10)) as col_string, - cast('efgh' as nvarchar2(6)) as col_nstring, - cast('ijkl' as char(4)) as col_char, - cast('mnop' as nchar(4)) as col_nchar, - cast(systimestamp as timestamp(0)) as col_ts_sec, - cast(systimestamp as timestamp(3)) as col_ts_ms, - cast(systimestamp as timestamp(6)) as col_ts_us, - cast(systimestamp as timestamp(9)) as col_ts_ns, - to_clob('abc') as col_large_string, - to_nclob('def') as col_large_nstring, - utl_raw.cast_to_raw('abc2') as col_binary, - to_blob(utl_raw.cast_to_raw('abc3')) as col_large_binary - from dual - """ - decimal_query = ( - "select cast(123.45 as decimal(10, 2)) as col_decimal128 from dual" - ) - - # determine dtype kind enumeration - ora_df = 
self.conn.fetch_df_all("select user from dual") - col = ora_df.get_column(0) - dtype_kind = type(col.dtype[0]) - - expected_dtypes = { - "COL_INT64": (dtype_kind.INT, 64, "l", "="), - "COL_DOUBLE": (dtype_kind.FLOAT, 64, "g", "="), - "COL_FLOAT": (dtype_kind.FLOAT, 64, "g", "="), - "COL_STRING": (dtype_kind.STRING, 8, "u", "="), - "COL_NSTRING": (dtype_kind.STRING, 8, "u", "="), - "COL_CHAR": (dtype_kind.STRING, 8, "u", "="), - "COL_NCHAR": (dtype_kind.STRING, 8, "u", "="), - "COL_TS_SEC": (dtype_kind.DATETIME, 64, "tss:", "="), - "COL_TS_MS": (dtype_kind.DATETIME, 64, "tsm:", "="), - "COL_TS_US": (dtype_kind.DATETIME, 64, "tsu:", "="), - "COL_TS_NS": (dtype_kind.DATETIME, 64, "tsn:", "="), - "COL_LARGE_STRING": (dtype_kind.STRING, 8, "U", "="), - "COL_LARGE_NSTRING": (dtype_kind.STRING, 8, "U", "="), - "COL_BINARY": (dtype_kind.STRING, 8, "z", "="), - "COL_LARGE_BINARY": (dtype_kind.STRING, 8, "Z", "="), - "COL_DECIMAL128": (dtype_kind.DECIMAL, 128, "d:10.2", "="), - } - - # check query without fetch_decimals enabled - ora_df = self.conn.fetch_df_all(query) - for i, name in enumerate(ora_df.column_names()): - col = ora_df.get_column(i) - self.assertEqual(col.dtype, expected_dtypes[name]) - - # check query with fetch_decimals enabled - with test_env.DefaultsContextManager("fetch_decimals", True): - ora_df = self.conn.fetch_df_all(decimal_query) - col = ora_df.get_column(0) - self.assertEqual(col.dtype, expected_dtypes["COL_DECIMAL128"]) - - def test_8029(self): - "8029 - verify get_buffers() with data frames containing null values" - self.__populate_table(DATASET_2) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) - country_col = ora_df.get_column_by_name("COUNTRY") - buffers = country_col.get_buffers() - self.assertEqual(len(buffers), 3) - self.assertIsNotNone(buffers["data"]) - self.assertIsNotNone(buffers["offsets"]) - self.assertIsNotNone(buffers["validity"]) - @test_env.skip_unless_vectors_supported() - def test_8030(self): - "8030 - fetch float32 vector" + def test_8024(self): + "8024 - fetch float32 vector" # float32 is a special case while comparing dataframe values # Converting Dataframe cell value of type numpy.ndarray[float32] @@ -789,8 +657,6 @@ def test_8030(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -800,8 +666,8 @@ def test_8030(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() - def test_8031(self): - "8031 - fetch float64 vector" + def test_8025(self): + "8025 - fetch float64 vector" data = [ ([34.6, 77.8],), ([34.6, 77.8, 55.9],), @@ -816,8 +682,6 @@ def test_8031(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -826,8 +690,8 @@ def test_8031(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() - def test_8032(self): - "8032 - fetch int8 vector" + def test_8026(self): + "8026 - fetch int8 vector" data = [ ([34, -77],), ([34, 77, 55],), @@ -842,8 +706,6 @@ def test_8032(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = 
ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -852,8 +714,8 @@ def test_8032(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() - def test_8033(self): - "8033 - fetch binary vector" + def test_8027(self): + "8027 - fetch binary vector" data = [ ([3, 2, 3],), ([3, 2],), @@ -868,8 +730,6 @@ def test_8033(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -878,8 +738,8 @@ def test_8033(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() - def test_8034(self): - "8034 - fetch float32 vectors with None" + def test_8028(self): + "8028 - fetch float32 vectors with None" data = [ (array.array("f", [34.6, 77.8]).tolist(),), (array.array("f", [34.6, 77.8, 55.9]).tolist(),), @@ -897,8 +757,6 @@ def test_8034(self): ) self.assertEqual(ora_df.num_rows(), 3) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 1) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -907,8 +765,8 @@ def test_8034(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() - def test_8035(self): - "8035 - fetch duplicate float64 vectors" + def test_8029(self): + "8029 - fetch duplicate float64 vectors" data = [ ([34.6, 77.8],), ([34.6, 77.8],), @@ -953,8 +811,6 @@ def test_8035(self): ) self.assertEqual(ora_df.num_rows(), 12) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -963,8 +819,8 @@ def test_8035(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_sparse_vectors_supported() - def test_8036(self): - "8036 - fetch float32 sparse vectors" + def test_8030(self): + "8030 - fetch float32 sparse vectors" data = [ ( { @@ -1001,8 +857,6 @@ def test_8036(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -1012,8 +866,8 @@ def test_8036(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_sparse_vectors_supported() - def test_8037(self): - "8037 - fetch float64 sparse vectors" + def test_8031(self): + "8031 - fetch float64 sparse vectors" data = [ ( { @@ -1050,8 +904,6 @@ def test_8037(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -1061,8 +913,8 @@ def test_8037(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() - def test_8038(self): - "8038 - DPY-3031 - Unsupported flexible vector formats" + def test_8032(self): + "8032 - DPY-3031 - Unsupported flexible vector formats" with self.assertRaisesFullCode("DPY-3031"): 
self.conn.fetch_df_all( """ @@ -1073,8 +925,8 @@ def test_8038(self): ) @test_env.skip_unless_sparse_vectors_supported() - def test_8039(self): - "8039 - DPY-4007 -fetch sparse vectors with flexible dimensions" + def test_8033(self): + "8033 - DPY-4007 -fetch sparse vectors with flexible dimensions" self.__check_interop() with self.assertRaisesFullCode("DPY-2065"): self.conn.fetch_df_all( diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 964a5d5a..2592ac10 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -415,12 +415,6 @@ async def test_8100(self): ora_df = await self.conn.fetch_df_all(statement) self.assertEqual(ora_df.num_rows(), len(DATASET_1)) self.assertEqual(ora_df.num_columns(), len(DATASET_1[0])) - metadata = dict( - num_columns=ora_df.num_columns(), - num_rows=ora_df.num_rows(), - num_chunks=1, - ) - self.assertEqual(ora_df.metadata, metadata) async def test_8101(self): "8101 - test conversion to external dataframe" @@ -501,43 +495,25 @@ async def test_8113(self): ora_df.get_column_by_name("missing_column") async def test_8114(self): - "8114 - check size and null count with no nulls" - await self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = await self.conn.fetch_df_all(statement) - col = ora_df.get_column(0) - self.assertEqual(col.size(), len(DATASET_1)) - self.assertEqual(col.null_count, 0) - - async def test_8115(self): - "8115 - check size and null count with nulls present" - await self.__populate_table(DATASET_2) - statement = "select * from TestDataFrame order by Id" - ora_df = await self.conn.fetch_df_all(statement) - col = ora_df.get_column_by_name("SALARY") - self.assertEqual(col.size(), len(DATASET_2)) - self.assertEqual(col.null_count, 2) - - async def test_8116(self): - "8116 - check unsupported error" + "8114 - check unsupported error" statement = "select cursor(select user from dual) from dual" with self.assertRaisesFullCode("DPY-3030"): await self.conn.fetch_df_all(statement) - async def test_8117(self): - "8117 - batches with specification of size matching number of rows" + async def test_8115(self): + "8115 - batches with specification of size matching number of rows" await self.__test_df_batches_interop( DATASET_2, batch_size=len(DATASET_2), num_batches=1 ) - async def test_8118(self): - "8118 - batches with size that has duplicate rows across batches" + async def test_8116(self): + "8116 - batches with size that has duplicate rows across batches" await self.__test_df_batches_interop( DATASET_4, batch_size=3, num_batches=2 ) - async def test_8119(self): - "8119 - fetch_decimals without precision and scale specified" + async def test_8117(self): + "8117 - fetch_decimals without precision and scale specified" data = [(1.0,)] self.__check_interop() with test_env.DefaultsContextManager("fetch_decimals", True): @@ -549,8 +525,8 @@ async def test_8119(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - async def test_8120(self): - "8120 - fetch clob" + async def test_8118(self): + "8118 - fetch clob" data = [("test_8023",)] self.__check_interop() ora_df = await self.conn.fetch_df_all( @@ -563,8 +539,8 @@ async def test_8120(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - async def test_8121(self): - "8121 - fetch blob" + async def test_8119(self): + "8119 - fetch blob" data = [(b"test_8024",)] self.__check_interop() ora_df = await 
self.conn.fetch_df_all( @@ -578,8 +554,8 @@ async def test_8121(self): self.assertEqual(fetched_data, data) @test_env.skip_unless_native_boolean_supported() - async def test_8122(self): - "8122 - fetch boolean" + async def test_8120(self): + "8120 - fetch boolean" data = [(True,), (False,), (False,), (True,), (True,)] self.__check_interop() ora_df = await self.conn.fetch_df_all( @@ -603,8 +579,8 @@ async def test_8122(self): self.assertEqual(fetched_data, data) @test_env.skip_unless_vectors_supported() - async def test_8123(self): - "8123 - fetch float32 vector" + async def test_8121(self): + "8121 - fetch float32 vector" data = [ (array.array("f", [34.6, 77.8]).tolist(),), (array.array("f", [34.6, 77.8, 55.9]).tolist(),), @@ -619,8 +595,6 @@ async def test_8123(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -631,8 +605,8 @@ async def test_8123(self): self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_sparse_vectors_supported() - async def test_8124(self): - "8124 - fetch float64 sparse vectors" + async def test_8122(self): + "8122 - fetch float64 sparse vectors" data = [ ( { @@ -669,8 +643,6 @@ async def test_8124(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - ora_col = ora_df.get_column(0) - self.assertEqual(ora_col.null_count, 0) fetched_tab = pyarrow.Table.from_arrays( ora_df.column_arrays(), names=ora_df.column_names() ) @@ -679,8 +651,8 @@ async def test_8124(self): fetched_df = fetched_tab.to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) - async def test_8125(self): - "8125 - fetch data with multiple rows containing null values" + async def test_8123(self): + "8123 - fetch data with multiple rows containing null values" self.__check_interop() ora_df = await self.conn.fetch_df_all( """ @@ -721,67 +693,6 @@ async def test_8125(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) - async def test_8126(self): - "8126 - verify dtype for all Arrow types" - query = """ - select - cast(1 as number(10)) as col_int64, - cast(1.23 as binary_double) as col_double, - cast(7.14 as binary_float) as col_float, - cast('abcd' as varchar2(10)) as col_string, - cast('efgh' as nvarchar2(6)) as col_nstring, - cast('ijkl' as char(4)) as col_char, - cast('mnop' as nchar(4)) as col_nchar, - cast(systimestamp as timestamp(0)) as col_ts_sec, - cast(systimestamp as timestamp(3)) as col_ts_ms, - cast(systimestamp as timestamp(6)) as col_ts_us, - cast(systimestamp as timestamp(9)) as col_ts_ns, - to_clob('abc') as col_large_string, - to_nclob('def') as col_large_nstring, - utl_raw.cast_to_raw('abc2') as col_binary, - to_blob(utl_raw.cast_to_raw('abc3')) as col_large_binary - from dual - """ - decimal_query = ( - "select cast(123.45 as decimal(10, 2)) as col_decimal128 from dual" - ) - - # determine dtype kind enumeration - ora_df = await self.conn.fetch_df_all("select user from dual") - col = ora_df.get_column(0) - dtype_kind = type(col.dtype[0]) - - expected_dtypes = { - "COL_INT64": (dtype_kind.INT, 64, "l", "="), - "COL_DOUBLE": (dtype_kind.FLOAT, 64, "g", "="), - "COL_FLOAT": (dtype_kind.FLOAT, 64, "g", "="), - "COL_STRING": (dtype_kind.STRING, 8, "u", "="), - "COL_NSTRING": (dtype_kind.STRING, 8, "u", "="), - "COL_CHAR": (dtype_kind.STRING, 8, "u", "="), - 
"COL_NCHAR": (dtype_kind.STRING, 8, "u", "="), - "COL_TS_SEC": (dtype_kind.DATETIME, 64, "tss:", "="), - "COL_TS_MS": (dtype_kind.DATETIME, 64, "tsm:", "="), - "COL_TS_US": (dtype_kind.DATETIME, 64, "tsu:", "="), - "COL_TS_NS": (dtype_kind.DATETIME, 64, "tsn:", "="), - "COL_LARGE_STRING": (dtype_kind.STRING, 8, "U", "="), - "COL_LARGE_NSTRING": (dtype_kind.STRING, 8, "U", "="), - "COL_BINARY": (dtype_kind.STRING, 8, "z", "="), - "COL_LARGE_BINARY": (dtype_kind.STRING, 8, "Z", "="), - "COL_DECIMAL128": (dtype_kind.DECIMAL, 128, "d:10.2", "="), - } - - # check query without fetch_decimals enabled - ora_df = await self.conn.fetch_df_all(query) - for i, name in enumerate(ora_df.column_names()): - col = ora_df.get_column(i) - self.assertEqual(col.dtype, expected_dtypes[name]) - - # check query with fetch_decimals enabled - with test_env.DefaultsContextManager("fetch_decimals", True): - ora_df = await self.conn.fetch_df_all(decimal_query) - col = ora_df.get_column(0) - self.assertEqual(col.dtype, expected_dtypes["COL_DECIMAL128"]) - if __name__ == "__main__": test_env.run_test_cases() From a0d7ae91bac1611c35f3ae7536f1fc1897f66c44 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:50:09 -0600 Subject: [PATCH 130/239] Add Instance Principal Authentication section. --- doc/src/user_guide/connection_handling.rst | 105 ++++++++++++++++++++- 1 file changed, 104 insertions(+), 1 deletion(-) diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 542c8240..133c6742 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -4621,7 +4621,7 @@ the following table. With Simple Authentication, the individual configuration parameters can be provided at runtime. - With Instance Principal Authentication, OCI compute instances can be authorized to access services on Oracle Cloud such as Oracle Autonomous Database. Python-oracledb applications running on such a compute instance are automatically authenticated, eliminating the need to provide database user credentials. This authentication method will only work on compute instances where internal network endpoints are reachable. For more information on OCI compute instances, see `OCI Compute Instances `__, `Creating a Compute Instance `__, and `Calling Services from a Compute Instance `__. + With Instance Principal Authentication, OCI compute instances can be authorized to access services on Oracle Cloud such as Oracle Autonomous Database. Python-oracledb applications running on such a compute instance are automatically authenticated, eliminating the need to provide database user credentials. This authentication method will only work on compute instances where internal network endpoints are reachable. See :ref:`instanceprincipalauth`. See `OCI SDK Authentication Methods `__ for more information. - Required @@ -4790,6 +4790,109 @@ to explicitly set the ``extra_auth_params`` and ``externalauth`` parameters of dsn=mydb_low, extra_auth_params=token_based_auth) +.. _instanceprincipalauth: + +Instance Principal Authentication +================================= + +With Instance Principal Authentication, Oracle Cloud Infrastructure (OCI) +compute instances can be authorized to access services on Oracle Cloud such as +Oracle Autonomous Database. Python-oracledb applications running on such a +compute instance do not need to provide database user credentials. 
+
+Each compute instance behaves as a distinct type of Identity and Access
+Management (IAM) Principal; that is, each compute instance has a unique
+identity in the form of a digital certificate which is managed by OCI. When
+using Instance Principal Authentication, a compute instance authenticates with
+OCI IAM using this identity and obtains a short-lived token. This token is
+then used to access Oracle Cloud services without storing or managing any
+secrets in your application.
+
+The steps below demonstrate how to configure Instance Principal
+authentication and connect to Oracle Autonomous Database. To enable this, use
+python-oracledb's :ref:`oci_tokens ` plugin, which is pre-installed with the
+``oracledb`` module.
+
+**Step 1: Create an OCI Compute Instance**
+
+An `OCI compute instance `__ is a virtual machine running
+within OCI that provides compute resources for your application. This compute
+instance will be used to authenticate access to Oracle Cloud services when
+using Instance Principal Authentication.
+
+To create an OCI compute instance, see the steps in the `Creating an Instance
+`__ section of the Oracle Cloud Infrastructure
+documentation.
+
+For more information on OCI compute instances, see `Calling Services from a
+Compute Instance `__.
+
+**Step 2: Install the OCI CLI on your compute instance**
+
+The `OCI Command Line Interface (CLI) `__ is a tool that can be used on its
+own or with the Oracle Cloud console to complete OCI tasks.
+
+To install the OCI CLI on your compute instance, see the installation
+instructions in the `Installing the CLI `__ section of the Oracle Cloud
+Infrastructure documentation.
+
+**Step 3: Create a Dynamic Group**
+
+A Dynamic Group is used to define rules to group the compute instances that
+require access.
+
+To create a dynamic group using the Oracle Cloud console, see the steps in the
+`To create a dynamic group `__ section of the Oracle Cloud
+Infrastructure documentation.
+
+**Step 4: Create an IAM Policy**
+
+An IAM Policy is used to grant a dynamic group permission to access the
+required OCI services such as Oracle Autonomous Database.
+
+To create an IAM policy using the Oracle Cloud console, see the steps in the
+`Create an IAM Policy `__ section of the Oracle Cloud
+Infrastructure documentation.
+
+**Step 5: Map an Instance Principal to an Oracle Database User**
+
+You must map the Instance Principal to an Oracle Database user. For more
+information, see `Accessing the Database Using an Instance Principal
+`__.
+
+Also, make sure that external authentication is enabled on Oracle ADB and
+that the Oracle Database parameter ``IDENTITY_PROVIDER_TYPE`` is set to
+*OCI_IAM*. For the steps, see `Enable IAM Authentication on ADB `__.
+
+**Step 6: Deploy your application on the Compute Instance**
+
+To use Instance Principal authentication, set ``extra_auth_params`` when
+creating a standalone connection or a connection pool, for example:
+
+.. code-block:: python
+
+    import oracledb
+    import oracledb.plugins.oci_tokens
+
+    token_based_auth = {
+        "auth_type": "InstancePrincipal"
+    }
+
+    connection = oracledb.connect(
+        dsn=mydb_low,
+        extra_auth_params=token_based_auth
+    )
+
 Privileged Connections
 ======================

From f9c435b965e329c7d5d9a58ada5538f35b38cdbf Mon Sep 17 00:00:00 2001
From: Anthony Tuininga
Date: Fri, 11 Jul 2025 11:50:28 -0600
Subject: [PATCH 131/239] Upgraded nanoarrow version to 0.7.0.

--- doc/src/release_notes.rst | 1 + src/oracledb/impl/arrow/nanoarrow/nanoarrow.c | 251 +++++++++- src/oracledb/impl/arrow/nanoarrow/nanoarrow.h | 447 ++++++++++++------ 3 files changed, 538 insertions(+), 161 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 44eb4d57..75de4f01 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -40,6 +40,7 @@ Common Changes - Documentation on methods and attributes on the ``DataFrame`` and ``ArrowArray`` objects are now available in Python plugins such as those found in VS Code + - Upgraded Arrow C Data (nanoarrow) API version to 0.7.0 Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version diff --git a/src/oracledb/impl/arrow/nanoarrow/nanoarrow.c b/src/oracledb/impl/arrow/nanoarrow/nanoarrow.c index 8f265988..80b79eee 100644 --- a/src/oracledb/impl/arrow/nanoarrow/nanoarrow.c +++ b/src/oracledb/impl/arrow/nanoarrow/nanoarrow.c @@ -111,6 +111,7 @@ void ArrowLayoutInit(struct ArrowLayout* layout, enum ArrowType storage_type) { case NANOARROW_TYPE_UINT32: case NANOARROW_TYPE_INT32: case NANOARROW_TYPE_FLOAT: + case NANOARROW_TYPE_DECIMAL32: layout->element_size_bits[1] = 32; break; case NANOARROW_TYPE_INTERVAL_MONTHS: @@ -122,6 +123,7 @@ void ArrowLayoutInit(struct ArrowLayout* layout, enum ArrowType storage_type) { case NANOARROW_TYPE_INT64: case NANOARROW_TYPE_DOUBLE: case NANOARROW_TYPE_INTERVAL_DAY_TIME: + case NANOARROW_TYPE_DECIMAL64: layout->element_size_bits[1] = 64; break; @@ -188,6 +190,24 @@ void ArrowLayoutInit(struct ArrowLayout* layout, enum ArrowType storage_type) { layout->buffer_type[1] = NANOARROW_BUFFER_TYPE_DATA; layout->buffer_data_type[1] = NANOARROW_TYPE_STRING_VIEW; layout->element_size_bits[1] = 128; + break; + + case NANOARROW_TYPE_LIST_VIEW: + layout->buffer_type[1] = NANOARROW_BUFFER_TYPE_VIEW_OFFSET; + layout->buffer_data_type[1] = NANOARROW_TYPE_INT32; + layout->element_size_bits[1] = 32; + layout->buffer_type[2] = NANOARROW_BUFFER_TYPE_SIZE; + layout->buffer_data_type[2] = NANOARROW_TYPE_INT32; + layout->element_size_bits[2] = 32; + break; + case NANOARROW_TYPE_LARGE_LIST_VIEW: + layout->buffer_type[1] = NANOARROW_BUFFER_TYPE_VIEW_OFFSET; + layout->buffer_data_type[1] = NANOARROW_TYPE_INT64; + layout->element_size_bits[1] = 64; + layout->buffer_type[2] = NANOARROW_BUFFER_TYPE_SIZE; + layout->buffer_data_type[2] = NANOARROW_TYPE_INT64; + layout->element_size_bits[2] = 64; + break; default: break; @@ -326,13 +346,14 @@ ArrowErrorCode ArrowDecimalSetDigits(struct ArrowDecimal* decimal, // Use 32-bit words for portability uint32_t words32[8]; - int n_words32 = decimal->n_words * 2; + memset(words32, 0, sizeof(words32)); + int n_words32 = decimal->n_words > 0 ? 
decimal->n_words * 2 : 1; NANOARROW_DCHECK(n_words32 <= 8); memset(words32, 0, sizeof(words32)); ShiftAndAdd(value, words32, n_words32); - if (decimal->low_word_index == 0) { + if (_ArrowIsLittleEndian() || n_words32 == 1) { memcpy(decimal->words, words32, sizeof(uint32_t) * n_words32); } else { uint64_t lo; @@ -356,11 +377,31 @@ ArrowErrorCode ArrowDecimalSetDigits(struct ArrowDecimal* decimal, // https://github.com/apache/arrow/blob/cd3321b28b0c9703e5d7105d6146c1270bbadd7f/cpp/src/arrow/util/decimal.cc#L365 ArrowErrorCode ArrowDecimalAppendDigitsToBuffer(const struct ArrowDecimal* decimal, struct ArrowBuffer* buffer) { - NANOARROW_DCHECK(decimal->n_words == 2 || decimal->n_words == 4); + NANOARROW_DCHECK(decimal->n_words == 0 || decimal->n_words == 1 || + decimal->n_words == 2 || decimal->n_words == 4); + + // For the 32-bit case, just use snprintf() + if (decimal->n_words == 0) { + int32_t value; + memcpy(&value, decimal->words, sizeof(int32_t)); + NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(buffer, 16)); + int n_chars = snprintf((char*)buffer->data + buffer->size_bytes, + (buffer->capacity_bytes - buffer->size_bytes), "%d", value); + if (n_chars <= 0) { + return EINVAL; + } + + buffer->size_bytes += n_chars; + return NANOARROW_OK; + } + int is_negative = ArrowDecimalSign(decimal) < 0; uint64_t words_little_endian[4]; - if (decimal->low_word_index == 0) { + if (decimal->n_words == 0) { + words_little_endian[0] = 0; + memcpy(words_little_endian, decimal->words, sizeof(uint32_t)); + } else if (decimal->low_word_index == 0) { memcpy(words_little_endian, decimal->words, decimal->n_words * sizeof(uint64_t)); } else { for (int i = 0; i < decimal->n_words; i++) { @@ -370,21 +411,33 @@ ArrowErrorCode ArrowDecimalAppendDigitsToBuffer(const struct ArrowDecimal* decim // We've already made a copy, so negate that if needed if (is_negative) { - uint64_t carry = 1; - for (int i = 0; i < decimal->n_words; i++) { - uint64_t elem = words_little_endian[i]; - elem = ~elem + carry; - carry &= (elem == 0); - words_little_endian[i] = elem; + if (decimal->n_words == 0) { + uint32_t elem = (uint32_t)words_little_endian[0]; + elem = ~elem + 1; + words_little_endian[0] = (int32_t)elem; + } else { + uint64_t carry = 1; + for (int i = 0; i < decimal->n_words; i++) { + uint64_t elem = words_little_endian[i]; + elem = ~elem + carry; + carry &= (elem == 0); + words_little_endian[i] = elem; + } } } // Find the most significant word that is non-zero int most_significant_elem_idx = -1; - for (int i = decimal->n_words - 1; i >= 0; i--) { - if (words_little_endian[i] != 0) { - most_significant_elem_idx = i; - break; + if (decimal->n_words == 0) { + if (words_little_endian[0] != 0) { + most_significant_elem_idx = 0; + } + } else { + for (int i = decimal->n_words - 1; i >= 0; i--) { + if (words_little_endian[i] != 0) { + most_significant_elem_idx = i; + break; + } } } @@ -462,6 +515,50 @@ ArrowErrorCode ArrowDecimalAppendDigitsToBuffer(const struct ArrowDecimal* decim return NANOARROW_OK; } + +ArrowErrorCode ArrowDecimalAppendStringToBuffer(const struct ArrowDecimal* decimal, + struct ArrowBuffer* buffer) { + int64_t buffer_size = buffer->size_bytes; + NANOARROW_RETURN_NOT_OK(ArrowDecimalAppendDigitsToBuffer(decimal, buffer)); + int64_t digits_size = buffer->size_bytes - buffer_size; + + if (decimal->scale <= 0) { + // e.g., digits are -12345 and scale is -2 -> -1234500 + // Just add zeros to the end + for (int i = decimal->scale; i < 0; i++) { + NANOARROW_RETURN_NOT_OK(ArrowBufferAppendInt8(buffer, '0')); + } 
+ return NANOARROW_OK; + } + + int is_negative = buffer->data[0] == '-'; + int64_t num_digits = digits_size - is_negative; + if (num_digits <= decimal->scale) { + // e.g., digits are -12345 and scale is 6 -> -0.012345 + // Insert "0." between the (maybe) negative sign and the digits + int64_t num_zeros_after_decimal = decimal->scale - num_digits; + NANOARROW_RETURN_NOT_OK( + ArrowBufferResize(buffer, buffer->size_bytes + num_zeros_after_decimal + 2, 0)); + + uint8_t* digits_start = buffer->data + is_negative; + memmove(digits_start + num_zeros_after_decimal + 2, digits_start, num_digits); + *digits_start++ = '0'; + *digits_start++ = '.'; + for (int i = 0; i < num_zeros_after_decimal; i++) { + *digits_start++ = '0'; + } + + } else { + // e.g., digits are -12345 and scale is 4 -> -1.2345 + // Insert a decimal point before scale digits of output + NANOARROW_RETURN_NOT_OK(ArrowBufferResize(buffer, buffer->size_bytes + 1, 0)); + uint8_t* decimal_point_to_be = buffer->data + buffer->size_bytes - 1 - decimal->scale; + memmove(decimal_point_to_be + 1, decimal_point_to_be, decimal->scale); + *decimal_point_to_be = '.'; + } + + return NANOARROW_OK; +} // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information @@ -589,6 +686,10 @@ static const char* ArrowSchemaFormatTemplate(enum ArrowType type) { return "+l"; case NANOARROW_TYPE_LARGE_LIST: return "+L"; + case NANOARROW_TYPE_LIST_VIEW: + return "+vl"; + case NANOARROW_TYPE_LARGE_LIST_VIEW: + return "+vL"; case NANOARROW_TYPE_STRUCT: return "+s"; case NANOARROW_TYPE_MAP: @@ -607,6 +708,8 @@ static int ArrowSchemaInitChildrenIfNeeded(struct ArrowSchema* schema, case NANOARROW_TYPE_LIST: case NANOARROW_TYPE_LARGE_LIST: case NANOARROW_TYPE_FIXED_SIZE_LIST: + case NANOARROW_TYPE_LIST_VIEW: + case NANOARROW_TYPE_LARGE_LIST_VIEW: NANOARROW_RETURN_NOT_OK(ArrowSchemaAllocateChildren(schema, 1)); ArrowSchemaInit(schema->children[0]); NANOARROW_RETURN_NOT_OK(ArrowSchemaSetName(schema->children[0], "item")); @@ -735,11 +838,35 @@ ArrowErrorCode ArrowSchemaSetTypeDecimal(struct ArrowSchema* schema, enum ArrowT char buffer[64]; int n_chars; switch (type) { + case NANOARROW_TYPE_DECIMAL32: + if (decimal_precision > 9) { + return EINVAL; + } + + n_chars = snprintf(buffer, sizeof(buffer), "d:%d,%d,32", decimal_precision, + decimal_scale); + break; + case NANOARROW_TYPE_DECIMAL64: + if (decimal_precision > 18) { + return EINVAL; + } + + n_chars = snprintf(buffer, sizeof(buffer), "d:%d,%d,64", decimal_precision, + decimal_scale); + break; case NANOARROW_TYPE_DECIMAL128: + if (decimal_precision > 38) { + return EINVAL; + } + n_chars = snprintf(buffer, sizeof(buffer), "d:%d,%d", decimal_precision, decimal_scale); break; case NANOARROW_TYPE_DECIMAL256: + if (decimal_precision > 76) { + return EINVAL; + } + n_chars = snprintf(buffer, sizeof(buffer), "d:%d,%d,256", decimal_precision, decimal_scale); break; @@ -1185,6 +1312,12 @@ static ArrowErrorCode ArrowSchemaViewParse(struct ArrowSchemaView* schema_view, *format_end_out = parse_end; switch (schema_view->decimal_bitwidth) { + case 32: + ArrowSchemaViewSetPrimitive(schema_view, NANOARROW_TYPE_DECIMAL32); + return NANOARROW_OK; + case 64: + ArrowSchemaViewSetPrimitive(schema_view, NANOARROW_TYPE_DECIMAL64); + return NANOARROW_OK; case 128: ArrowSchemaViewSetPrimitive(schema_view, NANOARROW_TYPE_DECIMAL128); return NANOARROW_OK; @@ -1321,6 +1454,24 @@ static ArrowErrorCode ArrowSchemaViewParse(struct 
ArrowSchemaView* schema_view, return EINVAL; } + // views + case 'v': + switch (format[2]) { + case 'l': + schema_view->storage_type = NANOARROW_TYPE_LIST_VIEW; + schema_view->type = NANOARROW_TYPE_LIST_VIEW; + *format_end_out = format + 3; + return NANOARROW_OK; + case 'L': + schema_view->storage_type = NANOARROW_TYPE_LARGE_LIST_VIEW; + schema_view->type = NANOARROW_TYPE_LARGE_LIST_VIEW; + *format_end_out = format + 3; + return NANOARROW_OK; + default: + ArrowErrorSet( + error, "Expected view format string +vl or +vL but found '%s'", format); + return EINVAL; + } default: ArrowErrorSet(error, "Expected nested type format string but found '%s'", format); @@ -1621,6 +1772,8 @@ static ArrowErrorCode ArrowSchemaViewValidate(struct ArrowSchemaView* schema_vie case NANOARROW_TYPE_HALF_FLOAT: case NANOARROW_TYPE_FLOAT: case NANOARROW_TYPE_DOUBLE: + case NANOARROW_TYPE_DECIMAL32: + case NANOARROW_TYPE_DECIMAL64: case NANOARROW_TYPE_DECIMAL128: case NANOARROW_TYPE_DECIMAL256: case NANOARROW_TYPE_STRING: @@ -1649,7 +1802,9 @@ static ArrowErrorCode ArrowSchemaViewValidate(struct ArrowSchemaView* schema_vie return ArrowSchemaViewValidateNChildren(schema_view, 0, error); case NANOARROW_TYPE_LIST: + case NANOARROW_TYPE_LIST_VIEW: case NANOARROW_TYPE_LARGE_LIST: + case NANOARROW_TYPE_LARGE_LIST_VIEW: case NANOARROW_TYPE_FIXED_SIZE_LIST: return ArrowSchemaViewValidateNChildren(schema_view, 1, error); @@ -1759,7 +1914,7 @@ ArrowErrorCode ArrowSchemaViewInit(struct ArrowSchemaView* schema_view, ArrowLayoutInit(&schema_view->layout, schema_view->storage_type); if (schema_view->storage_type == NANOARROW_TYPE_FIXED_SIZE_BINARY) { - schema_view->layout.element_size_bits[1] = schema_view->fixed_size * 8; + schema_view->layout.element_size_bits[1] = (int64_t)schema_view->fixed_size * 8; } else if (schema_view->storage_type == NANOARROW_TYPE_FIXED_SIZE_LIST) { schema_view->layout.child_size_elements = schema_view->fixed_size; } @@ -1780,6 +1935,8 @@ static int64_t ArrowSchemaTypeToStringInternal(struct ArrowSchemaView* schema_vi char* out, int64_t n) { const char* type_string = ArrowTypeString(schema_view->type); switch (schema_view->type) { + case NANOARROW_TYPE_DECIMAL32: + case NANOARROW_TYPE_DECIMAL64: case NANOARROW_TYPE_DECIMAL128: case NANOARROW_TYPE_DECIMAL256: return snprintf(out, n, "%s(%" PRId32 ", %" PRId32 ")", type_string, @@ -2237,6 +2394,8 @@ static ArrowErrorCode ArrowArraySetStorageType(struct ArrowArray* array, case NANOARROW_TYPE_HALF_FLOAT: case NANOARROW_TYPE_FLOAT: case NANOARROW_TYPE_DOUBLE: + case NANOARROW_TYPE_DECIMAL32: + case NANOARROW_TYPE_DECIMAL64: case NANOARROW_TYPE_DECIMAL128: case NANOARROW_TYPE_DECIMAL256: case NANOARROW_TYPE_INTERVAL_MONTHS: @@ -2254,6 +2413,8 @@ static ArrowErrorCode ArrowArraySetStorageType(struct ArrowArray* array, case NANOARROW_TYPE_LARGE_STRING: case NANOARROW_TYPE_BINARY: case NANOARROW_TYPE_LARGE_BINARY: + case NANOARROW_TYPE_LIST_VIEW: + case NANOARROW_TYPE_LARGE_LIST_VIEW: array->n_buffers = 3; break; @@ -2300,6 +2461,7 @@ ArrowErrorCode ArrowArrayInitFromType(struct ArrowArray* array, private_data->n_variadic_buffers = 0; private_data->variadic_buffers = NULL; private_data->variadic_buffer_sizes = NULL; + private_data->list_view_offset = 0; array->private_data = private_data; array->buffers = (const void**)(private_data->buffer_data); @@ -2831,6 +2993,8 @@ void ArrowArrayViewSetLength(struct ArrowArrayView* array_view, int64_t length) continue; case NANOARROW_BUFFER_TYPE_TYPE_ID: case NANOARROW_BUFFER_TYPE_UNION_OFFSET: + case 
NANOARROW_BUFFER_TYPE_VIEW_OFFSET: + case NANOARROW_BUFFER_TYPE_SIZE: array_view->buffer_views[i].size_bytes = element_size_bytes * length; continue; case NANOARROW_BUFFER_TYPE_VARIADIC_DATA: @@ -2987,12 +3151,19 @@ static int ArrowArrayViewValidateMinimal(struct ArrowArrayView* array_view, min_buffer_size_bytes = _ArrowBytesForBits(offset_plus_length); break; + case NANOARROW_BUFFER_TYPE_SIZE: + min_buffer_size_bytes = element_size_bytes * offset_plus_length; + break; case NANOARROW_BUFFER_TYPE_DATA_OFFSET: // Probably don't want/need to rely on the producer to have allocated an // offsets buffer of length 1 for a zero-size array min_buffer_size_bytes = (offset_plus_length != 0) * element_size_bytes * (offset_plus_length + 1); break; + case NANOARROW_BUFFER_TYPE_VIEW_OFFSET: + min_buffer_size_bytes = + (offset_plus_length != 0) * element_size_bytes * offset_plus_length; + break; case NANOARROW_BUFFER_TYPE_DATA: min_buffer_size_bytes = _ArrowRoundUpToMultipleOf8(array_view->layout.element_size_bits[i] * @@ -3029,6 +3200,8 @@ static int ArrowArrayViewValidateMinimal(struct ArrowArrayView* array_view, case NANOARROW_TYPE_LARGE_LIST: case NANOARROW_TYPE_FIXED_SIZE_LIST: case NANOARROW_TYPE_MAP: + case NANOARROW_TYPE_LIST_VIEW: + case NANOARROW_TYPE_LARGE_LIST_VIEW: if (array_view->n_children != 1) { ArrowErrorSet(error, "Expected 1 child of %s array but found %" PRId64 " child arrays", @@ -3308,10 +3481,11 @@ static int ArrowArrayViewValidateDefault(struct ArrowArrayView* array_view, if (array_view->children[0]->length < last_offset) { ArrowErrorSet(error, - "Expected child of large list array to have length >= %" PRId64 + "Expected child of %s array to have length >= %" PRId64 " but found array " "with length %" PRId64, - last_offset, array_view->children[0]->length); + ArrowTypeString(array_view->storage_type), last_offset, + array_view->children[0]->length); return EINVAL; } } @@ -3554,12 +3728,53 @@ static int ArrowArrayViewValidateFull(struct ArrowArrayView* array_view, } } + if (array_view->storage_type == NANOARROW_TYPE_LIST_VIEW || + array_view->storage_type == NANOARROW_TYPE_LARGE_LIST_VIEW) { + int64_t child_len = array_view->children[0]->length; + + struct ArrowBufferView offsets, sizes; + offsets.data.data = array_view->buffer_views[1].data.data; + sizes.data.data = array_view->buffer_views[2].data.data; + + for (int64_t i = array_view->offset; i < array_view->length + array_view->offset; + i++) { + int64_t offset, size; + if (array_view->storage_type == NANOARROW_TYPE_LIST_VIEW) { + offset = offsets.data.as_int32[i]; + size = sizes.data.as_int32[i]; + } else { + offset = offsets.data.as_int64[i]; + size = sizes.data.as_int64[i]; + } + + if (offset < 0) { + ArrowErrorSet(error, "Invalid negative offset %" PRId64 " at index %" PRId64, + offset, i); + return EINVAL; + } + + if (size < 0) { + ArrowErrorSet(error, "Invalid negative size %" PRId64 " at index %" PRId64, size, + i); + return EINVAL; + } + + if ((offset + size) > child_len) { + ArrowErrorSet(error, + "Offset: %" PRId64 " + size: %" PRId64 " at index: %" PRId64 + " exceeds length of child view: %" PRId64, + offset, size, i, child_len); + return EINVAL; + } + } + } + // Recurse for children for (int64_t i = 0; i < array_view->n_children; i++) { NANOARROW_RETURN_NOT_OK(ArrowArrayViewValidateFull(array_view->children[i], error)); } - // Dictionary valiation not implemented + // Dictionary validation not implemented if (array_view->dictionary != NULL) { NANOARROW_RETURN_NOT_OK(ArrowArrayViewValidateFull(array_view->dictionary, 
error)); // TODO: validate the indices diff --git a/src/oracledb/impl/arrow/nanoarrow/nanoarrow.h b/src/oracledb/impl/arrow/nanoarrow/nanoarrow.h index 0738957c..d0b955f6 100644 --- a/src/oracledb/impl/arrow/nanoarrow/nanoarrow.h +++ b/src/oracledb/impl/arrow/nanoarrow/nanoarrow.h @@ -15,13 +15,13 @@ // specific language governing permissions and limitations // under the License. -#ifndef NANOARROW_BUILD_ID_H_INCLUDED -#define NANOARROW_BUILD_ID_H_INCLUDED +#ifndef NANOARROW_CONFIG_H_INCLUDED +#define NANOARROW_CONFIG_H_INCLUDED #define NANOARROW_VERSION_MAJOR 0 -#define NANOARROW_VERSION_MINOR 6 +#define NANOARROW_VERSION_MINOR 7 #define NANOARROW_VERSION_PATCH 0 -#define NANOARROW_VERSION "0.6.0" +#define NANOARROW_VERSION "0.7.0" #define NANOARROW_VERSION_INT \ (NANOARROW_VERSION_MAJOR * 10000 + NANOARROW_VERSION_MINOR * 100 + \ @@ -29,6 +29,13 @@ #define NANOARROW_NAMESPACE PythonPkg +#if !defined(NANOARROW_CXX_NAMESPACE) +#define NANOARROW_CXX_NAMESPACE nanoarrow +#endif + +#define NANOARROW_CXX_NAMESPACE_BEGIN namespace NANOARROW_CXX_NAMESPACE { +#define NANOARROW_CXX_NAMESPACE_END } + #endif // Licensed to the Apache Software Foundation (ASF) under one // or more contributor license agreements. See the NOTICE file @@ -181,14 +188,14 @@ struct ArrowArrayStream { NANOARROW_RETURN_NOT_OK((x_ <= max_) ? NANOARROW_OK : EINVAL) #if defined(NANOARROW_DEBUG) -#define _NANOARROW_RETURN_NOT_OK_WITH_ERROR_IMPL(NAME, EXPR, ERROR_PTR_EXPR, EXPR_STR) \ - do { \ - const int NAME = (EXPR); \ - if (NAME) { \ - ArrowErrorSet((ERROR_PTR_EXPR), "%s failed with errno %d(%s)\n* %s:%d", EXPR_STR, \ - NAME, strerror(NAME), __FILE__, __LINE__); \ - return NAME; \ - } \ +#define _NANOARROW_RETURN_NOT_OK_WITH_ERROR_IMPL(NAME, EXPR, ERROR_PTR_EXPR, EXPR_STR) \ + do { \ + const int NAME = (EXPR); \ + if (NAME) { \ + ArrowErrorSet((ERROR_PTR_EXPR), "%s failed with errno %d\n* %s:%d", EXPR_STR, \ + NAME, __FILE__, __LINE__); \ + return NAME; \ + } \ } while (0) #else #define _NANOARROW_RETURN_NOT_OK_WITH_ERROR_IMPL(NAME, EXPR, ERROR_PTR_EXPR, EXPR_STR) \ @@ -485,7 +492,11 @@ enum ArrowType { NANOARROW_TYPE_INTERVAL_MONTH_DAY_NANO, NANOARROW_TYPE_RUN_END_ENCODED, NANOARROW_TYPE_BINARY_VIEW, - NANOARROW_TYPE_STRING_VIEW + NANOARROW_TYPE_STRING_VIEW, + NANOARROW_TYPE_DECIMAL32, + NANOARROW_TYPE_DECIMAL64, + NANOARROW_TYPE_LIST_VIEW, + NANOARROW_TYPE_LARGE_LIST_VIEW, }; /// \brief Get a string value of an enum ArrowType value @@ -542,6 +553,10 @@ static inline const char* ArrowTypeString(enum ArrowType type) { return "interval_months"; case NANOARROW_TYPE_INTERVAL_DAY_TIME: return "interval_day_time"; + case NANOARROW_TYPE_DECIMAL32: + return "decimal32"; + case NANOARROW_TYPE_DECIMAL64: + return "decimal64"; case NANOARROW_TYPE_DECIMAL128: return "decimal128"; case NANOARROW_TYPE_DECIMAL256: @@ -578,6 +593,10 @@ static inline const char* ArrowTypeString(enum ArrowType type) { return "binary_view"; case NANOARROW_TYPE_STRING_VIEW: return "string_view"; + case NANOARROW_TYPE_LIST_VIEW: + return "list_view"; + case NANOARROW_TYPE_LARGE_LIST_VIEW: + return "large_list_view"; default: return NULL; } @@ -656,7 +675,9 @@ enum ArrowBufferType { NANOARROW_BUFFER_TYPE_DATA_OFFSET, NANOARROW_BUFFER_TYPE_DATA, NANOARROW_BUFFER_TYPE_VARIADIC_DATA, - NANOARROW_BUFFER_TYPE_VARIADIC_SIZE + NANOARROW_BUFFER_TYPE_VARIADIC_SIZE, + NANOARROW_BUFFER_TYPE_VIEW_OFFSET, + NANOARROW_BUFFER_TYPE_SIZE, }; /// \brief The maximum number of fixed buffers in an ArrowArrayView or ArrowLayout @@ -890,6 +911,9 @@ struct ArrowArrayPrivateData { // 
Size of each variadic buffer in bytes int64_t* variadic_buffer_sizes; + + // The current offset used to build list views + int64_t list_view_offset; }; /// \brief A representation of an interval. @@ -922,7 +946,8 @@ static inline void ArrowIntervalInit(struct ArrowInterval* interval, /// values set using ArrowDecimalSetInt(), ArrowDecimalSetBytes128(), /// or ArrowDecimalSetBytes256(). struct ArrowDecimal { - /// \brief An array of 64-bit integers of n_words length defined in native-endian order + /// \brief An array of 64-bit integers of n_words length defined in native-endian order. + /// For a 32-bit decimal value, index 0 will be a 32-bit integer value. uint64_t words[4]; /// \brief The number of significant digits this decimal number can represent @@ -931,7 +956,8 @@ struct ArrowDecimal { /// \brief The number of digits after the decimal point. This can be negative. int32_t scale; - /// \brief The number of words in the words array + /// \brief The number of 64-bit words in the words array. For the special case of a + /// 32-bit decimal value, this will be 0. int n_words; /// \brief Cached value used by the implementation @@ -948,13 +974,14 @@ static inline void ArrowDecimalInit(struct ArrowDecimal* decimal, int32_t bitwid memset(decimal->words, 0, sizeof(decimal->words)); decimal->precision = precision; decimal->scale = scale; + // n_words will be 0 for bitwidth == 32 decimal->n_words = (int)(bitwidth / 8 / sizeof(uint64_t)); if (_ArrowIsLittleEndian()) { decimal->low_word_index = 0; - decimal->high_word_index = decimal->n_words - 1; + decimal->high_word_index = decimal->n_words > 0 ? decimal->n_words - 1 : 0; } else { - decimal->low_word_index = decimal->n_words - 1; + decimal->low_word_index = decimal->n_words > 0 ? decimal->n_words - 1 : 0; decimal->high_word_index = 0; } } @@ -965,6 +992,12 @@ static inline void ArrowDecimalInit(struct ArrowDecimal* decimal, int32_t bitwid /// within the signed 64-bit integer range (A precision less than or equal /// to 18 is sufficiently small). static inline int64_t ArrowDecimalGetIntUnsafe(const struct ArrowDecimal* decimal) { + if (decimal->n_words == 0) { + int32_t value; + memcpy(&value, decimal->words, sizeof(int32_t)); + return value; + } + return (int64_t)decimal->words[decimal->low_word_index]; } @@ -972,18 +1005,32 @@ static inline int64_t ArrowDecimalGetIntUnsafe(const struct ArrowDecimal* decima /// \ingroup nanoarrow-utils static inline void ArrowDecimalGetBytes(const struct ArrowDecimal* decimal, uint8_t* out) { - memcpy(out, decimal->words, decimal->n_words * sizeof(uint64_t)); + if (decimal->n_words == 0) { + memcpy(out, decimal->words, sizeof(int32_t)); + } else { + memcpy(out, decimal->words, decimal->n_words * sizeof(uint64_t)); + } } /// \brief Returns 1 if the value represented by decimal is >= 0 or -1 otherwise /// \ingroup nanoarrow-utils static inline int64_t ArrowDecimalSign(const struct ArrowDecimal* decimal) { - return 1 | ((int64_t)(decimal->words[decimal->high_word_index]) >> 63); + if (decimal->n_words == 0) { + return ArrowDecimalGetIntUnsafe(decimal) >= 0 ? 
1 : -1; + } else { + return 1 | ((int64_t)(decimal->words[decimal->high_word_index]) >> 63); + } } /// \brief Sets the integer value of this decimal /// \ingroup nanoarrow-utils static inline void ArrowDecimalSetInt(struct ArrowDecimal* decimal, int64_t value) { + if (decimal->n_words == 0) { + int32_t value32 = (int32_t)value; + memcpy(decimal->words, &value32, sizeof(int32_t)); + return; + } + if (value < 0) { memset(decimal->words, 0xff, decimal->n_words * sizeof(uint64_t)); } else { @@ -996,6 +1043,14 @@ static inline void ArrowDecimalSetInt(struct ArrowDecimal* decimal, int64_t valu /// \brief Negate the value of this decimal in place /// \ingroup nanoarrow-utils static inline void ArrowDecimalNegate(struct ArrowDecimal* decimal) { + if (decimal->n_words == 0) { + int32_t value; + memcpy(&value, decimal->words, sizeof(int32_t)); + value = -value; + memcpy(decimal->words, &value, sizeof(int32_t)); + return; + } + uint64_t carry = 1; if (decimal->low_word_index == 0) { @@ -1019,7 +1074,11 @@ static inline void ArrowDecimalNegate(struct ArrowDecimal* decimal) { /// \ingroup nanoarrow-utils static inline void ArrowDecimalSetBytes(struct ArrowDecimal* decimal, const uint8_t* value) { - memcpy(decimal->words, value, decimal->n_words * sizeof(uint64_t)); + if (decimal->n_words == 0) { + memcpy(decimal->words, value, sizeof(int32_t)); + } else { + memcpy(decimal->words, value, decimal->n_words * sizeof(uint64_t)); + } } #ifdef __cplusplus @@ -1079,6 +1138,8 @@ static inline void ArrowDecimalSetBytes(struct ArrowDecimal* decimal, #define ArrowDecimalSetDigits NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDecimalSetDigits) #define ArrowDecimalAppendDigitsToBuffer \ NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDecimalAppendDigitsToBuffer) +#define ArrowDecimalAppendStringToBuffer \ + NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowDecimalAppendStringToBuffer) #define ArrowSchemaInit NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowSchemaInit) #define ArrowSchemaInitFromType \ NANOARROW_SYMBOL(NANOARROW_NAMESPACE, ArrowSchemaInitFromType) @@ -1168,6 +1229,20 @@ static inline void ArrowDecimalSetBytes(struct ArrowDecimal* decimal, #endif +#if (defined _WIN32 || defined __CYGWIN__) && defined(NANOARROW_BUILD_DLL) +#if defined(NANOARROW_EXPORT_DLL) +#define NANOARROW_DLL __declspec(dllexport) +#else +#define NANOARROW_DLL __declspec(dllimport) +#endif // defined(NANOARROW_EXPORT_DLL) +#elif !defined(NANOARROW_DLL) +#if defined(__GNUC__) && __GNUC__ >= 4 +#define NANOARROW_DLL __attribute__((visibility("default"))) +#else +#define NANOARROW_DLL +#endif // __GNUC__ >= 4 +#endif + #ifdef __cplusplus extern "C" { #endif @@ -1191,19 +1266,19 @@ extern "C" { /// @{ /// \brief Allocate like malloc() -void* ArrowMalloc(int64_t size); +NANOARROW_DLL void* ArrowMalloc(int64_t size); /// \brief Reallocate like realloc() -void* ArrowRealloc(void* ptr, int64_t size); +NANOARROW_DLL void* ArrowRealloc(void* ptr, int64_t size); /// \brief Free a pointer allocated using ArrowMalloc() or ArrowRealloc(). -void ArrowFree(void* ptr); +NANOARROW_DLL void ArrowFree(void* ptr); /// \brief Return the default allocator /// /// The default allocator uses ArrowMalloc(), ArrowRealloc(), and /// ArrowFree(). -struct ArrowBufferAllocator ArrowBufferAllocatorDefault(void); +NANOARROW_DLL struct ArrowBufferAllocator ArrowBufferAllocatorDefault(void); /// \brief Create a custom deallocator /// @@ -1211,8 +1286,8 @@ struct ArrowBufferAllocator ArrowBufferAllocatorDefault(void); /// attach a custom deallocator to an ArrowBuffer. 
This may be used to /// avoid copying an existing buffer that was not allocated using the /// infrastructure provided here (e.g., by an R or Python object). -struct ArrowBufferAllocator ArrowBufferDeallocator(ArrowBufferDeallocatorCallback, - void* private_data); +NANOARROW_DLL struct ArrowBufferAllocator ArrowBufferDeallocator( + ArrowBufferDeallocatorCallback, void* private_data); /// @} @@ -1292,8 +1367,8 @@ static inline void ArrowArrayStreamRelease(struct ArrowArrayStream* array_stream /// \brief Set the contents of an error using printf syntax. /// /// If error is NULL, this function does nothing and returns NANOARROW_OK. -NANOARROW_CHECK_PRINTF_ATTRIBUTE int ArrowErrorSet(struct ArrowError* error, - const char* fmt, ...); +NANOARROW_DLL NANOARROW_CHECK_PRINTF_ATTRIBUTE int ArrowErrorSet(struct ArrowError* error, + const char* fmt, ...); /// @} @@ -1302,24 +1377,29 @@ NANOARROW_CHECK_PRINTF_ATTRIBUTE int ArrowErrorSet(struct ArrowError* error, /// @{ /// \brief Return a version string in the form "major.minor.patch" -const char* ArrowNanoarrowVersion(void); +NANOARROW_DLL const char* ArrowNanoarrowVersion(void); /// \brief Return an integer that can be used to compare versions sequentially -int ArrowNanoarrowVersionInt(void); +NANOARROW_DLL int ArrowNanoarrowVersionInt(void); /// \brief Initialize a description of buffer arrangements from a storage type -void ArrowLayoutInit(struct ArrowLayout* layout, enum ArrowType storage_type); +NANOARROW_DLL void ArrowLayoutInit(struct ArrowLayout* layout, + enum ArrowType storage_type); /// \brief Create a string view from a null-terminated string static inline struct ArrowStringView ArrowCharView(const char* value); /// \brief Sets the integer value of an ArrowDecimal from a string -ArrowErrorCode ArrowDecimalSetDigits(struct ArrowDecimal* decimal, - struct ArrowStringView value); +NANOARROW_DLL ArrowErrorCode ArrowDecimalSetDigits(struct ArrowDecimal* decimal, + struct ArrowStringView value); /// \brief Get the integer value of an ArrowDecimal as string -ArrowErrorCode ArrowDecimalAppendDigitsToBuffer(const struct ArrowDecimal* decimal, - struct ArrowBuffer* buffer); +NANOARROW_DLL ArrowErrorCode ArrowDecimalAppendDigitsToBuffer( + const struct ArrowDecimal* decimal, struct ArrowBuffer* buffer); + +/// \brief Get the decimal value of an ArrowDecimal as a string +NANOARROW_DLL ArrowErrorCode ArrowDecimalAppendStringToBuffer( + const struct ArrowDecimal* decimal, struct ArrowBuffer* buffer); /// \brief Get the half float value of a float static inline uint16_t ArrowFloatToHalfFloat(float value); @@ -1348,7 +1428,7 @@ static inline int64_t ArrowResolveChunk64(int64_t index, const int64_t* offsets, /// Initializes the fields and release callback of schema_out. Caller /// is responsible for calling the schema->release callback if /// NANOARROW_OK is returned. -void ArrowSchemaInit(struct ArrowSchema* schema); +NANOARROW_DLL void ArrowSchemaInit(struct ArrowSchema* schema); /// \brief Initialize an ArrowSchema from an ArrowType /// @@ -1356,7 +1436,8 @@ void ArrowSchemaInit(struct ArrowSchema* schema); /// ArrowSchemaSetType() for the common case of constructing an /// unparameterized type. The caller is responsible for calling the schema->release /// callback if NANOARROW_OK is returned. 
-ArrowErrorCode ArrowSchemaInitFromType(struct ArrowSchema* schema, enum ArrowType type); +NANOARROW_DLL ArrowErrorCode ArrowSchemaInitFromType(struct ArrowSchema* schema, + enum ArrowType type); /// \brief Get a human-readable summary of a Schema /// @@ -1364,8 +1445,8 @@ ArrowErrorCode ArrowSchemaInitFromType(struct ArrowSchema* schema, enum ArrowTyp /// and returns the number of characters required for the output if /// n were sufficiently large. If recursive is non-zero, the result will /// also include children. -int64_t ArrowSchemaToString(const struct ArrowSchema* schema, char* out, int64_t n, - char recursive); +NANOARROW_DLL int64_t ArrowSchemaToString(const struct ArrowSchema* schema, char* out, + int64_t n, char recursive); /// \brief Set the format field of a schema from an ArrowType /// @@ -1375,14 +1456,16 @@ int64_t ArrowSchemaToString(const struct ArrowSchema* schema, char* out, int64_t /// allocated, initialized, and named; however, the caller must /// ArrowSchemaSetType() on the preinitialized children. Schema must have been initialized /// using ArrowSchemaInit() or ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaSetType(struct ArrowSchema* schema, enum ArrowType type); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetType(struct ArrowSchema* schema, + enum ArrowType type); /// \brief Set the format field and initialize children of a struct schema /// /// The specified number of children are initialized; however, the caller is responsible /// for calling ArrowSchemaSetType() and ArrowSchemaSetName() on each child. /// Schema must have been initialized using ArrowSchemaInit() or ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaSetTypeStruct(struct ArrowSchema* schema, int64_t n_children); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetTypeStruct(struct ArrowSchema* schema, + int64_t n_children); /// \brief Set the format field of a fixed-size schema /// @@ -1392,17 +1475,20 @@ ArrowErrorCode ArrowSchemaSetTypeStruct(struct ArrowSchema* schema, int64_t n_ch /// allocated, initialized, and named; however, the caller must /// ArrowSchemaSetType() the first child. Schema must have been initialized using /// ArrowSchemaInit() or ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaSetTypeFixedSize(struct ArrowSchema* schema, - enum ArrowType type, int32_t fixed_size); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetTypeFixedSize(struct ArrowSchema* schema, + enum ArrowType type, + int32_t fixed_size); /// \brief Set the format field of a decimal schema /// /// Returns EINVAL for scale <= 0 or for type that is not -/// NANOARROW_TYPE_DECIMAL128 or NANOARROW_TYPE_DECIMAL256. Schema must have been -/// initialized using ArrowSchemaInit() or ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaSetTypeDecimal(struct ArrowSchema* schema, enum ArrowType type, - int32_t decimal_precision, - int32_t decimal_scale); +/// NANOARROW_TYPE_DECIMAL32, NANOARROW_TYPE_DECIMAL64, NANOARROW_TYPE_DECIMAL128 or +/// NANOARROW_TYPE_DECIMAL256. Schema must have been initialized using +/// ArrowSchemaInit() or ArrowSchemaDeepCopy(). +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetTypeDecimal(struct ArrowSchema* schema, + enum ArrowType type, + int32_t decimal_precision, + int32_t decimal_scale); /// \brief Set the format field of a run-end encoded schema /// @@ -1412,8 +1498,8 @@ ArrowErrorCode ArrowSchemaSetTypeDecimal(struct ArrowSchema* schema, enum ArrowT /// The caller must call `ArrowSchemaSetTypeXXX(schema->children[1])` to /// set the value type. 
Note that when building arrays using the `ArrowArrayAppendXXX()` /// functions, the run-end encoded array's logical length must be updated manually. -ArrowErrorCode ArrowSchemaSetTypeRunEndEncoded(struct ArrowSchema* schema, - enum ArrowType run_end_type); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetTypeRunEndEncoded(struct ArrowSchema* schema, + enum ArrowType run_end_type); /// \brief Set the format field of a time, timestamp, or duration schema /// @@ -1422,55 +1508,60 @@ ArrowErrorCode ArrowSchemaSetTypeRunEndEncoded(struct ArrowSchema* schema, /// NANOARROW_TYPE_TIMESTAMP, or NANOARROW_TYPE_DURATION. The /// timezone parameter must be NULL for a non-timestamp type. Schema must have been /// initialized using ArrowSchemaInit() or ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaSetTypeDateTime(struct ArrowSchema* schema, enum ArrowType type, - enum ArrowTimeUnit time_unit, - const char* timezone); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetTypeDateTime(struct ArrowSchema* schema, + enum ArrowType type, + enum ArrowTimeUnit time_unit, + const char* timezone); /// \brief Set the format field of a union schema /// /// Returns EINVAL for a type that is not NANOARROW_TYPE_DENSE_UNION /// or NANOARROW_TYPE_SPARSE_UNION. The specified number of children are /// allocated, and initialized. -ArrowErrorCode ArrowSchemaSetTypeUnion(struct ArrowSchema* schema, enum ArrowType type, - int64_t n_children); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetTypeUnion(struct ArrowSchema* schema, + enum ArrowType type, + int64_t n_children); /// \brief Make a (recursive) copy of a schema /// /// Allocates and copies fields of schema into schema_out. -ArrowErrorCode ArrowSchemaDeepCopy(const struct ArrowSchema* schema, - struct ArrowSchema* schema_out); +NANOARROW_DLL ArrowErrorCode ArrowSchemaDeepCopy(const struct ArrowSchema* schema, + struct ArrowSchema* schema_out); /// \brief Copy format into schema->format /// /// schema must have been allocated using ArrowSchemaInitFromType() or /// ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaSetFormat(struct ArrowSchema* schema, const char* format); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetFormat(struct ArrowSchema* schema, + const char* format); /// \brief Copy name into schema->name /// /// schema must have been allocated using ArrowSchemaInitFromType() or /// ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaSetName(struct ArrowSchema* schema, const char* name); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetName(struct ArrowSchema* schema, + const char* name); /// \brief Copy metadata into schema->metadata /// /// schema must have been allocated using ArrowSchemaInitFromType() or /// ArrowSchemaDeepCopy. -ArrowErrorCode ArrowSchemaSetMetadata(struct ArrowSchema* schema, const char* metadata); +NANOARROW_DLL ArrowErrorCode ArrowSchemaSetMetadata(struct ArrowSchema* schema, + const char* metadata); /// \brief Allocate the schema->children array /// /// Includes the memory for each child struct ArrowSchema. /// schema must have been allocated using ArrowSchemaInitFromType() or /// ArrowSchemaDeepCopy(). -ArrowErrorCode ArrowSchemaAllocateChildren(struct ArrowSchema* schema, - int64_t n_children); +NANOARROW_DLL ArrowErrorCode ArrowSchemaAllocateChildren(struct ArrowSchema* schema, + int64_t n_children); /// \brief Allocate the schema->dictionary member /// /// schema must have been allocated using ArrowSchemaInitFromType() or /// ArrowSchemaDeepCopy(). 
-ArrowErrorCode ArrowSchemaAllocateDictionary(struct ArrowSchema* schema); +NANOARROW_DLL ArrowErrorCode ArrowSchemaAllocateDictionary(struct ArrowSchema* schema); /// @} @@ -1494,49 +1585,51 @@ struct ArrowMetadataReader { }; /// \brief Initialize an ArrowMetadataReader -ArrowErrorCode ArrowMetadataReaderInit(struct ArrowMetadataReader* reader, - const char* metadata); +NANOARROW_DLL ArrowErrorCode ArrowMetadataReaderInit(struct ArrowMetadataReader* reader, + const char* metadata); /// \brief Read the next key/value pair from an ArrowMetadataReader -ArrowErrorCode ArrowMetadataReaderRead(struct ArrowMetadataReader* reader, - struct ArrowStringView* key_out, - struct ArrowStringView* value_out); +NANOARROW_DLL ArrowErrorCode ArrowMetadataReaderRead(struct ArrowMetadataReader* reader, + struct ArrowStringView* key_out, + struct ArrowStringView* value_out); /// \brief The number of bytes in in a key/value metadata string -int64_t ArrowMetadataSizeOf(const char* metadata); +NANOARROW_DLL int64_t ArrowMetadataSizeOf(const char* metadata); /// \brief Check for a key in schema metadata -char ArrowMetadataHasKey(const char* metadata, struct ArrowStringView key); +NANOARROW_DLL char ArrowMetadataHasKey(const char* metadata, struct ArrowStringView key); /// \brief Extract a value from schema metadata /// /// If key does not exist in metadata, value_out is unmodified -ArrowErrorCode ArrowMetadataGetValue(const char* metadata, struct ArrowStringView key, - struct ArrowStringView* value_out); +NANOARROW_DLL ArrowErrorCode ArrowMetadataGetValue(const char* metadata, + struct ArrowStringView key, + struct ArrowStringView* value_out); /// \brief Initialize a builder for schema metadata from key/value pairs /// /// metadata can be an existing metadata string or NULL to initialize /// an empty metadata string. -ArrowErrorCode ArrowMetadataBuilderInit(struct ArrowBuffer* buffer, const char* metadata); +NANOARROW_DLL ArrowErrorCode ArrowMetadataBuilderInit(struct ArrowBuffer* buffer, + const char* metadata); /// \brief Append a key/value pair to a buffer containing serialized metadata -ArrowErrorCode ArrowMetadataBuilderAppend(struct ArrowBuffer* buffer, - struct ArrowStringView key, - struct ArrowStringView value); +NANOARROW_DLL ArrowErrorCode ArrowMetadataBuilderAppend(struct ArrowBuffer* buffer, + struct ArrowStringView key, + struct ArrowStringView value); /// \brief Set a key/value pair to a buffer containing serialized metadata /// /// Ensures that the only entry for key in the metadata is set to value. /// This function maintains the existing position of (the first instance of) /// key if present in the data. 
-ArrowErrorCode ArrowMetadataBuilderSet(struct ArrowBuffer* buffer, - struct ArrowStringView key, - struct ArrowStringView value); +NANOARROW_DLL ArrowErrorCode ArrowMetadataBuilderSet(struct ArrowBuffer* buffer, + struct ArrowStringView key, + struct ArrowStringView value); /// \brief Remove a key from a buffer containing serialized metadata -ArrowErrorCode ArrowMetadataBuilderRemove(struct ArrowBuffer* buffer, - struct ArrowStringView key); +NANOARROW_DLL ArrowErrorCode ArrowMetadataBuilderRemove(struct ArrowBuffer* buffer, + struct ArrowStringView key); /// @} @@ -1634,9 +1727,9 @@ struct ArrowSchemaView { }; /// \brief Initialize an ArrowSchemaView -ArrowErrorCode ArrowSchemaViewInit(struct ArrowSchemaView* schema_view, - const struct ArrowSchema* schema, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowSchemaViewInit(struct ArrowSchemaView* schema_view, + const struct ArrowSchema* schema, + struct ArrowError* error); /// @} @@ -1852,24 +1945,24 @@ static inline void ArrowBitmapReset(struct ArrowBitmap* bitmap); /// Initializes the fields and release callback of array. Caller /// is responsible for calling the array->release callback if /// NANOARROW_OK is returned. -ArrowErrorCode ArrowArrayInitFromType(struct ArrowArray* array, - enum ArrowType storage_type); +NANOARROW_DLL ArrowErrorCode ArrowArrayInitFromType(struct ArrowArray* array, + enum ArrowType storage_type); /// \brief Initialize the contents of an ArrowArray from an ArrowSchema /// /// Caller is responsible for calling the array->release callback if /// NANOARROW_OK is returned. -ArrowErrorCode ArrowArrayInitFromSchema(struct ArrowArray* array, - const struct ArrowSchema* schema, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowArrayInitFromSchema(struct ArrowArray* array, + const struct ArrowSchema* schema, + struct ArrowError* error); /// \brief Initialize the contents of an ArrowArray from an ArrowArrayView /// /// Caller is responsible for calling the array->release callback if /// NANOARROW_OK is returned. -ArrowErrorCode ArrowArrayInitFromArrayView(struct ArrowArray* array, - const struct ArrowArrayView* array_view, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowArrayInitFromArrayView( + struct ArrowArray* array, const struct ArrowArrayView* array_view, + struct ArrowError* error); /// \brief Allocate the array->children array /// @@ -1877,7 +1970,8 @@ ArrowErrorCode ArrowArrayInitFromArrayView(struct ArrowArray* array, /// whose members are marked as released and may be subsequently initialized /// with ArrowArrayInitFromType() or moved from an existing ArrowArray. /// schema must have been allocated using ArrowArrayInitFromType(). -ArrowErrorCode ArrowArrayAllocateChildren(struct ArrowArray* array, int64_t n_children); +NANOARROW_DLL ArrowErrorCode ArrowArrayAllocateChildren(struct ArrowArray* array, + int64_t n_children); /// \brief Allocate the array->dictionary member /// @@ -1885,18 +1979,19 @@ ArrowErrorCode ArrowArrayAllocateChildren(struct ArrowArray* array, int64_t n_ch /// is marked as released and may be subsequently initialized /// with ArrowArrayInitFromType() or moved from an existing ArrowArray. 
/// array must have been allocated using ArrowArrayInitFromType() -ArrowErrorCode ArrowArrayAllocateDictionary(struct ArrowArray* array); +NANOARROW_DLL ArrowErrorCode ArrowArrayAllocateDictionary(struct ArrowArray* array); /// \brief Set the validity bitmap of an ArrowArray /// /// array must have been allocated using ArrowArrayInitFromType() -void ArrowArraySetValidityBitmap(struct ArrowArray* array, struct ArrowBitmap* bitmap); +NANOARROW_DLL void ArrowArraySetValidityBitmap(struct ArrowArray* array, + struct ArrowBitmap* bitmap); /// \brief Set a buffer of an ArrowArray /// /// array must have been allocated using ArrowArrayInitFromType() -ArrowErrorCode ArrowArraySetBuffer(struct ArrowArray* array, int64_t i, - struct ArrowBuffer* buffer); +NANOARROW_DLL ArrowErrorCode ArrowArraySetBuffer(struct ArrowArray* array, int64_t i, + struct ArrowBuffer* buffer); /// \brief Get the validity bitmap of an ArrowArray /// @@ -1922,8 +2017,8 @@ static inline ArrowErrorCode ArrowArrayStartAppending(struct ArrowArray* array); /// child array sizes for non-fixed-size arrays), recursively reserve space for /// additional elements. This is useful for reducing the number of reallocations /// that occur using the item-wise appenders. -ArrowErrorCode ArrowArrayReserve(struct ArrowArray* array, - int64_t additional_size_elements); +NANOARROW_DLL ArrowErrorCode ArrowArrayReserve(struct ArrowArray* array, + int64_t additional_size_elements); /// \brief Append a null value to an array static inline ArrowErrorCode ArrowArrayAppendNull(struct ArrowArray* array, int64_t n); @@ -2021,8 +2116,8 @@ static inline ArrowErrorCode ArrowArrayShrinkToFit(struct ArrowArray* array); /// into array->buffers and checks the actual size of the buffers /// against the expected size based on the final length. /// array must have been allocated using ArrowArrayInitFromType() -ArrowErrorCode ArrowArrayFinishBuildingDefault(struct ArrowArray* array, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowArrayFinishBuildingDefault(struct ArrowArray* array, + struct ArrowError* error); /// \brief Finish building an ArrowArray with explicit validation /// @@ -2031,9 +2126,9 @@ ArrowErrorCode ArrowArrayFinishBuildingDefault(struct ArrowArray* array, /// buffer data access is not possible or more validation (i.e., /// NANOARROW_VALIDATION_LEVEL_FULL) if buffer content was obtained from an untrusted or /// corruptible source. 
-ArrowErrorCode ArrowArrayFinishBuilding(struct ArrowArray* array, - enum ArrowValidationLevel validation_level, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowArrayFinishBuilding( + struct ArrowArray* array, enum ArrowValidationLevel validation_level, + struct ArrowError* error); /// @} @@ -2044,8 +2139,8 @@ ArrowErrorCode ArrowArrayFinishBuilding(struct ArrowArray* array, /// @{ /// \brief Initialize the contents of an ArrowArrayView -void ArrowArrayViewInitFromType(struct ArrowArrayView* array_view, - enum ArrowType storage_type); +NANOARROW_DLL void ArrowArrayViewInitFromType(struct ArrowArrayView* array_view, + enum ArrowType storage_type); /// \brief Move an ArrowArrayView /// @@ -2055,32 +2150,34 @@ static inline void ArrowArrayViewMove(struct ArrowArrayView* src, struct ArrowArrayView* dst); /// \brief Initialize the contents of an ArrowArrayView from an ArrowSchema -ArrowErrorCode ArrowArrayViewInitFromSchema(struct ArrowArrayView* array_view, - const struct ArrowSchema* schema, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode +ArrowArrayViewInitFromSchema(struct ArrowArrayView* array_view, + const struct ArrowSchema* schema, struct ArrowError* error); /// \brief Allocate the array_view->children array /// /// Includes the memory for each child struct ArrowArrayView -ArrowErrorCode ArrowArrayViewAllocateChildren(struct ArrowArrayView* array_view, - int64_t n_children); +NANOARROW_DLL ArrowErrorCode +ArrowArrayViewAllocateChildren(struct ArrowArrayView* array_view, int64_t n_children); /// \brief Allocate array_view->dictionary -ArrowErrorCode ArrowArrayViewAllocateDictionary(struct ArrowArrayView* array_view); +NANOARROW_DLL ArrowErrorCode +ArrowArrayViewAllocateDictionary(struct ArrowArrayView* array_view); /// \brief Set data-independent buffer sizes from length -void ArrowArrayViewSetLength(struct ArrowArrayView* array_view, int64_t length); +NANOARROW_DLL void ArrowArrayViewSetLength(struct ArrowArrayView* array_view, + int64_t length); /// \brief Set buffer sizes and data pointers from an ArrowArray -ArrowErrorCode ArrowArrayViewSetArray(struct ArrowArrayView* array_view, - const struct ArrowArray* array, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowArrayViewSetArray(struct ArrowArrayView* array_view, + const struct ArrowArray* array, + struct ArrowError* error); /// \brief Set buffer sizes and data pointers from an ArrowArray except for those /// that require dereferencing buffer content. -ArrowErrorCode ArrowArrayViewSetArrayMinimal(struct ArrowArrayView* array_view, - const struct ArrowArray* array, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode +ArrowArrayViewSetArrayMinimal(struct ArrowArrayView* array_view, + const struct ArrowArray* array, struct ArrowError* error); /// \brief Get the number of buffers /// @@ -2132,9 +2229,9 @@ static inline int64_t ArrowArrayViewGetBufferElementSizeBits( /// and sizes otherwise, you may wish to perform checks at a different level. See /// documentation for ArrowValidationLevel for the details of checks performed /// at each level. 
-ArrowErrorCode ArrowArrayViewValidate(struct ArrowArrayView* array_view, - enum ArrowValidationLevel validation_level, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowArrayViewValidate( + struct ArrowArrayView* array_view, enum ArrowValidationLevel validation_level, + struct ArrowError* error); /// \brief Compare two ArrowArrayView objects for equality /// @@ -2144,13 +2241,13 @@ ArrowErrorCode ArrowArrayViewValidate(struct ArrowArrayView* array_view, /// error if error is non-NULL. /// /// Returns NANOARROW_OK if the comparison completed successfully. -ArrowErrorCode ArrowArrayViewCompare(const struct ArrowArrayView* actual, - const struct ArrowArrayView* expected, - enum ArrowCompareLevel level, int* out, - struct ArrowError* reason); +NANOARROW_DLL ArrowErrorCode ArrowArrayViewCompare(const struct ArrowArrayView* actual, + const struct ArrowArrayView* expected, + enum ArrowCompareLevel level, int* out, + struct ArrowError* reason); /// \brief Reset the contents of an ArrowArrayView and frees resources -void ArrowArrayViewReset(struct ArrowArrayView* array_view); +NANOARROW_DLL void ArrowArrayViewReset(struct ArrowArrayView* array_view); /// \brief Check for a null element in an ArrowArrayView static inline int8_t ArrowArrayViewIsNull(const struct ArrowArrayView* array_view, @@ -2229,8 +2326,8 @@ static inline void ArrowArrayViewGetDecimalUnsafe(const struct ArrowArrayView* a /// This function moves the ownership of schema to the array_stream. If /// this function returns NANOARROW_OK, the caller is responsible for /// releasing the ArrowArrayStream. -ArrowErrorCode ArrowBasicArrayStreamInit(struct ArrowArrayStream* array_stream, - struct ArrowSchema* schema, int64_t n_arrays); +NANOARROW_DLL ArrowErrorCode ArrowBasicArrayStreamInit( + struct ArrowArrayStream* array_stream, struct ArrowSchema* schema, int64_t n_arrays); /// \brief Set the ith ArrowArray in this ArrowArrayStream. /// @@ -2239,16 +2336,16 @@ ArrowErrorCode ArrowBasicArrayStreamInit(struct ArrowArrayStream* array_stream, /// be greater than zero and less than the value of n_arrays passed in /// ArrowBasicArrayStreamInit(). Callers are not required to fill all /// n_arrays members (i.e., n_arrays is a maximum bound). -void ArrowBasicArrayStreamSetArray(struct ArrowArrayStream* array_stream, int64_t i, - struct ArrowArray* array); +NANOARROW_DLL void ArrowBasicArrayStreamSetArray(struct ArrowArrayStream* array_stream, + int64_t i, struct ArrowArray* array); /// \brief Validate the contents of this ArrowArrayStream /// /// array_stream must have been initialized with ArrowBasicArrayStreamInit(). /// This function uses ArrowArrayStreamInitFromSchema() and ArrowArrayStreamSetArray() /// to validate the contents of the arrays. 
-ArrowErrorCode ArrowBasicArrayStreamValidate(const struct ArrowArrayStream* array_stream, - struct ArrowError* error); +NANOARROW_DLL ArrowErrorCode ArrowBasicArrayStreamValidate( + const struct ArrowArrayStream* array_stream, struct ArrowError* error); /// @} @@ -2893,6 +2990,9 @@ static inline void ArrowBitmapAppendInt8Unsafe(struct ArrowBitmap* bitmap, return; } + NANOARROW_DCHECK(bitmap->buffer.data != NULL); + NANOARROW_DCHECK(values != NULL); + const int8_t* values_cursor = values; int64_t n_remaining = n_values; int64_t out_i_cursor = bitmap->size_bits; @@ -2940,6 +3040,9 @@ static inline void ArrowBitmapAppendInt32Unsafe(struct ArrowBitmap* bitmap, return; } + NANOARROW_DCHECK(bitmap->buffer.data != NULL); + NANOARROW_DCHECK(values != NULL); + const int32_t* values_cursor = values; int64_t n_remaining = n_values; int64_t out_i_cursor = bitmap->size_bits; @@ -3283,6 +3386,9 @@ static inline ArrowErrorCode _ArrowArrayAppendEmptyInternal(struct ArrowArray* a case NANOARROW_BUFFER_TYPE_VARIADIC_SIZE: case NANOARROW_BUFFER_TYPE_VALIDITY: continue; + case NANOARROW_BUFFER_TYPE_SIZE: + NANOARROW_RETURN_NOT_OK(ArrowBufferAppendFill(buffer, 0, size_bytes * n)); + continue; case NANOARROW_BUFFER_TYPE_DATA_OFFSET: // Append the current value at the end of the offset buffer for each element NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(buffer, size_bytes * n)); @@ -3303,7 +3409,10 @@ static inline ArrowErrorCode _ArrowArrayAppendEmptyInternal(struct ArrowArray* a NANOARROW_RETURN_NOT_OK(_ArrowArrayAppendBits(array, i, 0, n)); } continue; - + case NANOARROW_BUFFER_TYPE_VIEW_OFFSET: + NANOARROW_RETURN_NOT_OK(ArrowBufferReserve(buffer, size_bytes * n)); + NANOARROW_RETURN_NOT_OK(ArrowBufferAppendFill(buffer, 0, size_bytes * n)); + continue; case NANOARROW_BUFFER_TYPE_TYPE_ID: case NANOARROW_BUFFER_TYPE_UNION_OFFSET: // These cases return above @@ -3693,6 +3802,22 @@ static inline ArrowErrorCode ArrowArrayAppendDecimal(struct ArrowArray* array, struct ArrowBuffer* data_buffer = ArrowArrayBuffer(array, 1); switch (private_data->storage_type) { + case NANOARROW_TYPE_DECIMAL32: + if (value->n_words != 0) { + return EINVAL; + } else { + NANOARROW_RETURN_NOT_OK( + ArrowBufferAppend(data_buffer, value->words, sizeof(uint32_t))); + break; + } + case NANOARROW_TYPE_DECIMAL64: + if (value->n_words != 1) { + return EINVAL; + } else { + NANOARROW_RETURN_NOT_OK( + ArrowBufferAppend(data_buffer, value->words, sizeof(uint64_t))); + break; + } case NANOARROW_TYPE_DECIMAL128: if (value->n_words != 2) { return EINVAL; @@ -3734,6 +3859,7 @@ static inline ArrowErrorCode ArrowArrayFinishElement(struct ArrowArray* array) { if (child_length > INT32_MAX) { return EOVERFLOW; } + NANOARROW_RETURN_NOT_OK( ArrowBufferAppendInt32(ArrowArrayBuffer(array, 1), (int32_t)child_length)); break; @@ -3749,6 +3875,31 @@ static inline ArrowErrorCode ArrowArrayFinishElement(struct ArrowArray* array) { return EINVAL; } break; + case NANOARROW_TYPE_LIST_VIEW: { + child_length = array->children[0]->length; + if (child_length > INT32_MAX) { + return EOVERFLOW; + } + + const int32_t last_valid_offset = (int32_t)private_data->list_view_offset; + NANOARROW_RETURN_NOT_OK( + ArrowBufferAppendInt32(ArrowArrayBuffer(array, 1), last_valid_offset)); + NANOARROW_RETURN_NOT_OK(ArrowBufferAppendInt32( + ArrowArrayBuffer(array, 2), (int32_t)child_length - last_valid_offset)); + private_data->list_view_offset = child_length; + break; + } + case NANOARROW_TYPE_LARGE_LIST_VIEW: { + child_length = array->children[0]->length; + const int64_t last_valid_offset 
= private_data->list_view_offset; + NANOARROW_RETURN_NOT_OK( + ArrowBufferAppendInt64(ArrowArrayBuffer(array, 1), last_valid_offset)); + NANOARROW_RETURN_NOT_OK(ArrowBufferAppendInt64(ArrowArrayBuffer(array, 2), + child_length - last_valid_offset)); + private_data->list_view_offset = child_length; + break; + } + case NANOARROW_TYPE_STRUCT: for (int64_t i = 0; i < array->n_children; i++) { child_length = array->children[i]->length; @@ -4023,8 +4174,10 @@ static inline int64_t ArrowArrayViewListChildOffset( const struct ArrowArrayView* array_view, int64_t i) { switch (array_view->storage_type) { case NANOARROW_TYPE_LIST: + case NANOARROW_TYPE_LIST_VIEW: return array_view->buffer_views[1].data.as_int32[i]; case NANOARROW_TYPE_LARGE_LIST: + case NANOARROW_TYPE_LARGE_LIST_VIEW: return array_view->buffer_views[1].data.as_int64[i]; default: return -1; @@ -4161,7 +4314,7 @@ static inline struct ArrowStringView ArrowArrayViewGetStringUnsafe( case NANOARROW_TYPE_BINARY: view.data = data_view + offsets_view->data.as_int32[i]; view.size_bytes = - offsets_view->data.as_int32[i + 1] - offsets_view->data.as_int32[i]; + (int64_t)offsets_view->data.as_int32[i + 1] - offsets_view->data.as_int32[i]; break; case NANOARROW_TYPE_LARGE_STRING: case NANOARROW_TYPE_LARGE_BINARY: @@ -4201,7 +4354,7 @@ static inline struct ArrowBufferView ArrowArrayViewGetBytesUnsafe( case NANOARROW_TYPE_STRING: case NANOARROW_TYPE_BINARY: view.size_bytes = - offsets_view->data.as_int32[i + 1] - offsets_view->data.as_int32[i]; + (int64_t)offsets_view->data.as_int32[i + 1] - offsets_view->data.as_int32[i]; view.data.as_uint8 = data_view + offsets_view->data.as_int32[i]; break; case NANOARROW_TYPE_LARGE_STRING: @@ -4231,23 +4384,25 @@ static inline struct ArrowBufferView ArrowArrayViewGetBytesUnsafe( static inline void ArrowArrayViewGetIntervalUnsafe( const struct ArrowArrayView* array_view, int64_t i, struct ArrowInterval* out) { const uint8_t* data_view = array_view->buffer_views[1].data.as_uint8; + const int64_t offset = array_view->offset; + const int64_t index = offset + i; switch (array_view->storage_type) { case NANOARROW_TYPE_INTERVAL_MONTHS: { const size_t size = sizeof(int32_t); - memcpy(&out->months, data_view + i * size, sizeof(int32_t)); + memcpy(&out->months, data_view + index * size, sizeof(int32_t)); break; } case NANOARROW_TYPE_INTERVAL_DAY_TIME: { const size_t size = sizeof(int32_t) + sizeof(int32_t); - memcpy(&out->days, data_view + i * size, sizeof(int32_t)); - memcpy(&out->ms, data_view + i * size + 4, sizeof(int32_t)); + memcpy(&out->days, data_view + index * size, sizeof(int32_t)); + memcpy(&out->ms, data_view + index * size + 4, sizeof(int32_t)); break; } case NANOARROW_TYPE_INTERVAL_MONTH_DAY_NANO: { const size_t size = sizeof(int32_t) + sizeof(int32_t) + sizeof(int64_t); - memcpy(&out->months, data_view + i * size, sizeof(int32_t)); - memcpy(&out->days, data_view + i * size + 4, sizeof(int32_t)); - memcpy(&out->ns, data_view + i * size + 8, sizeof(int64_t)); + memcpy(&out->months, data_view + index * size, sizeof(int32_t)); + memcpy(&out->days, data_view + index * size + 4, sizeof(int32_t)); + memcpy(&out->ns, data_view + index * size + 8, sizeof(int64_t)); break; } default: @@ -4260,6 +4415,12 @@ static inline void ArrowArrayViewGetDecimalUnsafe(const struct ArrowArrayView* a i += array_view->offset; const uint8_t* data_view = array_view->buffer_views[1].data.as_uint8; switch (array_view->storage_type) { + case NANOARROW_TYPE_DECIMAL32: + ArrowDecimalSetBytes(out, data_view + (i * 4)); + break; + case 
NANOARROW_TYPE_DECIMAL64: + ArrowDecimalSetBytes(out, data_view + (i * 8)); + break; case NANOARROW_TYPE_DECIMAL128: ArrowDecimalSetBytes(out, data_view + (i * 16)); break; From ff47fe91a5a347116c267359caeadbfef2ea9fdc Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:50:59 -0600 Subject: [PATCH 132/239] Further refactoring to simplify code and set stage for further enhancements. --- src/oracledb/arrow_impl.pxd | 7 +- src/oracledb/impl/arrow/array.pyx | 184 ++++++++++++++------------ src/oracledb/impl/base/converters.pyx | 2 +- src/oracledb/impl/base/var.pyx | 3 +- 4 files changed, 108 insertions(+), 88 deletions(-) diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index bbb84cce..27b7bc30 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -84,11 +84,12 @@ cdef class ArrowArrayImpl: str name ArrowType arrow_type ArrowTimeUnit time_unit - double factor + int time_factor ArrowArray *arrow_array ArrowSchema *arrow_schema ArrowType child_arrow_type + cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1 cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1 cdef int append_double(self, double value) except -1 @@ -101,6 +102,10 @@ cdef class ArrowArrayImpl: array.array values) except -1 cdef int append_vector(self, array.array value) except -1 cdef int finish_building(self) except -1 + cdef int populate_from_metadata(self, ArrowType arrow_type, str name, + int8_t precision, int8_t scale, + ArrowTimeUnit time_unit, + ArrowType child_arrow_type) except -1 cdef class DataFrameImpl: diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 311d6ecf..8dbee66d 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -30,93 +30,11 @@ cdef class ArrowArrayImpl: - def __cinit__(self, ArrowType arrow_type, str name, int8_t precision, - int8_t scale, ArrowTimeUnit time_unit, - ArrowType child_arrow_type): - cdef ArrowType storage_type = arrow_type - self.arrow_type = arrow_type - self.child_arrow_type = child_arrow_type - self.time_unit = time_unit - self.name = name + def __cinit__(self): self.arrow_array = \ - cpython.PyMem_Malloc(sizeof(ArrowArray)) - if arrow_type == NANOARROW_TYPE_TIMESTAMP: - storage_type = NANOARROW_TYPE_INT64 - if time_unit == NANOARROW_TIME_UNIT_MILLI: - self.factor = 1e3 - elif time_unit == NANOARROW_TIME_UNIT_MICRO: - self.factor = 1e6 - elif time_unit == NANOARROW_TIME_UNIT_NANO: - self.factor = 1e9 - else: - self.factor = 1 - + cpython.PyMem_Calloc(1, sizeof(ArrowArray)) self.arrow_schema = \ - cpython.PyMem_Malloc(sizeof(ArrowSchema)) - if arrow_type == NANOARROW_TYPE_DECIMAL128: - self.precision = precision - self.scale = scale - ArrowSchemaInit(self.arrow_schema) - _check_nanoarrow( - ArrowSchemaSetTypeDecimal( - self.arrow_schema, - arrow_type, - precision, - scale - ) - ) - elif arrow_type == NANOARROW_TYPE_STRUCT: - # Currently struct is used for Sparse vector only - build_arrow_schema_for_sparse_vector(self.arrow_schema, - child_arrow_type) - else: - _check_nanoarrow( - ArrowSchemaInitFromType( - self.arrow_schema, - storage_type - ) - ) - if arrow_type == NANOARROW_TYPE_TIMESTAMP: - _check_nanoarrow( - ArrowSchemaSetTypeDateTime( - self.arrow_schema, - arrow_type, - time_unit, - NULL - ) - ) - if arrow_type == NANOARROW_TYPE_LIST: - # Set the schema for child using child_arrow_type - _check_nanoarrow( - ArrowSchemaSetType( - 
self.arrow_schema.children[0], - child_arrow_type - ) - ) - _check_nanoarrow( - ArrowArrayInitFromSchema( - self.arrow_array, - self.arrow_schema, - NULL - ) - ) - elif arrow_type == NANOARROW_TYPE_STRUCT: - _check_nanoarrow( - ArrowArrayInitFromSchema( - self.arrow_array, - self.arrow_schema, - NULL - ) - ) - else: # primitive type array init - _check_nanoarrow( - ArrowArrayInitFromType( - self.arrow_array, - storage_type - ) - ) - _check_nanoarrow(ArrowArrayStartAppending(self.arrow_array)) - _check_nanoarrow(ArrowSchemaSetName(self.arrow_schema, name.encode())) + cpython.PyMem_Calloc(1, sizeof(ArrowSchema)) def __dealloc__(self): if self.arrow_array != NULL: @@ -128,6 +46,20 @@ cdef class ArrowArrayImpl: ArrowSchemaRelease(self.arrow_schema) cpython.PyMem_Free(self.arrow_schema) + cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1: + """ + Sets the time unit and the corresponding factor. + """ + self.time_unit = time_unit + if time_unit == NANOARROW_TIME_UNIT_MILLI: + self.time_factor = 1_000 + elif time_unit == NANOARROW_TIME_UNIT_MICRO: + self.time_factor = 1_000_000 + elif time_unit == NANOARROW_TIME_UNIT_NANO: + self.time_factor = 1_000_000_000 + else: + self.time_factor = 1 + cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1: """ Append a value of type bytes to the array. @@ -318,6 +250,88 @@ cdef class ArrowArrayImpl: _check_nanoarrow(ArrowArrayFinishBuildingDefault(self.arrow_array, NULL)) + cdef int populate_from_metadata(self, ArrowType arrow_type, str name, + int8_t precision, int8_t scale, + ArrowTimeUnit time_unit, + ArrowType child_arrow_type) except -1: + """ + Populate the array from the supplied metadata. + """ + cdef ArrowType storage_type = arrow_type + self.arrow_type = arrow_type + self._set_time_unit(time_unit) + self.name = name + self.child_arrow_type = child_arrow_type + if arrow_type == NANOARROW_TYPE_TIMESTAMP: + storage_type = NANOARROW_TYPE_INT64 + + _check_nanoarrow(ArrowArrayInitFromType(self.arrow_array, + storage_type)) + if arrow_type == NANOARROW_TYPE_DECIMAL128: + self.precision = precision + self.scale = scale + ArrowSchemaInit(self.arrow_schema) + _check_nanoarrow( + ArrowSchemaSetTypeDecimal( + self.arrow_schema, + arrow_type, + precision, + scale + ) + ) + elif arrow_type == NANOARROW_TYPE_STRUCT: + # Currently struct is used for Sparse vector only + build_arrow_schema_for_sparse_vector(self.arrow_schema, + child_arrow_type) + else: + _check_nanoarrow( + ArrowSchemaInitFromType( + self.arrow_schema, + storage_type + ) + ) + if arrow_type == NANOARROW_TYPE_TIMESTAMP: + _check_nanoarrow( + ArrowSchemaSetTypeDateTime( + self.arrow_schema, + arrow_type, + time_unit, + NULL + ) + ) + if arrow_type == NANOARROW_TYPE_LIST: + # Set the schema for child using child_arrow_type + _check_nanoarrow( + ArrowSchemaSetType( + self.arrow_schema.children[0], + child_arrow_type + ) + ) + _check_nanoarrow( + ArrowArrayInitFromSchema( + self.arrow_array, + self.arrow_schema, + NULL + ) + ) + elif arrow_type == NANOARROW_TYPE_STRUCT: + _check_nanoarrow( + ArrowArrayInitFromSchema( + self.arrow_array, + self.arrow_schema, + NULL + ) + ) + else: # primitive type array init + _check_nanoarrow( + ArrowArrayInitFromType( + self.arrow_array, + storage_type + ) + ) + _check_nanoarrow(ArrowArrayStartAppending(self.arrow_array)) + _check_nanoarrow(ArrowSchemaSetName(self.arrow_schema, name.encode())) + def get_array_capsule(self): """ Internal method for getting a PyCapsule pointer to the array. 
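A minimal sketch of the two-step construction that this refactoring introduces: ArrowArrayImpl.__cinit__ now only allocates the nanoarrow structures, and populate_from_metadata() initializes them, which lets the same object later be filled from an existing Arrow array instead (added in the following commit). The call pattern below mirrors the BaseVarImpl hunk later in this patch; it is Cython-internal (populate_from_metadata() is declared cdef), and the metadata attribute names other than _arrow_type, name and precision are assumptions for illustration only.

    # illustrative Cython-side usage only; some attribute names are assumed
    arrow_array = ArrowArrayImpl.__new__(ArrowArrayImpl)
    arrow_array.populate_from_metadata(
        arrow_type=metadata._arrow_type,        # e.g. NANOARROW_TYPE_DOUBLE
        name=metadata.name,
        precision=metadata.precision,
        scale=metadata.scale,                   # assumed attribute name
        time_unit=metadata._time_unit,          # assumed attribute name
        child_arrow_type=metadata._child_arrow_type,  # assumed attribute name
    )
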
diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index e8382af2..dbda323c 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -59,7 +59,7 @@ cdef int convert_date_to_arrow_timestamp(ArrowArrayImpl arrow_array, int64_t ts dt = convert_date_to_python(buffer) td = dt - EPOCH_DATE - ts = int(cydatetime.total_seconds(td) * arrow_array.factor) + ts = int(cydatetime.total_seconds(td) * arrow_array.time_factor) arrow_array.append_int64(ts) diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index 1d398203..e3ff1716 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -280,7 +280,8 @@ cdef class BaseVarImpl: else: errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT) - self._arrow_array = ArrowArrayImpl( + self._arrow_array = ArrowArrayImpl.__new__(ArrowArrayImpl) + self._arrow_array.populate_from_metadata( arrow_type=self.metadata._arrow_type, name=self.metadata.name, precision=self.metadata.precision, From edae70440ba24d0db929374c2db1d59964cae741 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:52:53 -0600 Subject: [PATCH 133/239] Added support for the ArrowArrayStream PyCapsule interface. --- doc/src/release_notes.rst | 2 + samples/dataframe_pandas.py | 12 +-- samples/dataframe_pandas_async.py | 12 +-- samples/dataframe_parquet_write.py | 4 +- samples/dataframe_polars.py | 5 +- samples/dataframe_pyarrow.py | 4 +- src/oracledb/__init__.py | 1 + src/oracledb/arrow_array.py | 8 ++ src/oracledb/arrow_impl.pxd | 10 +++ src/oracledb/dataframe.py | 32 ++++++-- src/oracledb/errors.py | 5 ++ src/oracledb/impl/arrow/array.pyx | 67 +++++++++++++++ src/oracledb/impl/arrow/dataframe.pyx | 112 ++++++++++++++++++++++++++ src/oracledb/impl/arrow/utils.pyx | 21 ++++- src/oracledb/utils.py | 19 ++++- tests/test_8000_dataframe.py | 83 ++++--------------- tests/test_8100_dataframe_async.py | 45 ++--------- 17 files changed, 301 insertions(+), 141 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 75de4f01..93164270 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -35,6 +35,8 @@ Common Changes #) Changes to :ref:`data frame ` support: + - Added internal support for the ArrowArrayStream PyCapsule interface to + simplify :ref:`OracleDataFrame ` use. - Remove use of the DataFrame Interchange Protocol in :ref:`OracleDataFrames `. - Documentation on methods and attributes on the ``DataFrame`` and diff --git a/samples/dataframe_pandas.py b/samples/dataframe_pandas.py index 229fbf2f..ccf1fe96 100644 --- a/samples/dataframe_pandas.py +++ b/samples/dataframe_pandas.py @@ -60,9 +60,7 @@ odf = connection.fetch_df_all(statement=sql, arraysize=100) # Get a Pandas DataFrame from the data -df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() -).to_pandas() +df = pyarrow.table(odf).to_pandas() # Perform various Pandas operations on the DataFrame @@ -93,9 +91,7 @@ # behavior on the sample table. 
sql = "select id, name from SampleQueryTab order by id" for odf in connection.fetch_df_batches(statement=sql, size=10): - df_b = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() + df_b = pyarrow.table(odf).to_pandas() print(f"Appending {df_b.shape[0]} rows") df = pandas.concat([df, df_b], ignore_index=True) @@ -137,9 +133,7 @@ odf = connection.fetch_df_all(statement=sql, arraysize=100) # Get a Pandas DataFrame from the data -df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() -).to_pandas() +df = pyarrow.table(odf).to_pandas() # Perform various Pandas operations on the DataFrame diff --git a/samples/dataframe_pandas_async.py b/samples/dataframe_pandas_async.py index b7e49b8e..25271fd5 100644 --- a/samples/dataframe_pandas_async.py +++ b/samples/dataframe_pandas_async.py @@ -64,9 +64,7 @@ async def main(): odf = await connection.fetch_df_all(statement=SQL, arraysize=100) # Get a Pandas DataFrame from the data - df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() + df = pyarrow.table(odf).to_pandas() # Perform various Pandas operations on the DataFrame @@ -96,9 +94,7 @@ async def main(): # Tune 'size' for your data set. Here it is small to show the batch fetch # behavior on the sample table. async for odf in connection.fetch_df_batches(statement=SQL, size=10): - df_b = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() + df_b = pyarrow.table(odf).to_pandas() print(f"Appending {df_b.shape[0]} rows") df = pandas.concat([df, df_b], ignore_index=True) @@ -141,9 +137,7 @@ async def main(): odf = await connection.fetch_df_all(statement=sql, arraysize=100) # Get a Pandas DataFrame from the data - df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() + df = pyarrow.table(odf).to_pandas() # Perform various Pandas operations on the DataFrame diff --git a/samples/dataframe_parquet_write.py b/samples/dataframe_parquet_write.py index 02a7d93f..7a023859 100644 --- a/samples/dataframe_parquet_write.py +++ b/samples/dataframe_parquet_write.py @@ -61,9 +61,7 @@ for odf in connection.fetch_df_batches(statement=SQL, size=FETCH_BATCH_SIZE): - pyarrow_table = pyarrow.Table.from_arrays( - arrays=odf.column_arrays(), names=odf.column_names() - ) + pyarrow_table = pyarrow.table(odf) if not pqwriter: pqwriter = pq.ParquetWriter(PARQUET_FILE_NAME, pyarrow_table.schema) diff --git a/samples/dataframe_polars.py b/samples/dataframe_polars.py index 7b91ced7..9416af0c 100644 --- a/samples/dataframe_polars.py +++ b/samples/dataframe_polars.py @@ -57,10 +57,7 @@ odf = connection.fetch_df_all(statement=SQL1, arraysize=100) # Convert to a Polars DataFrame -pyarrow_table = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() -) -p = polars.from_arrow(pyarrow_table) +p = polars.from_arrow(odf) print(type(p)) # diff --git a/samples/dataframe_pyarrow.py b/samples/dataframe_pyarrow.py index d666f62b..8ce20a4d 100644 --- a/samples/dataframe_pyarrow.py +++ b/samples/dataframe_pyarrow.py @@ -56,9 +56,7 @@ odf = connection.fetch_df_all(statement=SQL1, arraysize=100) # Create a PyArrow table -pyarrow_table = pyarrow.Table.from_arrays( - arrays=odf.column_arrays(), names=odf.column_names() -) +pyarrow_table = pyarrow.table(odf) print("Type:") print(type(pyarrow_table)) # diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index ab3608ff..42e7e854 100644 --- a/src/oracledb/__init__.py +++ 
b/src/oracledb/__init__.py @@ -287,6 +287,7 @@ from .utils import ( enable_thin_mode as enable_thin_mode, + from_arrow as from_arrow, register_params_hook as register_params_hook, register_password_type as register_password_type, register_protocol as register_protocol, diff --git a/src/oracledb/arrow_array.py b/src/oracledb/arrow_array.py index d1afeece..14356ce5 100644 --- a/src/oracledb/arrow_array.py +++ b/src/oracledb/arrow_array.py @@ -29,6 +29,8 @@ # array data to other data frame libraries. # ----------------------------------------------------------------------------- +from .arrow_impl import ArrowArrayImpl + from . import errors @@ -51,6 +53,12 @@ def __repr__(self): def __str__(self): return self.__repr__() + @classmethod + def _from_arrow(cls, obj): + array = cls.__new__(cls) + array._impl = ArrowArrayImpl.from_arrow_array(obj) + return array + @classmethod def _from_impl(cls, impl): array = cls.__new__(cls) diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index 27b7bc30..c905c152 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -46,10 +46,18 @@ cdef extern from "nanoarrow.h": ArrowArray** children const void** buffers void (*release)(ArrowArray*) + void *private_data cdef struct ArrowSchema: + const char *format; + const char *name; + const char *metadata; + int64_t flags + int64_t n_children ArrowSchema** children + ArrowSchema* dictionary void (*release)(ArrowSchema*) + void *private_data cpdef enum ArrowType: NANOARROW_TYPE_BOOL @@ -102,6 +110,8 @@ cdef class ArrowArrayImpl: array.array values) except -1 cdef int append_vector(self, array.array value) except -1 cdef int finish_building(self) except -1 + cdef int populate_from_array(self, ArrowSchema* schema, + ArrowArray* array) except -1 cdef int populate_from_metadata(self, ArrowType arrow_type, str name, int8_t precision, int8_t scale, ArrowTimeUnit time_unit, diff --git a/src/oracledb/dataframe.py b/src/oracledb/dataframe.py index 90fea896..c263073c 100644 --- a/src/oracledb/dataframe.py +++ b/src/oracledb/dataframe.py @@ -32,6 +32,7 @@ from typing import List from .arrow_array import ArrowArray +from .arrow_impl import DataFrameImpl from . import errors @@ -41,16 +42,37 @@ class DataFrame: def __init__(self): errors._raise_err(errors.ERR_INTERNAL_CREATION_REQUIRED) + @classmethod + def _from_arrow(cls, obj): + df = cls.__new__(cls) + df._initialize(DataFrameImpl.from_arrow_stream(obj)) + return df + @classmethod def _from_impl(cls, impl): df = cls.__new__(cls) - df._impl = impl - df._arrays = [ArrowArray._from_impl(a) for a in impl.get_arrays()] - df._arrays_by_name = {} - for array in df._arrays: - df._arrays_by_name[array.name] = array + df._initialize(impl) return df + def _initialize(self, impl): + """ + Initializes the object given the implementation. + """ + self._impl = impl + self._arrays = [ArrowArray._from_impl(a) for a in impl.get_arrays()] + self._arrays_by_name = {} + for array in self._arrays: + self._arrays_by_name[array.name] = array + + def __arrow_c_stream__(self, requested_schema=None): + """ + Returns the ArrowArrayStream PyCapsule which allows direct conversion + to foreign data frames that support this interface. 
+ """ + if requested_schema is not None: + raise NotImplementedError("requested_schema") + return self._impl.get_stream_capsule() + def column_arrays(self) -> List: """ Returns a list of the Arrow arrays corresponding to each column in the diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 3f1f15ba..b4bd9766 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -322,6 +322,7 @@ def _raise_not_supported(feature: str) -> None: ERR_INVALID_NETWORK_NAME = 3029 ERR_ARROW_UNSUPPORTED_DATA_TYPE = 3030 ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT = 3031 +ERR_ARROW_UNSUPPORTED_DATA_FORMAT = 3032 # error numbers that result in DatabaseError ERR_TNS_ENTRY_NOT_FOUND = 4000 @@ -900,6 +901,10 @@ def _raise_not_supported(feature: str) -> None: "Apache Arrow format does not support sparse vectors with flexible " "dimensions" ), + ERR_ARROW_UNSUPPORTED_DATA_FORMAT: ( + 'conversion from Arrow format "{schema_format}" to Oracle Database ' + "is not supported" + ), ERR_ARROW_UNSUPPORTED_DATA_TYPE: ( "conversion from Oracle Database type {db_type_name} to Apache " "Arrow format is not supported" diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 8dbee66d..2d4527bc 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -250,6 +250,73 @@ cdef class ArrowArrayImpl: _check_nanoarrow(ArrowArrayFinishBuildingDefault(self.arrow_array, NULL)) + @classmethod + def from_arrow_array(cls, obj): + """ + Create an ArrowArrayImpl instance by extracting the information an + object implementing the PyCapsule Arrow array interface. + """ + cdef: + ArrowArrayImpl array_impl + ArrowSchema *arrow_schema + ArrowArray *arrow_array + schema_capsule, array_capsule = obj.__arrow_c_array__() + arrow_schema = cpython.PyCapsule_GetPointer( + schema_capsule, "arrow_schema" + ) + arrow_array = cpython.PyCapsule_GetPointer( + array_capsule, "arrow_array" + ) + array_impl = ArrowArrayImpl.__new__(ArrowArrayImpl) + array_impl.populate_from_array(arrow_schema, arrow_array) + return array_impl + + cdef int populate_from_array(self, ArrowSchema* schema, + ArrowArray* array) except -1: + """ + Populate the array from another array. 
+ """ + cdef str schema_format + ArrowSchemaMove(schema, self.arrow_schema) + ArrowArrayMove(array, self.arrow_array) + schema_format = schema.format.decode() + self.name = schema.name.decode() + if schema_format == "u": + self.arrow_type = NANOARROW_TYPE_STRING + elif schema_format == "U": + self.arrow_type = NANOARROW_TYPE_LARGE_STRING + elif schema_format == "z": + self.arrow_type = NANOARROW_TYPE_BINARY + elif schema_format == "Z": + self.arrow_type = NANOARROW_TYPE_LARGE_BINARY + elif schema_format == "g": + self.arrow_type = NANOARROW_TYPE_DOUBLE + elif schema_format == "f": + self.arrow_type = NANOARROW_TYPE_FLOAT + elif schema_format == "l": + self.arrow_type = NANOARROW_TYPE_INT64 + elif schema_format == "tss:": + self.arrow_type = NANOARROW_TYPE_TIMESTAMP + self._set_time_unit(NANOARROW_TIME_UNIT_SECOND) + elif schema_format == "tsm:": + self.arrow_type = NANOARROW_TYPE_TIMESTAMP + self._set_time_unit(NANOARROW_TIME_UNIT_MILLI) + elif schema_format == "tsu:": + self.arrow_type = NANOARROW_TYPE_TIMESTAMP + self._set_time_unit(NANOARROW_TIME_UNIT_MICRO) + elif schema_format == "tsn:": + self.arrow_type = NANOARROW_TYPE_TIMESTAMP + self._set_time_unit(NANOARROW_TIME_UNIT_NANO) + elif schema_format.startswith("d:"): + self.arrow_type = NANOARROW_TYPE_DECIMAL128 + self.precision, self.scale = \ + [int(s) for s in schema_format[2:].split(",")] + elif schema_format == "b": + self.arrow_type = NANOARROW_TYPE_BOOL + else: + errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_FORMAT, + schema_format=schema_format) + cdef int populate_from_metadata(self, ArrowType arrow_type, str name, int8_t precision, int8_t scale, ArrowTimeUnit time_unit, diff --git a/src/oracledb/impl/arrow/dataframe.pyx b/src/oracledb/impl/arrow/dataframe.pyx index 80746179..7a4f3ceb 100644 --- a/src/oracledb/impl/arrow/dataframe.pyx +++ b/src/oracledb/impl/arrow/dataframe.pyx @@ -30,9 +30,121 @@ cdef class DataFrameImpl: + @classmethod + def from_arrow_stream(cls, obj): + """ + Extract Arrow arrays from an object implementing the PyCapsule arrow + stream interface. + """ + cdef: + ArrowArrayStream *arrow_stream + ArrowSchema arrow_schema + ArrowArray arrow_array + DataFrameImpl df_impl + ArrowArrayImpl array + ssize_t i + df_impl = DataFrameImpl.__new__(DataFrameImpl) + df_impl.arrays = [] + capsule = obj.__arrow_c_stream__() + arrow_stream = cpython.PyCapsule_GetPointer( + capsule, "arrow_array_stream" + ) + _check_nanoarrow(arrow_stream.get_schema(arrow_stream, &arrow_schema)) + _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) + for i in range(arrow_schema.n_children): + array = ArrowArrayImpl.__new__(ArrowArrayImpl) + array.populate_from_array(arrow_schema.children[i], + arrow_array.children[i]) + df_impl.arrays.append(array) + _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) + if arrow_array.release != NULL: + raise NotImplementedError("multiple chunks not supported") + ArrowArrayStreamRelease(arrow_stream) + return df_impl + def get_arrays(self): """ Internal method for getting the list of arrays associated with the data frame. """ return self.arrays + + def get_stream_capsule(self): + """ + Internal method for getting a PyCapsule pointer to a stream that + encapsulates the arrays found in the data frame. 
+ """ + cdef: + ArrowArrayImpl array_impl + ArrowArrayStream *stream + int64_t i, num_arrays + ArrowSchema schema + ArrowArray array + + # initialization + stream = NULL + array.release = NULL + schema.release = NULL + num_arrays = len(self.arrays) + + try: + + # create schema/array encompassing all of the arrays + _check_nanoarrow( + ArrowSchemaInitFromType(&schema, NANOARROW_TYPE_STRUCT) + ) + _check_nanoarrow(ArrowSchemaAllocateChildren(&schema, num_arrays)) + _check_nanoarrow( + ArrowArrayInitFromType(&array, NANOARROW_TYPE_STRUCT) + ) + _check_nanoarrow(ArrowArrayAllocateChildren(&array, num_arrays)) + for i, array_impl in enumerate(self.arrays): + array.length = array_impl.arrow_array.length + copy_arrow_array( + array_impl, array_impl.arrow_array, array.children[i] + ) + _check_nanoarrow( + ArrowSchemaDeepCopy( + array_impl.arrow_schema, schema.children[i] + ) + ) + + # create stream and populate it + stream = \ + cpython.PyMem_Calloc(1, sizeof(ArrowArrayStream)) + _check_nanoarrow( + ArrowBasicArrayStreamInit(stream, &schema, num_arrays) + ) + ArrowBasicArrayStreamSetArray(stream, 0, &array) + + except: + if schema.release: + ArrowSchemaRelease(&schema) + if array.release: + ArrowArrayRelease(&array) + if stream != NULL: + if stream.release: + ArrowArrayStreamRelease(stream) + cpython.PyMem_Free(stream) + raise + + # create and return capsule + return cpython.PyCapsule_New( + stream, + "arrow_array_stream", + &pycapsule_array_stream_deleter + ) + + +cdef void pycapsule_array_stream_deleter(object stream_capsule) noexcept: + """ + Called when the PyCapsule pointer is no longer required and performs the + necessary cleanup. + """ + cdef ArrowArrayStream* stream + stream = cpython.PyCapsule_GetPointer( + stream_capsule, 'arrow_array_stream' + ) + if stream.release != NULL: + ArrowArrayStreamRelease(stream) + cpython.PyMem_Free(stream) diff --git a/src/oracledb/impl/arrow/utils.pyx b/src/oracledb/impl/arrow/utils.pyx index bb3cbf7a..8a2e32ca 100644 --- a/src/oracledb/impl/arrow/utils.pyx +++ b/src/oracledb/impl/arrow/utils.pyx @@ -34,6 +34,11 @@ cdef extern from "nanoarrow.c": ctypedef void (*ArrowBufferDeallocatorCallback) + cdef struct ArrowArrayStream: + int (*get_schema)(ArrowArrayStream *, ArrowSchema * out) + int (*get_next)(ArrowArrayStream * stream, ArrowArray * out) + void (*release)(ArrowArrayStream*) + cdef struct ArrowBufferAllocator: void *private_data @@ -85,13 +90,20 @@ cdef extern from "nanoarrow.c": ArrowError *error) ArrowErrorCode ArrowArrayInitFromType(ArrowArray* array, ArrowType storage_type) + void ArrowArrayMove(ArrowArray* src, ArrowArray* dst) void ArrowArrayRelease(ArrowArray *array) ArrowErrorCode ArrowArrayReserve(ArrowArray* array, int64_t additional_size_elements) ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) + void ArrowArrayStreamRelease(ArrowArrayStream *array_stream) ArrowBitmap* ArrowArrayValidityBitmap(ArrowArray* array) ArrowErrorCode ArrowArrayViewInitFromArray(ArrowArrayView* array_view, ArrowArray* array) + ArrowErrorCode ArrowBasicArrayStreamInit(ArrowArrayStream* array_stream, + ArrowSchema* schema, + int64_t n_arrays) + void ArrowBasicArrayStreamSetArray(ArrowArrayStream* array_stream, + int64_t i, ArrowArray* array) int8_t ArrowBitGet(const uint8_t* bits, int64_t i) ArrowBufferAllocator ArrowBufferDeallocator(ArrowBufferDeallocatorCallback, void *private_data) @@ -100,10 +112,13 @@ cdef extern from "nanoarrow.c": void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) ArrowErrorCode 
ArrowDecimalSetDigits(ArrowDecimal* decimal, ArrowStringView value) + ArrowErrorCode ArrowSchemaAllocateChildren(ArrowSchema* schema, + int64_t n_children) ArrowErrorCode ArrowSchemaDeepCopy(const ArrowSchema *schema, ArrowSchema *schema_out) void ArrowSchemaInit(ArrowSchema* schema) ArrowErrorCode ArrowSchemaInitFromType(ArrowSchema* schema, ArrowType type) + void ArrowSchemaMove(ArrowSchema* src, ArrowSchema* dst) void ArrowSchemaRelease(ArrowSchema *schema) ArrowErrorCode ArrowSchemaSetName(ArrowSchema* schema, const char* name) ArrowErrorCode ArrowSchemaSetType(ArrowSchema * schema, ArrowType type) @@ -222,6 +237,7 @@ cdef int copy_arrow_array(ArrowArrayImpl array_impl, """ cdef: ArrowBuffer *dest_buffer + ArrowBuffer *src_buffer ssize_t i _check_nanoarrow( ArrowArrayInitFromType( @@ -242,8 +258,9 @@ cdef int copy_arrow_array(ArrowArrayImpl array_impl, for i in range(src.n_buffers): if src.buffers[i] != NULL: dest_buffer = ArrowArrayBuffer(dest, i) - dest_buffer.data = src.buffers[i] - dest_buffer.size_bytes = 0 + src_buffer = ArrowArrayBuffer(src, i) + dest_buffer.data = src_buffer.data + dest_buffer.size_bytes = src_buffer.size_bytes dest_buffer.allocator = ArrowBufferDeallocator( arrow_buffer_dealloc_callback, array_impl diff --git a/src/oracledb/utils.py b/src/oracledb/utils.py index 239cdc55..e58613b3 100644 --- a/src/oracledb/utils.py +++ b/src/oracledb/utils.py @@ -28,7 +28,10 @@ # Contains utility classes and methods. # ----------------------------------------------------------------------------- -from typing import Callable, Union +from typing import Any, Callable, Union + +from .arrow_array import ArrowArray +from .dataframe import DataFrame from . import base_impl from . import driver_mode @@ -53,6 +56,20 @@ def enable_thin_mode(): pass +def from_arrow(obj: Any) -> Union[DataFrame, ArrowArray]: + """ + Uses the Arrow PyCapsule interface to return either a DataFrame or + ArrowArray object, depending on what interface is supported by the object + that is supplied to the function. + """ + if hasattr(obj, "__arrow_c_stream__"): + return DataFrame._from_arrow(obj) + elif hasattr(obj, "__arrow_c_array__"): + return ArrowArray._from_arrow(obj) + msg = "object must implement the PyCapsule stream or array interfaces" + raise ValueError(msg) + + def params_initer(f): """ Decorator function which is used on the ConnectParams and PoolParams diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index ab034b1d..568aac3e 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -243,7 +243,7 @@ def __check_interop(self): Checks to see if the pyarrow and pandas modules are available. 
""" if not HAS_INTEROP: - self.skipTest("missing pandas or pyarrow modules") + self.skipTest("missing numpy, pandas or pyarrow modules") def __convert_date(self, value): """ @@ -395,10 +395,7 @@ def __validate_df(self, ora_df, data): """ raw_df = self.__convert_to_df(data) raw_data = self.__get_data_from_df(raw_df) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, raw_data) @@ -531,10 +528,7 @@ def test_8019(self): ora_df = self.conn.fetch_df_all( "select to_clob('test_8023') from dual" ) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -545,10 +539,7 @@ def test_8020(self): ora_df = self.conn.fetch_df_all( "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" ) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -559,10 +550,7 @@ def test_8021(self): ora_df = self.conn.fetch_df_all( "select utl_raw.cast_to_raw('test_8025') from dual" ) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -584,10 +572,7 @@ def test_8022(self): select true """ ) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -626,10 +611,7 @@ def test_8023(self): (None,), (None,), ] - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -657,12 +639,7 @@ def test_8024(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - # number of children for a nested list = 1 - self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() @@ -682,11 +659,7 @@ def test_8025(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() @@ -706,11 +679,7 @@ def test_8026(self): ) 
self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() @@ -730,11 +699,7 @@ def test_8027(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() @@ -757,11 +722,7 @@ def test_8028(self): ) self.assertEqual(ora_df.num_rows(), 3) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() @@ -811,11 +772,7 @@ def test_8029(self): ) self.assertEqual(ora_df.num_rows(), 12) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_sparse_vectors_supported() @@ -857,12 +814,7 @@ def test_8030(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - # number of children for a struct = 3 (num_dimensions, indices, values) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 3) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_sparse_vectors_supported() @@ -904,12 +856,7 @@ def test_8031(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - # number of children for a struct = 3 (num_dimensions, indices, values) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 3) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_vectors_supported() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 2592ac10..9679e83e 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -401,10 +401,7 @@ def __validate_df(self, ora_df, data): """ raw_df = self.__convert_to_df(data) raw_data = self.__get_data_from_df(raw_df) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = 
self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, raw_data) @@ -518,10 +515,7 @@ async def test_8117(self): self.__check_interop() with test_env.DefaultsContextManager("fetch_decimals", True): ora_df = await self.conn.fetch_df_all("select 1.0 from dual") - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -532,10 +526,7 @@ async def test_8118(self): ora_df = await self.conn.fetch_df_all( "select to_clob('test_8023') from dual" ) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -546,10 +537,7 @@ async def test_8119(self): ora_df = await self.conn.fetch_df_all( "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" ) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -571,10 +559,7 @@ async def test_8120(self): select true """ ) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) @@ -595,13 +580,7 @@ async def test_8121(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - - # number of children for a nested list = 1 - self.assertEqual(fetched_tab.schema.types[0].num_fields, 1) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) @test_env.skip_unless_sparse_vectors_supported() @@ -643,12 +622,7 @@ async def test_8122(self): ) self.assertEqual(ora_df.num_rows(), 2) self.assertEqual(ora_df.num_columns(), 1) - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - # number of children for a struct = 3 (num_dimensions, indices, values) - self.assertEqual(fetched_tab.schema.types[0].num_fields, 3) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) async def test_8123(self): @@ -686,10 +660,7 @@ async def test_8123(self): (None,), (None,), ] - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() + fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) From 2a2f2ac47d05ec9b6db490e22860fff16eefa691 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:53:27 -0600 Subject: [PATCH 134/239] Fixed bug with execution of a PL/SQL block containing at least one output bind variable immediately following a query that returned multiple duplicate rows. 
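As context for this fix, here is a minimal sketch of the affected sequence in Thin mode, mirroring the regression test added below (the connection credentials and the inserted string value are placeholders, not part of the patch; TestTempTable and its columns come from the test schema):

    import oracledb

    conn = oracledb.connect(user="user", password="password", dsn="localhost/orclpdb1")
    cursor = conn.cursor()

    # populate a table so a query returns many rows with duplicate column values
    cursor.execute("truncate table TestTempTable")
    cursor.executemany(
        "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)",
        [(i + 1, "same_string") for i in range(20)],
    )
    conn.commit()

    # a query returning duplicate rows, immediately followed by a PL/SQL block
    # that uses an output bind variable; before this fix the "last raw value"
    # retained for duplicate-row handling interfered with the out bind
    cursor.execute("select IntCol, StringCol1 from TestTempTable")
    out_var = cursor.var(int)
    cursor.execute("begin :1 := 4370; end;", [out_var])
    assert out_var.getvalue() == 4370
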
--- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/thin/messages/base.pyx | 2 +- tests/test_4300_cursor_other.py | 13 +++++++++++++ tests/test_6300_cursor_other_async.py | 15 +++++++++++++++ 4 files changed, 32 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 93164270..e69dbb5e 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -26,6 +26,9 @@ Thin Mode Changes value of the attribute :attr:`DeqOptions.deliverymode`. #) Fixed bug with detection of when a connection has been closed by the database without notification. +#) Fixed bug with execution of a PL/SQL block containing at least one output + bind variable immediately following a query that returned multiple + duplicate rows. Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 1b9a0746..d56a4297 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -806,7 +806,7 @@ cdef class MessageWithData(Message): # retain last raw value when not fetching Arrow (for handling # duplicate rows) - if not self.cursor_impl.fetching_arrow: + if self.in_fetch and not self.cursor_impl.fetching_arrow: var_impl._last_raw_value = \ var_impl._values[self.cursor_impl._last_row_index] diff --git a/tests/test_4300_cursor_other.py b/tests/test_4300_cursor_other.py index 71a9faaa..0091fbe3 100644 --- a/tests/test_4300_cursor_other.py +++ b/tests/test_4300_cursor_other.py @@ -997,6 +997,19 @@ def test_4369(self): cursor = conn.cursor() self.assertEqual(cursor.rowcount, -1) + def test_4370(self): + "4370 - execute PL/SQL with out vars after query with duplicate data" + self.cursor.execute("truncate table TestTempTable") + self.cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + [(i + 1, "test_4370") for i in range(20)], + ) + self.conn.commit() + self.cursor.execute("select IntCol, StringCol1 from TestTempTable") + var = self.cursor.var(int) + self.cursor.execute("begin :1 := 4370; end;", [var]) + self.assertEqual(var.getvalue(), 4370) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_6300_cursor_other_async.py b/tests/test_6300_cursor_other_async.py index d2667f20..fb887931 100644 --- a/tests/test_6300_cursor_other_async.py +++ b/tests/test_6300_cursor_other_async.py @@ -913,6 +913,21 @@ async def test_6353(self): cursor = conn.cursor() self.assertEqual(cursor.rowcount, -1) + async def test_6354(self): + "6354 - execute PL/SQL with out vars after query with duplicate data" + await self.cursor.execute("truncate table TestTempTable") + await self.cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + [(i + 1, "test_4370") for i in range(20)], + ) + await self.conn.commit() + await self.cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + var = self.cursor.var(int) + await self.cursor.execute("begin :1 := 4370; end;", [var]) + self.assertEqual(var.getvalue(), 4370) + if __name__ == "__main__": test_env.run_test_cases() From 69353c2748e4ace11f2bfb314f56bf337314880d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:53:58 -0600 Subject: [PATCH 135/239] Update third party licenses. 
--- THIRD_PARTY_LICENSES.txt | 10996 ------------------------------------- 1 file changed, 10996 deletions(-) diff --git a/THIRD_PARTY_LICENSES.txt b/THIRD_PARTY_LICENSES.txt index 15d44264..0f8f1a9a 100644 --- a/THIRD_PARTY_LICENSES.txt +++ b/THIRD_PARTY_LICENSES.txt @@ -710,33 +710,6 @@ under the License. ------------------------------------------------------------------------------- ------------------------------------------------------------------------------- -Python dataframe interchange protocol - -MIT License - -Copyright (c) 2020 Consortium for Python Data API Standards contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- - Microsoft Authentication Library (MSAL) for Python The MIT License (MIT) @@ -1484,10972 +1457,3 @@ Copyright 2019 Kenneth Reitz ------------------------------------------------------------------------------- ------------------------------------------------------------------------------- - -Pandas license - -BSD 3-Clause License - -Copyright (c) 2008-2011, AQR Capital Management, LLC, Lambda Foundry, Inc. and PyData Development Team -All rights reserved. - -Copyright (c) 2011-2023, Open source contributors. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - ----- Dependency licenses - -numpy - -package_name: numpy -license_type: BSD License -license_text: -Copyright (c) 2005-2024, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----- ------------------------------------------------------------------------------------ -LICENSES_bundled.txt ------------------------------------------------------------------------------------ - -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. - -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. -Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. -Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - -$COPYRIGHT$ - -Additional copyrights may follow - -$HEADER$ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. 
- -- Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -The copyright holders provide no reassurances that the source code -provided does not infringe any patent, copyright, or any other -intellectual property rights of third parties. The copyright holders -disclaim any liability to any recipient for claims brought against -recipient by any third party for infringement of that parties -intellectual property rights. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------------- -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - /* - * Copyright (c) 2014 Ryan Juckett - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ - -/* - * This file contains a modified version of Ryan Juckett's Dragon4 - * implementation, obtained from https://www.ryanjuckett.com, - * which has been ported from C++ to C and which has - * modifications specific to printing floats in numpy. - * - * Ryan Juckett's original code was under the Zlib license; he gave numpy - * permission to include it under the MIT license instead. - */ - - ----------------------------------------------------------------------------- -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - zlib License - ------------ - - Copyright (C) 2010 - 2019 ridiculous_fish, - Copyright (C) 2016 - 2019 Kim Walisch, - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. 
- - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - ----------------------------------------------------------------------------- -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: - -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - -# Copyright 2016 The Meson development team - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ----------------------------------------------------------------------------- -Name: spin -Files: .spin/cmds.py -License: BSD-3 - BSD 3-Clause License - -Copyright (c) 2021--2022, Scientific Python project -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------------------------------- -LICENSES_bundled in different directories: ------------------------------------------------------------------------------------ - -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. - -Name: SPLITMIX64 -Files: /numpy/blob/numpy/random/src/splitmix64/* -License: Sebastiano Vigna © 2005–2019 NumPy Developers, Licensed under the 3-clause BSD License. 
-For details, see /numpy/blob/numpy/random/src/splitmix64/LICENSE.md - -Name: SFC64 -Files: /numpy/blob/numpy/random/src/sfc64/* -License: MIT -For details, see /numpy/blob/numpy/random/src/sfc64/LICENSE.md - -Name: PHILOX -Files: /numpy/blob/numpy/random/src/philox/* -License: D. E. Shaw Research -For license text, see /numpy/blob/numpy/random/src/philox/LICENSE.md - -Name: PCG64 -Files: /numpy/blob/numpy/random/src/pcg64/* -License: MIT -For license text, see/numpy/blob/numpy/random/src/pcg64/LICENSE.md - -Name: MT19937 -Files: /numpy/blob/numpy/random/src/mt19937/* -License: MIT -For license text, see/numpy/blob/numpy/random/src/mt19937/LICENSE.md - -Name: Julia -Files: /numpy/blob/numpy/random/src/distributions/* -License: Jeff Bezanson, Stefan Karpinski, Viral B. Shah, and other contributors -For license text, see/numpy/blob/numpy/random/src/distributions/LICENSE.md - -Name: Random -Files: /numpy/blob/numpy/random/* -License: dual-licensed under the The University of Illinois/NCSA Open Source License (NCSA) and The 3-Clause BSD License -For license text, see/numpy/blob/numpy/random/LICENSE.md - -Name: numpy.core.ma -Files: /numpy/blob/numpy/ma/* -License: University of Georgia and Pierre G.F. Gerard-Marchant -For license text, see /numpy/blob/numpy/ma/LICENSE - ---------------- RECURSIVE LICENSE Mentioned in LICENSES_bundled in different directories (list above) -------------------- -------------------------------------------------------------------------------------------------------------------------------------- - -Name: SPLITMIX64 - -Written in 2015 by Sebastiano Vigna (vigna@acm.org) - -To the extent possible under law, the author has dedicated all copyright and related and neighboring rights to this software to the public domain worldwide. This software is distributed without any warranty. - -See http://creativecommons.org/publicdomain/zero/1.0/. - -------------------------------------------------------------------------------------------------------------------------------------- - -Name: SFC64 - -© 2005–2019 NumPy Developers, Licensed under the 3-clause BSD License. - -The MIT License - -Adapted from a C++ implementation of Chris Doty-Humphrey's SFC PRNG. - -https://gist.github.com/imneme/f1f7821f07cf76504a97f6537c818083 - -Copyright (c) 2018 Melissa E. O'Neill - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -------------------------------------------------------------------------------------------------------------------------------------- -Name: PHILOX - -Copyright 2010-2012, D. E. Shaw Research. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF -THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/utfcpp/ have the following license - -Copyright 2006-2018 Nemanja Trifunovic - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -This project includes code from Apache Kudu. - - * cpp/cmake_modules/CompilerInfo.cmake is based on Kudu's cmake_modules/CompilerInfo.cmake - -Copyright: 2016 The Apache Software Foundation. -Home page: https://kudu.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -This project includes code from Apache Impala (incubating), formerly -Impala. The Impala code and rights were donated to the ASF as part of the -Incubator process after the initial code imports into Apache Parquet. - -Copyright: 2012 Cloudera, Inc. -Copyright: 2016 The Apache Software Foundation. 
-Home page: http://impala.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -This project includes code from Apache Aurora. - -* dev/release/{release,changelog,release-candidate} are based on the scripts from - Apache Aurora - -Copyright: 2016 The Apache Software Foundation. -Home page: https://aurora.apache.org/ -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -This project includes code from the Google styleguide. - -* cpp/build-support/cpplint.py is based on the scripts from the Google styleguide. - -Copyright: 2009 Google Inc. All rights reserved. -Homepage: https://github.com/google/styleguide -License: 3-clause BSD - --------------------------------------------------------------------------------- - -This project includes code from Snappy. - -* cpp/cmake_modules/{SnappyCMakeLists.txt,SnappyConfig.h} are based on code - from Google's Snappy project. - -Copyright: 2009 Google Inc. All rights reserved. -Homepage: https://github.com/google/snappy -License: 3-clause BSD - --------------------------------------------------------------------------------- - -This project includes code from the manylinux project. - -* python/manylinux1/scripts/{build_python.sh,python-tag-abi-tag.py, - requirements.txt} are based on code from the manylinux project. - -Copyright: 2016 manylinux -Homepage: https://github.com/pypa/manylinux -License: The MIT License (MIT) - --------------------------------------------------------------------------------- - -This project includes code from the cymove project: - -* python/pyarrow/includes/common.pxd includes code from the cymove project - -The MIT License (MIT) -Copyright (c) 2019 Omer Ozarslan - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, -DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE -OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -The projects includes code from the Ursabot project under the dev/archery -directory. - -License: BSD 2-Clause - -Copyright 2019 RStudio, Inc. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -This project include code from mingw-w64. - -* cpp/src/arrow/util/cpu-info.cc has a polyfill for mingw-w64 < 5 - -Copyright (c) 2009 - 2013 by the mingw-w64 project -Homepage: https://mingw-w64.org -License: Zope Public License (ZPL) Version 2.1. - ---------------------------------------------------------------------------------- - -This project include code from Google's Asylo project. - -* cpp/src/arrow/result.h is based on status_or.h - -Copyright (c) Copyright 2017 Asylo authors -Homepage: https://asylo.dev/ -License: Apache 2.0 - --------------------------------------------------------------------------------- - -This project includes code from Google's protobuf project - -* cpp/src/arrow/result.h ARROW_ASSIGN_OR_RAISE is based off ASSIGN_OR_RETURN -* cpp/src/arrow/util/bit_stream_utils.h contains code from wire_format_lite.h - -Copyright 2008 Google Inc. All rights reserved. -Homepage: https://developers.google.com/protocol-buffers/ -License: - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. - --------------------------------------------------------------------------------- - -3rdparty dependency LLVM is statically linked in certain binary distributions. -Additionally some sections of source code have been derived from sources in LLVM -and have been clearly labeled as such. LLVM has the following license: - -============================================================================== -The LLVM Project is under the Apache License v2.0 with LLVM Exceptions: -============================================================================== - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - ----- LLVM Exceptions to the Apache 2.0 License ---- - -As an exception, if, as a result of your compiling your source code, portions -of this Software are embedded into an Object form of such source code, you -may redistribute such embedded portions in such Object form without complying -with the conditions of Sections 4(a), 4(b) and 4(d) of the License. - -In addition, if you combine or link compiled forms of this Software with -software that is licensed under the GPLv2 ("Combined Software") and if a -court of competent jurisdiction determines that the patent provision (Section -3), the indemnity provision (Section 9) or other Section of the License -conflicts with the conditions of the GPLv2, you may retroactively and -prospectively choose to deem waived or otherwise exclude such Section(s) of -the License, but only in their entirety and only with respect to the Combined -Software. - -============================================================================== -Software from third parties included in the LLVM Project: -============================================================================== -The LLVM Project contains third party software which is under different license -terms. All such code will be identified clearly using at least one of two -mechanisms: -1) It will be in a separate directory tree with its own `LICENSE.txt` or - `LICENSE` file at the top containing the specific license and restrictions - which apply to that software, or -2) It will contain specific license and restriction terms at the top of every - file. - --------------------------------------------------------------------------------- - -3rdparty dependency gRPC is statically linked in certain binary -distributions, like the python wheels. gRPC has the following license: - -Copyright 2014 gRPC authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
- --------------------------------------------------------------------------------- - -3rdparty dependency Apache Thrift is statically linked in certain binary -distributions, like the python wheels. Apache Thrift has the following license: - -Apache Thrift -Copyright (C) 2006 - 2019, The Apache Software Foundation - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency Apache ORC is statically linked in certain binary -distributions, like the python wheels. Apache ORC has the following license: - -Apache ORC -Copyright 2013-2019 The Apache Software Foundation - -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). - -This product includes software developed by Hewlett-Packard: -(c) Copyright [2014-2015] Hewlett-Packard Development Company, L.P - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - --------------------------------------------------------------------------------- - -3rdparty dependency zstd is statically linked in certain binary -distributions, like the python wheels. ZSTD has the following license: - -BSD License - -For Zstandard software - -Copyright (c) 2016-present, Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency lz4 is statically linked in certain binary -distributions, like the python wheels. lz4 has the following license: - -LZ4 Library -Copyright (c) 2011-2016, Yann Collet -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency Brotli is statically linked in certain binary -distributions, like the python wheels. Brotli has the following license: - -Copyright (c) 2009, 2010, 2013-2016 by the Brotli Authors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
- --------------------------------------------------------------------------------- - -3rdparty dependency rapidjson is statically linked in certain binary -distributions, like the python wheels. rapidjson and its dependencies have the -following licenses: - -Tencent is pleased to support the open source community by making RapidJSON -available. - -Copyright (C) 2015 THL A29 Limited, a Tencent company, and Milo Yip. -All rights reserved. - -If you have downloaded a copy of the RapidJSON binary from Tencent, please note -that the RapidJSON binary is licensed under the MIT License. -If you have downloaded a copy of the RapidJSON source code from Tencent, please -note that RapidJSON source code is licensed under the MIT License, except for -the third-party components listed below which are subject to different license -terms. Your integration of RapidJSON into your own projects may require -compliance with the MIT License, as well as the other licenses applicable to -the third-party components included within RapidJSON. To avoid the problematic -JSON license in your own projects, it's sufficient to exclude the -bin/jsonchecker/ directory, as it's the only code under the JSON license. -A copy of the MIT License is included in this file. - -Other dependencies and licenses: - - Open Source Software Licensed Under the BSD License: - -------------------------------------------------------------------- - - The msinttypes r29 - Copyright (c) 2006-2013 Alexander Chemeris - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY - EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE REGENTS AND CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH - DAMAGE. 
- - Terms of the MIT License: - -------------------------------------------------------------------- - - Permission is hereby granted, free of charge, to any person obtaining a - copy of this software and associated documentation files (the "Software"), - to deal in the Software without restriction, including without limitation - the rights to use, copy, modify, merge, publish, distribute, sublicense, - and/or sell copies of the Software, and to permit persons to whom the - Software is furnished to do so, subject to the following conditions: - - The above copyright notice and this permission notice shall be included - in all copies or substantial portions of the Software. - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -3rdparty dependency snappy is statically linked in certain binary -distributions, like the python wheels. snappy has the following license: - -Copyright 2011, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -Some of the benchmark data in testdata/ is licensed differently: - - - fireworks.jpeg is Copyright 2013 Steinar H. Gunderson, and - is licensed under the Creative Commons Attribution 3.0 license - (CC-BY-3.0). See https://creativecommons.org/licenses/by/3.0/ - for more information. - - - kppkn.gtb is taken from the Gaviota chess tablebase set, and - is licensed under the MIT License. See - https://sites.google.com/site/gaviotachessengine/Home/endgame-tablebases-1 - for more information. 
- - - paper-100k.pdf is an excerpt (bytes 92160 to 194560) from the paper - “Combinatorial Modeling of Chromatin Features Quantitatively Predicts DNA - Replication Timing in _Drosophila_” by Federico Comoglio and Renato Paro, - which is licensed under the CC-BY license. See - http://www.ploscompbiol.org/static/license for more ifnormation. - - - alice29.txt, asyoulik.txt, plrabn12.txt and lcet10.txt are from Project - Gutenberg. The first three have expired copyrights and are in the public - domain; the latter does not have expired copyright, but is still in the - public domain according to the license information - (http://www.gutenberg.org/ebooks/53). - --------------------------------------------------------------------------------- - -3rdparty dependency gflags is statically linked in certain binary -distributions, like the python wheels. gflags has the following license: - -Copyright (c) 2006, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency glog is statically linked in certain binary -distributions, like the python wheels. glog has the following license: - -Copyright (c) 2008, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -A function gettimeofday in utilities.cc is based on - -http://www.google.com/codesearch/p?hl=en#dR3YEbitojA/COPYING&q=GetSystemTimeAsFileTime%20license:bsd - -The license of this code is: - -Copyright (c) 2003-2008, Jouni Malinen and contributors -All Rights Reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -3. Neither the name(s) of the above-listed copyright holder(s) nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency re2 is statically linked in certain binary -distributions, like the python wheels. re2 has the following license: - -Copyright (c) 2009 The RE2 Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - * Neither the name of Google Inc. nor the names of its contributors - may be used to endorse or promote products derived from this - software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -3rdparty dependency c-ares is statically linked in certain binary -distributions, like the python wheels. c-ares has the following license: - -# c-ares license - -Copyright (c) 2007 - 2018, Daniel Stenberg with many contributors, see AUTHORS -file. - -Copyright 1998 by the Massachusetts Institute of Technology. - -Permission to use, copy, modify, and distribute this software and its -documentation for any purpose and without fee is hereby granted, provided that -the above copyright notice appear in all copies and that both that copyright -notice and this permission notice appear in supporting documentation, and that -the name of M.I.T. not be used in advertising or publicity pertaining to -distribution of the software without specific, written prior permission. -M.I.T. makes no representations about the suitability of this software for any -purpose. It is provided "as is" without express or implied warranty. - --------------------------------------------------------------------------------- - -3rdparty dependency zlib is redistributed as a dynamically linked shared -library in certain binary distributions, like the python wheels. In the future -this will likely change to static linkage. zlib has the following license: - -zlib.h -- interface of the 'zlib' general purpose compression library - version 1.2.11, January 15th, 2017 - - Copyright (C) 1995-2017 Jean-loup Gailly and Mark Adler - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - Jean-loup Gailly Mark Adler - jloup@gzip.org madler@alumni.caltech.edu - --------------------------------------------------------------------------------- - -3rdparty dependency openssl is redistributed as a dynamically linked shared -library in certain binary distributions, like the python wheels. openssl -preceding version 3 has the following license: - - LICENSE ISSUES - ============== - - The OpenSSL toolkit stays under a double license, i.e. 
both the conditions of - the OpenSSL License and the original SSLeay license apply to the toolkit. - See below for the actual license texts. - - OpenSSL License - --------------- - -/* ==================================================================== - * Copyright (c) 1998-2019 The OpenSSL Project. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * 3. All advertising materials mentioning features or use of this - * software must display the following acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit. (http://www.openssl.org/)" - * - * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to - * endorse or promote products derived from this software without - * prior written permission. For written permission, please contact - * openssl-core@openssl.org. - * - * 5. Products derived from this software may not be called "OpenSSL" - * nor may "OpenSSL" appear in their names without prior written - * permission of the OpenSSL Project. - * - * 6. Redistributions of any form whatsoever must retain the following - * acknowledgment: - * "This product includes software developed by the OpenSSL Project - * for use in the OpenSSL Toolkit (http://www.openssl.org/)" - * - * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY - * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR - * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, - * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * ==================================================================== - * - * This product includes cryptographic software written by Eric Young - * (eay@cryptsoft.com). This product includes software written by Tim - * Hudson (tjh@cryptsoft.com). - * - */ - - Original SSLeay License - ----------------------- - -/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com) - * All rights reserved. - * - * This package is an SSL implementation written - * by Eric Young (eay@cryptsoft.com). - * The implementation was written so as to conform with Netscapes SSL. - * - * This library is free for commercial and non-commercial use as long as - * the following conditions are aheared to. The following conditions - * apply to all code found in this distribution, be it the RC4, RSA, - * lhash, DES, etc., code; not just the SSL code. The SSL documentation - * included with this distribution is covered by the same copyright terms - * except that the holder is Tim Hudson (tjh@cryptsoft.com). 
- * - * Copyright remains Eric Young's, and as such any Copyright notices in - * the code are not to be removed. - * If this package is used in a product, Eric Young should be given attribution - * as the author of the parts of the library used. - * This can be in the form of a textual message at program startup or - * in documentation (online or textual) provided with the package. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * "This product includes cryptographic software written by - * Eric Young (eay@cryptsoft.com)" - * The word 'cryptographic' can be left out if the rouines from the library - * being used are not cryptographic related :-). - * 4. If you include any Windows specific code (or a derivative thereof) from - * the apps directory (application code) you must include an acknowledgement: - * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)" - * - * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * The licence and distribution terms for any publically available version or - * derivative of this code cannot be changed. i.e. this code cannot simply be - * copied and put under another distribution licence - * [including the GNU Public Licence.] - */ - --------------------------------------------------------------------------------- - -This project includes code from the rtools-backports project. - -* ci/scripts/PKGBUILD and ci/scripts/r_windows_build.sh are based on code - from the rtools-backports project. - -Copyright: Copyright (c) 2013 - 2019, Алексей and Jeroen Ooms. -All rights reserved. -Homepage: https://github.com/r-windows/rtools-backports -License: 3-clause BSD - --------------------------------------------------------------------------------- - -Some code from pandas has been adapted for the pyarrow codebase. pandas is -available under the 3-clause BSD license, which follows: - -pandas license -============== - -Copyright (c) 2011-2012, Lambda Foundry, Inc. and PyData Development Team -All rights reserved. - -Copyright (c) 2008-2011 AQR Capital Management, LLC -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the copyright holder nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -Some bits from DyND, in particular aspects of the build system, have been -adapted from libdynd and dynd-python under the terms of the BSD 2-clause -license - -The BSD 2-Clause License - - Copyright (C) 2011-12, Dynamic NDArray Developers - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are - met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Dynamic NDArray Developers list: - - * Mark Wiebe - * Continuum Analytics - --------------------------------------------------------------------------------- - -Some source code from Ibis (https://github.com/cloudera/ibis) has been adapted -for PyArrow. Ibis is released under the Apache License, Version 2.0. 
- --------------------------------------------------------------------------------- - -dev/tasks/homebrew-formulae/apache-arrow.rb has the following license: - -BSD 2-Clause License - -Copyright (c) 2009-present, Homebrew contributors -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ----------------------------------------------------------------------- - -cpp/src/arrow/vendored/base64.cpp has the following license - -ZLIB License - -Copyright (C) 2004-2017 René Nyffenegger - -This source code is provided 'as-is', without any express or implied -warranty. In no event will the author be held liable for any damages arising -from the use of this software. - -Permission is granted to anyone to use this software for any purpose, including -commercial applications, and to alter it and redistribute it freely, subject to -the following restrictions: - -1. The origin of this source code must not be misrepresented; you must not - claim that you wrote the original source code. If you use this source code - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - -2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original source code. - -3. This notice may not be removed or altered from any source distribution. - -René Nyffenegger rene.nyffenegger@adp-gmbh.ch - --------------------------------------------------------------------------------- - -This project includes code from Folly. - - * cpp/src/arrow/vendored/ProducerConsumerQueue.h - -is based on Folly's - - * folly/Portability.h - * folly/lang/Align.h - * folly/ProducerConsumerQueue.h - -Copyright: Copyright (c) Facebook, Inc. and its affiliates. -Home page: https://github.com/facebook/folly -License: http://www.apache.org/licenses/LICENSE-2.0 - --------------------------------------------------------------------------------- - -The file cpp/src/arrow/vendored/musl/strptime.c has the following license - -Copyright © 2005-2020 Rich Felker, et al. 
- -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- - -The file cpp/cmake_modules/BuildUtils.cmake contains code from - -https://gist.github.com/cristianadam/ef920342939a89fae3e8a85ca9459b49 - -which is made available under the MIT license - -Copyright (c) 2019 Cristian Adam - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/portable-snippets/ contain code from - -https://github.com/nemequ/portable-snippets - -and have the following copyright notice: - -Each source file contains a preamble explaining the license situation -for that file, which takes priority over this file. With the -exception of some code pulled in from other repositories (such as -µnit, an MIT-licensed project which is used for testing), the code is -public domain, released using the CC0 1.0 Universal dedication (*). - -(*) https://creativecommons.org/publicdomain/zero/1.0/legalcode - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/fast_float/ contain code from - -https://github.com/lemire/fast_float - -which is made available under the Apache License 2.0. 
- --------------------------------------------------------------------------------- - -The file python/pyarrow/vendored/docscrape.py contains code from - -https://github.com/numpy/numpydoc/ - -which is made available under the BSD 2-clause license. - --------------------------------------------------------------------------------- - -The file python/pyarrow/vendored/version.py contains code from - -https://github.com/pypa/packaging/ - -which is made available under both the Apache license v2.0 and the -BSD 2-clause license. - --------------------------------------------------------------------------------- - -The files in cpp/src/arrow/vendored/pcg contain code from - -https://github.com/imneme/pcg-cpp - -and have the following copyright notice: - -Copyright 2014-2019 Melissa O'Neill , - and the PCG Project contributors. - -SPDX-License-Identifier: (Apache-2.0 OR MIT) - -Licensed under the Apache License, Version 2.0 (provided in -LICENSE-APACHE.txt and at http://www.apache.org/licenses/LICENSE-2.0) -or under the MIT license (provided in LICENSE-MIT.txt and at -http://opensource.org/licenses/MIT), at your option. This file may not -be copied, modified, or distributed except according to those terms. - -Distributed on an "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, either -express or implied. See your chosen license for details. - --------------------------------------------------------------------------------- -r/R/dplyr-count-tally.R (some portions) - -Some portions of this file are derived from code from - -https://github.com/tidyverse/dplyr/ - -which is made available under the MIT license - -Copyright (c) 2013-2019 RStudio and others. - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the “Software”), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- - -The file src/arrow/util/io_util.cc contains code from the CPython project -which is made available under the Python Software Foundation License Version 2. - --------------------------------------------------------------------------------- - -3rdparty dependency opentelemetry-cpp is statically linked in certain binary -distributions. opentelemetry-cpp is made available under the Apache License 2.0. - -Copyright The OpenTelemetry Authors -SPDX-License-Identifier: Apache-2.0 - --------------------------------------------------------------------------------- - -ci/conan/ is based on code from Conan Package and Dependency Manager. 
- -Copyright (c) 2019 Conan.io - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- - -3rdparty dependency UCX is redistributed as a dynamically linked shared -library in certain binary distributions. UCX has the following license: - -Copyright (c) 2014-2015 UT-Battelle, LLC. All rights reserved. -Copyright (C) 2014-2020 Mellanox Technologies Ltd. All rights reserved. -Copyright (C) 2014-2015 The University of Houston System. All rights reserved. -Copyright (C) 2015 The University of Tennessee and The University - of Tennessee Research Foundation. All rights reserved. -Copyright (C) 2016-2020 ARM Ltd. All rights reserved. -Copyright (c) 2016 Los Alamos National Security, LLC. All rights reserved. -Copyright (C) 2016-2020 Advanced Micro Devices, Inc. All rights reserved. -Copyright (C) 2019 UChicago Argonne, LLC. All rights reserved. -Copyright (c) 2018-2020 NVIDIA CORPORATION. All rights reserved. -Copyright (C) 2020 Huawei Technologies Co., Ltd. All rights reserved. -Copyright (C) 2016-2020 Stony Brook University. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. -3. Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------------------------------------------------------------------------- - -The file dev/tasks/r/github.packages.yml contains code from - -https://github.com/ursa-labs/arrow-r-nightly - -which is made available under the Apache License 2.0. - --------------------------------------------------------------------------------- -.github/actions/sync-nightlies/action.yml (some portions) - -Some portions of this file are derived from code from - -https://github.com/JoshPiper/rsync-docker - -which is made available under the MIT license - -Copyright (c) 2020 Joshua Piper - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- -.github/actions/sync-nightlies/action.yml (some portions) - -Some portions of this file are derived from code from - -https://github.com/burnett01/rsync-deployments - -which is made available under the MIT license - -Copyright (c) 2019-2022 Contention -Copyright (c) 2019-2022 Burnett01 - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - --------------------------------------------------------------------------------- -java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectHashMap.java -java/vector/src/main/java/org/apache/arrow/vector/util/IntObjectMap.java - -These file are derived from code from Netty, which is made available under the -Apache License 2.0. - -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- - -Polars - -Copyright (c) 2020 Ritchie Vink -Some portions Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- - -NumPy - -Copyright: NumPy Developers -License: BSD 3-Clause - - ./.spin/LICENSE - -BSD 3-Clause License - -Copyright (c) 2021--2022, Scientific Python project -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------- Separator -------------- - - ./LICENSE.txt - -Copyright (c) 2005-2024, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - - * Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------- Separator -------------- - - ./LICENSES_bundled.txt - -The NumPy repository and source distributions bundle several libraries that are -compatibly licensed. We list these here. 
- -Name: lapack-lite -Files: numpy/linalg/lapack_lite/* -License: BSD-3-Clause - For details, see numpy/linalg/lapack_lite/LICENSE.txt - -Name: dragon4 -Files: numpy/_core/src/multiarray/dragon4.c -License: MIT - For license text, see numpy/_core/src/multiarray/dragon4.c - -Name: libdivide -Files: numpy/_core/include/numpy/libdivide/* -License: Zlib - For license text, see numpy/_core/include/numpy/libdivide/LICENSE.txt - - -Note that the following files are vendored in the repository and sdist but not -installed in built numpy packages: - -Name: Meson -Files: vendored-meson/meson/* -License: Apache 2.0 - For license text, see vendored-meson/meson/COPYING - -Name: spin -Files: .spin/cmds.py -License: BSD-3 - For license text, see .spin/LICENSE - -Name: tempita -Files: numpy/_build_utils/tempita/* -License: MIT - For details, see numpy/_build_utils/tempita/LICENCE.txt - --------------- Separator -------------- - - ./numpy/_build_utils/tempita/LICENSE.txt - -Copyright (c) 2008 Ian Bicking and Contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------- Separator -------------- - - ./numpy/_core/include/numpy/libdivide/LICENSE.txt - - zlib License - ------------ - - Copyright (C) 2010 - 2019 ridiculous_fish, - Copyright (C) 2016 - 2019 Kim Walisch, - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. 
- --------------- Separator -------------- - - ./numpy/core/src/multiarray/dragon4.c - -/* - * Copyright (c) 2014 Ryan Juckett - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to - * deal in the Software without restriction, including without limitation the - * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or - * sell copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS - * IN THE SOFTWARE. - */ --------------- Separator -------------- - - ./numpy/core/src/multiarray/dragon4.c - -MIT ( same as top-level ) --------------- Separator -------------- - - ./numpy/linalg/lapack_lite/LICENSE.txt - -Copyright (c) 1992-2013 The University of Tennessee and The University - of Tennessee Research Foundation. All rights - reserved. -Copyright (c) 2000-2013 The University of California Berkeley. All - rights reserved. -Copyright (c) 2006-2013 The University of Colorado Denver. All rights - reserved. - -$COPYRIGHT$ - -Additional copyrights may follow - -$HEADER$ - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -- Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -- Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer listed - in this license in the documentation and/or other materials - provided with the distribution. - -- Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -The copyright holders provide no reassurances that the source code -provided does not infringe any patent, copyright, or any other -intellectual property rights of third parties. The copyright holders -disclaim any liability to any recipient for claims brought against -recipient by any third party for infringement of that parties -intellectual property rights. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------- Separator -------------- - - ./numpy/ma/LICENSE - -* Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant -* All rights reserved. -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* * Neither the name of the University of Georgia nor the -* names of its contributors may be used to endorse or promote products -* derived from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY -* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -* DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY -* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------- Separator -------------- - - ./numpy/random/LICENSE.md - -**This software is dual-licensed under the The University of Illinois/NCSA -Open Source License (NCSA) and The 3-Clause BSD License** - -# NCSA Open Source License -**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** - -Developed by: Kevin Sheppard (, -) -[http://www.kevinsheppard.com](http://www.kevinsheppard.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal with -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimers. - -Redistributions in binary form must reproduce the above copyright notice, this -list of conditions and the following disclaimers in the documentation and/or -other materials provided with the distribution. - -Neither the names of Kevin Sheppard, nor the names of any contributors may be -used to endorse or promote products derived from this Software without specific -prior written permission. 
- -**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH -THE SOFTWARE.** - - -# 3-Clause BSD License -**Copyright (c) 2019 Kevin Sheppard. All rights reserved.** - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE.** - -# Components - -Many parts of this module have been derived from original sources, -often the algorithm's designer. Component licenses are located with -the component code. - --------------- Separator -------------- - - ./numpy/random/src/distributions/LICENSE.md - -## NumPy - -Copyright (c) 2005-2017, NumPy Developers. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above - copyright notice, this list of conditions and the following - disclaimer in the documentation and/or other materials provided - with the distribution. - -* Neither the name of the NumPy Developers nor the names of any - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -## Julia - -The ziggurat methods were derived from Julia. - -Copyright (c) 2009-2019: Jeff Bezanson, Stefan Karpinski, Viral B. Shah, -and other contributors: - -https://github.com/JuliaLang/julia/contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------- Separator -------------- - - ./numpy/random/src/mt19937/LICENSE.md - -# MT19937 - -Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org) - -The rk_random and rk_seed functions algorithms and the original design of -the Mersenne Twister RNG: - - Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura, - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - 3. The names of its contributors may not be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Original algorithm for the implementation of rk_interval function from -Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by -Magnus Jonsson. - -Constants used in the rk_double implementation by Isaku Wada. - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------- Separator -------------- - - ./numpy/random/src/pcg64/LICENSE.md - -# PCG64 - -## The MIT License - -PCG Random Number Generation for C. - -Copyright 2014 Melissa O'Neill - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------- Separator -------------- - - ./numpy/random/src/philox/LICENSE.md - -# PHILOX - -Copyright 2010-2012, D. E. Shaw Research. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -* Redistributions of source code must retain the above copyright - notice, this list of conditions, and the following disclaimer. 
- -* Redistributions in binary form must reproduce the above copyright - notice, this list of conditions, and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -* Neither the name of D. E. Shaw Research nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - --------------- Separator -------------- - - ./numpy/random/src/sfc64/LICENSE.md - -# SFC64 - -## The MIT License - -Adapted from a C++ implementation of Chris Doty-Humphrey's SFC PRNG. - -https://gist.github.com/imneme/f1f7821f07cf76504a97f6537c818083 - -Copyright (c) 2018 Melissa E. O'Neill - -Permission is hereby granted, free of charge, to any person obtaining a -copy of this software and associated documentation files (the "Software"), -to deal in the Software without restriction, including without limitation -the rights to use, copy, modify, merge, publish, distribute, sublicense, -and/or sell copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - --------------- Separator -------------- - - ./numpy/random/src/splitmix64/LICENSE.md - -# SPLITMIX64 - -Written in 2015 by Sebastiano Vigna (vigna@acm.org) - -To the extent possible under law, the author has dedicated all copyright -and related and neighboring rights to this software to the public domain -worldwide. This software is distributed without any warranty. - -See . - --------------- Separator -------------- - - ./vendored-meson/meson/COPYING - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING - * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER - * DEALINGS IN THE SOFTWARE. - */ - ---------------------------------------------------------- - -Name: cutlass -License: BSD-3-Clause -Files: third_party/cutlass, - third_party/fbgemm/third_party/cutlass - -Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -SPDX-License-Identifier: BSD-3-Clause - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: dart -License: Apache-2.0 -Files: third_party/flatbuffers/dart - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2014 Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ---------------------------------------------------------- - -Name: duktape-1.5.2 -License: MIT -Files: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/src/third_party/duktape-1.5.2 - -=============== -Duktape license -=============== - -(http://opensource.org/licenses/MIT) - -Copyright (c) 2013-2016 by Duktape authors (see AUTHORS.rst) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ---------------------------------------------------------- - -Name: duktape-1.8.0 -License: MIT -Files: third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/civetweb/src/third_party/duktape-1.8.0 - -=============== -Duktape license -=============== - -(http://opensource.org/licenses/MIT) - -Copyright (c) 2013-2017 by Duktape authors (see AUTHORS.rst) - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ---------------------------------------------------------- - -Name: dynolog -License: MIT -Files: third_party/kineto/libkineto/third_party/dynolog - -MIT License - -Copyright (c) Facebook, Inc. and its affiliates. 
- -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: eigen -License: BSD-3-Clause -Files: third_party/eigen - -/* - Copyright (c) 2011, Intel Corporation. All rights reserved. - - Redistribution and use in source and binary forms, with or without modification, - are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - * Neither the name of Intel Corporation nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON - ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -*/ - ---------------------------------------------------------- - -Name: etw -License: MIT -Files: third_party/opentelemetry-cpp/exporters/etw/include/opentelemetry/exporters/etw - -TraceLogging Dynamic for Windows - -Copyright (c) Microsoft Corporation. All rights reserved. 
- -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: expected -License: MIT -Files: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/expected - -The MIT License (MIT) - -Copyright (c) 2015 Martin Moene -Copyright (c) 2015 Microsoft Corporation. All rights reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: fbgemm -License: BSD-3-Clause -Files: third_party/fbgemm - -BSD License - -For FBGEMM software - -Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: ffnvcodec -License: MIT with exception -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/ffnvcodec - -GNU LESSER GENERAL PUBLIC LICENSE -Version 2.1, February 1999 - -Copyright (C) 1991, 1999 Free Software Foundation, Inc. -51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -Everyone is permitted to copy and distribute verbatim copies -of this license document, but changing it is not allowed. - -[This is the first released version of the Lesser GPL. It also counts - as the successor of the GNU Library Public License, version 2, hence - the version number 2.1.] -Preamble -The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public Licenses are intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. - -This license, the Lesser General Public License, applies to some specially designated software packages--typically libraries--of the Free Software Foundation and other authors who decide to use it. You can use it too, but we suggest you first think carefully about whether this license or the ordinary General Public License is the better strategy to use in any particular case, based on the explanations below. - -When we speak of free software, we are referring to freedom of use, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish); that you receive source code or can get it if you want it; that you can change the software and use pieces of it in new free programs; and that you are informed that you can do these things. - -To protect your rights, we need to make restrictions that forbid distributors to deny you these rights or to ask you to surrender these rights. These restrictions translate to certain responsibilities for you if you distribute copies of the library or if you modify it. - -For example, if you distribute copies of the library, whether gratis or for a fee, you must give the recipients all the rights that we gave you. You must make sure that they, too, receive or can get the source code. If you link other code with the library, you must provide complete object files to the recipients, so that they can relink them with the library after making changes to the library and recompiling it. And you must show them these terms so they know their rights. - -We protect your rights with a two-step method: (1) we copyright the library, and (2) we offer you this license, which gives you legal permission to copy, distribute and/or modify the library. 
- -To protect each distributor, we want to make it very clear that there is no warranty for the free library. Also, if the library is modified by someone else and passed on, the recipients should know that what they have is not the original version, so that the original author's reputation will not be affected by problems that might be introduced by others. - -Finally, software patents pose a constant threat to the existence of any free program. We wish to make sure that a company cannot effectively restrict the users of a free program by obtaining a restrictive license from a patent holder. Therefore, we insist that any patent license obtained for a version of the library must be consistent with the full freedom of use specified in this license. - -Most GNU software, including some libraries, is covered by the ordinary GNU General Public License. This license, the GNU Lesser General Public License, applies to certain designated libraries, and is quite different from the ordinary General Public License. We use this license for certain libraries in order to permit linking those libraries into non-free programs. - -When a program is linked with a library, whether statically or using a shared library, the combination of the two is legally speaking a combined work, a derivative of the original library. The ordinary General Public License therefore permits such linking only if the entire combination fits its criteria of freedom. The Lesser General Public License permits more lax criteria for linking other code with the library. - -We call this license the "Lesser" General Public License because it does Less to protect the user's freedom than the ordinary General Public License. It also provides other free software developers Less of an advantage over competing non-free programs. These disadvantages are the reason we use the ordinary General Public License for many libraries. However, the Lesser license provides advantages in certain special circumstances. - -For example, on rare occasions, there may be a special need to encourage the widest possible use of a certain library, so that it becomes a de-facto standard. To achieve this, non-free programs must be allowed to use the library. A more frequent case is that a free library does the same job as widely used non-free libraries. In this case, there is little to gain by limiting the free library to free software only, so we use the Lesser General Public License. - -In other cases, permission to use a particular library in non-free programs enables a greater number of people to use a large body of free software. For example, permission to use the GNU C Library in non-free programs enables many more people to use the whole GNU operating system, as well as its variant, the GNU/Linux operating system. - -Although the Lesser General Public License is Less protective of the users' freedom, it does ensure that the user of a program that is linked with the Library has the freedom and the wherewithal to run that program using a modified version of the Library. - -The precise terms and conditions for copying, distribution and modification follow. Pay close attention to the difference between a "work based on the library" and a "work that uses the library". The former contains code derived from the library, whereas the latter must be combined with the library in order to run. - -TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION -0. 
This License Agreement applies to any software library or other program which contains a notice placed by the copyright holder or other authorized party saying it may be distributed under the terms of this Lesser General Public License (also called "this License"). Each licensee is addressed as "you". - -A "library" means a collection of software functions and/or data prepared so as to be conveniently linked with application programs (which use some of those functions and data) to form executables. - -The "Library", below, refers to any such software library or work which has been distributed under these terms. A "work based on the Library" means either the Library or any derivative work under copyright law: that is to say, a work containing the Library or a portion of it, either verbatim or with modifications and/or translated straightforwardly into another language. (Hereinafter, translation is included without limitation in the term "modification".) - -"Source code" for a work means the preferred form of the work for making modifications to it. For a library, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the library. - -Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running a program using the Library is not restricted, and output from such a program is covered only if its contents constitute a work based on the Library (independent of the use of the Library in a tool for writing it). Whether that is true depends on what the Library does and what the program that uses the Library does. - -1. You may copy and distribute verbatim copies of the Library's complete source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and distribute a copy of this License along with the Library. - -You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. - -2. You may modify your copy or copies of the Library or any portion of it, thus forming a work based on the Library, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: - -a) The modified work must itself be a software library. -b) You must cause the files modified to carry prominent notices stating that you changed the files and the date of any change. -c) You must cause the whole of the work to be licensed at no charge to all third parties under the terms of this License. -d) If a facility in the modified Library refers to a function or a table of data to be supplied by an application program that uses the facility, other than as an argument passed when the facility is invoked, then you must make a good faith effort to ensure that, in the event an application does not supply such function or table, the facility still operates, and performs whatever part of its purpose remains meaningful. -(For example, a function in a library to compute square roots has a purpose that is entirely well-defined independent of the application. 
Therefore, Subsection 2d requires that any application-supplied function or table used by this function must be optional: if the application does not supply it, the square root function must still compute square roots.) - -These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Library, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Library, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Library. - -In addition, mere aggregation of another work not based on the Library with the Library (or with a work based on the Library) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. - -3. You may opt to apply the terms of the ordinary GNU General Public License instead of this License to a given copy of the Library. To do this, you must alter all the notices that refer to this License, so that they refer to the ordinary GNU General Public License, version 2, instead of to this License. (If a newer version than version 2 of the ordinary GNU General Public License has appeared, then you can specify that version instead if you wish.) Do not make any other change in these notices. - -Once this change is made in a given copy, it is irreversible for that copy, so the ordinary GNU General Public License applies to all subsequent copies and derivative works made from that copy. - -This option is useful when you wish to copy part of the code of the Library into a program that is not a library. - -4. You may copy and distribute the Library (or a portion or derivative of it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange. - -If distribution of object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place satisfies the requirement to distribute the source code, even though third parties are not compelled to copy the source along with the object code. - -5. A program that contains no derivative of any portion of the Library, but is designed to work with the Library by being compiled or linked with it, is called a "work that uses the Library". Such a work, in isolation, is not a derivative work of the Library, and therefore falls outside the scope of this License. - -However, linking a "work that uses the Library" with the Library creates an executable that is a derivative of the Library (because it contains portions of the Library), rather than a "work that uses the library". The executable is therefore covered by this License. Section 6 states terms for distribution of such executables. 
- -When a "work that uses the Library" uses material from a header file that is part of the Library, the object code for the work may be a derivative work of the Library even though the source code is not. Whether this is true is especially significant if the work can be linked without the Library, or if the work is itself a library. The threshold for this to be true is not precisely defined by law. - -If such an object file uses only numerical parameters, data structure layouts and accessors, and small macros and small inline functions (ten lines or less in length), then the use of the object file is unrestricted, regardless of whether it is legally a derivative work. (Executables containing this object code plus portions of the Library will still fall under Section 6.) - -Otherwise, if the work is a derivative of the Library, you may distribute the object code for the work under the terms of Section 6. Any executables containing that work also fall under Section 6, whether or not they are linked directly with the Library itself. - -6. As an exception to the Sections above, you may also combine or link a "work that uses the Library" with the Library to produce a work containing portions of the Library, and distribute that work under terms of your choice, provided that the terms permit modification of the work for the customer's own use and reverse engineering for debugging such modifications. - -You must give prominent notice with each copy of the work that the Library is used in it and that the Library and its use are covered by this License. You must supply a copy of this License. If the work during execution displays copyright notices, you must include the copyright notice for the Library among them, as well as a reference directing the user to the copy of this License. Also, you must do one of these things: - -a) Accompany the work with the complete corresponding machine-readable source code for the Library including whatever changes were used in the work (which must be distributed under Sections 1 and 2 above); and, if the work is an executable linked with the Library, with the complete machine-readable "work that uses the Library", as object code and/or source code, so that the user can modify the Library and then relink to produce a modified executable containing the modified Library. (It is understood that the user who changes the contents of definitions files in the Library will not necessarily be able to recompile the application to use the modified definitions.) -b) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (1) uses at run time a copy of the library already present on the user's computer system, rather than copying library functions into the executable, and (2) will operate properly with a modified version of the library, if the user installs one, as long as the modified version is interface-compatible with the version that the work was made with. -c) Accompany the work with a written offer, valid for at least three years, to give the same user the materials specified in Subsection 6a, above, for a charge no more than the cost of performing this distribution. -d) If distribution of the work is made by offering access to copy from a designated place, offer equivalent access to copy the above specified materials from the same place. -e) Verify that the user has already received a copy of these materials or that you have already sent this user a copy. 
-For an executable, the required form of the "work that uses the Library" must include any data and utility programs needed for reproducing the executable from it. However, as a special exception, the materials to be distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. - -It may happen that this requirement contradicts the license restrictions of other proprietary libraries that do not normally accompany the operating system. Such a contradiction means you cannot use both them and the Library together in an executable that you distribute. - -7. You may place library facilities that are a work based on the Library side-by-side in a single library together with other library facilities not covered by this License, and distribute such a combined library, provided that the separate distribution of the work based on the Library and of the other library facilities is otherwise permitted, and provided that you do these two things: - -a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities. This must be distributed under the terms of the Sections above. -b) Give prominent notice with the combined library of the fact that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. -8. You may not copy, modify, sublicense, link with, or distribute the Library except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense, link with, or distribute the Library is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. - -9. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Library or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Library (or any work based on the Library), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Library or works based on it. - -10. Each time you redistribute the Library (or any work based on the Library), the recipient automatically receives a license from the original licensor to copy, distribute, link with or modify the Library subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties with this License. - -11. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Library at all. 
For example, if a patent license would not permit royalty-free redistribution of the Library by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Library. - -If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply, and the section as a whole is intended to apply in other circumstances. - -It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. - -This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. - -12. If the distribution and/or use of the Library is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Library under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. - -13. The Free Software Foundation may publish revised and/or new versions of the Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. - -Each version is given a distinguishing version number. If the Library specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Library does not specify a license version number, you may choose any version ever published by the Free Software Foundation. - -14. If you wish to incorporate parts of the Library into other free programs whose distribution conditions are incompatible with these, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. - -NO WARRANTY - -15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - -16. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -END OF TERMS AND CONDITIONS -How to Apply These Terms to Your New Libraries -If you develop a new library, and you want it to be of the greatest possible use to the public, we recommend making it free software that everyone can redistribute and change. You can do so by permitting redistribution under these terms (or, alternatively, under the terms of the ordinary General Public License). - -To apply these terms, attach the following notices to the library. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. - -one line to give the library's name and an idea of what it does. -Copyright (C) year name of author - -This library is free software; you can redistribute it and/or -modify it under the terms of the GNU Lesser General Public -License as published by the Free Software Foundation; either -version 2.1 of the License, or (at your option) any later version. - -This library is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -Lesser General Public License for more details. - -You should have received a copy of the GNU Lesser General Public -License along with this library; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -Also add information on how to contact you by electronic and paper mail. - -You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the library, if necessary. Here is a sample; alter the names: - -Yoyodyne, Inc., hereby disclaims all copyright interest in -the library `Frob' (a library for tweaking knobs) written -by James Random Hacker. - -signature of Ty Coon, 1 April 1990 -Ty Coon, President of Vice -That's all there is to it! - ---------------------------------------------------------- - -Name: flatbuffers -License: Apache-2.0 -Files: third_party/flatbuffers - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ---------------------------------------------------------- - -Name: fmt -License: MIT with exception -Files: third_party/fmt - -Copyright (c) 2012 - present, Victor Zverovich and {fmt} contributors - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ---- Optional exception to the license --- - -As an exception, if, as a result of your compiling your source code, portions -of this Software are embedded into a machine-executable object form of such -source code, you may redistribute such embedded portions in such object form -without including the above copyright and permission notices. - ---------------------------------------------------------- - -Name: foxi -License: MIT -Files: third_party/foxi - -MIT License - -Copyright (c) 2019 Lu Fang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: gemmlowp -License: Apache-2.0 -Files: third_party/gemmlowp/gemmlowp - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ---------------------------------------------------------- - -Name: generator -License: Apache-2.0 -Files: third_party/fbgemm/third_party/googletest/googlemock/scripts/generator, - third_party/googletest/googlemock/scripts/generator, - third_party/kineto/libkineto/third_party/googletest/googlemock/scripts/generator, - third_party/protobuf/third_party/googletest/googlemock/scripts/generator, - third_party/tensorpipe/third_party/googletest/googlemock/scripts/generator, - third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/googletest/googlemock/scripts/generator - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [2007] Neal Norwitz - Portions Copyright [2007] Google Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ---------------------------------------------------------- - -Name: gettimeofday -License: Apache-2.0 -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/gettimeofday - -/* - * Copied from PostgreSQL source: - * http://doxygen.postgresql.org/gettimeofday_8c_source.html - * - */ - -/* - * gettimeofday.c - * Win32 gettimeofday() replacement - * - * src/port/gettimeofday.c - * - * Copyright (c) 2003 SRA, Inc. - * Copyright (c) 2003 SKC, Inc. - * - * Permission to use, copy, modify, and distribute this software and - * its documentation for any purpose, without fee, and without a - * written agreement is hereby granted, provided that the above - * copyright notice and this paragraph and the following two - * paragraphs appear in all copies. - * - * IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, - * INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING - * LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS - * DOCUMENTATION, EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED - * OF THE POSSIBILITY OF SUCH DAMAGE. - * - * THE AUTHOR SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS - * IS" BASIS, AND THE AUTHOR HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, - * SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. - */ - ---------------------------------------------------------- - -Name: gloo -License: BSD-3-Clause -Files: third_party/gloo - -BSD License - -For Gloo software - -Copyright (c) 2017-present, Facebook, Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: googlemock -License: BSD-3-Clause -Files: third_party/fbgemm/third_party/googletest/googlemock, - third_party/kineto/libkineto/third_party/googletest/googlemock, - third_party/protobuf/third_party/googletest/googlemock, - third_party/tensorpipe/third_party/googletest/googlemock - -Copyright 2008, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: googletest -License: BSD-3-Clause -Files: third_party/fbgemm/third_party/googletest, - third_party/fbgemm/third_party/googletest/googletest, - third_party/googletest, - third_party/kineto/libkineto/third_party/dynolog/third_party/googletest, - third_party/kineto/libkineto/third_party/googletest, - third_party/kineto/libkineto/third_party/googletest/googletest, - third_party/protobuf/third_party/googletest, - third_party/protobuf/third_party/googletest/googletest, - third_party/tensorpipe/third_party/googletest, - third_party/tensorpipe/third_party/googletest/googletest, - third_party/opentelemetry-cpp/third_party/googletest, - third_party/opentelemetry-cpp/third_party/prometheus-cpp/3rdparty/googletest - -Copyright 2008, Google Inc. -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: gtest -License: BSD-3-Clause -Files: third_party/ideep/mkl-dnn/tests/gtests/gtest - -Copyright 2008, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- ---------------------------------------------------------- - -Name: hipify_torch -License: MIT -Files: third_party/fbgemm/third_party/hipify_torch - -MIT License - -Copyright (c) 2017 AMD Compute Libraries - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: hungarian -License: Apache-2.0 -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/hungarian - -/******************************************************************** - ******************************************************************** - ** - ** libhungarian by Cyrill Stachniss, 2004 - ** - ** - ** Solving the Minimum Assignment Problem using the - ** Hungarian Method. - ** - ** ** This file may be freely copied and distributed! ** - ** - ** Parts of the used code was originally provided by the - ** "Stanford GraphGase", but I made changes to this code. - ** As asked by the copyright node of the "Stanford GraphGase", - ** I hereby proclaim that this file are *NOT* part of the - ** "Stanford GraphGase" distrubition! - ** - ** This file is distributed in the hope that it will be useful, - ** but WITHOUT ANY WARRANTY; without even the implied - ** warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR - ** PURPOSE. - ** - ******************************************************************** - ********************************************************************/ - ---------------------------------------------------------- - -Name: ideep -License: MIT -Files: third_party/ideep - -Copyright (c) 2018 Intel Corporation. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ---------------------------------------------------------- - -Name: irrlicht -License: MIT -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/irrlicht - -The Irrlicht Engine License -=========================== - -Copyright (C) 2002-2015 Nikolaus Gebhardt - -This software is provided 'as-is', without any express or implied -warranty. In no event will the authors be held liable for any damages -arising from the use of this software. - -Permission is granted to anyone to use this software for any purpose, -including commercial applications, and to alter it and redistribute it -freely, subject to the following restrictions: - -1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgement in the product documentation would be - appreciated but is not required. -2. Altered source versions must be clearly marked as such, and must not be - misrepresented as being the original software. -3. This notice may not be removed or altered from any source distribution. - ---------------------------------------------------------- - -Name: json -License: MIT -Files: third_party/nlohmann/json - -MIT License - -Copyright (c) 2013-2022 Niels Lohmann - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: kineto -License: BSD-3-Clause -Files: third_party/kineto - -BSD License - -For Kineto software - -Copyright (c) Meta Platforms, Inc. and affiliates. - -All contributions by Microsoft: -Copyright (c) Microsoft Corporation. (The Azure AI Platform team) - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. 
- - * Neither the name Meta nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: libnop -License: Apache-2.0 -Files: third_party/tensorpipe/third_party/libnop - -Copyright 2017 The Native Object Protocols Authors - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - ---------------------------------------------------------- - -Name: libstemmer -License: BSD-3-Clause -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/libstemmer - -Snowball - License -Except where explicitly noted, all the software given out on this Snowball site is covered by the 3-clause BSD License: - -Copyright (c) 2001, Dr Martin Porter, -Copyright (c) 2002, Richard Boulton. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-Essentially, all this means is that you can do what you like with the code, except claim another Copyright for it, or claim that it is issued under a different license. The software is also issued without warranties, which means that if anyone suffers through its use, they cannot come back and sue you. You also have to alert anyone to whom you give the Snowball software to the fact that it is covered by the BSD license. - -We have not bothered to insert the licensing arrangement into the text of the Snowball software. - ---------------------------------------------------------- - -Name: libuv -License: MIT -Files: third_party/tensorpipe/third_party/libuv - -libuv is licensed for use as follows: - -==== -Copyright (c) 2015-present libuv project contributors. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. -==== - -This license applies to parts of libuv originating from the -https://github.com/joyent/libuv repository: - -==== - -Copyright Joyent, Inc. and other Node contributors. All rights reserved. -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to -deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or -sell copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS -IN THE SOFTWARE. - -==== - -This license applies to all parts of libuv that are not externally -maintained libraries. - -The externally maintained libraries used by libuv are: - - - tree.h (from FreeBSD), copyright Niels Provos. Two clause BSD license. - - - inet_pton and inet_ntop implementations, contained in src/inet.c, are - copyright the Internet Systems Consortium, Inc., and licensed under the ISC - license. - - - stdint-msvc2008.h (from msinttypes), copyright Alexander Chemeris. 
Three - clause BSD license. - - - pthread-fixes.c, copyright Google Inc. and Sony Mobile Communications AB. - Three clause BSD license. - - - android-ifaddrs.h, android-ifaddrs.c, copyright Berkeley Software Design - Inc, Kenneth MacKay and Emergya (Cloud4all, FP7/2007-2013, grant agreement - n° 289016). Three clause BSD license. - ---------------------------------------------------------- - -Name: miniz-2.1.0 -License: MIT -Files: third_party/miniz-2.1.0 - -Copyright 2013-2014 RAD Game Tools and Valve Software -Copyright 2010-2014 Rich Geldreich and Tenacious Software LLC - -All Rights Reserved. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ---------------------------------------------------------- - -Name: mimalloc -License: MIT -Files: third_party/mimalloc - -MIT License - -Copyright (c) 2018-2021 Microsoft Corporation, Daan Leijen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: mkl-dnn -License: Apache-2.0 -Files: third_party/ideep/mkl-dnn - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - ============================================================================ - - Copyright 2016-2023 Intel Corporation - Copyright 2018 YANDEX LLC - Copyright 2019-2023 FUJITSU LIMITED - Copyright 2020-2023 Arm Ltd. and affiliates - Copyright 2020-2022 Codeplay Software Limited - Copyright 2021 Alanna Tempest - Copyright 2022-2023 IBM Corporation - Copyright 2023 KNS Group LLC (YADRO) - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - This distribution includes third party software ("third party programs"). 
- This third party software, even if included with the distribution of - the Intel software, may be governed by separate license terms, including - without limitation, third party license terms, other Intel software license - terms, and open source software license terms. These separate license terms - govern your use of the third party programs as set forth in the - "THIRD-PARTY-PROGRAMS" file. - ---------------------------------------------------------- - -Name: ms-gsl -License: MIT -Files: third_party/opentelemetry-cpp/third_party/ms-gsl - -Copyright (c) 2015 Microsoft Corporation. All rights reserved. - -This code is licensed under the MIT License (MIT). - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - ---------------------------------------------------------- - -Name: nccl -License: BSD-3-Clause -Files: third_party/nccl/nccl - - - Copyright (c) 2015-2020, NVIDIA CORPORATION. All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions - are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of NVIDIA CORPORATION, Lawrence Berkeley National - Laboratory, the U.S. Department of Energy, nor the names of their - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY - EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR - CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - The U.S. 
Department of Energy funded the development of this software - under subcontract 7078610 with Lawrence Berkeley National Laboratory. - - -This code also includes files from the NVIDIA Tools Extension SDK project. - -See: - - https://github.com/NVIDIA/NVTX - -for more information and license details. - ---------------------------------------------------------- - -Name: onnx -License: Apache-2.0 -Files: third_party/onnx - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ---------------------------------------------------------- - -Name: OpenBLAS -License: 3-clause BSD -Files: .libs/libopenb*.so - -Copyright (c) 2011-2014, The OpenBLAS Project -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - 1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - 2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - 3. Neither the name of the OpenBLAS project nor the names of - its contributors may be used to endorse or promote products - derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - ---------------------------------------------------------- - -Name: opentelemetry-cpp -License: Apache-2.0 -Files: third_party/opentelemetry-cpp - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. 
- - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ---------------------------------------------------------- - -Name: opentelemetry-proto -License: Apache-2.0 -Files: third_party/opentelemetry-cpp/third_party/opentelemetry-proto - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. 
- - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ---------------------------------------------------------- - -Name: opentracing-cpp -License: Apache-2.0 -Files: third_party/opentelemetry-cpp/third_party/opentracing-cpp - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright The OpenTracing Authors - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ---------------------------------------------------------- - -Name: pdcurses -License: Apache-2.0 -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/pdcurses - -The core package is in the public domain, but small portions of PDCurses are subject to copyright under various licenses. - -The win32 files are released to the public domain. - -If you use PDCurses in an application, an acknowledgement would be appreciated, but is not mandatory. If you make corrections or enhancements to PDCurses, please forward them to the current maintainer for the benefit of other users. - -This software is provided AS IS with NO WARRANTY whatsoever. - ---------------------------------------------------------- - -Name: pfs -License: Apache-2.0 -Files: third_party/kineto/libkineto/third_party/dynolog/third_party/pfs - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2020-present Daniel Trugman - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ---------------------------------------------------------- - -Name: physac -License: MIT -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/physac - -MIT License - -Copyright (c) 2022 Víctor Fisac - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: pocketfft -License: BSD-3-Clause -Files: third_party/pocketfft - -Copyright (C) 2010-2018 Max-Planck-Society -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. -* Neither the name of the copyright holder nor the names of its contributors may - be used to endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: pqp -License: Apache-2.0 -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/pqp - -Copyright 1999 University of North Carolina at Chapel Hill. -All rights reserved. - -Permission to use, copy, modify, and distribute this software and its -documentation for educational, research, and non-profit purposes, without fee, -and without a written agreement is hereby granted, provided that the above -copyright notice and the following three paragraphs appear in all copies. - -IN NO EVENT SHALL THE UNIVERSITY OF NORTH CAROLINA AT CHAPEL HILL BE LIABLE TO -ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, -INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS -DOCUMENTATION, EVEN IF THE UNIVERSITY OF NORTH CAROLINA AT CHAPEL HILL HAS -BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -THE UNIVERSITY OF NORTH CAROLINA AT CHAPEL HILL SPECIFICALLY DISCLAIMS ANY -WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED -HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF NORTH CAROLINA AT -CHAPEL HILL HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, -ENHANCEMENTS, OR MODIFICATIONS. 
- -The authors may be contacted via: - -US Mail: Eric Larsen, Stefan Gottschalk - Department of Computer Science - Sitterson Hall, CB #3175 - University of North Carolina - Chapel Hill, NC 27599-3175 - -Phone: (919) 962-1749 - -Email: geom@cs.unc.edu - ---------------------------------------------------------- - -Name: prometheus-cpp -License: MIT -Files: third_party/opentelemetry-cpp/third_party/prometheus-cpp - -MIT License - -Copyright (c) 2016-2021 Jupp Mueller -Copyright (c) 2017-2022 Gregor Jasny - -And many contributors, see -https://github.com/jupp0r/prometheus-cpp/graphs/contributors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - ---------------------------------------------------------- - -Name: protobuf -License: BSD-3-Clause -Files: third_party/protobuf - -Copyright 2008 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Code generated by the Protocol Buffer compiler is owned by the owner -of the input file used when generating it. This code is not -standalone and requires a support library to be linked with it. This -support library is itself covered by the above license. 
- ---------------------------------------------------------- - -Name: psimd -License: MIT -Files: third_party/psimd - -The MIT License (MIT) - -Copyright (c) 2017 Facebook Inc. -Copyright (c) 2014-2017 Georgia Institute of Technology -Copyright 2019 Google LLC - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ---------------------------------------------------------- - -Name: pthreadpool -License: BSD-2-Clause -Files: third_party/pthreadpool - -Copyright 2019 Google LLC -Copyright (c) 2017 Facebook Inc. -Copyright (c) 2015-2017 Georgia Institute of Technology -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: pybind11 -License: BSD-3-Clause -Files: third_party/onnx/third_party/pybind11, - third_party/onnx-tensorrt/third_party/onnx/third_party/pybind11, - third_party/pybind11, - third_party/tensorpipe/third_party/pybind11 - -Copyright (c) 2016 Wenzel Jakob , All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Please also refer to the file .github/CONTRIBUTING.md, which clarifies licensing of -external contributions to this project including patches, pull requests, etc. - ---------------------------------------------------------- - -Name: python -License: BSD-3-Clause -Files: third_party/cutlass/python - -Copyright (c) 2017 - 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. -SPDX-License-Identifier: BSD-3-Clause - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this -list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -3. Neither the name of the copyright holder nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: python-peachpy -License: BSD-2-Clause -Files: third_party/python-peachpy - -============================== -PeachPy license (2-clause BSD) -============================== - -Copyright (c) 2017, Facebook Inc. -Copyright (c) 2013-2017, Georgia Institute of Technology -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: sigslot -License: Apache-2.0 -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/sigslot - -License -The sigslot library has been placed in the public domain. This means that you are free to use it however you like. - -The author takes no responsibility or liability of any kind for any use that you may make of this library. - -If you screw up, it's your fault. - -If the library screws up, you got it for free, so you should have tested it better - it's still your responsibility. - ---------------------------------------------------------- - -Name: sleef -License: BSL-1.0 -Files: third_party/sleef - For details, see: third_party/sleef/LICENSE.txt - -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. 
- ---------------------------------------------------------- - -Name: swift -License: Apache-2.0 -Files: third_party/flatbuffers/swift - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ---------------------------------------------------------- - -Name: tb_plugin -License: BSD-3-Clause -Files: third_party/kineto/tb_plugin - -BSD License - -For Kineto software - -Copyright (c) Facebook, Inc. and its affiliates. All rights reserved. - -All contributions by Microsoft: -Copyright (c) Microsoft Corporation. (The Azure AI Platform team) - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Facebook nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: tensorflow-common -License: MIT -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/tensorflow-common - -Copyright (c) Microsoft Corporation - -All rights reserved. - -MIT License - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
- ---------------------------------------------------------- - -Name: tensorpipe -License: BSD-3-Clause -Files: third_party/tensorpipe - -BSD License - -For TensorPipe software - -Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name Meta nor the names of its contributors may be used to - endorse or promote products derived from this software without specific - prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: test -License: MIT with exception -Files: third_party/kineto/libkineto/third_party/dynolog/third_party/cpr/test - -This license applies to everything inside this directory and all -subdirectories. - - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. 
- - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. - - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. 
- - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. 
You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. - - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. 
- - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. 
In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. - - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. 
- - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. - - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. 
If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. - - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). 
To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. 
- - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. - - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. 
- - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see . - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - Copyright (C) - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box". - - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -. - ---------------------------------------------------------- - -Name: variant -License: BSD-3-Clause -Files: third_party/opentelemetry-cpp/third_party/opentracing-cpp/3rd_party/include/opentracing/variant - -Copyright (c) MapBox -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - -- Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. -- Redistributions in binary form must reproduce the above copyright notice, this - list of conditions and the following disclaimer in the documentation and/or - other materials provided with the distribution. -- Neither the name "MapBox" nor the names of its contributors may be - used to endorse or promote products derived from this software without - specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ---------------------------------------------------------- - -Name: vcpkg -License: MIT -Files: third_party/opentelemetry-cpp/tools/vcpkg - -MIT License - -Copyright (c) Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy of this -software and associated documentation files (the "Software"), to deal in the Software -without restriction, including without limitation the rights to use, copy, modify, -merge, publish, distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be included in all copies -or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, -INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A -PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF -CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE -OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - ---------------------------------------------------------- - -Name: vulkan -License: Apache-2.0 with exception -Files: third_party/opentelemetry-cpp/tools/vcpkg/ports/vulkan - -/* -* -* Licensed under the Apache License, Version 2.0 (the "License"); -* you may not use this file except in compliance with the License. -* You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ - - -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. 
For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. - -"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. 
If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of this License; and -You must cause any modified files to carry prominent notices stating that You changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. -You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. 
You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -=============================================================================================================================================== - -/Copyright (C) 2012 LunarG, Inc. -//All rights reserved. -// -//Redistribution and use in source and binary forms, with or without -//modification, are permitted provided that the following conditions -//are met: -// -// Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// -// Neither the name of LunarG Inc. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -//THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -//"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -//LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -//FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -//COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -//INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -//BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -//LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -//CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -//LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -//ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -//POSSIBILITY OF SUCH DAMAGE. 
- -=============================================================================================================================================== - -#============================================================================= -# Copyright 2007-2009 Kitware, Inc. -# Copyright 2007-2008 Miguel A. Figueroa-Villanueva -# -# Distributed under the OSI-approved BSD License (the "License"); -# see accompanying file Copyright_cmake.txt for details. -# -# This software is distributed WITHOUT ANY WARRANTY; without even the -# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. -# See the License for more information. -#============================================================================= -# (To distributed this file outside of CMake, substitute the full -# License text for the above reference.) - - -============================================================================================================================================== - -// -// Copyright (C) 2015-2018 Google, Inc. -// Copyright (C) -// -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions -// are met: -// -// Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// -// Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following -// disclaimer in the documentation and/or other materials provided -// with the distribution. -// -// Neither the name of 3Dlabs Inc. Ltd. nor the names of its -// contributors may be used to endorse or promote products derived -// from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -// COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -// BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -// LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -// ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -// POSSIBILITY OF SUCH DAMAGE. -// - -========================================================================================================================================== - -Note: This license has also been called the "New BSD License" or "Modified BSD License". See also the 2-clause BSD License. -Copyright -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: -1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -========================================================================================================================================== - -/* -* xxHash - Fast Hash algorithm -* Copyright (C) 2012-2016, Yann Collet -* -* BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions are -* met: -* -* * Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* * Redistributions in binary form must reproduce the above -* copyright notice, this list of conditions and the following disclaimer -* in the documentation and/or other materials provided with the -* distribution. -* -* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -* -* You can contact the author at : -* - xxHash homepage: http://www.xxhash.com -* - xxHash source repository : https://github.com/Cyan4973/xxHash -*/ - - -=========================================================================================================================================== - -# Copyright (C) 2018 Google, Inc. -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# -# Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following -# disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived -# from this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE -# COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -========================================================================================================================================== - -/* A Bison parser, made by GNU Bison 3.0.4. */ - -/* Bison implementation for Yacc-like parsers in C -Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc. -This program is free software: you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation, either version 3 of the License, or -(at your option) any later version. -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. -You should have received a copy of the GNU General Public License -along with this program. If not, see . */ - -/* As a special exception, you may create a larger work that contains -part or all of the Bison parser skeleton and distribute that work -under terms of your choice, so long as that work isn't itself a -parser generator using the skeleton or a modified version thereof -as a parser skeleton. Alternatively, if you modify or redistribute -the parser skeleton itself, you may (at your option) remove this -special exception, which will cause the skeleton and the resulting -Bison output files to be licensed under the GNU General Public -License without this special exception. -This special exception was added by the Free Software Foundation in -version 2.2 of Bison. */ - -/* C LALR(1) parser skeleton written by Richard Stallman, by -simplifying the original so-called "semantic" parser. */ - -/* All symbols defined below should begin with yy or YY, to avoid -infringing on user name space. This should be done even for local -variables, as they might otherwise be expanded by user macros. -There are some unavoidable exceptions within include files to -define necessary library symbols; they are noted "INFRINGES ON -USER NAME SPACE" below. 
*/ - -============================================================================================================================================== - -copyright : [ -Copyright (c) 2017 The Khronos Group Inc., -, -Permission is hereby granted, free of charge, to any person obtaining a copy, -of this software and/or associated documentation files (the \Materials\"),", -to deal in the Materials without restriction, including without limitation, -the rights to use, copy, modify, merge, publish, distribute, sublicense,, -and/or sell copies of the Materials, and to permit persons to whom the, -Materials are furnished to do so, subject to the following conditions:, -, -The above copyright notice and this permission notice shall be included in, -all copies or substantial portions of the Materials., -, -MODIFICATIONS TO THIS FILE MAY MEAN IT NO LONGER ACCURATELY REFLECTS KHRONOS, -STANDARDS. THE UNMODIFIED, NORMATIVE VERSIONS OF KHRONOS SPECIFICATIONS AND, -HEADER INFORMATION ARE LOCATED AT https://www.khronos.org/registry/ , -, -THE MATERIALS ARE PROVIDED \AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", -OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL, -THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER, -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING, -FROM,OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE USE OR OTHER DEALINGS, -IN THE MATERIALS. - -============================================================================================================================================= - -CMake - Cross Platform Makefile Generator -Copyright 2000-2009 Kitware, Inc., Insight Software Consortium -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -* Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -* Neither the names of Kitware, Inc., the Insight Software Consortium, -nor the names of their contributors may be used to endorse or promote -products derived from this software without specific prior written -permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -AS IS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - ------------------------------------------------------------------------------- - -The above copyright and license notice applies to distributions of -CMake in source and binary form. 
If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. - -If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. - -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- From 35b82f6b3d668f20e33155776b313a18a3a5acaa Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:54:17 -0600 Subject: [PATCH 136/239] Fix code comments. --- src/oracledb/connection.py | 8 ++++---- utils/templates/connection.py | 8 ++++---- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index bd696a18..31053a50 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -812,7 +812,7 @@ def fetch_df_all( arraysize: Optional[int] = None, ): """ - Fetch all data as OracleDataFrame. + Fetch all data as an instance of DataFrame. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -829,7 +829,7 @@ def fetch_df_batches( size: Optional[int] = None, ): """ - Fetch data in batches. Each batch is an OracleDataFrame + Fetch data in batches. Each batch is an instance of DataFrame """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1837,7 +1837,7 @@ async def fetch_df_all( arraysize: Optional[int] = None, ): """ - Fetch all data as OracleDataFrame. + Fetch all data as an instance of DataFrame. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1854,7 +1854,7 @@ async def fetch_df_batches( size: Optional[int] = None, ): """ - Fetch data in batches. Each batch is an OracleDataFrame + Fetch data in batches. Each batch is an instance of DataFrame """ cursor = self.cursor() cursor._impl.fetching_arrow = True diff --git a/utils/templates/connection.py b/utils/templates/connection.py index fd2599cd..f137f3b3 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -810,7 +810,7 @@ def fetch_df_all( arraysize: Optional[int] = None, ): """ - Fetch all data as OracleDataFrame. + Fetch all data as an instance of DataFrame. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -827,7 +827,7 @@ def fetch_df_batches( size: Optional[int] = None, ): """ - Fetch data in batches. Each batch is an OracleDataFrame + Fetch data in batches. Each batch is an instance of DataFrame """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1583,7 +1583,7 @@ async def fetch_df_all( arraysize: Optional[int] = None, ): """ - Fetch all data as OracleDataFrame. + Fetch all data as an instance of DataFrame. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1600,7 +1600,7 @@ async def fetch_df_batches( size: Optional[int] = None, ): """ - Fetch data in batches. Each batch is an OracleDataFrame + Fetch data in batches. 
Each batch is an instance of DataFrame """ cursor = self.cursor() cursor._impl.fetching_arrow = True From 6885890dabc9a658bea2e95c7a256d0eb0b116aa Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:54:32 -0600 Subject: [PATCH 137/239] Ensure that the GIL is held when releasing references to ArrowArray objects when exported Arrow buffers are released by the consumer. In some circumstances this could cause a segfault. --- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/arrow/utils.pyx | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e69dbb5e..940a709f 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -46,6 +46,9 @@ Common Changes ``ArrowArray`` objects are now available in Python plugins such as those found in VS Code - Upgraded Arrow C Data (nanoarrow) API version to 0.7.0 + - Ensure that the GIL is held when releasing references to ``ArrowArray`` + objects when exported Arrow buffers are released by the consumer. In + some circumstances this could cause a segfault. Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version diff --git a/src/oracledb/impl/arrow/utils.pyx b/src/oracledb/impl/arrow/utils.pyx index 8a2e32ca..4e572b2c 100644 --- a/src/oracledb/impl/arrow/utils.pyx +++ b/src/oracledb/impl/arrow/utils.pyx @@ -219,7 +219,8 @@ cdef int append_uint32_array(ArrowArray *arrow_array, cdef void arrow_buffer_dealloc_callback(ArrowBufferAllocator *allocator, - uint8_t *ptr, int64_t size): + uint8_t *ptr, + int64_t size) noexcept with gil: """ ArrowBufferDeallocatorCallback for an ArrowBuffer borrowed from an Arrow array. From 600feca1beffc3b13423e18671bb001c9a33b6e6 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 11 Jul 2025 11:54:57 -0600 Subject: [PATCH 138/239] Fixed regression resulting in cursor leak (#513). --- doc/src/release_notes.rst | 4 ++++ src/oracledb/impl/thick/var.pyx | 8 ++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 940a709f..b0633921 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -33,6 +33,10 @@ Thin Mode Changes Thick Mode Changes ++++++++++++++++++ +#) Fixed regression resulting in cursor leak + (`issue 513 `__). + + Common Changes ++++++++++++++ diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index ab0475b9..d5d95ae8 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -120,8 +120,12 @@ cdef class ThickVarImpl(BaseVarImpl): ThickCursorImpl cursor_impl object cursor cursor = self._values[pos] - if cursor is None: - cursor = self._conn.cursor() + if cursor is not None: + cursor_impl = cursor._impl + if cursor_impl._handle == dbvalue.asStmt: + cursor_impl._fixup_ref_cursor = True + return cursor + cursor = self._conn.cursor() cursor_impl = cursor._impl if dpiStmt_addRef(dbvalue.asStmt) < 0: _raise_from_odpi() From 997bfc32fb0bb23fce0d857aac3ca3b75ec8d4c4 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 15 Jul 2025 15:47:48 -0600 Subject: [PATCH 139/239] Doc updates. 
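The pattern these documentation updates recommend is to create asynchronous
connections and cursors with context managers so that no explicit close()
calls are needed. A minimal sketch of that pattern follows (the credentials
and DSN are placeholders, not part of this change):

    import asyncio

    import oracledb

    async def main():
        # the async connection is closed when the "async with" block ends
        async with oracledb.connect_async(
            user="hr", password="hr_password", dsn="localhost/orclpdb"
        ) as connection:
            # the async cursor is closed when the "with" block ends
            with connection.cursor() as cursor:
                await cursor.execute("select sysdate from dual")
                print(await cursor.fetchone())

    asyncio.run(main())
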
--- doc/src/api_manual/async_connection.rst | 9 +++++++ doc/src/api_manual/async_connection_pool.rst | 27 ++++++++++---------- doc/src/api_manual/async_cursor.rst | 16 +++++++++--- doc/src/user_guide/asyncio.rst | 16 +++++++++++- 4 files changed, 50 insertions(+), 18 deletions(-) diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index cdcd4204..b40c21c2 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -69,6 +69,15 @@ AsyncConnection Methods Closes the connection. + .. note:: + + Asynchronous connections are not automatically closed at the end of + scope. This is different to synchronous connection + behavior. Asynchronous connections should either be explicitly closed, + or have been initially created via a `context manager + `__ + ``with`` block. + .. method:: AsyncConnection.commit() Commits any pending transaction to the database. diff --git a/doc/src/api_manual/async_connection_pool.rst b/doc/src/api_manual/async_connection_pool.rst index ad3effad..c469136b 100644 --- a/doc/src/api_manual/async_connection_pool.rst +++ b/doc/src/api_manual/async_connection_pool.rst @@ -57,22 +57,23 @@ AsyncConnectionPool Methods .. method:: AsyncConnectionPool.release(connection, tag=None) - Releases the connection back to the pool now, rather than whenever - ``__del__`` is called. The connection will be unusable from this point - forward; an Error exception will be raised if any operation is attempted - with the connection. Any cursors or LOBs created by the connection will - also be marked unusable and an Error exception will be raised if any - operation is attempted with them. - - Internally, references to the connection are held by cursor objects, - LOB objects, and so on. Once all of these references are released, the - connection itself will be released back to the pool automatically. Either - control references to these related objects carefully or explicitly - release connections back to the pool in order to ensure sufficient - resources are available. + Releases the connection back to the pool now. The connection will be + unusable from this point forward. An Error exception will be raised if any + operation is attempted with the connection. Any cursors or LOBs created by + the connection will also be marked unusable and an Error exception will be + raised if any operation is attempted with them. The ``tag`` parameter is ignored in python-oracledb Thin mode. + .. note:: + + Asynchronous connections are not automatically closed at the end of + scope. This is different to synchronous connection + behavior. Asynchronous connections should either be explicitly + released, or have been initially created via a `context manager + `__ + ``with`` statement. + .. _asyncconnpoolattr: AsyncConnectionPool Attributes diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 35283468..59d02aaf 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -106,10 +106,18 @@ AsyncCursor Methods .. method:: AsyncCursor.close() - A synchronous method that closes the cursor now, rather than whenever - ``__del__`` is called. The cursor will be unusable from this point - forward; an Error exception will be raised if any operation is attempted - with the cursor. + A synchronous method that closes the cursor now. The cursor will be + unusable from this point forward. An Error exception will be raised if any + operation is attempted with the cursor. + + .. 
note:: + + Asynchronous cursors are not automatically closed at the end of + scope. This is different to synchronous cursor behavior. Asynchronous + cursors should either be explicitly closed, or have been initially + created via a `context manager + `__ + ``with`` block. .. method:: AsyncCursor.execute(statement, parameters=None, ** keyword_parameters) diff --git a/doc/src/user_guide/asyncio.rst b/doc/src/user_guide/asyncio.rst index c806c1d1..c808aff8 100644 --- a/doc/src/user_guide/asyncio.rst +++ b/doc/src/user_guide/asyncio.rst @@ -32,6 +32,13 @@ The asynchronous API classes are :ref:`AsyncConnection `, :ref:`AsyncConnectionPool `, :ref:`AsyncCursor `, and :ref:`AsyncLOB `. +Unlike their synchronous counterparts, asynchronous connections and cursors are +not automatically closed at the end of scope. These asynchronous resources +should either be explicitly closed, or have been initially created via a +`context manager +`__ +``with`` block. + .. note:: Concurrent programming with asyncio is only supported in @@ -105,6 +112,8 @@ when they are no longer needed, for example: cursor.close() await connection.close() +Note asynchronous connections are not automatically closed at the end of +scope. This is different to synchronous connection behavior. .. _asyncconnpool: @@ -165,7 +174,12 @@ executed using the asynchronous methods :meth:`AsyncCursor.execute()` or :meth:`AsyncCursor.executemany()`. Rows can be iterated over, or fetched using one of the methods :meth:`AsyncCursor.fetchone()`, :meth:`AsyncCursor.fetchone()`, :meth:`AsyncCursor.fetchmany()`, or -:meth:`AsyncCursor.fetchall()`. +:meth:`AsyncCursor.fetchall()`. Note that explicitly opened asynchronous +cursors are not automatically closed at the end of scope. This is different to +synchronous behavior. Asynchronous cursors should either be explicitly closed, +or have been initially created via a `context manager +`__ +``with`` block. You can also use shortcut methods on the :ref:`asyncconnobj` object such as :meth:`AsyncConnection.execute()` or From 968da54c80b0f174689cb67dd476e4bf5dc4c82b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 15 Jul 2025 15:48:05 -0600 Subject: [PATCH 140/239] Fixed bug when deciding Arrow datatype for numeric expressions (#510). --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/base/metadata.pyx | 2 +- tests/test_8000_dataframe.py | 20 ++++++++++++++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index b0633921..9aa138b8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -53,6 +53,8 @@ Common Changes - Ensure that the GIL is held when releasing references to ``ArrowArray`` objects when exported Arrow buffers are released by the consumer. In some circumstances this could cause a segfault. 
+ - Fixed bug when deciding Arrow datatype for numeric expressions + (`issue 510 `__) Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index f19ea81c..765dbe29 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -67,7 +67,7 @@ cdef class OracleMetadata: elif py_type_num == PY_TYPE_NUM_STR: self._arrow_type = NANOARROW_TYPE_STRING elif py_type_num == PY_TYPE_NUM_INT and self.scale == 0 \ - and self.precision <= 18: + and 0 < self.precision <= 18: self._arrow_type = NANOARROW_TYPE_INT64 else: self._arrow_type = NANOARROW_TYPE_DOUBLE diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 568aac3e..7ecd57a7 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -894,6 +894,26 @@ def test_8033(self): """ ) + def test_8034(self): + "8034 - test expressions on numeric columns" + # fill only the numeric column - credit score + dataset = [ + (1, None, None, None, None, None, None, 225, None), + (2, None, None, None, None, None, None, 365, None), + ] + data = [ + (56.25,), + (91.25,), + ] + self.__check_interop() + self.__populate_table(dataset) + + # Use numeric expression involving a column + statement = "select CreditScore/4 from TestDataFrame order by Id" + ora_df = self.conn.fetch_df_all(statement) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + if __name__ == "__main__": test_env.run_test_cases() From 96fa1222c75c94688996968e6e78ef38466c32c2 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:48:01 -0600 Subject: [PATCH 141/239] Update ODPI-C. --- src/oracledb/impl/thick/odpi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index c4a65f6f..33fc2109 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit c4a65f6f92a15222543260d68c90d8769e2f2d81 +Subproject commit 33fc2109c6bcece63a59dbfe3d700ddfa99af1ef From a692f74b11e93a398887010988e1159b9db2fb73 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:50:21 -0600 Subject: [PATCH 142/239] Added support for being able to bind a data frame to cursor.executemany(). 
--- doc/src/api_manual/async_connection.rst | 11 +- doc/src/api_manual/async_cursor.rst | 9 +- doc/src/api_manual/connection.rst | 11 +- doc/src/api_manual/cursor.rst | 9 +- doc/src/api_manual/dataframe.rst | 51 +-- doc/src/api_manual/module.rst | 13 + doc/src/release_notes.rst | 25 +- doc/src/user_guide/batch_statement.rst | 3 + doc/src/user_guide/dataframes.rst | 263 ++++++++----- samples/dataframe_insert.py | 107 ++++++ samples/dataframe_numpy.py | 4 +- samples/dataframe_pandas.py | 6 +- samples/dataframe_pandas_async.py | 4 +- samples/dataframe_polars.py | 4 +- samples/dataframe_pyarrow.py | 4 +- samples/dataframe_torch.py | 2 +- ...le-Database-The-New-Wave-of-Scripting.html | 133 ++++--- samples/tutorial/query_pandas.py | 2 +- samples/tutorial/run_sql_script.py | 7 - samples/tutorial/setup_tutorial.py | 2 +- samples/tutorial/solutions/query_pandas.py | 22 +- samples/tutorial/sql/setup_tutorial.sql | 16 + src/oracledb/arrow_impl.pxd | 33 +- src/oracledb/arrow_impl.pyx | 16 +- src/oracledb/base_impl.pxd | 15 + src/oracledb/base_impl.pyx | 3 + src/oracledb/connection.py | 2 +- src/oracledb/cursor.py | 7 +- src/oracledb/errors.py | 8 +- src/oracledb/impl/arrow/array.pyx | 357 +++++++++++++++--- src/oracledb/impl/arrow/utils.pyx | 50 ++- src/oracledb/impl/base/bind_var.pyx | 17 + src/oracledb/impl/base/converters.pyx | 116 ++++++ src/oracledb/impl/base/cursor.pyx | 29 ++ src/oracledb/impl/base/metadata.pyx | 40 ++ src/oracledb/impl/base/var.pyx | 7 + src/oracledb/impl/thick/var.pyx | 61 +++ src/oracledb/impl/thin/messages/base.pyx | 63 ++-- src/oracledb/thick_impl.pyx | 1 + src/oracledb/thin_impl.pyx | 2 + utils/templates/connection.py | 2 +- 41 files changed, 1217 insertions(+), 320 deletions(-) create mode 100644 samples/dataframe_insert.py diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index b40c21c2..8dd20964 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -137,9 +137,9 @@ AsyncConnection Methods .. method:: AsyncConnection.fetch_df_all(statement, parameters=None, \ arraysize=None) - Fetches all rows of the SQL query ``statement``, returning them in an - :ref:`OracleDataFrame ` object. An empty - OracleDataFrame is returned if there are no rows available. + Fetches all rows of the SQL query ``statement``, returning them in a + :ref:`DataFrame ` object. An empty DataFrame is + returned if there are no rows available. The ``parameters`` parameter can be a list of tuples, where each tuple item maps to one :ref:`bind variable placeholder ` in ``statement``. It @@ -165,9 +165,8 @@ AsyncConnection Methods size=None) This returns an iterator yielding the next ``size`` rows of the SQL query - ``statement`` in each iteration as an :ref:`OracleDataFrame - ` object. An empty OracleDataFrame is returned if there - are no rows available. + ``statement`` in each iteration as a :ref:`DataFrame ` + object. An empty DataFrame is returned if there are no rows available. The ``parameters`` parameter can be a list of tuples, where each tuple item maps to one :ref:`bind variable placeholder ` in ``statement``. It diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 59d02aaf..487b8717 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -170,7 +170,10 @@ AsyncCursor Methods list of dictionaries, where the keys match the bind variable placeholder names in ``statement``. 
If there are no bind values, or values have previously been bound, the ``parameters`` value can be an integer - specifying the number of iterations. + specifying the number of iterations. The ``parameters`` parameter can also + be a :ref:`DataFrame `, or a third-party data frame + that supports the `Apache Arrow PyCapsule `__ Interface. In python-oracledb Thick mode, if the size of the buffers allocated for any of the parameters exceeds 2 GB, you will receive the error ``DPI-1015: @@ -196,6 +199,10 @@ AsyncCursor Methods string of length 1 so any values that are later bound as numbers or dates will raise a TypeError exception. + .. versionchanged:: 3.3.0 + + Added support for passing data frames in the ``parameters`` parameter. + .. method:: AsyncCursor.fetchall() Fetches all (remaining) rows of a query result, returning them as a list of diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 3164b937..33e4ca7f 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -119,9 +119,9 @@ Connection Methods .. method:: Connection.fetch_df_all(statement, parameters=None, \ arraysize=None) - Fetches all rows of the SQL query ``statement``, returning them in an - :ref:`OracleDataFrame ` object. An empty - OracleDataFrame is returned if there are no rows available. + Fetches all rows of the SQL query ``statement``, returning them in a + :ref:`DataFrame ` object. An empty DataFrame is + returned if there are no rows available. The ``parameters`` parameter can be a list of tuples, where each tuple item maps to one :ref:`bind variable placeholder ` in ``statement``. It @@ -151,9 +151,8 @@ Connection Methods size=None) This returns an iterator yielding the next ``size`` rows of the SQL query - ``statement`` in each iteration as an :ref:`OracleDataFrame - ` object. An empty OracleDataFrame is returned if there - are no rows available. + ``statement`` in each iteration as a :ref:`DataFrame ` + object. An empty DataFrame is returned if there are no rows available. The ``parameters`` parameter can be a list of tuples, where each tuple item maps to one :ref:`bind variable placeholder ` in ``statement``. It diff --git a/doc/src/api_manual/cursor.rst b/doc/src/api_manual/cursor.rst index 3ebf2782..b6efe05b 100644 --- a/doc/src/api_manual/cursor.rst +++ b/doc/src/api_manual/cursor.rst @@ -176,7 +176,10 @@ Cursor Methods list of dictionaries, where the keys match the bind variable placeholder names in ``statement``. If there are no bind values, or values have previously been bound, the ``parameters`` value can be an integer - specifying the number of iterations. + specifying the number of iterations. The ``parameters`` parameter can also + be a :ref:`DataFrame `, or a third-party data frame + that supports the `Apache Arrow PyCapsule `__ Interface. In python-oracledb Thick mode, if the size of the buffers allocated for any of the parameters exceeds 2 GB, you will receive the error ``DPI-1015: @@ -202,6 +205,10 @@ Cursor Methods *None* is assumed to be a string of length 1 so any values that are later bound as numbers or dates will raise a TypeError exception. + .. versionchanged:: 3.3.0 + + Added support for passing data frames in the ``parameters`` parameter. + .. 
method:: Cursor.fetchall() Fetches all (remaining) rows of a query result, returning them as a list of diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index 90e22d20..bbb97465 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -18,58 +18,62 @@ from Oracle Database types to Arrow data types. .. _oracledataframeobj: -OracleDataFrame Objects -======================= +DataFrame Objects +================= -OracleDataFrame objects are returned from the methods +DataFrame objects are returned from the methods :meth:`Connection.fetch_df_all()` and :meth:`Connection.fetch_df_batches()`. -Each column in OracleDataFrame exposes an `Apache Arrow PyCapsule +Each column in a DataFrame exposes an `Apache Arrow PyCapsule `__ interface, giving access to the underlying Arrow array. .. dbapiobjectextension:: +.. versionchanged:: 3.3.0 + + Removed the prefix "Oracle" from the class name. + .. versionadded:: 3.0.0 .. _oracledataframemeth: -OracleDataFrame Methods ------------------------ +DataFrame Methods +----------------- -.. method:: OracleDataFrame.column_arrays() +.. method:: DataFrame.column_arrays() - Returns a list of :ref:`OracleArrowArray ` objects, + Returns a list of :ref:`ArrowArray ` objects, each containing a select list column. -.. method:: OracleDataFrame.column_names() +.. method:: DataFrame.column_names() Returns a list of the column names in the data frame. -.. method:: OracleDataFrame.get_column(i) +.. method:: DataFrame.get_column(i) - Returns an :ref:`OracleArrowArray ` object for the column + Returns an :ref:`ArrowArray ` object for the column at the given index ``i``. -.. method:: OracleDataFrame.get_column_by_name(name) +.. method:: DataFrame.get_column_by_name(name) - Returns an :ref:`OracleArrowArray ` object for the column + Returns an :ref:`ArrowArray ` object for the column with the given name ``name``. -.. method:: OracleDataFrame.num_columns() +.. method:: DataFrame.num_columns() Returns the number of columns in the data frame. -.. method:: OracleDataFrame.num_rows() +.. method:: DataFrame.num_rows() Returns the number of rows in the data frame. .. _oracledataframeattr: -OracleDataFrame Attributes --------------------------- +DataFrame Attributes +-------------------- -.. attribute:: OracleDataFrame.metadata +.. attribute:: DataFrame.metadata This read-only attribute returns the metadata for the data frame as a dictionary with keys ``num_columns``, ``num_rows``, and ``num_chunks``, @@ -78,14 +82,17 @@ OracleDataFrame Attributes .. _oraclearrowarrayobj: -OracleArrowArray Objects -======================== +ArrowArray Objects +================== -OracleArrowArray objects are returned by -:meth:`OracleDataFrame.column_arrays()`. +ArrowArray objects are returned by :meth:`DataFrame.column_arrays()`. These are used for conversion to `PyArrow Tables `__, see :ref:`dataframeformat`. +.. versionchanged:: 3.3.0 + + Removed the prefix "Oracle" from the class name. + .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 1bad3c95..3496c843 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -2163,6 +2163,19 @@ Oracledb Methods .. versionadded:: 2.5.0 +.. function:: from_arrow(obj) + + This method converts a data frame to a :ref:`DataFrame ` + or :ref:`ArrowArray ` instance. + + If ``obj`` supports the Arrow PyCapsule interface ``__arrow_c_stream__`` + method, then ``from_arrow()`` returns the instance as a :ref:`DataFrame + `. 
If ``obj`` does not support that method, but does + support ``__arrow_c_array__``, then an :ref:`ArrowArray + ` is returned. + + .. versionadded:: 3.3.0 + .. function:: get_pool(pool_alias) Returns a :ref:`ConnectionPool object ` from the python-oracledb diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 9aa138b8..e6c2ec20 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -42,22 +42,25 @@ Common Changes #) Changes to :ref:`data frame ` support: + - Added support for binding data frames to :meth:`Cursor.executemany()` and + :meth:`AsyncCursor.executemany()` for fast data ingestion. See + :ref:`dfinsert`. - Added internal support for the ArrowArrayStream PyCapsule interface to - simplify :ref:`OracleDataFrame ` use. - - Remove use of the DataFrame Interchange Protocol in - :ref:`OracleDataFrames `. - - Documentation on methods and attributes on the ``DataFrame`` and - ``ArrowArray`` objects are now available in Python plugins such as those - found in VS Code - - Upgraded Arrow C Data (nanoarrow) API version to 0.7.0 - - Ensure that the GIL is held when releasing references to ``ArrowArray`` - objects when exported Arrow buffers are released by the consumer. In - some circumstances this could cause a segfault. + simplify :ref:`DataFrame ` use. + - Remove use of the DataFrame Interchange Protocol in python-oracledb + :ref:`DataFrame ` objects. + - Documentation on methods and attributes of the :ref:`DataFrame + ` and :ref:`ArrowArray ` objects + is now available when using IDE introspection. + - Upgraded Arrow C Data (nanoarrow) API version to 0.7.0. + - Ensure that the GIL is held when releasing references to :ref:`ArrowArray + ` objects when exported Arrow buffers are released + by the consumer. This avoids a segfault seen in some circumstances. - Fixed bug when deciding Arrow datatype for numeric expressions (`issue 510 `__) Note the data frame support in python-oracledb 3.3 is a pre-release, and - may change in a future version + may change in a future version. oracledb `3.2.0 `__ (June 2025) -------------------------------------------------------------------------------------------------- diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index 2248e4a2..bc860312 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -102,6 +102,9 @@ the bind variable placeholder names: cursor.executemany("insert into ParentTable values :pid, :pdesc)", data) +A data frame can alternatively be passed to :meth:`Cursor.executemany()`, see +:ref:`dfinsert`. + .. _predefmemory: Predefining Memory Areas diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index b500b51c..995ba768 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -4,29 +4,42 @@ Working with Data Frames ************************ -Python-oracledb queries can fetch directly to data frames. This can improve -performance and reduce memory requirements when your application uses Python -data frame libraries such as `Apache PyArrow -`__, `Pandas -`__, `Polars `__, `NumPy +Python-oracledb can query directly to a data frame format, and can also insert +data frames into Oracle Database. This can improve performance and reduce +memory requirements when your application uses Python data frame libraries such +as `Apache PyArrow `__, +`Pandas `__, `Polars `__, `NumPy `__, `Dask `__, `PyTorch `__, or writes files in `Apache Parquet -`__ format. 
The :ref:`OracleDataFrame -` objects fetched expose an Apache Arrow PyCapsule -Interface which, in some cases, allow zero-copy data interchanges to the data -frame objects of other libraries. +`__ format. + +Python-oracledb has a :ref:`DataFrame ` object that exposes +an Apache Arrow PyCapsule Interface. This enables zero-copy data interchanges +to the data frame objects of other libraries. .. note:: The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. -**Fetching Data Frames** +.. _dfquery: + +Fetching Data Frames +==================== + +Data frames can be fetched by using a standard SQL query. + +Data Frame Queries +------------------ -The method :meth:`Connection.fetch_df_all()` fetches all rows from a query. -The method :meth:`Connection.fetch_df_batches()` implements an iterator for -fetching batches of rows. The methods return :ref:`OracleDataFrame -` objects. +Python-oracledb has two methods for fetching rows into data frames: + +- :meth:`Connection.fetch_df_all()` fetches all rows from a query +- :meth:`Connection.fetch_df_batches()` implements an iterator for fetching + batches of rows + +The methods return python-oracledb :ref:`DataFrame ` +objects. For example, to fetch all rows from a query and print some information about the results: @@ -57,10 +70,8 @@ To fetch in batches, use an iterator: # Adjust "size" to tune the query fetch performance # Here it is small to show iteration for odf in connection.fetch_df_batches(statement=sql, size=4): - df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() - print(df) + pdf = pyarrow.table(odf).to_pandas() + print(pdf) With Oracle Database's standard DEPARTMENTS table, this would display:: @@ -77,27 +88,18 @@ With Oracle Database's standard DEPARTMENTS table, this would display:: Converting to other data frame formats is :ref:`shown later ` in this chapter. -**Inserting OracleDataFrames into Oracle Database** - -To insert data currently in :ref:`OracleDataFrame ` format -into Oracle Database requires it to be converted. For example, you could -convert it into a Pandas DataFrame for insert with the Pandas method -``to_sql()``. Or convert into a Python list via the PyArrow -``Table.to_pylist()`` method and then use standard python-oracledb -functionality to execute a SQL INSERT statement. - .. _dftypemapping: Data Frame Type Mapping ----------------------- -Internally, python-oracledb's :ref:`OracleDataFrame ` -support makes use of `Apache nanoarrow `__ +Internally, python-oracledb's :ref:`DataFrame ` support +makes use of `Apache nanoarrow `__ libraries to build data frames. The following data type mapping occurs from Oracle Database types to the Arrow -types used in OracleDataFrame objects. Querying any other data types from -Oracle Database will result in an exception. :ref:`Output type handlers +types used in python-oracledb DataFrame objects. Querying any other data types +from Oracle Database will result in an exception. :ref:`Output type handlers ` cannot be used to map data types. .. list-table-with-summary:: Mapping from Oracle Database to Arrow data types @@ -106,7 +108,7 @@ Oracle Database will result in an exception. :ref:`Output type handlers :widths: 1 1 :width: 100% :align: left - :summary: The first column is the Oracle Database type. The second column is the Arrow data type used in the OracleDataFrame object. + :summary: The first column is the Oracle Database type. 
The second column is the Arrow data type used in the python-oracledb DataFrame object. * - Oracle Database Type - Arrow Data Type @@ -153,8 +155,9 @@ Oracle Database will result in an exception. :ref:`Output type handlers When converting Oracle Database NUMBERs: -- If the column has been created without a precision and scale, then the Arrow - data type will be DOUBLE. +- If the column has been created without a precision and scale, or you are + querying an expression that results in a number without precision or scale, + then the Arrow data type will be DOUBLE. - If :attr:`defaults.fetch_decimals` is set to *True*, then the Arrow data type is DECIMAL128. @@ -240,26 +243,26 @@ When converting Oracle Database DATEs and TIMESTAMPs: .. _convertingodf: -Converting OracleDataFrame to Other Data Frames ------------------------------------------------ +Converting python-oracledb's DataFrame to Other Data Frames +----------------------------------------------------------- -To use data frames in your chosen analysis library, :ref:`OracleDataFrame -objects ` can be converted. Examples for some libraries are -shown in the following sections. Other libraries will have similar methods. +To use data frames in your chosen analysis library, :ref:`DataFrame objects +` can be converted. Examples for some libraries are shown +in the following sections. Other libraries will have similar methods. **Conversion Overview** -The guidelines for converting :ref:`OracleDataFrame objects +Guidelines for converting python-oracledb :ref:`DataFrame objects ` to data frames for other libraries are: - To convert to a `PyArrow Table `__, use `pyarrow.Table.from_arrays() - `__ which leverages the Arrow PyCapsule interface. + generated/pyarrow.Table.html>`__, use `pyarrow.table() + `__ which leverages the Arrow PyCapsule interface. - To convert to a `Pandas DataFrame `__, use - `pyarrow.Table.to_pandas() `__. - If you want to use a library other than Pandas or PyArrow, use the library's @@ -268,13 +271,7 @@ The guidelines for converting :ref:`OracleDataFrame objects `__ use `polars.from_arrow() `__. -- If your library does not support ``from_arrow()``, then use - ``from_dataframe()`` if the library supports it. This can be slower, - depending on the implementation. - -Overall, the general recommendation is to use Apache Arrow as much as possible -but if there are no options, then use ``from_dataframe()``. You should test -and benchmark to find the best option for your applications. +You should test and benchmark to find the best option for your applications. Creating PyArrow Tables +++++++++++++++++++++++ @@ -286,31 +283,29 @@ An example that creates and uses a `PyArrow Table import pyarrow - # Get an OracleDataFrame + # Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance - sql = "select id, name from SampleQueryTab order by id" + sql = "select id, name from mytable order by id" odf = connection.fetch_df_all(statement=sql, arraysize=100) # Create a PyArrow table - pyarrow_table = pyarrow.Table.from_arrays( - arrays=odf.column_arrays(), names=odf.column_names() - ) + pyarrow_table = pyarrow.table(odf) print("\nNumber of rows and columns:") (r, c) = pyarrow_table.shape print(f"{r} rows, {c} columns") -This makes use of :meth:`OracleDataFrame.column_arrays()` which returns a list -of :ref:`OracleArrowArray Objects `. - -Internally `pyarrow.Table.from_arrays() `__ leverages the Apache -Arrow PyCapsule interface that :ref:`OracleDataFrame ` -exposes. 
+Internally `pyarrow.table() +`__ +leverages the Apache Arrow PyCapsule interface that python-oracledb +:ref:`DataFrame ` objects expose. See `samples/dataframe_pyarrow.py `__ for a runnable example. +.. _pandasdf: + Creating Pandas DataFrames ++++++++++++++++++++++++++ @@ -322,20 +317,18 @@ org/docs/reference/api/pandas.DataFrame.html#pandas.DataFrame>`__ is: import pandas import pyarrow - # Get an OracleDataFrame + # Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance sql = "select * from mytable where id = :1" myid = 12345 # the bind variable value odf = connection.fetch_df_all(statement=sql, parameters=[myid], arraysize=1000) # Get a Pandas DataFrame from the data. - df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() + pdf = pyarrow.table(odf).to_pandas() # Perform various Pandas operations on the DataFrame - print(df.T) # transform - print(df.tail(3)) # last three rows + print(pdf.T) # transform + print(pdf.tail(3)) # last three rows The `to_pandas() `__ method supports arguments like @@ -356,21 +349,18 @@ An example that creates and uses a `Polars DataFrame import polars import pyarrow - # Get an OracleDataFrame + # Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance - sql = "select id from SampleQueryTab order by id" + sql = "select id from mytable order by id" odf = connection.fetch_df_all(statement=sql, arraysize=100) # Convert to a Polars DataFrame - pyarrow_table = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ) - df = polars.from_arrow(pyarrow_table) + pdf = polars.from_arrow(odf) # Perform various Polars operations on the DataFrame - r, c = df.shape + r, c = pdf.shape print(f"{r} rows, {c} columns") - print(p.sum()) + print(pdf.sum()) See `samples/dataframe_polars.py `__ for a runnable example. @@ -381,8 +371,9 @@ Writing Apache Parquet Files To write output in `Apache Parquet `__ file format, you can use data frames as an efficient intermediary. Use the :meth:`Connection.fetch_df_batches()` iterator and convert to a `PyArrow Table -`__ that can -be written by the PyArrow library. +`__ that can be written by the PyArrow +library. .. code-block:: python @@ -399,9 +390,7 @@ be written by the PyArrow library. for odf in connection.fetch_df_batches(statement=sql, size=BATCH_SIZE): # Get a PyArrow table from the query results - pyarrow_table = pyarrow.Table.from_arrays( - arrays=odf.column_arrays(), names=odf.column_names() - ) + pyarrow_table = pyarrow.table(odf) if not pqwriter: pqwriter = pq.ParquetWriter(FILE_NAME, pyarrow_table.schema) @@ -430,9 +419,9 @@ For example, to convert to `NumPy `__ ``ndarray`` format: import numpy import pyarrow - SQL = "select id from SampleQueryTab order by id" + SQL = "select id from mytable order by id" - # Get an OracleDataFrame + # Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = connection.fetch_df_all(statement=SQL, arraysize=100) @@ -459,9 +448,9 @@ An example of working with data as a `Torch tensor import pyarrow import torch - SQL = "select id from SampleQueryTab order by id" + SQL = "select id from mytable order by id" - # Get an OracleDataFrame + # Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = connection.fetch_df_all(statement=SQL, arraysize=100) @@ -479,8 +468,8 @@ blob/main/samples/dataframe_torch.py>`__ for a runnable example. .. 
_dfvector: -Using VECTOR data with Data Frames ----------------------------------- +Fetching VECTOR columns to Data Frames +-------------------------------------- Columns of the `VECTOR `__ data type can be fetched with @@ -508,17 +497,25 @@ then the code: .. code-block:: python odf = connection.fetch_df_all("select v64 from myvec") - pyarrow_table = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ) + pyarrow_table = pyarrow.table(odf) will result in a PyArrow table containing lists of doubles. The table can be -converted to a data frame of your chosen library using functionality described -earlier in this chapter. For example, to convert to Pandas: +converted to a data frame of your chosen library. + +For example, to convert the PyArrow table to Pandas: .. code-block:: python pdf = pyarrow_table.to_pandas() + +Or you can convert the python-oracledb :ref:`DataFrame ` +directly if the library supports it. For example, to fetch to Pandas the syntax +is the same as shown in :ref:`Creating Pandas DataFrames `: + +.. code-block:: python + + odf = connection.fetch_df_all("select v64 from myvec") + pdf = pyarrow.table(odf).to_pandas() print(pdf) The output will be:: @@ -550,9 +547,7 @@ then the code to fetch as data frames: import pyarrow odf = connection.fetch_df_all("select v64 from myvec") - pdf = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() - ).to_pandas() + pdf = pyarrow.table(odf).to_pandas() print(pdf) @@ -604,9 +599,7 @@ final sparse dataframe: return pandas.arrays.SparseArray.from_spmatrix(sparse_matrix) odf = connection.fetch_df_all("select v64 from myvec") - pdf = pyarrow.Table.from_arrays( - odf.column_arrays(), odf.column_names() - ).to_pandas() + pdf = pyarrow.table(odf).to_pandas() pdf["SPARSE_ARRAY_V64"] = pdf["V64"].apply(convert_to_sparse_array) @@ -623,3 +616,75 @@ The code will print:: IntIndex Indices: ar... Name: SPARSE_ARRAY_V64, dtype: object + +.. _dfinsert: + +Inserting Data Frames +===================== + +Python-oracledb :ref:`DataFrame ` instances, or third-party +DataFrame instances that support the Apache Arrow PyCapsule Interface, can be +inserted into Oracle Database by passing them directly to +:meth:`Cursor.executemany()` or :meth:`AsyncCursor.executemany()`. + +For example, with the table:: + + create table t (col1 number, col2 number); + +The following code will insert a Pandas DataFrame: + +.. code-block:: python + + import pandas + + d = {'A': [1.2, 2.4, 8.9], 'B': [3.333, 4.9, 0.0]} + pdf = pandas.DataFrame(data=d) + + cursor.executemany("insert into t (col1, col2) values (:1, :2)", pdf) + +Inserting to a dense VECTOR column:: + + create table SampleVectorTab (v64 vector(3, float64)); + +Can be done like: + +.. code-block:: python + + import pandas + + d = {"v": [[3.3, 1.32, 5.0], [2.2, 2.32, 2.0]]} + pdf = pandas.DataFrame(data=d) + + cursor.executemany("insert into SampleVectorTab (v64) values (:1)", pdf) + +See `dataframe_insert.py `__ for a runnable example. + +For general information about fast data ingestion, and discussion of +:meth:`Cursor.executemany()` and :meth:`AsyncCursor.executemany()` options, see +:ref:`batchstmnt`. + +**Explicit Conversion to DataFrame or ArrowArray** + +Data frames that support the Apache Arrow PyCapsule Interface can be explicitly +converted to :ref:`DataFrame ` and :ref:`ArrowArray +` objects by calling :func:`oracledb.from_arrow()`. The +resulting object depends on what interface is supported by the source object. + +For example: + +.. 
code-block:: python + + import pandas + + d = {'A': [1.2, 2.4, 8.9], 'B': [3.333, 4.9, 0.0]} + pdf = pandas.DataFrame(data=d) + print(type(pdf)) + + odf = oracledb.from_arrow(pdf) + print(type(odf)) + +will print:: + + + diff --git a/samples/dataframe_insert.py b/samples/dataframe_insert.py new file mode 100644 index 00000000..0a1e89d6 --- /dev/null +++ b/samples/dataframe_insert.py @@ -0,0 +1,107 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# dataframe_insert.py +# +# Shows how executemany() can be used to insert a Pandas dataframe directly +# into Oracle Database. The same technique can be used with data frames from +# many other libraries. 
+# ----------------------------------------------------------------------------- + +import sys +import pandas + +import oracledb +import sample_env + +# determine whether to use python-oracledb thin mode or thick mode +if not sample_env.get_is_thin(): + oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) + + +connection = oracledb.connect( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + params=sample_env.get_connect_params(), +) + +# ----------------------------------------------------------------------------- +# +# Inserting a simple DataFrame + +with connection.cursor() as cursor: + + # Create a Pandas DataFrame + print("Pandas Dataframe 1:") + d = {"A": [101, 213, 394], "B": ["Christie", "Cindy", "Kate"]} + pdf = pandas.DataFrame(data=d) + print(pdf) + + # Insert data into NUMBER and VARCHAR2(20) columns using Oracle Database's + # efficient "Array DML" method + cursor.executemany("insert into mytab (id, data) values (:1, :2)", pdf) + + # Check data + print("\nOracle Database Query:") + cursor.execute("select * from mytab order by id") + columns = [col.name for col in cursor.description] + print(columns) + for r in cursor: + print(r) + +# ----------------------------------------------------------------------------- +# +# Inserting VECTORs + +# The VECTOR example only works with Oracle Database 23.4 or later +if sample_env.get_server_version() < (23, 4): + sys.exit("This example requires Oracle Database 23.4 or later.") + +# The VECTOR example works with thin mode, or with thick mode using Oracle +# Client 23.4 or later +if not connection.thin and oracledb.clientversion()[:2] < (23, 4): + sys.exit( + "This example requires python-oracledb thin mode, or Oracle Client" + " 23.4 or later" + ) + +with connection.cursor() as cursor: + + # Create a Pandas DataFrame + print("\nPandas Dataframe 2:") + d = {"v": [[3.3, 1.32, 5.0], [2.2, 2.32, 2.0]]} + pdf = pandas.DataFrame(data=d) + print(pdf) + + # Insert data into a VECTOR column using Oracle Database's + # efficient "Array DML" method + cursor.executemany("insert into SampleVectorTab (v64) values (:1)", pdf) + + # Check data + print("\nOracle Database Query:") + cursor.execute("select v64 from SampleVectorTab order by id") + for (r,) in cursor: + print(r) diff --git a/samples/dataframe_numpy.py b/samples/dataframe_numpy.py index 4285720c..ad5f9bad 100644 --- a/samples/dataframe_numpy.py +++ b/samples/dataframe_numpy.py @@ -52,7 +52,7 @@ # # Fetching all records -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance sql = "select id from SampleQueryTab order by id" odf = connection.fetch_df_all(statement=sql, arraysize=100) @@ -103,7 +103,7 @@ with connection.cursor() as cursor: cursor.executemany("insert into SampleVectorTab (v64) values (:1)", rows) -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance sql = "select v64 from SampleVectorTab order by id" odf = connection.fetch_df_all(statement=sql, arraysize=100) diff --git a/samples/dataframe_pandas.py b/samples/dataframe_pandas.py index ccf1fe96..49c13be0 100644 --- a/samples/dataframe_pandas.py +++ b/samples/dataframe_pandas.py @@ -54,7 +54,7 @@ # # Fetching all records -# Get an OracleDataFrame. +# Get a python-oracledb DataFrame. 
# Adjust arraysize to tune the query fetch performance sql = "select id, name from SampleQueryTab order by id" odf = connection.fetch_df_all(statement=sql, arraysize=100) @@ -127,7 +127,7 @@ cursor.executemany("insert into SampleVectorTab (v64) values (:1)", rows) -# Get an OracleDataFrame. +# Get a python-oracledb DataFrame. # Adjust arraysize to tune the query fetch performance sql = "select id, v64 from SampleVectorTab order by id" odf = connection.fetch_df_all(statement=sql, arraysize=100) @@ -153,7 +153,7 @@ # Scaling all vectors by a factor of two print("\nScaled:") -df["SCALED_V64_COL"] = df["V64"].apply(lambda x: numpy.array(x) * 2) +df["SCALED_V64_COL"] = df["V64"] * 2 print(df) # Calculating vector norms diff --git a/samples/dataframe_pandas_async.py b/samples/dataframe_pandas_async.py index 25271fd5..eaadead2 100644 --- a/samples/dataframe_pandas_async.py +++ b/samples/dataframe_pandas_async.py @@ -59,7 +59,7 @@ async def main(): # # Fetching all records - # Get an OracleDataFrame. + # Get a python-oracledb DataFrame. # Adjust arraysize to tune the query fetch performance odf = await connection.fetch_df_all(statement=SQL, arraysize=100) @@ -131,7 +131,7 @@ async def main(): "insert into SampleVectorTab (v64) values (:1)", rows ) - # Get an OracleDataFrame. + # Get a python-oracledb DataFrame. # Adjust arraysize to tune the query fetch performance sql = "select id, v64 from SampleVectorTab order by id" odf = await connection.fetch_df_all(statement=sql, arraysize=100) diff --git a/samples/dataframe_polars.py b/samples/dataframe_polars.py index 9416af0c..5d1aed22 100644 --- a/samples/dataframe_polars.py +++ b/samples/dataframe_polars.py @@ -52,7 +52,7 @@ SQL1 = "select * from SampleQueryTab order by id" -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = connection.fetch_df_all(statement=SQL1, arraysize=100) @@ -73,7 +73,7 @@ SQL2 = "select id from SampleQueryTab order by id" -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = connection.fetch_df_all(statement=SQL2, arraysize=100) diff --git a/samples/dataframe_pyarrow.py b/samples/dataframe_pyarrow.py index 8ce20a4d..1cc56dab 100644 --- a/samples/dataframe_pyarrow.py +++ b/samples/dataframe_pyarrow.py @@ -51,7 +51,7 @@ SQL1 = "select id, name from SampleQueryTab order by id" -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = connection.fetch_df_all(statement=SQL1, arraysize=100) @@ -76,7 +76,7 @@ SQL2 = "select id from SampleQueryTab order by id" -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = connection.fetch_df_all(statement=SQL2, arraysize=100) diff --git a/samples/dataframe_torch.py b/samples/dataframe_torch.py index e45d1940..de2d0113 100644 --- a/samples/dataframe_torch.py +++ b/samples/dataframe_torch.py @@ -48,7 +48,7 @@ SQL = "select id from SampleQueryTab order by id" -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = connection.fetch_df_all(statement=SQL, arraysize=100) diff --git a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html index 394d51a5..5243700e 100644 --- a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html +++ 
b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html @@ -135,7 +135,8 @@

Contents

  • 16. Dataframes
      -
    • 16.1 Working with Dataframes
    • +
    • 16.1 Fetching Dataframes
    • +
    • 16.2 Inserting Dataframes
  • 17. Concurrent Programming with asyncio @@ -285,7 +286,7 @@

    Review the privileged database credentials used for creating the directory. This file is included in other Python files for creating and dropping the tutorial user.

    -

    Edit db_config_sys.py file and change the default +

    Edit db_config_sys.py and change the default values to match the system connection information for your environment. Alternatively, you can set the given environment variables in your terminal window. For example, the default username is "SYSTEM" @@ -384,7 +385,7 @@

    Review the connection credentials used by the tutorial scripts

    in other Python and SQL files for setting up the database connection.

    -

    Edit db_config.py file and change the default values to +

    Edit db_config.py and change the default values to match the connection information for your environment. Alternatively, you can set the given environment variables in your terminal window. For example, the default username is "pythondemo" unless the @@ -624,7 +625,7 @@

    1.5 Checking versions

    print(oracledb.__version__) # two underscores before and after the version
  • -

    Run the script:

    +

    Run the script in a terminal window:

    python versions.py
    @@ -720,7 +721,7 @@

    1.7 Checking Connection Health

    else: print("Unusable connection. Please check the database and network settings.")
    -

    Run the script:

    +

    Run the script in a terminal window:

    python connect_health.py
    @@ -1028,7 +1029,7 @@

    2.4 Connection pooling and DRCP

    The script logic does not need to be changed to benefit from DRCP connection pooling.
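    As a reminder of what does change, only the connection request is affected. A minimal sketch (not part of the tutorial files, and assuming the tutorial's default DRCP service localhost/freepdb1:pooled with placeholder credentials) is:

        import oracledb

        # Pool that requests DRCP pooled server processes
        pool = oracledb.create_pool(
            user="pythondemo",
            password="a_secret_password",          # placeholder
            dsn="localhost/freepdb1:pooled",        # DRCP-enabled service
            min=1, max=4, increment=1,
            cclass="PYTHONDEMO",                    # DRCP connection class
            purity=oracledb.PURITY_SELF,
        )

        with pool.acquire() as connection:
            with connection.cursor() as cursor:
                cursor.execute("select user from dual")
                print(cursor.fetchone())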

    -

    Run the script:

    +

    Run the script in a terminal window:

    python connect_pool2.py
    @@ -1498,7 +1499,7 @@

    4.3 Batcherrors

    print(res)
    -

    Run the script:

    +

    Run the script in a terminal window:

    python bind_insert.py
    @@ -1719,7 +1720,7 @@

    6.1 Basic output type handler

    print("Value:", value, "* 3 =", value * 3)
    -

    Run the script:

    +

    Run the script in a terminal window:

    python type_converter.py
    @@ -1873,7 +1874,7 @@

    6.3 Input type handlers

    handler input_type_handler whenever an instance of Building is inserted with the cursor.

    -

    To confirm the behavior, run the script:

    +

    Run the script in a terminal window:

    python type_input.py
    @@ -1930,7 +1931,7 @@

    7.1 Fetching a CLOB using a locator

    LOB Object. Methods on LOB include size() and read().
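    For reference, calling these methods on a fetched locator looks like the following minimal sketch; the table and column names here are placeholders, and clob.py uses its own query:

        # Placeholder table and column names for illustration only
        cursor.execute("select myclob from testclobs where id = :1", [1])
        (clob,) = cursor.fetchone()
        print("CLOB length:", clob.size())   # length in characters
        print("CLOB data:", clob.read())     # read the whole CLOB as a string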

    -

    To see the output, run the script:

    +

    To see the output, run the script in a terminal window:

    python clob.py
    @@ -2034,7 +2035,7 @@

    8. JSON

    print(j) -

    Run the script:

    +

    Run the script in a terminal window:

    python json_insert.py
    @@ -2054,7 +2055,7 @@

    8. JSON

    print(j) -

    Run the script:

    +

    Run the script in a terminal window:

    python json_insert.py
    @@ -2127,7 +2128,7 @@

    9.2 Fetching a VECTOR

    print(type(v)) -

    Run the script:

    +

    Run the script in a terminal window:

    python vector.py
    @@ -2239,7 +2240,7 @@

    9.3 Working with VECTORs and NumPy

    print(type(v)) -

    Run the script

    +

    Run the script in a terminal window:

    python vector_numpy.py
    @@ -2283,7 +2284,7 @@

    10. Rowfactory functions

    first uses array indexes like row[0]. The second uses loop target variables that take each row tuple's values.

    -

    Run the script:

    +

    Run the script in a terminal window:

    python rowfactory.py
    @@ -2365,7 +2366,7 @@

    11. Subclassing connections and cursors

    custom subclass. All the python-oracledb methods such as cursor() are available, as shown by the query.

    -

    Run the script:

    +

    Run the script in a terminal window:

    python subclass.py
    @@ -2500,7 +2501,7 @@

    12.1 How to bind named objects

    then set. Oracle VARRAY types such as SDO_ELEM_INFO_ARRAY are set with extend().

    -

    Run the script:

    +

    Run the script in a terminal window:

    python bind_sdo.py
    @@ -2687,7 +2688,7 @@

    13.1 Input type handlers with named objects

    called by the input type handler SDOInputTypeHandler whenever an instance of mySDO is inserted with the cursor.

    -

    To confirm the behavior, run the script:

    +

    Run the script in a terminal window:

    python type_input_named_obj.py
    @@ -2808,7 +2809,7 @@

    13.2 Output type handlers with named objects

    the cursor and needs to be converted to a user-defined Python object (mySDO object in this case).

    -

    To confirm the behavior, run the script:

    +

    Run the script in a terminal window:

    python type_output_named_obj.py
    @@ -2916,7 +2917,7 @@

    14.1 Message passing with Oracle Advanced Queuing

    This file sets up Advanced Queuing using Oracle's DBMS_AQADM package. The queue is used for passing Oracle UDT_BOOK objects.

    -

    Run the script:

    +

    Run the script in a terminal window:

    python aq.py
    @@ -3008,22 +3009,26 @@

    15. Scrollable cursors

    16. Dataframes

    -

    Python-oracledb can query data directly into a dataframe format that exposes -an Apache Arrow PyCapsule interface. This is an efficient way to use Python -libraries such as Apache PyArrow, Pandas, Polars, NumPy, PyTorch, or to write -files in Apache Parquet format. Documentation link for further reading: Python-oracledb has a DataFrame class that exposes an Apache Arrow PyCapsule +interface. Data can be fetched directly from Oracle Database into DataFrame +instances for efficient use with Python libraries such as Apache PyArrow, +Pandas, Polars, NumPy, PyTorch, or to write files in Apache Parquet +format. Data that is already in a third-party library DataFrame instance that +exposes the PyCapsule interface can be inserted into Oracle Database by passing +the instance directly to executemany(). Documentation link +for further reading: Fetching Data Frames.

    -
      -
    • 16.1 Working with Dataframes

      - -

      This section shows how to efficiently fetch data for use with Pandas. It - uses the pyarrow and pandas packages, which need to be - installed:

      +

      This section shows how to efficiently fetch data for use with Pandas, modify +it, and then insert it back into Oracle Database. The pyarrow and +pandas packages need to be installed:

      python -m pip install pyarrow pandas --upgrade
      +
        +
      • 16.1 Fetching Dataframes

        +

        Review the code contained in query_pandas.py:

        @@ -3038,21 +3043,23 @@ 

        16. Dataframes

        odf = con.fetch_df_all( statement="select sal from emp order by empno", - arraysize=100) + arraysize=100 +)
        -

        This uses fetch_df_all() to directly fetch data into an - OracleDataFrame that internally exposes a PyCapsule interface. For large - result sets you can tune the arraysize parameter, or use an iterator from - the fetch_df_batches() method.

        +

        This uses fetch_df_all() to directly fetch rows into a + dataframe that internally exposes a PyCapsule interface. For large result + sets you can tune the arraysize parameter, or use an iterator from the + fetch_df_batches() method.

        To use the new dataframe in Pandas, edit query_pandas.py and add this code at the bottom:

        -df = pyarrow.Table.from_arrays(
        -    odf.column_arrays(), names=odf.column_names()
        -).to_pandas()
        +# Get a Pandas DataFrame from the data
        +df = pyarrow.table(odf).to_pandas()
        +
        +# Perform various operations on the Pandas DataFrame
         
         print("\nSum:")
         print(df.sum())
        @@ -3061,8 +3068,9 @@ 

        16. Dataframes

        print(df.median())
        -

        This uses PyArrow functionality to convert the OracleDataFrame to a - Pandas dataframe.

        +

        This uses PyArrow functionality to convert the python-oracledb DataFrame + to a Pandas DataFrame. Pandas sum and median operations are then + performed.

        Run the script in a terminal window:

        @@ -3071,6 +3079,43 @@

        16. Dataframes

        The output is the expected calculations on the employee salary data.
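    As an optional variation that is not part of the tutorial files, the same salaries can be fetched in batches with the fetch_df_batches() iterator mentioned above. A minimal sketch using the existing con connection and emp table is:

        import pyarrow

        # Process the salaries in data frame batches of at most 50 rows
        for batch in con.fetch_df_batches(
            statement="select sal from emp order by empno", size=50
        ):
            pdf = pyarrow.table(batch).to_pandas()
            print(pdf.sum())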

      • + +
      • 16.2 Inserting Dataframes

        + +

        Python-oracledb DataFrame instances, and instances of DataFrames from + third-party libraries that support the Apache Arrow PyCapsule Interface, + can be inserted into Oracle Database by passing them directly to + executemany().

        + + +

        Edit query_pandas.py and add this code at the bottom:

        + +
        +# Double everyone's salary and insert the Pandas DataFrame into Oracle Database
        +
        +df = df * 2
        +
        +cur = con.cursor()
        +cur.executemany("insert into pdtab (sal) values (:1)", df)
        +
        +# Check the inserted data
        +
        +print("\nNew Salaries")
        +cur.execute("select * from pdtab")
        +res = cur.fetchall()
        +print(res)
        +
        + +

        Run the script in a terminal window:

        + +
        python query_pandas.py
        + +

        This increases everyone's salary in the Pandas DataFrame and then + inserts the DataFrame into Oracle Database. The new salaries are queried + back to confirm the insertion was successful.
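    The same executemany() call accepts data frames from other libraries too, since any object exposing the Arrow PyCapsule stream interface (__arrow_c_stream__()) can be passed directly. For example, a minimal sketch inserting a Polars DataFrame into the same pdtab table, assuming a Polars version recent enough to expose that interface, is:

        import polars

        pldf = polars.DataFrame({"sal": [9000.0, 9500.0]})
        cur.executemany("insert into pdtab (sal) values (:1)", pldf)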

        + +
      • +

      17. Concurrent Programming with asyncio

      @@ -3153,7 +3198,7 @@

      17. Concurrent Programming with asyncio

      When all the awaitables executed by gather() have completed, the pool is closed.
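      For orientation, the overall pattern looks like the following minimal, self-contained sketch (not the tutorial's async_gather.py itself; it assumes the demo emp table used elsewhere in the tutorial and the db_config credentials module):

          import asyncio
          import oracledb
          import db_config

          async def get_sal(pool, empno):
              # Each task borrows a connection from the shared async pool
              async with pool.acquire() as connection:
                  return await connection.fetchone(
                      "select sal from emp where empno = :1", [empno]
                  )

          async def main():
              pool = oracledb.create_pool_async(
                  user=db_config.user, password=db_config.pw, dsn=db_config.dsn,
                  min=1, max=4, increment=1,
              )
              # Run the queries concurrently, then close the pool
              results = await asyncio.gather(
                  *(get_sal(pool, n) for n in (7369, 7499, 7521))
              )
              print(results)
              await pool.close()

          asyncio.run(main())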

      -

      Run the script:

      +

      Run the script in a terminal window:

      python async_gather.py
      @@ -3245,7 +3290,7 @@

      18. Pipelining multiple operations

      When all the awaitables executed by gather() have completed, the results are displayed.
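      As a reference point, the core pipelining calls have the following general shape. This is a minimal sketch rather than the tutorial's pipeline.py, and it assumes the db_config credentials module and the demo emp table:

          import asyncio
          import oracledb
          import db_config

          async def main():
              connection = await oracledb.connect_async(
                  user=db_config.user, password=db_config.pw, dsn=db_config.dsn
              )
              # Queue independent operations; they are sent together and can
              # complete in a single round trip where the database supports it
              pipeline = oracledb.create_pipeline()
              pipeline.add_fetchone("select user from dual")
              pipeline.add_fetchall("select sal from emp order by empno")
              for result in await connection.run_pipeline(pipeline):
                  print(result.rows)
              await connection.close()

          asyncio.run(main())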

      -

      Run the script:

      +

      Run the script in a terminal window:

      python pipeline.py
      @@ -3356,7 +3401,7 @@

      19.2 Review the configuration files for thick mode

      These are included in other Python and SQL files for setting up the database connection.

      -

      Edit db_config_thick.py file and change the default values to +

      Edit db_config_thick.py and change the default values to match the connection information for your environment. Alternatively, you can set the given environment variables in your terminal window. For example, the default username is "pythondemo" unless the environment variable @@ -3476,7 +3521,7 @@

      20.1 Inserting JSON Documents

      content is a dictionary. You can also get a JSON string by calling doc.getContentAsString().

      -

      Run the script:

      +

      Run the script in a terminal window:

      python soda.py
      diff --git a/samples/tutorial/query_pandas.py b/samples/tutorial/query_pandas.py index 06f29146..7c991b7d 100644 --- a/samples/tutorial/query_pandas.py +++ b/samples/tutorial/query_pandas.py @@ -35,7 +35,7 @@ user=db_config.user, password=db_config.pw, dsn=db_config.dsn ) -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = con.fetch_df_all( statement="select sal from emp order by empno", arraysize=100 diff --git a/samples/tutorial/run_sql_script.py b/samples/tutorial/run_sql_script.py index b70b82c7..4f4846bc 100644 --- a/samples/tutorial/run_sql_script.py +++ b/samples/tutorial/run_sql_script.py @@ -29,13 +29,6 @@ import os import sys -# default values -DEFAULT_MAIN_USER = "pythondemo" -DEFAULT_EDITION_USER = "pythoneditions" -DEFAULT_EDITION_NAME = "python_e1" -DEFAULT_CONNECT_STRING = "localhost/freepdb1" -DEFAULT_DRCP_CONNECT_STRING = "localhost/freepdb1:pooled" - def run_sql_script(conn, script_name, **kwargs): statement_parts = [] diff --git a/samples/tutorial/setup_tutorial.py b/samples/tutorial/setup_tutorial.py index 64ba3c2a..e3ee7986 100644 --- a/samples/tutorial/setup_tutorial.py +++ b/samples/tutorial/setup_tutorial.py @@ -30,7 +30,7 @@ import db_config import run_sql_script -# Connect using the System User ID and password +# Connect using the tutorial username and password con = oracledb.connect( user=db_config.user, password=db_config.pw, dsn=db_config.dsn ) diff --git a/samples/tutorial/solutions/query_pandas.py b/samples/tutorial/solutions/query_pandas.py index 6b6716a5..ee7a2b81 100644 --- a/samples/tutorial/solutions/query_pandas.py +++ b/samples/tutorial/solutions/query_pandas.py @@ -35,21 +35,33 @@ user=db_config.user, password=db_config.pw, dsn=db_config.dsn ) -# Get an OracleDataFrame +# Get a python-oracledb DataFrame # Adjust arraysize to tune the query fetch performance odf = con.fetch_df_all( statement="select sal from emp order by empno", arraysize=100 ) # Get a Pandas DataFrame from the data -df = pyarrow.Table.from_arrays( - odf.column_arrays(), names=odf.column_names() -).to_pandas() +df = pyarrow.table(odf).to_pandas() -# Perform various Pandas operations on the DataFrame +# Perform various operations on the Pandas DataFrame print("\nSum:") print(df.sum()) print("\nMedian:") print(df.median()) + +# Double everyone's salary and insert the Pandas DataFrame into Oracle Database + +df = df * 2 + +cur = con.cursor() +cur.executemany("insert into pdtab (sal) values (:1)", df) + +# Check the inserted data + +print("\nNew Salaries") +cur.execute("select * from pdtab") +res = cur.fetchall() +print(res) diff --git a/samples/tutorial/sql/setup_tutorial.sql b/samples/tutorial/sql/setup_tutorial.sql index 9d470228..716d4dc6 100644 --- a/samples/tutorial/sql/setup_tutorial.sql +++ b/samples/tutorial/sql/setup_tutorial.sql @@ -221,6 +221,22 @@ begin end; / +-- Table for query_pandas.py +begin + execute immediate 'drop table pdtab'; +exception +when others then + if sqlcode not in (-00942) then + raise; + end if; +end; +/ + +create table pdtab ( + sal number(7, 2)) +/ + + -- Table for json_insert.py (requires Oracle Database 21c or later) begin execute immediate 'drop table jtab'; diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index c905c152..f944cc03 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -49,13 +49,13 @@ cdef extern from "nanoarrow.h": void *private_data cdef struct ArrowSchema: - const char *format; - const char *name; - const char 
*metadata; + const char *format + const char *name + const char *metadata int64_t flags int64_t n_children - ArrowSchema** children - ArrowSchema* dictionary + ArrowSchema **children + ArrowSchema *dictionary void (*release)(ArrowSchema*) void *private_data @@ -64,6 +64,8 @@ cdef extern from "nanoarrow.h": NANOARROW_TYPE_BINARY NANOARROW_TYPE_DECIMAL128 NANOARROW_TYPE_DOUBLE + NANOARROW_TYPE_FIXED_SIZE_BINARY + NANOARROW_TYPE_FIXED_SIZE_LIST NANOARROW_TYPE_FLOAT NANOARROW_TYPE_INT8 NANOARROW_TYPE_INT64 @@ -89,6 +91,7 @@ cdef class ArrowArrayImpl: cdef: int32_t precision int32_t scale + int32_t fixed_size str name ArrowType arrow_type ArrowTimeUnit time_unit @@ -96,7 +99,13 @@ cdef class ArrowArrayImpl: ArrowArray *arrow_array ArrowSchema *arrow_schema ArrowType child_arrow_type + int child_element_size + cdef int _get_is_null(self, int64_t index, bint* is_null) except -1 + cdef int _get_list_info(self, int64_t index, ArrowArray* arrow_array, + int64_t* offset, int64_t* num_elements) except -1 + cdef bint _is_sparse_vector(self) except * + cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1 cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1 cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1 @@ -110,6 +119,20 @@ cdef class ArrowArrayImpl: array.array values) except -1 cdef int append_vector(self, array.array value) except -1 cdef int finish_building(self) except -1 + cdef int get_bool(self, int64_t index, bint* is_null, + bint* value) except -1 + cdef int get_bytes(self, int64_t index, bint* is_null, char **ptr, + ssize_t *num_bytes) except -1 + cdef bytes get_decimal(self, int64_t index, bint* is_null) + cdef int get_double(self, int64_t index, bint* is_null, + double* value) except -1 + cdef int get_float(self, int64_t index, bint* is_null, + float* value) except -1 + cdef int get_int64(self, int64_t index, bint* is_null, + int64_t* value) except -1 + cdef int get_length(self, int64_t* length) except -1 + cdef object get_sparse_vector(self, int64_t index, bint* is_null) + cdef object get_vector(self, int64_t index, bint* is_null) cdef int populate_from_array(self, ArrowSchema* schema, ArrowArray* array) except -1 cdef int populate_from_metadata(self, ArrowType arrow_type, str name, diff --git a/src/oracledb/arrow_impl.pyx b/src/oracledb/arrow_impl.pyx index 5983aa0c..a0aea3c3 100644 --- a/src/oracledb/arrow_impl.pyx +++ b/src/oracledb/arrow_impl.pyx @@ -33,10 +33,24 @@ cimport cpython from libc.stdint cimport uintptr_t -from libc.string cimport memcpy, strlen, strchr +from libc.string cimport memcpy, memset, strlen, strchr +from cpython cimport array + +import array from . 
import errors +cdef array.array float_template = array.array('f') +cdef array.array double_template = array.array('d') +cdef array.array int8_template = array.array('b') +cdef array.array uint8_template = array.array('B') +cdef array.array uint32_template + +if array.array("I").itemsize == 4: + uint32_template = array.array("I") +else: + uint32_template = array.array("L") + include "impl/arrow/utils.pyx" include "impl/arrow/array.pyx" include "impl/arrow/dataframe.pyx" diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index d2e97929..bc2a06f8 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -449,6 +449,8 @@ cdef class OracleMetadata: cdef int _set_arrow_type(self) except -1 cdef OracleMetadata copy(self) @staticmethod + cdef OracleMetadata from_arrow_array(ArrowArrayImpl array) + @staticmethod cdef OracleMetadata from_type(object typ) @staticmethod cdef OracleMetadata from_value(object value) @@ -700,6 +702,7 @@ cdef class BaseCursorImpl: bint cache_statement) except -1 cdef int _reset_bind_vars(self, uint32_t num_rows) except -1 cdef int _verify_var(self, object var) except -1 + cdef object bind_arrow_arrays(self, object cursor, list arrays) cdef int bind_many(self, object cursor, list parameters) except -1 cdef int bind_one(self, object cursor, object parameters) except -1 cdef object _finish_building_arrow_arrays(self) @@ -742,6 +745,8 @@ cdef class BaseVarImpl: cdef object _get_scalar_value(self, uint32_t pos) cdef int _on_reset_bind(self, uint32_t num_rows) except -1 cdef int _resize(self, uint32_t new_size) except -1 + cdef int _set_metadata_from_arrow_array(self, + ArrowArrayImpl array) except -1 cdef int _set_metadata_from_type(self, object typ) except -1 cdef int _set_metadata_from_value(self, object value, bint is_plsql) except -1 @@ -848,6 +853,9 @@ cdef class BindVar: ssize_t pos bint has_value + cdef int _create_var_from_arrow_array(self, object conn, + BaseCursorImpl cursor_impl, + ArrowArrayImpl array) except -1 cdef int _create_var_from_type(self, object conn, BaseCursorImpl cursor_impl, object value) except -1 @@ -969,6 +977,10 @@ cdef struct OracleData: OracleDataBuffer buffer +cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, + OracleData* data, + ArrowArrayImpl arrow_array, + ssize_t array_index) cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, OracleMetadata to_metadatda, OracleData* data, @@ -978,6 +990,9 @@ cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, OracleData* data, const char* encoding_errors, bint from_dbobject) +cdef object convert_python_to_oracle_data(OracleMetadata metadata, + OracleData* data, + object value) cdef int convert_vector_to_arrow(ArrowArrayImpl arrow_array, object vector) except -1 cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer) diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index 1004b891..a0553d4e 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -55,6 +55,8 @@ from .arrow_impl cimport ( NANOARROW_TYPE_BINARY, NANOARROW_TYPE_DECIMAL128, NANOARROW_TYPE_DOUBLE, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_FIXED_SIZE_LIST, NANOARROW_TYPE_FLOAT, NANOARROW_TYPE_INT8, NANOARROW_TYPE_INT64, @@ -65,6 +67,7 @@ from .arrow_impl cimport ( NANOARROW_TYPE_STRUCT, NANOARROW_TYPE_TIMESTAMP, NANOARROW_TYPE_UINT8, + ArrowArrayImpl, ) import array diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 31053a50..5564f650 100644 --- 
a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1798,7 +1798,7 @@ async def execute( await cursor.execute(statement, parameters) async def executemany( - self, statement: Union[str, None], parameters: Union[list, int] + self, statement: Union[str, None], parameters: Any ) -> None: """ Prepare a statement for execution against a database and then execute diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index 596b2e20..ec268e36 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -712,7 +712,7 @@ def execute( def executemany( self, statement: Optional[str], - parameters: Union[list, int], + parameters: Any, batcherrors: bool = False, arraydmlrowcounts: bool = False, ) -> None: @@ -733,6 +733,9 @@ def executemany( the number of iterations can be specified as an integer instead of needing to provide a list of empty mappings or sequences. + A data frame can also be supplied as the parameters, in which case the + Arrow arrays found within it are extracted and used as the parameters. + When true, the batcherrors parameter enables batch error support within Oracle and ensures that the call succeeds even if an exception takes place in one or more of the sequence of parameters. The errors can then @@ -965,7 +968,7 @@ async def execute( async def executemany( self, statement: Optional[str], - parameters: Union[list, int], + parameters: Any, batcherrors: bool = False, arraydmlrowcounts: bool = False, ) -> None: diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index b4bd9766..de0bb6d4 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -323,6 +323,7 @@ def _raise_not_supported(feature: str) -> None: ERR_ARROW_UNSUPPORTED_DATA_TYPE = 3030 ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT = 3031 ERR_ARROW_UNSUPPORTED_DATA_FORMAT = 3032 +ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT = 3033 # error numbers that result in DatabaseError ERR_TNS_ENTRY_NOT_FOUND = 4000 @@ -880,7 +881,8 @@ def _raise_not_supported(feature: str) -> None: ERR_WRONG_EXECUTEMANY_PARAMETERS_TYPE: ( '"parameters" argument should be a list of sequences or ' "dictionaries, or an integer specifying the number of " - "times to execute the statement" + "times to execute the statement, or an object implementing the Arrow " + "PyCapsule interface __arrow_c_stream__()" ), ERR_WRONG_NUMBER_OF_POSITIONAL_BINDS: ( "{expected_num} positional bind values are required but " @@ -901,6 +903,10 @@ def _raise_not_supported(feature: str) -> None: "Apache Arrow format does not support sparse vectors with flexible " "dimensions" ), + ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT: ( + 'conversion from list with child Arrow format "{schema_format}" to ' + "Oracle Database vector is not supported" + ), ERR_ARROW_UNSUPPORTED_DATA_FORMAT: ( 'conversion from Arrow format "{schema_format}" to Oracle Database ' "is not supported" diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 2d4527bc..6fd3f385 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -46,6 +46,82 @@ cdef class ArrowArrayImpl: ArrowSchemaRelease(self.arrow_schema) cpython.PyMem_Free(self.arrow_schema) + cdef int _get_is_null(self, int64_t index, bint* is_null) except -1: + """ + Returns whether or not the value at the specified index is null. 
+ """ + cdef: + ArrowBitmap *bitamp + int8_t as_bool + bitmap = ArrowArrayValidityBitmap(self.arrow_array) + if bitmap != NULL and bitmap.buffer.data != NULL: + as_bool = ArrowBitGet(bitmap.buffer.data, index) + is_null[0] = not as_bool + else: + is_null[0] = False + + cdef int _get_list_info(self, int64_t index, ArrowArray* arrow_array, + int64_t* offset, int64_t* num_elements) except -1: + """ + Returns the number of elements in the list stored in the array at the + given index. + """ + cdef: + int32_t end_offset + int32_t* offsets + offsets = arrow_array.buffers[1] + offset[0] = offsets[index] + if index >= arrow_array.length - 1: + end_offset = arrow_array.children[0].length + else: + end_offset = offsets[index + 1] + num_elements[0] = end_offset - offsets[index] + + cdef bint _is_sparse_vector(self) except *: + """ + Returns a boolean indicating if the schema refers to a sparse vector. + This requires a structure containing the keys for number of dimensions, + indices and values. + """ + cdef: + ArrowSchemaView view + ArrowSchema *schema + if self.arrow_schema.n_children != 3: + return False + schema = self.arrow_schema.children[0] + _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) + if view.type != NANOARROW_TYPE_INT64 \ + or schema.name != b"num_dimensions": + return False + schema = self.arrow_schema.children[1] + _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) + if view.type != NANOARROW_TYPE_LIST or schema.name != b"indices": + return False + _check_nanoarrow(ArrowSchemaViewInit(&view, schema.children[0], NULL)) + if view.type != NANOARROW_TYPE_UINT32: + return False + schema = self.arrow_schema.children[2] + _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) + if view.type != NANOARROW_TYPE_LIST or schema.name != b"values": + return False + _check_nanoarrow(ArrowSchemaViewInit(&view, schema.children[0], NULL)) + self._set_child_arrow_type(view.type) + return True + + cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1: + """ + Set the child Arrow type and the corresponding element size in bytes. + """ + self.child_arrow_type = child_arrow_type + if child_arrow_type == NANOARROW_TYPE_DOUBLE: + self.child_element_size = sizeof(double) + elif child_arrow_type == NANOARROW_TYPE_FLOAT: + self.child_element_size = sizeof(float) + elif child_arrow_type == NANOARROW_TYPE_INT8: + self.child_element_size = sizeof(int8_t) + elif child_arrow_type == NANOARROW_TYPE_UINT8: + self.child_element_size = sizeof(uint8_t) + cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1: """ Sets the time unit and the corresponding factor. 
@@ -120,19 +196,17 @@ cdef class ArrowArrayImpl: float *as_float int8_t as_bool int64_t index + bint is_null uint8_t *ptr void* temp - ArrowBitmap *bitamp if array is None: array = self index = array.arrow_array.length - 1 - bitmap = ArrowArrayValidityBitmap(array.arrow_array) - if bitmap != NULL and bitmap.buffer.data != NULL: - as_bool = ArrowBitGet(bitmap.buffer.data, index) - if not as_bool: - self.append_null() - return 0 - if array.arrow_type in (NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP): + array._get_is_null(index, &is_null) + if is_null: + self.append_null() + elif array.arrow_type in (NANOARROW_TYPE_INT64, + NANOARROW_TYPE_TIMESTAMP): data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_int64 = data_buffer.data self.append_int64(as_int64[index]) @@ -250,6 +324,192 @@ cdef class ArrowArrayImpl: _check_nanoarrow(ArrowArrayFinishBuildingDefault(self.arrow_array, NULL)) + cdef int get_bool(self, int64_t index, bint* is_null, + bint* value) except -1: + """ + Return boolean at the specified index from the Arrow array. + """ + cdef uint8_t *ptr + self._get_is_null(index, is_null) + if not is_null[0]: + ptr = self.arrow_array.buffers[1] + value[0] = ArrowBitGet(ptr, index) + + cdef int get_bytes(self, int64_t index, bint* is_null, char **ptr, + ssize_t *num_bytes) except -1: + """ + Return bytes at the specified index from the Arrow array. + """ + cdef: + int64_t start_offset, end_offset + int64_t *as_in64 + int32_t *as_int32 + char *source_ptr + self._get_is_null(index, is_null) + if not is_null[0]: + if self.arrow_type == NANOARROW_TYPE_FIXED_SIZE_BINARY: + source_ptr = self.arrow_array.buffers[1] + start_offset = index * self.fixed_size + end_offset = start_offset + self.fixed_size + elif self.arrow_type in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_STRING + ): + source_ptr = self.arrow_array.buffers[2] + as_int32 = self.arrow_array.buffers[1] + start_offset = as_int32[index] + end_offset = as_int32[index + 1] + else: + source_ptr = self.arrow_array.buffers[2] + as_int64 = self.arrow_array.buffers[1] + start_offset = as_int64[index] + end_offset = as_int64[index + 1] + ptr[0] = source_ptr + start_offset + num_bytes[0] = end_offset - start_offset + + cdef bytes get_decimal(self, int64_t index, bint* is_null): + """ + Return bytes corresponding to the decimal value. + """ + cdef: + ArrowDecimal decimal + ArrowBuffer buf + uint8_t *ptr + self._get_is_null(index, is_null) + if not is_null[0]: + ptr = self.arrow_array.buffers[1] + ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + ArrowDecimalSetBytes(&decimal, ptr + index * 16) + ArrowBufferInit(&buf) + try: + _check_nanoarrow(ArrowDecimalAppendDigitsToBuffer( + &decimal, &buf + )) + return buf.data[:buf.size_bytes] + finally: + ArrowBufferReset(&buf) + + cdef int get_double(self, int64_t index, bint* is_null, + double* value) except -1: + """ + Return a double value at the specified index from the Arrow array. + """ + cdef double* ptr + self._get_is_null(index, is_null) + if not is_null[0]: + ptr = self.arrow_array.buffers[1] + value[0] = ptr[index] + + cdef int get_float(self, int64_t index, bint* is_null, + float* value) except -1: + """ + Return a float value at the specified index from the Arrow array. + """ + cdef float* ptr + self._get_is_null(index, is_null) + if not is_null[0]: + ptr = self.arrow_array.buffers[1] + value[0] = ptr[index] + + cdef int get_int64(self, int64_t index, bint* is_null, + int64_t* value) except -1: + """ + Return an int64_t value at the specified index from the Arrow array. 
+ """ + cdef int64_t* ptr + self._get_is_null(index, is_null) + if not is_null[0]: + ptr = self.arrow_array.buffers[1] + value[0] = ptr[index] + + cdef int get_length(self, int64_t* length) except -1: + """ + Return the number of rows in the array. + """ + length[0] = self.arrow_array.length + + cdef object get_sparse_vector(self, int64_t index, bint* is_null): + """ + Return a sparse vector value at the specified index from the Arrow + array. + """ + cdef: + int64_t num_dimensions, offset, num_elements + array.array indices, values + ArrowArray *arrow_array + uint32_t* uint32_ptr + int64_t* int64_ptr + char *source_buf + self._get_is_null(index, is_null) + if not is_null[0]: + + # get the number of dimensions from the sparse vector + int64_ptr = self.arrow_array.children[0].buffers[1] + num_dimensions = int64_ptr[index] + + # get the indices from the sparse vector + arrow_array = self.arrow_array.children[1] + self._get_list_info(index, arrow_array, &offset, &num_elements) + indices = array.clone(uint32_template, num_elements, False) + uint32_ptr = arrow_array.children[0].buffers[1] + memcpy(indices.data.as_voidptr, &uint32_ptr[offset], + num_elements * sizeof(uint32_t)) + + # get the values from the sparse vector + arrow_array = self.arrow_array.children[2] + self._get_list_info(index, arrow_array, &offset, &num_elements) + source_buf = arrow_array.children[0].buffers[1] + \ + offset * self.child_element_size + if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + values = array.clone(float_template, num_elements, False) + elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + values = array.clone(double_template, num_elements, False) + elif self.child_arrow_type == NANOARROW_TYPE_INT8: + values = array.clone(int8_template, num_elements, False) + elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + values = array.clone(uint8_template, num_elements, False) + else: + errors._raise_err(errors.ERR_UNEXPECTED_DATA, + data=self.child_arrow_type) + memcpy(values.data.as_voidptr, source_buf, + num_elements * self.child_element_size) + return (num_dimensions, indices, values) + + cdef object get_vector(self, int64_t index, bint* is_null): + """ + Return a vector value at the specified index from the Arrow array. + """ + cdef: + int64_t offset, end_offset, num_elements + ArrowBuffer *offsets_buffer + array.array result + int32_t *as_int32 + char *source_buf + self._get_is_null(index, is_null) + if not is_null[0]: + if self.arrow_type == NANOARROW_TYPE_FIXED_SIZE_LIST: + offset = index * self.fixed_size + num_elements = self.fixed_size + else: + self._get_list_info(index, self.arrow_array, &offset, + &num_elements) + source_buf = self.arrow_array.children[0].buffers[1] + \ + offset * self.child_element_size + if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + result = array.clone(float_template, num_elements, False) + elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + result = array.clone(double_template, num_elements, False) + elif self.child_arrow_type == NANOARROW_TYPE_INT8: + result = array.clone(int8_template, num_elements, False) + elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + result = array.clone(uint8_template, num_elements, False) + else: + errors._raise_err(errors.ERR_UNEXPECTED_DATA, + data=self.child_arrow_type) + memcpy(result.data.as_voidptr, source_buf, + num_elements * self.child_element_size) + return result + @classmethod def from_arrow_array(cls, obj): """ @@ -276,46 +536,53 @@ cdef class ArrowArrayImpl: """ Populate the array from another array. 
""" - cdef str schema_format + cdef ArrowSchemaView schema_view ArrowSchemaMove(schema, self.arrow_schema) ArrowArrayMove(array, self.arrow_array) - schema_format = schema.format.decode() + memset(&schema_view, 0, sizeof(ArrowSchemaView)) + _check_nanoarrow( + ArrowSchemaViewInit(&schema_view, self.arrow_schema, NULL) + ) + self.arrow_type = schema_view.type self.name = schema.name.decode() - if schema_format == "u": - self.arrow_type = NANOARROW_TYPE_STRING - elif schema_format == "U": - self.arrow_type = NANOARROW_TYPE_LARGE_STRING - elif schema_format == "z": - self.arrow_type = NANOARROW_TYPE_BINARY - elif schema_format == "Z": - self.arrow_type = NANOARROW_TYPE_LARGE_BINARY - elif schema_format == "g": - self.arrow_type = NANOARROW_TYPE_DOUBLE - elif schema_format == "f": - self.arrow_type = NANOARROW_TYPE_FLOAT - elif schema_format == "l": - self.arrow_type = NANOARROW_TYPE_INT64 - elif schema_format == "tss:": - self.arrow_type = NANOARROW_TYPE_TIMESTAMP - self._set_time_unit(NANOARROW_TIME_UNIT_SECOND) - elif schema_format == "tsm:": - self.arrow_type = NANOARROW_TYPE_TIMESTAMP - self._set_time_unit(NANOARROW_TIME_UNIT_MILLI) - elif schema_format == "tsu:": - self.arrow_type = NANOARROW_TYPE_TIMESTAMP - self._set_time_unit(NANOARROW_TIME_UNIT_MICRO) - elif schema_format == "tsn:": - self.arrow_type = NANOARROW_TYPE_TIMESTAMP - self._set_time_unit(NANOARROW_TIME_UNIT_NANO) - elif schema_format.startswith("d:"): - self.arrow_type = NANOARROW_TYPE_DECIMAL128 - self.precision, self.scale = \ - [int(s) for s in schema_format[2:].split(",")] - elif schema_format == "b": - self.arrow_type = NANOARROW_TYPE_BOOL - else: + self.precision = schema_view.decimal_precision + self.scale = schema_view.decimal_scale + self.fixed_size = schema_view.fixed_size + if schema_view.type == NANOARROW_TYPE_TIMESTAMP: + self._set_time_unit(schema_view.time_unit) + elif schema_view.type in ( + NANOARROW_TYPE_FIXED_SIZE_LIST, + NANOARROW_TYPE_LIST + ): + _check_nanoarrow( + ArrowSchemaViewInit( + &schema_view, self.arrow_schema.children[0], NULL + ) + ) + self._set_child_arrow_type(schema_view.type) + elif schema_view.type not in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_BOOL, + NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_DOUBLE, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_FLOAT, + NANOARROW_TYPE_INT64, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING, + NANOARROW_TYPE_STRING, + ) and not ( + schema_view.type == NANOARROW_TYPE_STRUCT + and self._is_sparse_vector() + ): errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_FORMAT, - schema_format=schema_format) + schema_format=schema.format.decode()) + if self.child_arrow_type != 0 and self.child_element_size == 0: + errors._raise_err( + errors.ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT, + schema_format=schema.children[0].format.decode() + ) + cdef int populate_from_metadata(self, ArrowType arrow_type, str name, int8_t precision, int8_t scale, @@ -327,8 +594,8 @@ cdef class ArrowArrayImpl: cdef ArrowType storage_type = arrow_type self.arrow_type = arrow_type self._set_time_unit(time_unit) + self._set_child_arrow_type(child_arrow_type) self.name = name - self.child_arrow_type = child_arrow_type if arrow_type == NANOARROW_TYPE_TIMESTAMP: storage_type = NANOARROW_TYPE_INT64 diff --git a/src/oracledb/impl/arrow/utils.pyx b/src/oracledb/impl/arrow/utils.pyx index 4e572b2c..1d5857b6 100644 --- a/src/oracledb/impl/arrow/utils.pyx +++ b/src/oracledb/impl/arrow/utils.pyx @@ -66,47 +66,60 @@ cdef extern from "nanoarrow.c": cdef struct ArrowError: pass + 
cdef struct ArrowSchemaView: + ArrowType type + ArrowType storage_type + int32_t fixed_size + int32_t decimal_precision + int32_t decimal_scale + ArrowTimeUnit time_unit + cdef struct ArrowStringView: const char* data int64_t size_bytes cdef ArrowErrorCode NANOARROW_OK - ArrowErrorCode ArrowArrayAllocateChildren(ArrowArray *array, + ArrowErrorCode ArrowArrayAllocateChildren(ArrowArray* arrow_array, int64_t n_children) - ArrowErrorCode ArrowArrayAppendBytes(ArrowArray* array, + ArrowErrorCode ArrowArrayAppendBytes(ArrowArray* arrow_array, ArrowBufferView value) - ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* array, + ArrowErrorCode ArrowArrayAppendDecimal(ArrowArray* arrow_array, const ArrowDecimal* value) - ArrowErrorCode ArrowArrayAppendDouble(ArrowArray* array, double value) - ArrowErrorCode ArrowArrayAppendInt(ArrowArray* array, int64_t value) - ArrowErrorCode ArrowArrayAppendNull(ArrowArray* array, int64_t n) - ArrowBuffer* ArrowArrayBuffer(ArrowArray* array, int64_t i) - ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* array, + ArrowErrorCode ArrowArrayAppendDouble(ArrowArray* arrow_array, + double value) + ArrowErrorCode ArrowArrayAppendInt(ArrowArray* arrow_array, int64_t value) + ArrowErrorCode ArrowArrayAppendNull(ArrowArray* arrow_array, int64_t n) + ArrowBuffer* ArrowArrayBuffer(ArrowArray* arrow_array, int64_t i) + ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* arrow_array, ArrowError* error) - ArrowErrorCode ArrowArrayFinishElement(ArrowArray *array) - ArrowErrorCode ArrowArrayInitFromSchema(ArrowArray *array, + ArrowErrorCode ArrowArrayFinishElement(ArrowArray* arrow_array) + ArrowErrorCode ArrowArrayInitFromSchema(ArrowArray* arrow_array, ArrowSchema *schema, ArrowError *error) - ArrowErrorCode ArrowArrayInitFromType(ArrowArray* array, + ArrowErrorCode ArrowArrayInitFromType(ArrowArray* arrow_array, ArrowType storage_type) void ArrowArrayMove(ArrowArray* src, ArrowArray* dst) - void ArrowArrayRelease(ArrowArray *array) - ArrowErrorCode ArrowArrayReserve(ArrowArray* array, + void ArrowArrayRelease(ArrowArray* arrow_array) + ArrowErrorCode ArrowArrayReserve(ArrowArray* arrow_array, int64_t additional_size_elements) - ArrowErrorCode ArrowArrayStartAppending(ArrowArray* array) + ArrowErrorCode ArrowArrayStartAppending(ArrowArray* arrow_array) void ArrowArrayStreamRelease(ArrowArrayStream *array_stream) - ArrowBitmap* ArrowArrayValidityBitmap(ArrowArray* array) + ArrowBitmap* ArrowArrayValidityBitmap(ArrowArray* arrow_array) ArrowErrorCode ArrowArrayViewInitFromArray(ArrowArrayView* array_view, - ArrowArray* array) + ArrowArray* arrow_array) ArrowErrorCode ArrowBasicArrayStreamInit(ArrowArrayStream* array_stream, ArrowSchema* schema, int64_t n_arrays) void ArrowBasicArrayStreamSetArray(ArrowArrayStream* array_stream, - int64_t i, ArrowArray* array) + int64_t i, ArrowArray* arrow_array) int8_t ArrowBitGet(const uint8_t* bits, int64_t i) ArrowBufferAllocator ArrowBufferDeallocator(ArrowBufferDeallocatorCallback, void *private_data) + void ArrowBufferInit(ArrowBuffer* buffer) + void ArrowBufferReset(ArrowBuffer* buffer) + ArrowErrorCode ArrowDecimalAppendDigitsToBuffer(const ArrowDecimal* decimal, + ArrowBuffer* buffer) void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, int32_t precision, int32_t scale) void ArrowDecimalSetBytes(ArrowDecimal *decimal, const uint8_t* value) @@ -134,6 +147,9 @@ cdef extern from "nanoarrow.c": int32_t decimal_scale) int64_t ArrowSchemaToString(const ArrowSchema* schema, char* out, int64_t n, char recursive) + 
ArrowErrorCode ArrowSchemaViewInit(ArrowSchemaView* schema_view, + const ArrowSchema* schema, + ArrowError* error) cdef int _check_nanoarrow(int code) except -1: """ diff --git a/src/oracledb/impl/base/bind_var.pyx b/src/oracledb/impl/base/bind_var.pyx index a3896255..f754cc01 100644 --- a/src/oracledb/impl/base/bind_var.pyx +++ b/src/oracledb/impl/base/bind_var.pyx @@ -32,6 +32,23 @@ @cython.freelist(20) cdef class BindVar: + cdef int _create_var_from_arrow_array(self, object conn, + BaseCursorImpl cursor_impl, + ArrowArrayImpl array) except -1: + """ + Creates a variable given an Arrow array. + """ + cdef: + BaseVarImpl var_impl + int64_t length + var_impl = cursor_impl._create_var_impl(conn) + array.get_length(&length) + var_impl.num_elements = length + var_impl._set_metadata_from_arrow_array(array) + var_impl._arrow_array = array + var_impl._finalize_init() + self.var_impl = var_impl + cdef int _create_var_from_type(self, object conn, BaseCursorImpl cursor_impl, object value) except -1: diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index dbda323c..fdd62f76 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -29,6 +29,76 @@ # form returned by the decoders to an appropriate Python value. #------------------------------------------------------------------------------ +cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, + OracleData* data, + ArrowArrayImpl arrow_array, + ssize_t array_index): + """ + Converts the value stored in Arrow format to an OracleData structure. + """ + cdef: + SparseVectorImpl sparse_impl + int seconds, useconds + ArrowType arrow_type + int64_t int64_value + OracleRawBytes* rb + tuple sparse_info + bytes temp_bytes + + arrow_type = metadata._arrow_type + if arrow_type == NANOARROW_TYPE_INT64: + arrow_array.get_int64(array_index, &data.is_null, &int64_value) + if not data.is_null: + temp_bytes = str(int64_value).encode() + convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + return temp_bytes + elif arrow_type == NANOARROW_TYPE_DOUBLE: + arrow_array.get_double(array_index, &data.is_null, + &data.buffer.as_double) + elif arrow_type == NANOARROW_TYPE_FLOAT: + arrow_array.get_float(array_index, &data.is_null, + &data.buffer.as_float) + elif arrow_type == NANOARROW_TYPE_BOOL: + arrow_array.get_bool(array_index, &data.is_null, &data.buffer.as_bool) + elif arrow_type in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_STRING, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING + ): + rb = &data.buffer.as_raw_bytes + arrow_array.get_bytes(array_index, &data.is_null, &rb.ptr, + &rb.num_bytes) + elif arrow_type == NANOARROW_TYPE_TIMESTAMP: + arrow_array.get_int64(array_index, &data.is_null, &int64_value) + if not data.is_null: + seconds = int64_value // arrow_array.time_factor + useconds = int64_value % arrow_array.time_factor + if arrow_array.time_factor == 1_000: + useconds *= 1_000 + elif arrow_array.time_factor == 1_000_000_000: + useconds //= 1_000 + return EPOCH_DATE + cydatetime.timedelta_new(0, seconds, useconds) + elif arrow_type == NANOARROW_TYPE_DECIMAL128: + temp_bytes = arrow_array.get_decimal(array_index, &data.is_null) + if not data.is_null: + temp_bytes = temp_bytes[:-arrow_array.scale] + b"." 
+ \ + temp_bytes[-arrow_array.scale:] + convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + return temp_bytes + elif arrow_type in (NANOARROW_TYPE_LIST, NANOARROW_TYPE_FIXED_SIZE_LIST): + return arrow_array.get_vector(array_index, &data.is_null) + elif arrow_type == NANOARROW_TYPE_STRUCT: + sparse_info = arrow_array.get_sparse_vector(array_index, &data.is_null) + if sparse_info is not None: + sparse_impl = SparseVectorImpl.__new__(SparseVectorImpl) + sparse_impl.num_dimensions = sparse_info[0] + sparse_impl.indices = sparse_info[1] + sparse_impl.values = sparse_info[2] + return PY_TYPE_SPARSE_VECTOR._from_impl(sparse_impl) + + cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer): """ Converts a DATE, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE or TIMESTAMP @@ -209,6 +279,15 @@ cdef object convert_raw_to_python(OracleDataBuffer *buffer): return rb.ptr[:rb.num_bytes] +cdef int convert_bytes_to_oracle_data(OracleDataBuffer *buffer, + bytes value) except -1: + """ + Converts Python bytes to the format required by the OracleDataBuffer. + """ + cdef OracleRawBytes *rb = &buffer.as_raw_bytes + cpython.PyBytes_AsStringAndSize(value, &rb.ptr, &rb.num_bytes) + + cdef object convert_str_to_python(OracleDataBuffer *buffer, uint8_t csfrm, const char* encoding_errors): """ @@ -440,6 +519,43 @@ cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, output_type=to_metadata.dbtype.name) +cdef object convert_python_to_oracle_data(OracleMetadata metadata, + OracleData* data, + object value): + """ + Converts a Python value to the OracleData structure. The object returned is + any temporary object that is required to be retained (if any). + """ + cdef: + uint8_t ora_type_num = metadata.dbtype._ora_type_num + bytes temp_bytes + data.is_null = value is None + if data.is_null: + return None + elif ora_type_num in (ORA_TYPE_NUM_VARCHAR, + ORA_TYPE_NUM_CHAR, + ORA_TYPE_NUM_LONG): + if metadata.dbtype._csfrm == CS_FORM_IMPLICIT: + temp_bytes = ( value).encode() + else: + temp_bytes = ( value).encode(ENCODING_UTF16) + convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + return temp_bytes + elif ora_type_num in (ORA_TYPE_NUM_RAW, ORA_TYPE_NUM_LONG_RAW): + convert_bytes_to_oracle_data(&data.buffer, value) + elif ora_type_num in (ORA_TYPE_NUM_NUMBER, ORA_TYPE_NUM_BINARY_INTEGER): + if isinstance(value, bool): + return b'1' if value is True else b'0' + return ( cpython.PyObject_Str(value)).encode() + elif ora_type_num == ORA_TYPE_NUM_BINARY_FLOAT: + data.buffer.as_float = value + elif ora_type_num == ORA_TYPE_NUM_BINARY_DOUBLE: + data.buffer.as_double = value + elif ora_type_num == ORA_TYPE_NUM_BOOLEAN: + data.buffer.as_bool = value + return value + + cdef int convert_vector_to_arrow(ArrowArrayImpl arrow_array, object vector) except -1: """ diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index cc827b87..5805fdae 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -422,6 +422,7 @@ cdef class BaseCursorImpl: """ Internal method for preparing a statement for execution multiple times. 
""" + cdef DataFrameImpl df_impl # prepare statement, if necessary if statement is None and self.statement is None: @@ -441,6 +442,12 @@ cdef class BaseCursorImpl: num_execs = len(parameters) if parameters: self.bind_many(cursor, parameters) + elif isinstance(parameters, PY_TYPE_DATAFRAME): + df_impl = parameters._impl + num_execs = self.bind_arrow_arrays(cursor, df_impl.arrays) + elif hasattr(parameters, "__arrow_c_stream__"): + df_impl = DataFrameImpl.from_arrow_stream(parameters) + num_execs = self.bind_arrow_arrays(cursor, df_impl.arrays) else: errors._raise_err(errors.ERR_WRONG_EXECUTEMANY_PARAMETERS_TYPE) @@ -482,6 +489,28 @@ cdef class BaseCursorImpl: var_arraysize=var.num_elements, required_arraysize=self.arraysize) + cdef object bind_arrow_arrays(self, object cursor, list arrays): + """ + Internal method for binding Arrow arrays. The number of elements in the + array is returned for use by the caller. + """ + cdef: + ArrowArrayImpl array + int64_t num_rows + BindVar bind_var + ssize_t i + conn = cursor.connection + array = arrays[0] + array.get_length(&num_rows) + self._reset_bind_vars(num_rows) + self.bind_vars = [] + for i, array in enumerate(arrays): + bind_var = BindVar.__new__(BindVar) + bind_var.pos = i + 1 + bind_var._create_var_from_arrow_array(conn, self, array) + self.bind_vars.append(bind_var) + return num_rows + cdef int bind_many(self, object cursor, list parameters) except -1: """ Internal method used for binding multiple rows of data. diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 765dbe29..45c647a3 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -123,6 +123,46 @@ cdef class OracleMetadata: metadata.vector_flags = self.vector_flags return metadata + @staticmethod + cdef OracleMetadata from_arrow_array(ArrowArrayImpl array): + """ + Returns a new OracleMetadata instance with attributes set from an Arrow + array. 
+ """ + cdef OracleMetadata metadata = OracleMetadata.__new__(OracleMetadata) + metadata.name = array.name + if array.arrow_type in (NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_INT64): + metadata.dbtype = DB_TYPE_NUMBER + elif array.arrow_type == NANOARROW_TYPE_STRING: + metadata.dbtype = DB_TYPE_VARCHAR + elif array.arrow_type in (NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_FIXED_SIZE_BINARY): + metadata.dbtype = DB_TYPE_RAW + elif array.arrow_type == NANOARROW_TYPE_FLOAT: + metadata.dbtype = DB_TYPE_BINARY_FLOAT + elif array.arrow_type == NANOARROW_TYPE_DOUBLE: + metadata.dbtype = DB_TYPE_BINARY_DOUBLE + elif array.arrow_type == NANOARROW_TYPE_BOOL: + metadata.dbtype = DB_TYPE_BOOLEAN + elif array.arrow_type == NANOARROW_TYPE_TIMESTAMP: + metadata.dbtype = DB_TYPE_TIMESTAMP + elif array.arrow_type == NANOARROW_TYPE_LARGE_STRING: + metadata.dbtype = DB_TYPE_LONG + elif array.arrow_type == NANOARROW_TYPE_LARGE_BINARY: + metadata.dbtype = DB_TYPE_LONG_RAW + elif array.arrow_type in (NANOARROW_TYPE_LIST, + NANOARROW_TYPE_STRUCT, + NANOARROW_TYPE_FIXED_SIZE_LIST): + metadata.dbtype = DB_TYPE_VECTOR + else: + errors._raise_err(errors.ERR_UNEXPECTED_DATA, + data=array.arrow_type) + metadata._arrow_type = array.arrow_type + metadata.precision = array.precision + metadata.scale = array.scale + return metadata + @staticmethod cdef OracleMetadata from_type(object typ): """ diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index e3ff1716..149d907e 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -359,6 +359,13 @@ cdef class BaseVarImpl: self.metadata.buffer_size = 0 self.metadata._finalize_init() + cdef int _set_metadata_from_arrow_array(self, + ArrowArrayImpl array) except -1: + """ + Sets the type and size of the variable given an Arrow Array. + """ + self.metadata = OracleMetadata.from_arrow_array(array) + cdef int _set_metadata_from_type(self, object typ) except -1: """ Sets the type and size of the variable given a Python type. diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index d5d95ae8..39823a3b 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -94,6 +94,7 @@ cdef class ThickVarImpl(BaseVarImpl): """ Internal method that finalizes initialization of the variable. """ + cdef uint32_t i BaseVarImpl._finalize_init(self) if self.metadata.dbtype._native_num in ( DPI_NATIVE_TYPE_LOB, @@ -102,6 +103,9 @@ cdef class ThickVarImpl(BaseVarImpl): ): self._values = [None] * self.num_elements self._create_handle() + if self._arrow_array is not None: + for i in range(self.num_elements): + self._transform_element_from_arrow(i) cdef list _get_array_value(self): """ @@ -352,6 +356,63 @@ cdef class ThickVarImpl(BaseVarImpl): cpython.PyList_SET_ITEM(return_value, i, element_value) return return_value + cdef int _transform_element_from_arrow(self, uint32_t pos): + """ + Transforms a single element from an Arrow array to the value required + by ODPI-C. 
+ """ + cdef: + dpiData *data = &self._data[pos] + dpiTimestamp *timestamp + uint32_t ora_type_num + OracleData ora_data + object value + value = convert_arrow_to_oracle_data(self.metadata, &ora_data, + self._arrow_array, pos) + data.isNull = ora_data.is_null + if not ora_data.is_null: + ora_type_num = self.metadata.dbtype.num + if ora_type_num == DPI_ORACLE_TYPE_NATIVE_DOUBLE: + data.value.asDouble = ora_data.buffer.as_double + elif ora_type_num == DPI_ORACLE_TYPE_NATIVE_FLOAT: + data.value.asFloat = ora_data.buffer.as_float + elif ora_type_num == DPI_ORACLE_TYPE_BOOLEAN: + data.value.asBoolean = ora_data.buffer.as_bool + elif ora_type_num in ( + DPI_ORACLE_TYPE_CHAR, + DPI_ORACLE_TYPE_LONG_NVARCHAR, + DPI_ORACLE_TYPE_LONG_VARCHAR, + DPI_ORACLE_TYPE_LONG_RAW, + DPI_ORACLE_TYPE_NCHAR, + DPI_ORACLE_TYPE_NUMBER, + DPI_ORACLE_TYPE_NVARCHAR, + DPI_ORACLE_TYPE_RAW, + DPI_ORACLE_TYPE_VARCHAR, + ): + if dpiVar_setFromBytes( + self._handle, + pos, + ora_data.buffer.as_raw_bytes.ptr, + ora_data.buffer.as_raw_bytes.num_bytes + ) < 0: + _raise_from_odpi() + elif ora_type_num in ( + DPI_ORACLE_TYPE_DATE, + DPI_ORACLE_TYPE_TIMESTAMP, + DPI_ORACLE_TYPE_TIMESTAMP_LTZ, + DPI_ORACLE_TYPE_TIMESTAMP_TZ, + ): + timestamp = &data.value.asTimestamp + memset(timestamp, 0, sizeof(data.value.asTimestamp)) + timestamp.year = cydatetime.PyDateTime_GET_YEAR(value) + timestamp.month = cydatetime.PyDateTime_GET_MONTH(value) + timestamp.day = cydatetime.PyDateTime_GET_DAY(value) + timestamp.hour = cydatetime.PyDateTime_DATE_GET_HOUR(value) + timestamp.minute = cydatetime.PyDateTime_DATE_GET_MINUTE(value) + timestamp.second = cydatetime.PyDateTime_DATE_GET_SECOND(value) + timestamp.fsecond = \ + cydatetime.PyDateTime_DATE_GET_MICROSECOND(value) * 1000 + cdef int _transform_element_to_arrow(self, uint32_t pos): """ Transforms a single element from the value supplied by ODPI-C to its diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index d56a4297..2ab9b57c 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -1328,16 +1328,28 @@ cdef class MessageWithData(Message): buf.write_ub4(0) # oaccolid cdef int _write_bind_params_column(self, WriteBuffer buf, - OracleMetadata metadata, - object value) except -1: + ThinVarImpl var_impl, + uint32_t offset) except -1: cdef: - uint8_t ora_type_num = metadata.dbtype._ora_type_num ThinDbObjectTypeImpl typ_impl BaseThinCursorImpl cursor_impl BaseThinLobImpl lob_impl + OracleMetadata metadata + uint8_t ora_type_num uint32_t num_bytes bytes temp_bytes - if value is None: + OracleData data + bint is_null + object value + metadata = var_impl.metadata + if var_impl._arrow_array is not None: + value = convert_arrow_to_oracle_data(metadata, &data, + var_impl._arrow_array, offset) + else: + value = convert_python_to_oracle_data(metadata, &data, + var_impl._values[offset]) + ora_type_num = metadata.dbtype._ora_type_num + if data.is_null: if ora_type_num == ORA_TYPE_NUM_BOOLEAN: buf.write_uint8(TNS_ESCAPE_CHAR) buf.write_uint8(1) @@ -1350,34 +1362,25 @@ cdef class MessageWithData(Message): buf.write_ub4(TNS_OBJ_TOP_LEVEL) # flags else: buf.write_uint8(0) - elif ora_type_num == ORA_TYPE_NUM_VARCHAR \ - or ora_type_num == ORA_TYPE_NUM_CHAR \ - or ora_type_num == ORA_TYPE_NUM_LONG: - if metadata.dbtype._csfrm == CS_FORM_IMPLICIT: - temp_bytes = ( value).encode() - else: - buf._caps._check_ncharset_id() - temp_bytes = ( value).encode(ENCODING_UTF16) - buf.write_bytes_with_length(temp_bytes) - elif 
ora_type_num == ORA_TYPE_NUM_RAW \ - or ora_type_num == ORA_TYPE_NUM_LONG_RAW: - buf.write_bytes_with_length(value) + elif ora_type_num in (ORA_TYPE_NUM_VARCHAR, + ORA_TYPE_NUM_CHAR, + ORA_TYPE_NUM_LONG, + ORA_TYPE_NUM_RAW, + ORA_TYPE_NUM_LONG_RAW): + buf._write_raw_bytes_and_length(data.buffer.as_raw_bytes.ptr, + data.buffer.as_raw_bytes.num_bytes) elif ora_type_num == ORA_TYPE_NUM_NUMBER \ or ora_type_num == ORA_TYPE_NUM_BINARY_INTEGER: - if isinstance(value, bool): - temp_bytes = b'1' if value is True else b'0' - else: - temp_bytes = ( cpython.PyObject_Str(value)).encode() - buf.write_oracle_number(temp_bytes) + buf.write_oracle_number(value) elif ora_type_num == ORA_TYPE_NUM_DATE \ or ora_type_num == ORA_TYPE_NUM_TIMESTAMP \ or ora_type_num == ORA_TYPE_NUM_TIMESTAMP_TZ \ or ora_type_num == ORA_TYPE_NUM_TIMESTAMP_LTZ: buf.write_oracle_date(value, metadata.dbtype._buffer_size_factor) elif ora_type_num == ORA_TYPE_NUM_BINARY_DOUBLE: - buf.write_binary_double(value) + buf.write_binary_double(data.buffer.as_double) elif ora_type_num == ORA_TYPE_NUM_BINARY_FLOAT: - buf.write_binary_float(value) + buf.write_binary_float(data.buffer.as_float) elif ora_type_num == ORA_TYPE_NUM_CURSOR: cursor_impl = value._impl if cursor_impl is None: @@ -1392,7 +1395,7 @@ cdef class MessageWithData(Message): buf.write_ub4(cursor_impl._statement._cursor_id) cursor_impl.statement = None elif ora_type_num == ORA_TYPE_NUM_BOOLEAN: - buf.write_bool(value) + buf.write_bool(data.buffer.as_bool) elif ora_type_num == ORA_TYPE_NUM_INTERVAL_DS: buf.write_interval_ds(value) elif ora_type_num == ORA_TYPE_NUM_INTERVAL_YM: @@ -1423,7 +1426,7 @@ cdef class MessageWithData(Message): first followed by any LONG values. """ cdef: - uint32_t num_elements, offset = self.offset + uint32_t i, num_elements, offset = self.offset bint found_long = False OracleMetadata metadata ThinVarImpl var_impl @@ -1436,15 +1439,14 @@ cdef class MessageWithData(Message): if var_impl.is_array: num_elements = var_impl.num_elements_in_array buf.write_ub4(num_elements) - for value in var_impl._values[:num_elements]: - self._write_bind_params_column(buf, metadata, value) + for i in range(num_elements): + self._write_bind_params_column(buf, var_impl, i) else: if not self.cursor_impl._statement._is_plsql \ and metadata.buffer_size > buf._caps.max_string_size: found_long = True continue - self._write_bind_params_column(buf, metadata, - var_impl._values[pos + offset]) + self._write_bind_params_column(buf, var_impl, pos + offset) if found_long: for bind_info in params: if bind_info._is_return_bind: @@ -1453,8 +1455,7 @@ cdef class MessageWithData(Message): metadata = var_impl.metadata if metadata.buffer_size <= buf._caps.max_string_size: continue - self._write_bind_params_column(buf, metadata, - var_impl._values[pos + offset]) + self._write_bind_params_column(buf, var_impl, pos + offset) cdef int postprocess(self) except -1: """ diff --git a/src/oracledb/thick_impl.pyx b/src/oracledb/thick_impl.pyx index 4c763d7d..72d52b62 100644 --- a/src/oracledb/thick_impl.pyx +++ b/src/oracledb/thick_impl.pyx @@ -66,6 +66,7 @@ from .base_impl cimport ( C_DEFAULTS, char_type, ConnectParamsImpl, + convert_arrow_to_oracle_data, convert_oracle_data_to_arrow, convert_vector_to_arrow, DbType, diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index b9c48bf7..e6d95475 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -109,8 +109,10 @@ from .base_impl cimport ( BindVar, Buffer, ConnectParamsImpl, + convert_arrow_to_oracle_data, 
convert_oracle_data_to_python, convert_oracle_data_to_arrow, + convert_python_to_oracle_data, convert_vector_to_arrow, convert_date_to_python, CS_FORM_IMPLICIT, diff --git a/utils/templates/connection.py b/utils/templates/connection.py index f137f3b3..f4818c96 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -1544,7 +1544,7 @@ async def execute( await cursor.execute(statement, parameters) async def executemany( - self, statement: Union[str, None], parameters: Union[list, int] + self, statement: Union[str, None], parameters: Any ) -> None: """ Prepare a statement for execution against a database and then execute From 4001b93c18038377c00dca49e35370953856f544 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:50:55 -0600 Subject: [PATCH 143/239] Doc improvements. --- doc/src/api_manual/async_cursor.rst | 31 +++++++-------- doc/src/api_manual/cursor.rst | 33 ++++++++-------- doc/src/api_manual/module.rst | 44 ++++++++++++++++++---- doc/src/user_guide/appendix_b.rst | 7 ++++ doc/src/user_guide/connection_handling.rst | 43 +++++++++++++++++++-- doc/src/user_guide/tuning.rst | 18 ++++----- 6 files changed, 126 insertions(+), 50 deletions(-) diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 487b8717..64876856 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -206,25 +206,25 @@ AsyncCursor Methods .. method:: AsyncCursor.fetchall() Fetches all (remaining) rows of a query result, returning them as a list of - tuples. An empty list is returned if no more rows are available. Note that - the cursor's ``arraysize`` attribute can affect the performance of this - operation, as internally reads from the database are done in batches - corresponding to ``arraysize``. + tuples. An empty list is returned if no more rows are available. An + exception is raised if the previous call to :meth:`AsyncCursor.execute()` + did not produce any result set or no call was issued yet. - An exception is raised if the previous call to - :meth:`AsyncCursor.execute()` did not produce any result set or no call - was issued yet. + Note that the cursor's :attr:`~AsyncCursor.arraysize` attribute can affect + the performance of this operation, as internally data is fetched in batches + of that size from the database. .. method:: AsyncCursor.fetchmany(size=cursor.arraysize) Fetches the next set of rows of a query result, returning a list of tuples. An empty list is returned if no more rows are available. Note that the - cursor's arraysize attribute can affect the performance of this operation. + cursor's :attr:`~AsyncCursor.arraysize` attribute can affect the + performance of this operation. The number of rows to fetch is specified by the parameter. If it is not - given, the cursor's arraysize attribute determines the number of rows to be - fetched. If the number of rows available to be fetched is fewer than the - amount requested, fewer rows will be returned. + given, the cursor's :attr:`~AsyncCursor.arraysize` attribute determines the + number of rows to be fetched. If the number of rows available to be fetched + is fewer than the amount requested, fewer rows will be returned. An exception is raised if the previous call to :meth:`AsyncCursor.execute()` did not produce any result set or no call @@ -457,14 +457,15 @@ AsyncCursor Attributes the performance of a query since it directly affects the number of network round trips between Python and the database. 
For methods like :meth:`AsyncCursor.fetchone()` and :meth:`AsyncCursor.fetchall()` it - does not change how many rows are returned to the application. For - :meth:`AsyncCursor.fetchmany()` it is the default number of rows to fetch. + affects internal behavior but does not change how many rows are returned to + the application. For :meth:`AsyncCursor.fetchmany()` it is the default + number of rows to fetch. The attribute is only used for tuning row and SODA document fetches from the database. It does not affect data inserts. - Due to the performance benefits, the default ``Cursor.arraysize`` is *100* - instead of the *1* that the Python DB API recommends. + Due to the performance benefits, the default ``arraysize`` is *100* instead + of the *1* that the Python DB API recommends. See :ref:`Tuning Fetch Performance ` for more information. diff --git a/doc/src/api_manual/cursor.rst b/doc/src/api_manual/cursor.rst index b6efe05b..87ef779f 100644 --- a/doc/src/api_manual/cursor.rst +++ b/doc/src/api_manual/cursor.rst @@ -212,13 +212,14 @@ Cursor Methods .. method:: Cursor.fetchall() Fetches all (remaining) rows of a query result, returning them as a list of - tuples. An empty list is returned if no more rows are available. Note that - the cursor's arraysize attribute can affect the performance of this - operation, as internally reads from the database are done in batches - corresponding to the arraysize. + tuples. An empty list is returned if no more rows are available. An + exception is raised if the previous call to :meth:`Cursor.execute()` did + not produce any result set or no call was issued yet. - An exception is raised if the previous call to :meth:`Cursor.execute()` - did not produce any result set or no call was issued yet. + Note that the cursor's :attr:`~Cursor.arraysize` attribute can affect the + performance of this operation, as internally data is fetched in batches of + that size from the database. See :ref:`Tuning Fetch Performance + `. See :ref:`fetching` for an example. @@ -226,12 +227,13 @@ Cursor Methods Fetches the next set of rows of a query result, returning a list of tuples. An empty list is returned if no more rows are available. Note that the - cursor's arraysize attribute can affect the performance of this operation. + cursor's :attr:`~Cursor.arraysize` attribute can affect the performance of + this operation. The number of rows to fetch is specified by the parameter. If it is not - given, the cursor's ``arraysize`` attribute determines the number of rows - to be fetched. If the number of rows available to be fetched is fewer than - the amount requested, fewer rows will be returned. + given, the cursor's :attr:`~Cursor.arraysize` attribute determines the + number of rows to be fetched. If the number of rows available to be fetched + is fewer than the amount requested, fewer rows will be returned. An exception is raised if the previous call to :meth:`Cursor.execute()` did not produce any result set or no call was issued yet. @@ -480,15 +482,16 @@ Cursor Attributes from SELECT statements and REF CURSORS. The value can drastically affect the performance of a query since it directly affects the number of network round trips between Python and the database. For methods like - :meth:`Cursor.fetchone()` and :meth:`Cursor.fetchall()` it does not change - how many rows are returned to the application. For - :meth:`Cursor.fetchmany()` it is the default number of rows to fetch. 
+ :meth:`Cursor.fetchone()` and :meth:`Cursor.fetchall()` it affects internal + behavior but does not change how many rows are returned to the + application. For :meth:`Cursor.fetchmany()` it is the default number of + rows to fetch. The attribute is only used for tuning row and SODA document fetches from the database. It does not affect data inserts. - Due to the performance benefits, the default ``Cursor.arraysize`` is *100* - instead of the *1* that the Python DB API recommends. + Due to the performance benefits, the default ``arraysize`` is *100* instead + of the *1* that the Python DB API recommends. See :ref:`Tuning Fetch Performance ` for more information. diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 3496c843..52a80bdb 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -412,8 +412,15 @@ Oracledb Methods The ``pool_name`` parameter is expected to be a string which specifies the name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This value is used in both python-oracledb Thin and Thick modes. - See :ref:`DRCP Pool Names `. + or later. This parameter can be used in both python-oracledb Thin and Thick + modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` + value in effect is *True*, it can only be used if the ``dsn`` parameter is + not specified. For Thick mode, you may prefer to set the Oracle Net + Services parameter `POOL_NAME `__ parameter in the + :ref:`easy connect string ` or + :ref:`connect descriptor `. See + :ref:`DRCP Pool Names `. If the ``handle`` parameter is specified, it must be of type OCISvcCtx\* and is only of use when embedding Python in an application (like @@ -771,8 +778,15 @@ Oracledb Methods The ``pool_name`` parameter is expected to be a string which specifies the name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This value is used in both python-oracledb Thin and Thick modes. - See :ref:`DRCP Pool Names `. + or later. This parameter can be used in both python-oracledb Thin and Thick + modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` + value in effect is *True*, it can only be used if the ``dsn`` parameter is + not specified. For Thick mode, you may prefer to set the Oracle Net + Services parameter `POOL_NAME `__ parameter in the + :ref:`easy connect string ` or + :ref:`connect descriptor `. See + :ref:`DRCP Pool Names `. The ``thick_mode_dsn_passthrough`` and ``handle`` parameters are ignored in python-oracledb Thin mode. @@ -1663,8 +1677,15 @@ Oracledb Methods The ``pool_name`` parameter is expected to be a string which specifies the name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This value is used in both python-oracledb Thin and Thick modes. - See :ref:`DRCP Pool Names `. + or later. This parameter can be used in both python-oracledb Thin and Thick + modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` + value in effect is *True*, it can only be used if the ``dsn`` parameter is + not specified. For Thick mode, you may prefer to set the Oracle Net + Services parameter `POOL_NAME `__ parameter in the + :ref:`easy connect string ` or + :ref:`connect descriptor `. See + :ref:`DRCP Pool Names `. 
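
   As a minimal sketch (the credentials, host, service name, and DRCP pool
   name below are placeholders, not part of this patch), requesting a
   specific DRCP pool from a standalone python-oracledb connection might look
   like:

   .. code-block:: python

       import getpass
       import oracledb

       userpwd = getpass.getpass("Enter password for hr: ")

       # server_type="pooled" requests a DRCP pooled server; pool_name selects
       # which DRCP pool to use (Oracle Database 23.4 or later); cclass lets
       # the pool's sessions be reused efficiently
       connection = oracledb.connect(
           user="hr",
           password=userpwd,
           host="dbhost.example.com",
           service_name="orclpdb",
           server_type="pooled",
           cclass="MYAPP",
           pool_name="MYPOOL",
       )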
If the ``handle`` parameter is specified, it must be of type OCISvcCtx\* and is only of use when embedding Python in an application (like @@ -2085,8 +2106,15 @@ Oracledb Methods The ``pool_name`` parameter is expected to be a string which specifies the name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This value is used in both python-oracledb Thin and Thick modes. - See :ref:`DRCP Pool Names `. + or later. This parameter can be used in both python-oracledb Thin and Thick + modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` + value in effect is *True*, it can only be used if the ``dsn`` parameter is + not specified. For Thick mode, you may prefer to set the Oracle Net + Services parameter `POOL_NAME `__ parameter in the + :ref:`easy connect string ` or + :ref:`connect descriptor `. See + :ref:`DRCP Pool Names `. The ``handle`` and ``thick_mode_dsn_passthrough`` parameters are ignored in python-oracledb Thin mode. diff --git a/doc/src/user_guide/appendix_b.rst b/doc/src/user_guide/appendix_b.rst index 68242bdc..191e0440 100644 --- a/doc/src/user_guide/appendix_b.rst +++ b/doc/src/user_guide/appendix_b.rst @@ -188,6 +188,13 @@ differs from the python-oracledb Thick mode in the following ways: * In python-oracledb Thin mode, the connection pool supports all the :ref:`connection mode privileges `. +* In python-oracledb Thick mode, when the ``thick_mode_dsn_passthrough`` value + in effect is *True*, the ``pool_name`` parameter can be used to specify a + DRCP pool name only if the ``dsn`` parameter is not set. If both of these + parameters are specified, then the ``pool_name`` parameter is ignored. In + python-oracledb Thin mode, both of these parameters can be set and the value + defined in the ``pool_name`` parameter will be used as the DRCP pool name. + Supported Database Data Types in Thin and Thick Modes ===================================================== diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 133c6742..016e7e6c 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -3115,9 +3115,9 @@ The overheads can impact ultimate scalability. **DRCP Pool Names** From Oracle Database 23ai, multiple DRCP pools can be created by setting a pool -name at DRCP pool creation time. Applications can then specifiy which DRCP pool -to use by passing the ``pool_name`` parameter during connection, or connection -pool, creation, for example: +name at DRCP pool creation time. Applications using python-oracledb Thin mode +can specify which DRCP pool to use by passing the ``pool_name`` parameter +during connection or connection pool creation, for example: .. code-block:: python @@ -3129,6 +3129,43 @@ pool, creation, for example: When specifying a pool name, you should still set a connection class name to allow efficient use of the pool's resources. +If you are using python-oracledb Thick mode and the +``thick_mode_dsn_passthrough`` value in effect is *True*, you can use the +``pool_name`` parameter only if the ``dsn`` parameter is not specified when +creating a standalone or pooled connection, for example: + +.. 
code-block:: python + + oracledb.init_oracle_client() + + pool = oracledb.create_pool(user="hr", password=userpwd, + host="localhost", service_name="orclpdb", + server_type="pooled", min=2, max=5, + increment=1, cclass="MYAPP", + pool_name="MYPOOL") + +If both the ``pool_name`` and ``dsn`` parameters are set when using Thick mode, +the ``pool_name`` parameter is ignored. + +For Thick mode, you may prefer to set the Oracle Net +Services parameter `POOL_NAME `__ parameter in the +:ref:`easy connect string ` or +:ref:`connect descriptor `, for example: + +.. code-block:: python + + oracledb.init_oracle_client() + + pool = oracledb.create_pool(user="hr", password=userpwd, + dsn="dbhost.example.com/orclpdb:pooled?pool_name=mypool", + min=2, max=5, increment=1, + cclass="MYAPP") + +You can also define the DRCP pool name with the +:ref:`ConnectParams class ` when using python-oracledb Thin or Thick +mode. See :ref:`usingconnparams`. + **Acquiring a DRCP Connection** Once DRCP has been enabled and the driver connection pool has been created with diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index 70d36ed1..de5c3e49 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -79,10 +79,15 @@ Some general tuning tips are: Tuning Fetch Performance ======================== -To tune queries, you can adjust python-oracledb's internal buffer sizes to -improve the speed of fetching rows across the network from the database, and to -optimize memory usage. This can reduce :ref:`round-trips ` which -helps performance and scalability. Tune "array fetching" with +To improve application performance and scalability you can adjust the sizes of +python-oracledb's internal query result buffers. Increasing the buffers can +reduce :ref:`round-trips ` to improve the overall speed of fetching +rows across the network from the database. The buffer sizes can be used to tune +the behavior of all python-oracledb :ref:`row fetching methods ` but +do not affect how many rows are returned to your application by those methods. +You should tune the buffers for optimal performance and memory usage. + +Tune "array fetching" with :attr:`Cursor.arraysize` and tune "row prefetching" with :attr:`Cursor.prefetchrows`. Set these before calling :meth:`Cursor.execute()`. The value used for prefetching can also be set in an @@ -92,11 +97,6 @@ separate, so increasing both settings will require more Python process memory. Queries that return LOBs and similar types will never prefetch rows, so the ``prefetchrows`` value is ignored in those cases. -The internal buffer sizes do not affect how or when rows are returned to your -application regardless of which :ref:`python-oracledb method ` is -used to fetch query results. They do not affect the minimum or maximum number -of rows returned by a query. - The difference between row prefetching and array fetching is when the internal buffering occurs. Internally python-oracledb performs separate "execute SQL statement" and "fetch data" steps. Prefetching allows query results to be From d014f86d77d7a88cb63719a4752e74d5c5e8f820 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:51:39 -0600 Subject: [PATCH 144/239] Simplify extended suite test conditions. 
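
As a sketch of the intended usage (the test class and method names here are
hypothetical), the new helpers in tests/ext/test_env.py replace the repeated
unittest.skipUnless() conditions with decorators, mirroring the changes below:

    import test_env

    @test_env.skip_unless_thin_mode()
    @test_env.skip_unless_run_long_tests()
    class TestCase(test_env.BaseAsyncTestCase):
        requires_connection = False

        def test_ext_example(self):
            "hypothetical long-running test that only runs in Thin mode"
            ...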
--- tests/ext/test_env.py | 24 +++++++++++++++++++- tests/ext/test_ext_1000_pool_shrink.py | 14 ++++-------- tests/ext/test_ext_1100_external_auth.py | 13 +++-------- tests/ext/test_ext_1500_pool_grow.py | 8 ++----- tests/ext/test_ext_1900_pool_shrink_async.py | 10 ++------ tests/ext/test_ext_2000_pool_grow_async.py | 12 +++------- tests/ext/test_ext_2100_bfile_type.py | 8 ++----- tests/ext/test_ext_2200_bfile_type_async.py | 12 +++------- tests/ext/test_ext_2500_config_cache.py | 6 +---- 9 files changed, 44 insertions(+), 63 deletions(-) diff --git a/tests/ext/test_env.py b/tests/ext/test_env.py index b2fa1129..a5c4b9fd 100644 --- a/tests/ext/test_env.py +++ b/tests/ext/test_env.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,6 +29,7 @@ import configparser import os +import unittest dir_name = os.path.dirname(os.path.dirname(__file__)) file_name = os.path.join(dir_name, os.path.basename(__file__)) @@ -73,3 +74,24 @@ def get_extended_config_str(name, fallback=None): return _extended_config.parser.get( _extended_config.section_name, name, fallback=fallback ) + + +def skip_unless_has_orapki(): + return unittest.skipUnless( + get_extended_config_bool("has_orapki"), + "extended configuration has_orapki is disabled", + ) + + +def skip_unless_local_database(): + return unittest.skipUnless( + get_extended_config_bool("local_database"), + "extended configuration local_database is disabled", + ) + + +def skip_unless_run_long_tests(): + return unittest.skipUnless( + get_extended_config_bool("run_long_tests"), + "extended configuration run_long_tests is disabled", + ) diff --git a/tests/ext/test_ext_1000_pool_shrink.py b/tests/ext/test_ext_1000_pool_shrink.py index 710d923b..58575b08 100644 --- a/tests/ext/test_ext_1000_pool_shrink.py +++ b/tests/ext/test_ext_1000_pool_shrink.py @@ -28,15 +28,11 @@ """ import time -import unittest import test_env -@unittest.skipUnless( - test_env.get_extended_config_bool("run_long_tests"), - "extended configuration run_long_tests is disabled", -) +@test_env.skip_unless_run_long_tests() class TestCase(test_env.BaseTestCase): def test_ext_1000(self): "E1000 - test pool timeout with simple acquire after waiting" @@ -62,7 +58,7 @@ def test_ext_1001(self): conn = pool.acquire() self.assertEqual(pool.opened, 3) - @unittest.skipUnless(test_env.get_is_thin(), "doesn't occur in thick mode") + @test_env.skip_unless_thin_mode() def test_ext_1002(self): "E1002 - test pool timeout shrinks to min on pool inactivity" pool = test_env.get_pool(min=3, max=10, increment=2, timeout=4) @@ -73,7 +69,7 @@ def test_ext_1002(self): time.sleep(6) self.assertEqual(pool.opened, 3) - @unittest.skipUnless(test_env.get_is_thin(), "doesn't occur in thick mode") + @test_env.skip_unless_thin_mode() def test_ext_1003(self): "E1003 - test pool timeout eliminates extra connections on inactivity" pool = test_env.get_pool(min=4, max=10, increment=4, timeout=3) @@ -85,7 +81,7 @@ def test_ext_1003(self): self.assertEqual(pool.opened, 5) del conns - @unittest.skipUnless(test_env.get_is_thin(), "doesn't occur in thick mode") + @test_env.skip_unless_thin_mode() def test_ext_1004(self): "E1004 - test pool max_lifetime_session on release" pool = test_env.get_pool( @@ -101,7 +97,7 @@ def 
test_ext_1004(self): time.sleep(2) self.assertEqual(pool.opened, 4) - @unittest.skipUnless(test_env.get_is_thin(), "doesn't occur in thick mode") + @test_env.skip_unless_thin_mode() def test_ext_1005(self): "E1005 - test pool max_lifetime_session on acquire" pool = test_env.get_pool( diff --git a/tests/ext/test_ext_1100_external_auth.py b/tests/ext/test_ext_1100_external_auth.py index 7ab7e890..1af4e9c1 100644 --- a/tests/ext/test_ext_1100_external_auth.py +++ b/tests/ext/test_ext_1100_external_auth.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,20 +33,13 @@ import os import subprocess import tempfile -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_extended_config_bool("has_orapki"), - "extended configuration has_orapki is disabled", -) -@unittest.skipIf( - test_env.get_is_thin(), - "thin mode doesn't support external authentication yet", -) +@test_env.skip_unless_thick_mode() +@test_env.skip_unless_has_orapki() class TestCase(test_env.BaseTestCase): alias_name = "ext_test_1100" user = "ext_test_1100_user" diff --git a/tests/ext/test_ext_1500_pool_grow.py b/tests/ext/test_ext_1500_pool_grow.py index c7185f59..4f9b2dd6 100644 --- a/tests/ext/test_ext_1500_pool_grow.py +++ b/tests/ext/test_ext_1500_pool_grow.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,15 +29,11 @@ """ import time -import unittest import test_env -@unittest.skipUnless( - test_env.get_extended_config_bool("run_long_tests"), - "extended configuration run_long_tests is disabled", -) +@test_env.skip_unless_run_long_tests() class TestCase(test_env.BaseTestCase): def test_ext_1500(self): "E1500 - test static pool grows back to the min after sessions killed" diff --git a/tests/ext/test_ext_1900_pool_shrink_async.py b/tests/ext/test_ext_1900_pool_shrink_async.py index 1cf2cfcd..7bfc38cd 100644 --- a/tests/ext/test_ext_1900_pool_shrink_async.py +++ b/tests/ext/test_ext_1900_pool_shrink_async.py @@ -29,18 +29,12 @@ """ import asyncio -import unittest import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) -@unittest.skipUnless( - test_env.get_extended_config_bool("run_long_tests"), - "extended configuration run_long_tests is disabled", -) +@test_env.skip_unless_thin_mode() +@test_env.skip_unless_run_long_tests() class TestCase(test_env.BaseAsyncTestCase): requires_connection = False diff --git a/tests/ext/test_ext_2000_pool_grow_async.py b/tests/ext/test_ext_2000_pool_grow_async.py index 93aa25b2..5addc777 100644 --- a/tests/ext/test_ext_2000_pool_grow_async.py +++ b/tests/ext/test_ext_2000_pool_grow_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -36,18 +36,12 @@ # ----------------------------------------------------------------------------- import asyncio -import unittest import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) -@unittest.skipUnless( - test_env.get_extended_config_bool("run_long_tests"), - "extended configuration run_long_tests is disabled", -) +@test_env.skip_unless_thin_mode() +@test_env.skip_unless_run_long_tests() class TestCase(test_env.BaseAsyncTestCase): requires_connection = False diff --git a/tests/ext/test_ext_2100_bfile_type.py b/tests/ext/test_ext_2100_bfile_type.py index ad9c0ec4..4c7f4acc 100644 --- a/tests/ext/test_ext_2100_bfile_type.py +++ b/tests/ext/test_ext_2100_bfile_type.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,16 +29,12 @@ import os import tempfile -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_extended_config_bool("local_database"), - "extended configuration local_database is disabled", -) +@test_env.skip_unless_local_database() class TestCase(test_env.BaseTestCase): dir_name = "EXT_TEST_2100_DIR" diff --git a/tests/ext/test_ext_2200_bfile_type_async.py b/tests/ext/test_ext_2200_bfile_type_async.py index b8543396..9d793d46 100644 --- a/tests/ext/test_ext_2200_bfile_type_async.py +++ b/tests/ext/test_ext_2200_bfile_type_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,19 +29,13 @@ import os import tempfile -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) -@unittest.skipUnless( - test_env.get_extended_config_bool("local_database"), - "extended configuration local_database is disabled", -) +@test_env.skip_unless_thin_mode() +@test_env.skip_unless_local_database() class TestCase(test_env.BaseAsyncTestCase): dir_name = "EXT_TEST_2200_DIR" diff --git a/tests/ext/test_ext_2500_config_cache.py b/tests/ext/test_ext_2500_config_cache.py index c5da8c38..229545ab 100644 --- a/tests/ext/test_ext_2500_config_cache.py +++ b/tests/ext/test_ext_2500_config_cache.py @@ -29,16 +29,12 @@ """ import time -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_extended_config_bool("run_long_tests"), - "extended configuration run_long_tests is disabled", -) +@test_env.skip_unless_run_long_tests() class TestCase(test_env.BaseTestCase): def test_ext_2500(self): "E2500 - test config is cached" From f8341a13e8677224a869d9e049b8000abe09260b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:52:35 -0600 Subject: [PATCH 145/239] Added support for sessionless transactions available in Oracle Database 23ai. 
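
As a sketch of the new synchronous API added by this patch (the table name,
credentials, and connect string are placeholders; Oracle Database 23.6 or
later is required), a sessionless transaction can be started on one
connection, suspended, and later resumed and committed on another:

    import getpass
    import oracledb

    userpwd = getpass.getpass("Password for hr: ")

    # begin a sessionless transaction, do some work, then suspend it so the
    # connection can be released while the transaction remains open
    with oracledb.connect(user="hr", password=userpwd,
                          dsn="localhost/orclpdb") as connection1:
        txn_id = connection1.begin_sessionless_transaction(timeout=120)
        cursor = connection1.cursor()
        cursor.execute("insert into mytab (name) values ('John')")
        connection1.suspend_sessionless_transaction()

    # resume the same transaction on a different connection and commit it
    with oracledb.connect(user="hr", password=userpwd,
                          dsn="localhost/orclpdb") as connection2:
        connection2.resume_sessionless_transaction(txn_id)
        connection2.commit()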
--- doc/src/api_manual/async_connection.rst | 88 +++ doc/src/api_manual/async_cursor.rst | 23 +- doc/src/api_manual/connection.rst | 88 +++ doc/src/api_manual/cursor.rst | 20 +- doc/src/release_notes.rst | 2 + doc/src/user_guide/appendix_a.rst | 4 + doc/src/user_guide/asyncio.rst | 34 + doc/src/user_guide/txn_management.rst | 308 ++++++++ src/oracledb/__init__.py | 10 +- src/oracledb/base_impl.pxd | 8 + src/oracledb/connection.py | 221 +++++- src/oracledb/constants.py | 8 +- src/oracledb/cursor.py | 24 + src/oracledb/errors.py | 19 + src/oracledb/impl/thick/connection.pyx | 45 ++ src/oracledb/impl/thick/cursor.pyx | 4 + src/oracledb/impl/thick/odpi.pxd | 15 + src/oracledb/impl/thin/capabilities.pyx | 3 +- src/oracledb/impl/thin/connection.pyx | 149 ++++ src/oracledb/impl/thin/constants.pxi | 15 + src/oracledb/impl/thin/messages/base.pyx | 54 ++ src/oracledb/impl/thin/messages/execute.pyx | 27 + src/oracledb/thin_impl.pyx | 3 + src/oracledb/utils.py | 30 + .../test_ext_2600_sessionless_transaction.py | 93 +++ ..._ext_2700_sessionless_transaction_async.py | 90 +++ tests/test_8700_sessionless_transaction.py | 658 +++++++++++++++++ ...test_8800_sessionless_transaction_async.py | 683 ++++++++++++++++++ tests/test_env.py | 7 + utils/templates/connection.py | 221 +++++- 30 files changed, 2883 insertions(+), 71 deletions(-) create mode 100644 tests/ext/test_ext_2600_sessionless_transaction.py create mode 100644 tests/ext/test_ext_2700_sessionless_transaction_async.py create mode 100644 tests/test_8700_sessionless_transaction.py create mode 100644 tests/test_8800_sessionless_transaction_async.py diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 8dd20964..5fd6f486 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -41,6 +41,38 @@ AsyncConnection Methods The exit point for the asynchronous connection as a context manager. This will close the connection and roll back any uncommitted transaction. +.. method:: AsyncConnection.begin_sessionless_transaction(transaction_id=None, \ + timeout=60, defer_round_trip=False) + + Begins a new sessionless transaction using the specified transaction + identifier. This method returns the transaction identifier specified by the + user or generated by python-oracledb. + + The ``transaction_id`` parameter should be of type string or bytes. If + specified, it represents a unique identifier for the transaction. If a + string is passed, then it will be UTF-8 encoded to bytes. If this value is + not specified, then python-oracledb generates a a random + `universally-unique identifier (UUID) `__ value when + ``AsyncConnection.begin_sessionless_transaction()`` is called. An example + is "36b8f84d-df4e-4d49-b662-bcde71a8764f". The user-chosen value cannot + exceed 64 bytes in length. + + The ``timeout`` parameter is the number of seconds that this transaction + can be resumed by a connection the next time that it is suspended. The + default value is *60* seconds. If a transaction is not resumed within this + specified duration, the transaction will be rolled back. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to start a transaction is to be sent immediately or with the + next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. + + See :ref:`sessionlesstxns`. + + .. 
versionadded:: 3.3.0 + .. method:: AsyncConnection.callfunc(name, return_type, parameters=None, \ keyword_parameters=None) @@ -292,6 +324,48 @@ AsyncConnection Methods .. versionadded:: 3.1.0 +.. method:: AsyncConnection.resume_sessionless_transaction(transaction_id, \ + timeout=60, defer_round_trip=False) + + Resumes an existing sessionless transaction using the specified + transaction identifier. This method returns the transaction identifier + used to resume the sessionless transaction. + + The ``transaction_id`` parameter should be a string or bytes value that + uniquely identifies an existing sessionless transaction that is to be + resumed. + + The ``timeout`` parameter is the number of seconds that the current + connection waits to resume a transaction if another connection is using it. + This timeout is only effective when the transaction is in use by another + connection. In this case, the current connection waits for the transaction + to be suspended within this timeout period. When ``defer_round_trip`` is + set to *False*, the wait happens in the + ``resume_sessionless_transaction()`` call itself, and the function blocks + until the transaction becomes available or the timeout expires. + When ``defer_round_trip`` is set to *True*, the resume is deferred and the + wait occurs at the time of the next database operation instead. At the + start of the wait period, if the transaction is not in use by any other + connection, the resume happens immediately. If the transaction remains in + use by the other connection after the timeout period, the error `ORA-25351 + `__ is raised. If + another connection completes the transaction, the error `ORA-24756 + `__ is raised. These + error messages are only thrown for non-RAC instances. For information on + using Oracle RAC, see + :ref:`Sessionless Transactions with Oracle RAC `. + The default value is *60* seconds. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to resume a transaction is to be sent immediately or with the + next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. + + See :ref:`sessionlesstxns`. + + .. versionadded:: 3.3.0 + .. method:: AsyncConnection.rollback() Rolls back any pending transaction. @@ -324,6 +398,20 @@ AsyncConnection Methods .. versionadded:: 2.4.0 +.. method:: AsyncConnection.suspend_sessionless_transaction() + + Suspends the currently active sessionless transaction immediately. + + This detaches the transaction from the connection, allowing it to be + resumed later with the transaction identifier that was specified during + creation of the sessionless transaction. Also, the timeout value defined in + :meth:`AsyncConnection.begin_sessionless_transaction()` comes into effect + and determines how long the transaction can stay suspended. + + See :ref:`sessionlesstxns`. + + .. versionadded:: 3.3.0 + .. method:: AsyncConnection.tpc_begin(xid, flags, timeout) Begins a Two-Phase Commit (TPC) on a global transaction using the specified diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 64876856..d0e345de 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -119,7 +119,8 @@ AsyncCursor Methods `__ ``with`` block. -.. method:: AsyncCursor.execute(statement, parameters=None, ** keyword_parameters) +.. 
method:: AsyncCursor.execute(statement, parameters=None, \ + suspend_on_success=False, ** keyword_parameters) Executes a statement against the database. See :ref:`sqlexecution`. @@ -144,6 +145,11 @@ AsyncCursor Methods that are not passed in during subsequent executions will retain the value passed in during the last execution that contained them. + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active sessionless + transaction will be suspended when ``execute()`` completes successfully. + See :ref:`suspendtxns`. + For maximum efficiency when reusing a statement, it is best to use the :meth:`AsyncCursor.setinputsizes()` method to specify the parameter types and sizes ahead of time; in particular, *None* is assumed to be a string of @@ -154,8 +160,12 @@ AsyncCursor Methods caller (so it can be used directly as an iterator over the rows in the cursor); otherwise, *None* is returned. + .. versionchanged:: 3.3.0 + + The ``suspend_on_success`` parameter was added. + .. method:: AsyncCursor.executemany(statement, parameters, batcherrors=False, \ - arraydmlrowcounts=False) + arraydmlrowcounts=False, suspend_on_success=False) Executes a SQL statement once using all bind value mappings or sequences found in the sequence parameters. This can be used to insert, update, or @@ -193,6 +203,11 @@ AsyncCursor Methods can only be True when executing an insert, update, delete, or merge statement. In all other cases, an error will be raised. + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active sessionless + transaction will be suspended when ``executemany()`` completes + successfully. See :ref:`suspendtxns`. + For maximum efficiency, it is best to use the :meth:`AsyncCursor.setinputsizes()` method to specify the parameter types and sizes ahead of time. In particular, the value *None* is assumed to be a @@ -203,6 +218,10 @@ AsyncCursor Methods Added support for passing data frames in the ``parameters`` parameter. + .. versionadded:: 3.3.0 + + The ``suspend_on_success`` parameter was added. + .. method:: AsyncCursor.fetchall() Fetches all (remaining) rows of a query result, returning them as a list of diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 33e4ca7f..debf85a3 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -46,6 +46,38 @@ Connection Methods .. dbapimethodextension:: +.. method:: Connection.begin_sessionless_transaction(transaction_id=None, \ + timeout=60, defer_round_trip=False) + + Begins a new sessionless transaction using the specified transaction + identifier. This method returns the transaction identifier specified by the + user or generated by python-oracledb. + + The ``transaction_id`` parameter should be of type string or bytes. If + specified, it represents a unique identifier for the transaction. If a + string is passed, then it will be UTF-8 encoded to bytes. If this value is + not specified, then python-oracledb generates a a random + `universally-unique identifier (UUID) `__ value when + ``Connection.begin_sessionless_transaction()`` is called. An example is + "36b8f84d-df4e-4d49-b662-bcde71a8764f". The user-chosen value cannot exceed + 64 bytes in length. + + The ``timeout`` parameter is the number of seconds that this transaction + can be resumed by a connection the next time that it is suspended. The + default value is *60* seconds. 
If a transaction is not resumed within this + specified duration, the transaction will be rolled back. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to start a transaction is to be sent immediately or with the + next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. + + See :ref:`sessionlesstxns`. + + .. versionadded:: 3.3.0 + .. method:: Connection.cancel() Breaks a long-running statement. @@ -277,6 +309,48 @@ Connection Methods .. dbapimethodextension:: +.. method:: Connection.resume_sessionless_transaction(transaction_id, \ + timeout=60, defer_round_trip=False) + + Resumes an existing sessionless transaction using the specified + transaction identifier. This method returns the transaction identifier + used to resume the sessionless transaction. + + The ``transaction_id`` parameter should be a string or bytes value that + uniquely identifies an existing sessionless transaction that is to be + resumed. + + The ``timeout`` parameter is the number of seconds that the current + connection waits to resume a transaction if another connection is using it. + This timeout is only effective when the transaction is in use by another + connection. In this case, the current connection waits for the transaction + to be suspended within this timeout period. When ``defer_round_trip`` is + set to *False*, the wait happens in the + ``resume_sessionless_transaction()`` call itself, and the function blocks + until the transaction becomes available or the timeout expires. + When ``defer_round_trip`` is set to *True*, the resume is deferred and the + wait occurs at the time of the next database operation instead. At the + start of the wait period, if the transaction is not in use by any other + connection, the resume happens immediately. If the transaction remains in + use by the other connection after the timeout period, the error `ORA-25351 + `__ is raised. If + another connection completes the transaction, the error `ORA-24756 + `__ is raised. These + error messages are only thrown for non-RAC instances. For information on + using Oracle RAC, see + :ref:`Sessionless Transactions with Oracle RAC `. + The default value is *60* seconds. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to resume a transaction is to be sent immediately or with the + next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. + + See :ref:`sessionlesstxns`. + + .. versionadded:: 3.3.0 + .. method:: Connection.rollback() Rolls back any pending transactions. @@ -419,6 +493,20 @@ Connection Methods explicitly closed using the function :meth:`~Connection.close()`, the subscription will not be deregistered in the database. +.. method:: Connection.suspend_sessionless_transaction() + + Suspends the currently active sessionless transaction immediately. + + This detaches the transaction from the connection, allowing it to be + resumed later with the transaction identifier that was specified during + creation of the sessionless transaction. Also, the timeout value defined in + :meth:`Connection.begin_sessionless_transaction()` comes into effect and + determines how long the transaction can stay suspended. + + See :ref:`sessionlesstxns`. + + .. 
versionadded:: 3.3.0 + .. method:: Connection.tpc_begin(xid, flags, timeout) Begins a Two-Phase Commit (TPC) on a global transaction using the specified diff --git a/doc/src/api_manual/cursor.rst b/doc/src/api_manual/cursor.rst index 87ef779f..edd15dec 100644 --- a/doc/src/api_manual/cursor.rst +++ b/doc/src/api_manual/cursor.rst @@ -121,7 +121,8 @@ Cursor Methods will be unusable from this point forward; an Error exception will be raised if any operation is attempted with the cursor. -.. method:: Cursor.execute(statement, parameters=[], ** keyword_parameters) +.. method:: Cursor.execute(statement, parameters=[], suspend_on_success=False, \ + ** keyword_parameters) Executes a statement against the database. See :ref:`sqlexecution`. @@ -146,6 +147,11 @@ Cursor Methods that are not passed in during subsequent executions will retain the value passed in during the last execution that contained them. + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active sessionless + transaction will be suspended when ``execute()`` completes successfully. + See :ref:`suspendtxns`. + For maximum efficiency when reusing a statement, it is best to use the :meth:`Cursor.setinputsizes()` method to specify the parameter types and sizes ahead of time; in particular, *None* is assumed to be a string of @@ -156,12 +162,16 @@ Cursor Methods caller (so it can be used directly as an iterator over the rows in the cursor); otherwise, *None* is returned. + .. versionchanged:: 3.3.0 + + The ``suspend_on_success`` parameter was added. + .. note:: The DB API definition does not define the return value of this method. .. method:: Cursor.executemany(statement, parameters, batcherrors=False, \ - arraydmlrowcounts=False) + arraydmlrowcounts=False, suspend_on_success=False) Executes a SQL statement once using all bind value mappings or sequences found in the sequence parameters. This can be used to insert, update, or @@ -199,6 +209,11 @@ Cursor Methods can only be *True* when executing an insert, update, delete, or merge statement; in all other cases an error will be raised. + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active sessionless + transaction will be suspended when ``executemany()`` completes + successfully. See :ref:`suspendtxns`. + For maximum efficiency, it is best to use the :meth:`Cursor.setinputsizes()` method to specify the bind value types and sizes. In particular, if the type is not explicitly specified, the value @@ -208,6 +223,7 @@ Cursor Methods .. versionchanged:: 3.3.0 Added support for passing data frames in the ``parameters`` parameter. + The ``suspend_on_success`` parameter was added. .. method:: Cursor.fetchall() diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e6c2ec20..613c5356 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -40,6 +40,8 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Added support for Oracle Database 23ai :ref:`Sessionless Transactions + `. #) Changes to :ref:`data frame ` support: - Added support for binding data frames to :meth:`Cursor.executemany()` and diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 350987ae..2867fad2 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -319,6 +319,10 @@ For more details see :ref:`driverdiff` and :ref:`upgrading83`. 
- Yes - Yes - Yes + * - Oracle Database 23ai Sessionless Transactions (see :ref:`sessionlesstxns`) + - Yes + - Yes + - No * - Two-phase Commit (TPC) (see :ref:`tpc`) - Yes - Yes diff --git a/doc/src/user_guide/asyncio.rst b/doc/src/user_guide/asyncio.rst index c808aff8..7ce0c212 100644 --- a/doc/src/user_guide/asyncio.rst +++ b/doc/src/user_guide/asyncio.rst @@ -311,6 +311,40 @@ is recommended to use autocommit mode only for the last DML statement in the sequence of operations. Unnecessarily committing causes extra database load, and can destroy transactional consistency. +.. _sessionlesstxnasync: + +Managing Sessionless Transactions Using Asynchronous Methods +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +When :meth:`AsyncConnection.begin_sessionless_transaction()` is executed using +a user-chosen or python-oracledb generated transaction identifier, a +sessionless transaction is started. Once started, all the SQL statements are +executed as a part of that sessionless transaction. Use +:meth:`AsyncConnection.suspend_sessionless_transaction()` to explicitly +suspend an active transaction once the database operations have been executed. +This releases the connection which can be used by another user while the +transaction remains open and can be resumed later by a connection using +:meth:`AsyncConnection.resume_sessionless_transaction()`. The methods +:meth:`AsyncConnection.commit()` and :meth:`AsyncConnection.rollback()` can be +used to explicitly commit or roll back a transaction. For example: + +.. code-block:: python + + async def main(): + txn_id = b"new_sessionless_txn" + # Begin and suspend a sessionless transaction in a connection + async with oracledb.connect_async(user="hr", password=userpwd, + dsn="localhost/orclpdb") as connection1: + await connection1.begin_sessionless_transaction(transaction_id=txn_id, timeout=120) + await connection1.execute("INSERT INTO mytab (name) VALUES ('John')") + await connection1.suspend_sessionless_transaction() + + # Resume the sessionless transaction in another connection + async with oracledb.connect_async(user="hr", password=userpwd, + dsn="localhost/orclpdb") as connection2: + await connection2.resume_sessionless_transaction(transaction_id=txn_id) + await connection2.commit() + .. _pipelining: Pipelining Database Operations diff --git a/doc/src/user_guide/txn_management.rst b/doc/src/user_guide/txn_management.rst index 84e035f9..a63ddc66 100644 --- a/doc/src/user_guide/txn_management.rst +++ b/doc/src/user_guide/txn_management.rst @@ -30,6 +30,10 @@ When `Data Definition Language (DDL) `__ statements such as CREATE are executed, Oracle Database will always perform a commit. +This chapter covers python-oracledb's synchronous programming model. For +information on managing transactions in asynchronous connections, see +:ref:`txnasync`. + Autocommitting ============== @@ -89,7 +93,311 @@ Note that in order to make use of global (distributed) transactions, the attributes :attr:`Connection.internal_name` and :attr:`Connection.external_name` attributes must be set. +.. _distributedtxns: + Distributed Transactions ======================== For information on distributed transactions, see the chapter :ref:`tpc`. + +.. _sessionlesstxns: + +Sessionless Transactions +======================== + +A Sessionless Transaction is a transaction that can be suspended and resumed +during its lifecycle. It breaks the coupling between transactions and +connections, that is, a transaction is no longer tied to a specific +connection. 
This enables connections to be released for use by other users +while a transaction remains open and can be resumed later. With Sessionless +Transactions, you do not need to use a transaction manager since Oracle +Database manages coordination of transactions. + +Sessionless Transactions are supported in both python-oracledb Thin and +:ref:`Thick ` modes. Oracle Database 23.6 (or later) is +required. For python-oracledb Thick mode, Oracle Client 23.6 (or later) is +additionally required. + +Each sessionless transaction is identified by a unique transaction identifier. +This can either be user-chosen or generated by python-oracledb. + +Sessionless Transactions are ideal for interactive applications with user +"think time". If one user starts a database transaction and then does not +perform database operations for some time (that is the "think time"), the +transaction can be suspended and the database connection can be released +and used by another user. When the first user is ready to continue work, a +database connection can be obtained and their transaction resumed. Without +Sessionless Transactions, both users would need their own connections for the +entire duration of their interaction with the system, including during any +think time. + +With python-oracledb, you can: + +- Start a sessionless transaction on a database connection by specifying a + unique transaction identifier +- Perform database operations in the transaction +- Suspend the transaction from the connection after the database operations + are completed +- Resume the transaction on the same connection or a different connection + using the same transaction identifier +- Commit or roll back the transaction on the same connection or on a different + connection if the transaction has been suspended by the previous connection + +.. _sessionlesstxnswithrac: + +You can use Sessionless Transactions on all Oracle Databases including with +`Oracle Real Application Clusters (RAC) `__. For RAC +databases, you can start and suspend a sessionless transaction on one RAC +database instance and resume it on another RAC database instance. To commit or +rollback a sessionless transaction, it must be active on only one of the RAC +instances. If multiple RAC instances have this sessionless transaction active, +the database server waits for the `DISTRIBUTED_LOCK_TIMEOUT +`__ time to allow other instances to suspend this +transaction before proceeding with a commit or rollback. + +Note that there are some constraints when using Sessionless Transactions. +You cannot rollback to a savepoint of the sessionless transaction in a +previous connection. Sessionless Transactions cannot be promoted to +:ref:`distributedtxns`. Session states such as all parameters set by +ALTER SESSION, temporary LOB states, and PL/SQL states are not carried over to +the new connection. For more information on other constraints, see +`Restrictions for Sessionless Transactions `__. + +For more information on Sessionless Transactions, see `Developing Applications +with Sessionless Transactions `__ in the Oracle +Database Development Guide. + +.. _starttxns: + +Starting Sessionless Transactions +--------------------------------- + +To start a sessionless transaction, use +:meth:`Connection.begin_sessionless_transaction()`, for example: + +.. 
code-block:: python + + txn_id = b"new_sessionless_txn" + connection.begin_sessionless_transaction(transaction_id=txn_id, timeout=120, + defer_round_trip=False) + +You can pass the following parameters to +:meth:`Connection.begin_sessionless_transaction()`: + +- ``transaction_id``: This parameter is the unique identifier of the + transaction which is used to manage the transaction from start to end. If you + do not specify the ``transaction_id`` value, a unique `universally-unique + identifier (UUID) `__ is + generated and returned by + :meth:`~Connection.begin_sessionless_transaction`. An example is + "36b8f84d-df4e-4d49-b662-bcde71a8764f". + +- ``timeout``: This parameter determines the duration that this transaction + can be resumed by a connection the next time that it is suspended. The + default value is *60* seconds. If the transaction is not resumed within + the specified duration, the transaction will be rolled back. + +- ``defer_round_trip``: This parameter determines whether the request to start + a sessionless transaction should be sent immediately or with the next + database operation. The default value is *False*, that is, the request is + sent immediately. When set to *True*, the request is sent with the next + database operation on the connection which reduces the number of + :ref:`round-trips ` to the database. + +Once a transaction has been started, all SQL statements are executed as a part +of it. + +A sessionless transaction is active from the time it is newly started or +resumed to the time it is suspended, committed, or rolled back. + +.. _suspendtxns: + +Suspending Sessionless Transactions +----------------------------------- + +After you execute database operations, an active sessionless transaction can +be explicitly suspended, or optionally can be automatically suspended on the +next database operation if an execute operation completes successfully. This +detaches the transaction from the current connection. + +**Explicitly Suspending Transactions** + +To explicitly suspend an active transaction, use +:meth:`Connection.suspend_sessionless_transaction()`: + +.. code-block:: python + + connection.suspend_sessionless_transaction() + +This suspends the active transaction. This transaction is no longer tied to +the connection. + +**Suspending a Transaction After a Database Operation** + +To automatically suspend an active transaction after the next database +operation, set the ``suspend_on_success`` parameter to *True* in +:meth:`Cursor.execute()` or :meth:`Cursor.executemany()`. This setting +suspends the transaction if the executed statement or PL/SQL block completes +successfully. This helps reduce the number of +:ref:`round-trips ` to the database which in turn improves +performance. For example: + +.. code-block:: python + + # Suspend after execute + cursor.execute( + "insert into slt_table (name) values ('John')", + suspend_on_success=True + ) + +Once the transaction is suspended, further database operations are not part of +that transaction until it is resumed. + +If the execute operation throws an exception, then the transaction will not be +suspended. + +If there are no active Sessionless Transactions, this parameter is ignored. + +.. _resumetxns: + +Resuming Sessionless Transactions +--------------------------------- + +To resume a suspended sessionless transaction, use +:meth:`Connection.resume_sessionless_transaction()`, for example: + +.. 
code-block:: python + + connection.resume_sessionless_transaction(transaction_id=txn_id, timeout=80, + defer_round_trip=False) + +The ``transaction_id`` parameter must contain the identifier of an existing +transaction. + +You can set the following parameters in +:meth:`Connection.resume_sessionless_transaction()`: + +- ``timeout``: This parameter specifies how long this connection should wait to + resume a sessionless transaction if it is currently in use by another + connection. In this case, the current connection waits for the transaction to + be suspended within this timeout period. If the transaction remains in use by + the other connection after the timeout period, the error `ORA-25351 + `__ is raised. If another + connection completes the transaction, the error `ORA-24756 + `__ is raised. These + error messages are only thrown for non-RAC instances. For information on + using Oracle RAC, see :ref:`Sessionless Transactions with Oracle RAC + `. + +- ``defer_round_trip``: This parameter determines whether the request to resume + a sessionless transaction should be sent immediately or with the next + database operation. The default value is *False*, that is, the request is + sent immediately. When set to *True*, the request is sent with the next + database operation on the connection which reduce the number of + :ref:`round-trips ` to the database. + +Once resumed, the transaction is considered to be active and database +operations are part of that transaction. + +.. _commitorrollbacktxns: + +Committing or Rolling Back Sessionless Transactions +--------------------------------------------------- + +A new or resumed transaction can be committed using :meth:`Connection.commit()` +and rolled back using :meth:`Connection.rollback()`. + +Once a transaction has been committed or rolled back, it ends, and cannot be +resumed, suspended, or used for additional database operations. + +.. _examplesessionlesstxns: + +Example of Using Sessionless Transactions +----------------------------------------- + +An example of using Sessionless Transactions is: + +.. code-block:: python + + import oracledb + + connection1 = oracledb.connect(user="hr", password=userpwd, dsn="localhost/orclpdb") + + txn_id = b"sessionless_txnid" + cursor1 = connection1.cursor() + + cursor1.execute("create table sessionlessTxnTab (id number, name varchar2(50))") + + # Start a new sessionless transaction + connection1.begin_sessionless_transaction(transaction_id=txn_id, timeout=15) + + # Execute a database operation + cursor1.execute("insert into sessionlessTxnTab values(1, 'row1')") + + # Insert another row + cursor1.execute("insert into sessionlessTxnTab values(2, 'row2')") + + # Suspend the sessionless transaction + connection1.suspend_sessionless_transaction() + + result = cursor1.execute("select * from sessionlessTxnTab") + rows = result.fetchall() + print(rows) + connection1.close() + +In the above sample, the transaction is not committed before being suspended. +Hence the inserted data will not be visible and this prints ``[]`` as the +output. + +The transaction, txn_id, has a *15* second timeout in which it needs to be +resumed successfully on another connection. In the example below, a different +connection resumes the transaction. The same transaction identifier must be +used: + +.. 
code-block:: python + + # Resume the transaction in another connection + connection2 = oracledb.connect(user="hr", password=userpwd, dsn="localhost/orclpdb") + connection2.resume_sessionless_transaction(transaction_id=txn_id) + + cursor2 = connection2.cursor() + cursor2.execute("insert into sessionlessTxnTab values(3, 'row3')") + + connection2.commit() + + result = cursor2.execute("select * from sessionlessTxnTab") + rows = result.fetchall() + print(rows) + +This prints the following output (including the rows inserted in the first +code snippet):: + + [(1, 'row1'), (2, 'row2'), (3, 'row3')] + +.. _viewsessionlesstxns: + +Viewing Sessionless Transactions +-------------------------------- + +The Oracle Database `V$GLOBAL_TRANSACTION `__ view +displays information on the currently active transactions on the database +server. + +To view the active transaction in the current connection, you can use the +following query with `NVL() `__: + +.. code-block:: sql + + SELECT NVL(dbms_transaction.get_transaction_id, 'NULL transactionId') + FROM dual; + +The `GET_TRANSACTION_ID Function `__ of the +DBMS_TRANSACTION package returns the transaction identifier that is used in +the current connection. diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index 42e7e854..dacf46cf 100644 --- a/src/oracledb/__init__.py +++ b/src/oracledb/__init__.py @@ -81,6 +81,11 @@ NUMBER as NUMBER, ROWID as ROWID, STRING as STRING, + # flags for tpc_begin() + TPC_TXN_FLAGS_JOIN as TPC_BEGIN_JOIN, # noqa: F401 + TPC_TXN_FLAGS_NEW as TPC_BEGIN_NEW, # noqa: F401 + TPC_TXN_FLAGS_PROMOTE as TPC_BEGIN_PROMOTE, # noqa: F401 + TPC_TXN_FLAGS_RESUME as TPC_BEGIN_RESUME, # noqa: F401 ) from .enums import ( @@ -205,11 +210,6 @@ OPCODE_DROP as OPCODE_DROP, OPCODE_INSERT as OPCODE_INSERT, OPCODE_UPDATE as OPCODE_UPDATE, - # flags for tpc_begin() - TPC_BEGIN_JOIN as TPC_BEGIN_JOIN, - TPC_BEGIN_NEW as TPC_BEGIN_NEW, - TPC_BEGIN_PROMOTE as TPC_BEGIN_PROMOTE, - TPC_BEGIN_RESUME as TPC_BEGIN_RESUME, # flags for tpc_end() TPC_END_NORMAL as TPC_END_NORMAL, TPC_END_SUSPEND as TPC_END_SUSPEND, diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index bc2a06f8..cba7475c 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -180,6 +180,13 @@ cpdef enum: PURITY_NEW = 1 PURITY_SELF = 2 +cpdef enum: + TPC_TXN_FLAGS_JOIN = 0x00000002 + TPC_TXN_FLAGS_NEW = 0x00000001 + TPC_TXN_FLAGS_PROMOTE = 0x00000008 + TPC_TXN_FLAGS_RESUME = 0x00000004 + TPC_TXN_FLAGS_SESSIONLESS = 0x00000010 + cpdef enum: VECTOR_FORMAT_BINARY = 5 VECTOR_FORMAT_FLOAT32 = 2 @@ -667,6 +674,7 @@ cdef class BaseCursorImpl: public dict bind_vars_by_name public object warning public bint fetching_arrow + public bint suspend_on_success uint32_t _buffer_rowcount uint32_t _buffer_index uint32_t _fetch_array_size diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 5564f650..f6ae6629 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -42,7 +42,7 @@ from . import __name__ as MODULE_NAME -from . import base_impl, constants, driver_mode, errors, thick_impl, thin_impl +from . import base_impl, driver_mode, errors, thick_impl, thin_impl from . 
import pool as pool_module from .aq import AsyncQueue, Queue, MessageProperties from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType @@ -53,6 +53,7 @@ from .pipeline import Pipeline from .soda import SodaDatabase from .subscr import Subscription +from .utils import normalize_sessionless_transaction_id # named tuple used for representing global transactions Xid = collections.namedtuple( @@ -121,6 +122,38 @@ def autocommit(self, value: bool) -> None: self._verify_connected() self._impl.autocommit = value + def begin_sessionless_transaction( + self, + transaction_id: Optional[Union[str, bytes]] = None, + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Begins a new sessionless transaction. + + Parameters: + transaction_id (str or bytes, optional): A Transaction Identifier. + If None, a random transaction_id will be generated. + timeout (int, optional): Timeout value in seconds. + Must be a positive integer. Defaults to 60 if not provided. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. + + Returns: + bytes: The normalized transaction_id used for the transaction. + """ + self._verify_connected() + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not isinstance(timeout, int) or timeout <= 0: + raise TypeError("timeout must be a positive integer") + + self._impl.begin_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + @property def call_timeout(self) -> int: """ @@ -474,6 +507,44 @@ def outputtypehandler(self, value: Callable) -> None: self._verify_connected() self._impl.outputtypehandler = value + def resume_sessionless_transaction( + self, + transaction_id: Union[str, bytes], + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Resumes an existing sessionless transaction using the given + transaction_id. + + Parameters: + transaction_id (str or bytes): A Transaction Identifier that + uniquely identifies the sessionless transaction to be + resumed. This parameter is mandatory. + timeout (int, optional): Timeout in seconds for the resumed + transaction. Must be a positive integer. Defaults to 60. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. + + Returns: + bytes: The normalized transaction_id used to resume the + sessionless transaction. 
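+
+        Example (a sketch; connection and table names are illustrative):
+            txn_id = conn1.begin_sessionless_transaction(timeout=60)
+            conn1.cursor().execute("insert into mytab values (1)")
+            conn1.suspend_sessionless_transaction()
+            # later, on the same or a different connection
+            conn2.resume_sessionless_transaction(txn_id)
+            conn2.commit()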
+ """ + self._verify_connected() + if transaction_id is None: + raise ValueError("transaction_id is required for resuming") + + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not (isinstance(timeout, int) and timeout >= 0): + raise TypeError("timeout must be a non-negative integer") + + self._impl.resume_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + @property def sdu(self) -> int: """ @@ -936,24 +1007,24 @@ def startup( def subscribe( self, - namespace: int = constants.SUBSCR_NAMESPACE_DBCHANGE, - protocol: int = constants.SUBSCR_PROTO_CALLBACK, + namespace: int = oracledb.SUBSCR_NAMESPACE_DBCHANGE, + protocol: int = oracledb.SUBSCR_PROTO_CALLBACK, callback: Optional[Callable] = None, timeout: int = 0, - operations: int = constants.OPCODE_ALLOPS, + operations: int = oracledb.OPCODE_ALLOPS, port: int = 0, - qos: int = constants.SUBSCR_QOS_DEFAULT, + qos: int = oracledb.SUBSCR_QOS_DEFAULT, ip_address: Optional[str] = None, - grouping_class: int = constants.SUBSCR_GROUPING_CLASS_NONE, + grouping_class: int = oracledb.SUBSCR_GROUPING_CLASS_NONE, grouping_value: int = 0, - grouping_type: int = constants.SUBSCR_GROUPING_TYPE_SUMMARY, + grouping_type: int = oracledb.SUBSCR_GROUPING_TYPE_SUMMARY, name: Optional[str] = None, client_initiated: bool = False, *, ipAddress: Optional[str] = None, - groupingClass: int = constants.SUBSCR_GROUPING_CLASS_NONE, + groupingClass: int = oracledb.SUBSCR_GROUPING_CLASS_NONE, groupingValue: int = 0, - groupingType: int = constants.SUBSCR_GROUPING_TYPE_SUMMARY, + groupingType: int = oracledb.SUBSCR_GROUPING_TYPE_SUMMARY, clientInitiated: bool = False, ) -> Subscription: """ @@ -1027,8 +1098,8 @@ def subscribe( new_name="ip_address", ) ip_address = ipAddress - if groupingClass != constants.SUBSCR_GROUPING_CLASS_NONE: - if grouping_class != constants.SUBSCR_GROUPING_CLASS_NONE: + if groupingClass != oracledb.SUBSCR_GROUPING_CLASS_NONE: + if grouping_class != oracledb.SUBSCR_GROUPING_CLASS_NONE: errors._raise_err( errors.ERR_DUPLICATED_PARAMETER, deprecated_name="groupingClass", @@ -1043,8 +1114,8 @@ def subscribe( new_name="grouping_value", ) grouping_value = groupingValue - if groupingType != constants.SUBSCR_GROUPING_TYPE_SUMMARY: - if grouping_type != constants.SUBSCR_GROUPING_TYPE_SUMMARY: + if groupingType != oracledb.SUBSCR_GROUPING_TYPE_SUMMARY: + if grouping_type != oracledb.SUBSCR_GROUPING_TYPE_SUMMARY: errors._raise_err( errors.ERR_DUPLICATED_PARAMETER, deprecated_name="groupingType", @@ -1079,6 +1150,19 @@ def subscribe( impl.subscribe(subscr, self._impl) return subscr + def suspend_sessionless_transaction(self) -> None: + """ + Suspends the currently active sessionless transaction. + + This temporarily detaches the transaction from the session, + allowing it to be resumed later using its transaction_id. 
+ + Returns: + None + """ + self._verify_connected() + self._impl.suspend_sessionless_transaction() + @property def tag(self) -> str: """ @@ -1102,7 +1186,7 @@ def tag(self, value: str) -> None: self._impl.tag = value def tpc_begin( - self, xid: Xid, flags: int = constants.TPC_BEGIN_NEW, timeout: int = 0 + self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ Begins a TPC (two-phase commit) transaction with the given transaction @@ -1112,10 +1196,10 @@ def tpc_begin( self._verify_connected() self._verify_xid(xid) if flags not in ( - constants.TPC_BEGIN_NEW, - constants.TPC_BEGIN_JOIN, - constants.TPC_BEGIN_RESUME, - constants.TPC_BEGIN_PROMOTE, + oracledb.TPC_BEGIN_NEW, + oracledb.TPC_BEGIN_JOIN, + oracledb.TPC_BEGIN_RESUME, + oracledb.TPC_BEGIN_PROMOTE, ): errors._raise_err(errors.ERR_INVALID_TPC_BEGIN_FLAGS) self._impl.tpc_begin(xid, flags, timeout) @@ -1143,7 +1227,7 @@ def tpc_commit( self._impl.tpc_commit(xid, one_phase) def tpc_end( - self, xid: Optional[Xid] = None, flags: int = constants.TPC_END_NORMAL + self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ Ends (detaches from) a TPC (two-phase commit) transaction. @@ -1151,7 +1235,7 @@ def tpc_end( self._verify_connected() if xid is not None: self._verify_xid(xid) - if flags not in (constants.TPC_END_NORMAL, constants.TPC_END_SUSPEND): + if flags not in (oracledb.TPC_END_NORMAL, oracledb.TPC_END_SUSPEND): errors._raise_err(errors.ERR_INVALID_TPC_END_FLAGS) self._impl.tpc_end(xid, flags) @@ -1701,6 +1785,38 @@ def _verify_can_execute( errors._raise_err(errors.ERR_WRONG_EXECUTE_PARAMETERS_TYPE) return parameters + async def begin_sessionless_transaction( + self, + transaction_id: Optional[Union[str, bytes]] = None, + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Begins a new sessionless transaction. + + Parameters: + transaction_id (str or bytes, optional): A Transaction Identifier. + If None, a random transaction_id will be generated. + timeout (int, optional): Timeout value in seconds. + Must be a positive integer. Defaults to 60 if not provided. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. + + Returns: + bytes: The normalized transaction_id used for the transaction. + """ + self._verify_connected() + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not isinstance(timeout, int) or timeout <= 0: + raise TypeError("timeout must be a positive integer") + + await self._impl.begin_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + async def callfunc( self, name: str, @@ -1922,6 +2038,44 @@ async def ping(self) -> None: self._verify_connected() await self._impl.ping() + async def resume_sessionless_transaction( + self, + transaction_id: Union[str, bytes], + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Resumes an existing sessionless transaction using the given + transaction_id. + + Parameters: + transaction_id (str or bytes): A Transaction Identifier that + uniquely identifies the sessionless transaction to be + resumed. This parameter is mandatory. + timeout (int, optional): Timeout in seconds for the resumed + transaction. Must be a positive integer. Defaults to 60. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. 
+ + Returns: + bytes: The normalized transaction_id used to resume the + sessionless transaction. + """ + self._verify_connected() + if transaction_id is None: + raise ValueError("transaction_id is required for resuming") + + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not (isinstance(timeout, int) and timeout >= 0): + raise TypeError("timeout must be a non-negative integer") + + await self._impl.resume_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + async def rollback(self) -> None: """ Rolls back any pending transaction. @@ -1958,8 +2112,21 @@ async def run_pipeline( ) return results + async def suspend_sessionless_transaction(self) -> None: + """ + Suspends the currently active sessionless transaction. + + This temporarily detaches the transaction from the session, + allowing it to be resumed later using its transaction_id. + + Returns: + None + """ + self._verify_connected() + await self._impl.suspend_sessionless_transaction() + async def tpc_begin( - self, xid: Xid, flags: int = constants.TPC_BEGIN_NEW, timeout: int = 0 + self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ Begins a TPC (two-phase commit) transaction with the given transaction @@ -1969,10 +2136,10 @@ async def tpc_begin( self._verify_connected() self._verify_xid(xid) if flags not in ( - constants.TPC_BEGIN_NEW, - constants.TPC_BEGIN_JOIN, - constants.TPC_BEGIN_RESUME, - constants.TPC_BEGIN_PROMOTE, + oracledb.TPC_BEGIN_NEW, + oracledb.TPC_BEGIN_JOIN, + oracledb.TPC_BEGIN_RESUME, + oracledb.TPC_BEGIN_PROMOTE, ): errors._raise_err(errors.ERR_INVALID_TPC_BEGIN_FLAGS) await self._impl.tpc_begin(xid, flags, timeout) @@ -2000,7 +2167,7 @@ async def tpc_commit( await self._impl.tpc_commit(xid, one_phase) async def tpc_end( - self, xid: Optional[Xid] = None, flags: int = constants.TPC_END_NORMAL + self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ Ends (detaches from) a TPC (two-phase commit) transaction. @@ -2008,7 +2175,7 @@ async def tpc_end( self._verify_connected() if xid is not None: self._verify_xid(xid) - if flags not in (constants.TPC_END_NORMAL, constants.TPC_END_SUSPEND): + if flags not in (oracledb.TPC_END_NORMAL, oracledb.TPC_END_SUSPEND): errors._raise_err(errors.ERR_INVALID_TPC_END_FLAGS) await self._impl.tpc_end(xid, flags) diff --git a/src/oracledb/constants.py b/src/oracledb/constants.py index b9a2d194..6f0cdb9c 100644 --- a/src/oracledb/constants.py +++ b/src/oracledb/constants.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2023, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -123,12 +123,6 @@ OPCODE_INSERT = 0x02 OPCODE_UPDATE = 0x04 -# flags for tpc_begin() -TPC_BEGIN_JOIN = 0x00000002 -TPC_BEGIN_NEW = 0x00000001 -TPC_BEGIN_PROMOTE = 0x00000008 -TPC_BEGIN_RESUME = 0x00000004 - # flags for tpc_end() TPC_END_NORMAL = 0 TPC_END_SUSPEND = 0x00100000 diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index ec268e36..eb11d227 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -666,6 +666,7 @@ def execute( self, statement: Optional[str], parameters: Optional[Union[list, tuple, dict]] = None, + suspend_on_success: bool = False, **keyword_parameters: Any, ) -> Any: """ @@ -702,8 +703,13 @@ def execute( If the statement is a query, the cursor is returned as a convenience to the caller (so it can be used directly as an iterator over the rows in the cursor); otherwise, None is returned. + + suspend_on_success parameter is specific to sessionless transactions. + When set to True, the active sessionless transaction will be suspended + after the successful execution of the current statement. """ self._prepare_for_execute(statement, parameters, keyword_parameters) + self._impl.suspend_on_success = suspend_on_success impl = self._impl impl.execute(self) if impl.fetch_vars is not None: @@ -715,6 +721,7 @@ def executemany( parameters: Any, batcherrors: bool = False, arraydmlrowcounts: bool = False, + suspend_on_success: bool = False, ) -> None: """ Prepare a statement for execution against a database and then execute @@ -753,11 +760,16 @@ def executemany( specify the parameter types and sizes ahead of time; in particular, None is assumed to be a string of length 1 so any values that are later bound as numbers or dates will raise a TypeError exception. + + suspend_on_success parameter is specific to sessionless transactions. + When set to True, the active sessionless transaction will be suspended + after the successful execution of the current statement. """ self._verify_open() num_execs = self._impl._prepare_for_executemany( self, statement, parameters ) + self._impl.suspend_on_success = suspend_on_success if num_execs > 0: self._impl.executemany( self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) @@ -929,6 +941,7 @@ async def execute( self, statement: Optional[str], parameters: Optional[Union[list, tuple, dict]] = None, + suspend_on_success: bool = False, **keyword_parameters: Any, ) -> None: """ @@ -961,8 +974,13 @@ async def execute( of time; in particular, None is assumed to be a string of length 1 so any values that are later bound as numbers or dates will raise a TypeError exception. + + suspend_on_success parameter is specific to sessionless transactions. + When set to True, the active sessionless transaction will be suspended + after the successful execution of the current statement. 
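+
+        Example (a sketch; the table name and bind value are illustrative):
+            await cursor.execute(
+                "insert into mytab (id) values (:1)", [1],
+                suspend_on_success=True,
+            )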
""" self._prepare_for_execute(statement, parameters, keyword_parameters) + self._impl.suspend_on_success = suspend_on_success await self._impl.execute(self) async def executemany( @@ -971,6 +989,7 @@ async def executemany( parameters: Any, batcherrors: bool = False, arraydmlrowcounts: bool = False, + suspend_on_success: bool = False, ) -> None: """ Prepare a statement for execution against a database and then execute @@ -1006,11 +1025,16 @@ async def executemany( specify the parameter types and sizes ahead of time; in particular, None is assumed to be a string of length 1 so any values that are later bound as numbers or dates will raise a TypeError exception. + + suspend_on_success parameter is specific to sessionless transactions. + When set to True, the active sessionless transaction will be suspended + after the successful execution of the current statement. """ self._verify_open() num_execs = self._impl._prepare_for_executemany( self, statement, parameters ) + self._impl.suspend_on_success = suspend_on_success if num_execs > 0: await self._impl.executemany( self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index de0bb6d4..16ee37dc 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -324,6 +324,9 @@ def _raise_not_supported(feature: str) -> None: ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT = 3031 ERR_ARROW_UNSUPPORTED_DATA_FORMAT = 3032 ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT = 3033 +ERR_SESSIONLESS_DIFFERING_METHODS = 3034 +ERR_SESSIONLESS_ALREADY_ACTIVE = 3035 +ERR_SESSIONLESS_INACTIVE = 3036 # error numbers that result in DatabaseError ERR_TNS_ENTRY_NOT_FOUND = 4000 @@ -374,6 +377,7 @@ def _raise_not_supported(feature: str) -> None: ERR_UNEXPECTED_PIPELINE_FAILURE = 5011 ERR_NOT_IMPLEMENTED = 5012 ERR_INTERNAL_CREATION_REQUIRED = 5013 +ERR_UNKNOWN_TRANSACTION_SYNC_VERSION = 5014 # error numbers that result in OperationalError ERR_LISTENER_REFUSED_CONNECTION = 6000 @@ -429,6 +433,9 @@ def _raise_not_supported(feature: str) -> None: 24496: ERR_POOL_NO_CONNECTION_AVAILABLE, 24338: ERR_INVALID_REF_CURSOR, 24344: WRN_COMPILATION_ERROR, + 26202: ERR_SESSIONLESS_INACTIVE, + 26211: ERR_SESSIONLESS_DIFFERING_METHODS, + 26216: ERR_SESSIONLESS_ALREADY_ACTIVE, 27146: ERR_CONNECTION_CLOSED, 28511: ERR_CONNECTION_CLOSED, 38902: ERR_TOO_MANY_BATCH_ERRORS, @@ -850,6 +857,9 @@ def _raise_not_supported(feature: str) -> None: ERR_UNKNOWN_TRANSACTION_STATE: ( "internal error: unknown transaction state {state}" ), + ERR_UNKNOWN_TRANSACTION_SYNC_VERSION: ( + "internal error: unknown transaction sync version {version}" + ), ERR_UNSUPPORTED_PIPELINE_OPERATION: ( "unsupported pipeline operation type: {op_type}" ), @@ -922,4 +932,13 @@ def _raise_not_supported(feature: str) -> None: "flexible vector formats are not supported. 
Only fixed 'FLOAT32', " "'FLOAT64', 'INT8' or 'BINARY' formats are supported" ), + ERR_SESSIONLESS_DIFFERING_METHODS: ( + "suspending or resuming a Sessionless Transaction can be done with " + "DBMS_TRANSACTION or with python-oracledb, but not both" + ), + ERR_SESSIONLESS_ALREADY_ACTIVE: ( + "suspend, commit, or rollback the current active sessionless " + "transaction before beginning or resuming another one" + ), + ERR_SESSIONLESS_INACTIVE: ("no Sessionless Transaction is active"), } diff --git a/src/oracledb/impl/thick/connection.pyx b/src/oracledb/impl/thick/connection.pyx index a49edb83..5bf887c9 100644 --- a/src/oracledb/impl/thick/connection.pyx +++ b/src/oracledb/impl/thick/connection.pyx @@ -277,6 +277,23 @@ cdef class ThickConnImpl(BaseConnImpl): if func(self._handle, value_ptr, value_length) < 0: _raise_from_odpi() + def begin_sessionless_transaction(self, bytes transaction_id, + uint32_t timeout, bint defer_round_trip): + """ + Begin a new sessionless transaction. + """ + cdef: + dpiSessionlessTransactionId txn_id + int status + txn_id.length = len(transaction_id) + memcpy(txn_id.value, transaction_id, txn_id.length) + with nogil: + status = dpiConn_beginSessionlessTransaction( + self._handle, &txn_id, timeout, defer_round_trip + ) + if status < 0: + _raise_from_odpi() + def cancel(self): cdef int status with nogil: @@ -750,6 +767,24 @@ cdef class ThickConnImpl(BaseConnImpl): if status < 0: _raise_from_odpi() + def resume_sessionless_transaction(self, bytes transaction_id, + uint32_t timeout, + bint defer_round_trip): + """ + Resume a sessionless transaction. + """ + cdef: + dpiSessionlessTransactionId txn_id + int status + txn_id.length = len(transaction_id) + memcpy(txn_id.value, transaction_id, txn_id.length) + with nogil: + status = dpiConn_resumeSessionlessTransaction( + self._handle, &txn_id, timeout, defer_round_trip + ) + if status < 0: + _raise_from_odpi() + def rollback(self): cdef int status with nogil: @@ -802,6 +837,16 @@ cdef class ThickConnImpl(BaseConnImpl): if status < 0: _raise_from_odpi() + def suspend_sessionless_transaction(self): + """ + Suspend the currently active sessionless transaction. 
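+        This calls dpiConn_suspendSessionlessTransaction() in ODPI-C and
+        raises an error if the call fails.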
+ """ + cdef int status + with nogil: + status = dpiConn_suspendSessionlessTransaction(self._handle) + if status < 0: + _raise_from_odpi() + def tpc_begin(self, xid, uint32_t flags, uint32_t timeout): cdef: ThickXid thick_xid = ThickXid(xid) diff --git a/src/oracledb/impl/thick/cursor.pyx b/src/oracledb/impl/thick/cursor.pyx index 600fd591..b08c1b90 100644 --- a/src/oracledb/impl/thick/cursor.pyx +++ b/src/oracledb/impl/thick/cursor.pyx @@ -308,6 +308,8 @@ cdef class ThickCursorImpl(BaseCursorImpl): mode = DPI_MODE_EXEC_COMMIT_ON_SUCCESS else: mode = DPI_MODE_EXEC_DEFAULT + if self.suspend_on_success: + mode |= DPI_MODE_EXEC_SUSPEND_ON_SUCCESS with nogil: status = dpiStmt_execute(self._handle, mode, &num_query_cols) if status == DPI_SUCCESS: @@ -342,6 +344,8 @@ cdef class ThickCursorImpl(BaseCursorImpl): mode |= DPI_MODE_EXEC_ARRAY_DML_ROWCOUNTS if batcherrors: mode |= DPI_MODE_EXEC_BATCH_ERRORS + if self.suspend_on_success: + mode |= DPI_MODE_EXEC_SUSPEND_ON_SUCCESS if self.bind_vars is not None: self._perform_binds(cursor.connection, num_execs_int) diff --git a/src/oracledb/impl/thick/odpi.pxd b/src/oracledb/impl/thick/odpi.pxd index 81d016d8..e325f735 100644 --- a/src/oracledb/impl/thick/odpi.pxd +++ b/src/oracledb/impl/thick/odpi.pxd @@ -62,6 +62,7 @@ cdef extern from "impl/thick/odpi/embed/dpi.c": DPI_MODE_EXEC_DEFAULT DPI_MODE_EXEC_DESCRIBE_ONLY DPI_MODE_EXEC_PARSE_ONLY + DPI_MODE_EXEC_SUSPEND_ON_SUCCESS # connection/pool creation modes enum: @@ -598,7 +599,15 @@ cdef extern from "impl/thick/odpi/embed/dpi.c": const char *name uint32_t nameLength + ctypedef struct dpiSessionlessTransactionId: + char[64] value + uint32_t length + # functions + int dpiConn_beginSessionlessTransaction(dpiConn *conn, + dpiSessionlessTransactionId* transactionId, uint32_t timeout, + bint deferRoundTrip) nogil + int dpiConn_breakExecution(dpiConn *conn) nogil int dpiConn_changePassword(dpiConn *conn, const char *userName, @@ -702,6 +711,10 @@ cdef extern from "impl/thick/odpi/embed/dpi.c": int dpiConn_release(dpiConn *conn) nogil + int dpiConn_resumeSessionlessTransaction(dpiConn *conn, + dpiSessionlessTransactionId* transactionId, uint32_t timeout, + bint deferRoundTrip) nogil + int dpiConn_rollback(dpiConn *conn) nogil int dpiConn_setAction(dpiConn *conn, const char *value, @@ -746,6 +759,8 @@ cdef extern from "impl/thick/odpi/embed/dpi.c": int dpiConn_subscribe(dpiConn *conn, dpiSubscrCreateParams *params, dpiSubscr **subscr) nogil + int dpiConn_suspendSessionlessTransaction(dpiConn *conn) nogil + int dpiConn_tpcBegin(dpiConn *conn, dpiXid *xid, uint32_t transactionTimeout, uint32_t flags) nogil diff --git a/src/oracledb/impl/thin/capabilities.pyx b/src/oracledb/impl/thin/capabilities.pyx index 3cf319c4..b07cd143 100644 --- a/src/oracledb/impl/thin/capabilities.pyx +++ b/src/oracledb/impl/thin/capabilities.pyx @@ -138,10 +138,11 @@ cdef class Capabilities: TNS_CCAP_EXPLICIT_BOUNDARY self.compile_caps[TNS_CCAP_TTC5] = TNS_CCAP_VECTOR_SUPPORT | \ TNS_CCAP_TOKEN_SUPPORTED | TNS_CCAP_PIPELINING_SUPPORT | \ - TNS_CCAP_PIPELINING_BREAK + TNS_CCAP_PIPELINING_BREAK | TNS_CCAP_TTC5_SESSIONLESS_TXNS self.compile_caps[TNS_CCAP_VECTOR_FEATURES] = \ TNS_CCAP_VECTOR_FEATURE_BINARY | \ TNS_CCAP_VECTOR_FEATURE_SPARSE + self.compile_caps[TNS_CCAP_OCI3] = TNS_CCAP_OCI3_OCSSYNC @cython.boundscheck(False) cdef void _init_runtime_caps(self): diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index 9b333a8d..25634b4e 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ 
b/src/oracledb/impl/thin/connection.pyx @@ -29,6 +29,33 @@ # thin_impl.pyx). #------------------------------------------------------------------------------ +cdef class _SessionlessData: + + cdef: + bytes transaction_id + uint32_t operation + uint32_t flags + uint32_t timeout + bint piggyback_pending + bint started_on_server + + cdef TransactionSwitchMessage create_message(self, + BaseThinConnImpl conn_impl): + """ + Returns the message used for sending the request to the database. + """ + cdef: + uint32_t sessionless_format_id = 0x4e5c3e + TransactionSwitchMessage message + message = conn_impl._create_message(TransactionSwitchMessage) + if self.operation & TNS_TPC_TXN_START: + message.xid = (sessionless_format_id, self.transaction_id, b"") + message.timeout = self.timeout + message.operation = self.operation + message.flags = self.flags | TPC_TXN_FLAGS_SESSIONLESS + return message + + cdef class BaseThinConnImpl(BaseConnImpl): cdef: @@ -74,6 +101,7 @@ cdef class BaseThinConnImpl(BaseConnImpl): bytes _transaction_context uint8_t pipeline_mode uint8_t _session_state_desired + _SessionlessData _sessionless_data def __init__(self, str dsn, ConnectParamsImpl params): _check_cryptography() @@ -220,6 +248,29 @@ cdef class BaseThinConnImpl(BaseConnImpl): """ self._statement_cache.return_statement(statement) + cdef TransactionSwitchMessage _start_sessionless_transaction( + self, + bytes transaction_id, + uint32_t timeout, + uint32_t flags, + bint defer_round_trip + ): + """ + Starts (either begins or resumes) a sessionless transaction. A message + is returned if the request is not going to be deferred. + """ + if self._sessionless_data is not None: + errors._raise_err(errors.ERR_SESSIONLESS_ALREADY_ACTIVE) + self._sessionless_data = _SessionlessData.__new__(_SessionlessData) + self._sessionless_data.transaction_id = transaction_id + self._sessionless_data.timeout = timeout + self._sessionless_data.operation = TNS_TPC_TXN_START + self._sessionless_data.flags = flags + if defer_round_trip: + self._sessionless_data.piggyback_pending = True + if not defer_round_trip: + return self._sessionless_data.create_message(self) + def cancel(self): self._protocol._break_external() @@ -431,6 +482,24 @@ cdef class ThinConnImpl(BaseThinConnImpl): """ return ThinCursorImpl.__new__(ThinCursorImpl, self) + def begin_sessionless_transaction( + self, + bytes transaction_id, + int timeout, + bint defer_round_trip + ): + """ + Internal method for beginning a sessionless transaction. + """ + cdef: + Protocol protocol = self._protocol + TransactionSwitchMessage message + message = self._start_sessionless_transaction( + transaction_id, timeout, TPC_TXN_FLAGS_NEW, defer_round_trip + ) + if message is not None: + protocol._process_single_message(message) + def change_password(self, str old_password, str new_password): cdef: Protocol protocol = self._protocol @@ -489,6 +558,24 @@ cdef class ThinConnImpl(BaseThinConnImpl): message = self._create_message(PingMessage) protocol._process_single_message(message) + def resume_sessionless_transaction( + self, + bytes transaction_id, + int timeout, + bint defer_round_trip + ): + """ + Internal method for resuming a sessionless transaction. 
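+        The request is sent immediately unless defer_round_trip is set, in
+        which case it is sent as a piggyback with the next database
+        operation.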
+ """ + cdef: + Protocol protocol = self._protocol + TransactionSwitchMessage message + message = self._start_sessionless_transaction( + transaction_id, timeout, TPC_TXN_FLAGS_RESUME, defer_round_trip + ) + if message is not None: + protocol._process_single_message(message) + def rollback(self): cdef: Protocol protocol = self._protocol @@ -500,6 +587,19 @@ cdef class ThinConnImpl(BaseThinConnImpl): self._protocol._transport.set_timeout(value / 1000) self._call_timeout = value + def suspend_sessionless_transaction(self): + cdef: + Protocol protocol = self._protocol + TransactionSwitchMessage message + if self._sessionless_data is None: + errors._raise_err(errors.ERR_SESSIONLESS_INACTIVE) + elif self._sessionless_data.started_on_server: + errors._raise_err(errors.ERR_SESSIONLESS_DIFFERING_METHODS) + message = self._create_message(TransactionSwitchMessage) + message.operation = TNS_TPC_TXN_DETACH + message.flags = TPC_TXN_FLAGS_SESSIONLESS + protocol._process_single_message(message) + def tpc_begin(self, xid, uint32_t flags, uint32_t timeout): cdef: Protocol protocol = self._protocol @@ -944,6 +1044,24 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): raise message.pipeline_result_impl._capture_err(e) + async def begin_sessionless_transaction( + self, + bytes transaction_id, + int timeout, + bint defer_round_trip + ): + """ + Internal method for beginning a sessionless transaction. + """ + cdef: + BaseAsyncProtocol protocol = self._protocol + TransactionSwitchMessage message + message = self._start_sessionless_transaction( + transaction_id, timeout, TPC_TXN_FLAGS_NEW, defer_round_trip + ) + if message is not None: + await protocol._process_single_message(message) + async def change_password(self, str old_password, str new_password): cdef: BaseAsyncProtocol protocol = self._protocol @@ -1009,6 +1127,24 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): message = self._create_message(PingMessage) await protocol._process_single_message(message) + async def resume_sessionless_transaction( + self, + bytes transaction_id, + int timeout, + bint defer_round_trip + ): + """ + Internal method for resuming a sessionless transaction. + """ + cdef: + BaseAsyncProtocol protocol = self._protocol + TransactionSwitchMessage message + message = self._start_sessionless_transaction( + transaction_id, timeout, TPC_TXN_FLAGS_RESUME, defer_round_trip + ) + if message is not None: + await protocol._process_single_message(message) + async def rollback(self): """ Sends the message to roll back any pending transaction. 
@@ -1078,6 +1214,19 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): """ return self._protocol._caps.supports_pipelining + async def suspend_sessionless_transaction(self): + cdef: + BaseAsyncProtocol protocol = self._protocol + TransactionSwitchMessage message + if self._sessionless_data is None: + errors._raise_err(errors.ERR_SESSIONLESS_INACTIVE) + elif self._sessionless_data.started_on_server: + errors._raise_err(errors.ERR_SESSIONLESS_DIFFERING_METHODS) + message = self._create_message(TransactionSwitchMessage) + message.operation = TNS_TPC_TXN_DETACH + message.flags = TPC_TXN_FLAGS_SESSIONLESS + await protocol._process_single_message(message) + async def tpc_begin(self, xid, uint32_t flags, uint32_t timeout): cdef: BaseAsyncProtocol protocol = self._protocol diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index c08f4f39..3acc08cd 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -156,6 +156,7 @@ cdef enum: cdef enum: TNS_KEYWORD_NUM_CURRENT_SCHEMA = 168 TNS_KEYWORD_NUM_EDITION = 172 + TNS_KEYWORD_NUM_TRANSACTION_ID = 201 # bind flags cdef enum: @@ -416,6 +417,7 @@ cdef enum: TNS_CCAP_UB2_DTY = 27 TNS_CCAP_OCI2 = 31 TNS_CCAP_CLIENT_FN = 34 + TNS_CCAP_OCI3 = 35 TNS_CCAP_TTC3 = 37 TNS_CCAP_SESS_SIGNATURE_VERSION = 39 TNS_CCAP_TTC4 = 40 @@ -486,6 +488,8 @@ cdef enum: TNS_CCAP_PIPELINING_BREAK = 0x10 TNS_CCAP_VECTOR_FEATURE_BINARY = 0x01 TNS_CCAP_VECTOR_FEATURE_SPARSE = 0x02 + TNS_CCAP_TTC5_SESSIONLESS_TXNS = 0x20 + TNS_CCAP_OCI3_OCSSYNC = 0x20 # runtime capability indices cdef enum: @@ -526,6 +530,7 @@ cdef enum: cdef enum: TNS_TPC_TXN_START = 0x01 TNS_TPC_TXN_DETACH = 0x02 + TNS_TPC_TXN_POST_DETACH = 0x04 # transaction change state op codes cdef enum: @@ -534,6 +539,16 @@ cdef enum: TNS_TPC_TXN_PREPARE = 0x03 TNS_TPC_TXN_FORGET = 0x04 +# sessionless server states +cdef enum: + TNS_TPC_TXNID_SYNC_SET = 0x40 + TNS_TPC_TXNID_SYNC_UNSET = 0x80 + +# sessionless state reason +cdef enum: + TNS_TPC_TXNID_SYNC_SERVER = 0x01 + TNS_TPC_TXNID_SYNC_CLIENT = 0x02 + # transaction states cdef enum: TNS_TPC_TXN_STATE_PREPARE = 0 diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 2ab9b57c..3bfd8fee 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -123,6 +123,42 @@ cdef class Message: """ pass + cdef int _update_sessionless_txn_state(self, bytes data) except -1: + """ + Update the sessionless transaction state. 
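+        The data consists of the transaction id followed by a one-byte
+        sessionless state and a one-byte sync version.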
+ """ + cdef: + uint8_t sessionless_state + uint8_t sync_version + bytes transaction_id + const uint8_t* buf + ssize_t buf_len + + # extract the parts of the data + buf = data + buf_len = len(data) + transaction_id = data[:buf_len - 2] + sessionless_state = buf[buf_len - 2] + sync_version = buf[buf_len - 1] + + # verify the sync version is one understood by the driver + if sync_version != 1: + errors._raise_err(errors.ERR_UNKNOWN_TRANSACTION_SYNC_VERSION, + version=sync_version) + + # transaction was cleared (ended or suspended) + if sessionless_state & TNS_TPC_TXNID_SYNC_UNSET: + self.conn_impl._sessionless_data = None + self.conn_impl._protocol._txn_in_progress = False + + # transaction was set (started or resumed) + elif sessionless_state & TNS_TPC_TXNID_SYNC_SET: + self.conn_impl._sessionless_data = \ + _SessionlessData.__new__(_SessionlessData) + self.conn_impl._sessionless_data.started_on_server = \ + sessionless_state & TNS_TPC_TXNID_SYNC_SERVER + self.conn_impl._protocol._txn_in_progress = True + cdef int _process_error_info(self, ReadBuffer buf) except -1: cdef: uint32_t num_bytes, i, offset, num_offsets @@ -237,6 +273,8 @@ cdef class Message: self.conn_impl._current_schema = text_value.decode() elif keyword_num == TNS_KEYWORD_NUM_EDITION: self.conn_impl._edition = text_value.decode() + elif keyword_num == TNS_KEYWORD_NUM_TRANSACTION_ID: + self._update_sessionless_txn_state(binary_value) cdef int _process_message(self, ReadBuffer buf, uint8_t message_type) except -1: @@ -668,6 +706,22 @@ cdef class Message: self._write_close_temp_lobs_piggyback(buf) if self.conn_impl._session_state_desired != 0: self._write_session_state_piggyback(buf) + if self.conn_impl._sessionless_data is not None \ + and self.conn_impl._sessionless_data.piggyback_pending: + self._write_sessionless_piggyback(buf) + + cdef int _write_sessionless_piggyback(self, WriteBuffer buf): + """ + Writes the piggyback for starting a sessionless transaction. + """ + cdef: + _SessionlessData sessionless_data + TransactionSwitchMessage message + sessionless_data = self.conn_impl._sessionless_data + message = sessionless_data.create_message(self.conn_impl) + message.message_type = TNS_MSG_TYPE_PIGGYBACK + sessionless_data.piggyback_pending = False + message._write_message(buf) cdef int _write_session_state_piggyback(self, WriteBuffer buf) except -1: """ diff --git a/src/oracledb/impl/thin/messages/execute.pyx b/src/oracledb/impl/thin/messages/execute.pyx index 57447a22..cda1183a 100644 --- a/src/oracledb/impl/thin/messages/execute.pyx +++ b/src/oracledb/impl/thin/messages/execute.pyx @@ -37,6 +37,29 @@ cdef class ExecuteMessage(MessageWithData): uint32_t fetch_pos bint scroll_operation + cdef _handle_sessionless_suspend(self): + """ + Suspend the active sessionless transaction after execution of a + statement. 
+ """ + cdef _SessionlessData sdata = self.conn_impl._sessionless_data + + # perform validation + if sdata is None: + errors._raise_err(errors.ERR_SESSIONLESS_INACTIVE) + elif sdata.started_on_server: + errors._raise_err(errors.ERR_SESSIONLESS_DIFFERING_METHODS) + + # if a piggyback is pending , ensure that it is suspended when the + # operation is completed; otherwise, create a new pending piggyback to + # suspend the transaction + if sdata.piggyback_pending: + sdata.operation |= TNS_TPC_TXN_POST_DETACH + else: + sdata.operation = TNS_TPC_TXN_POST_DETACH + sdata.flags = TPC_TXN_FLAGS_SESSIONLESS + sdata.piggyback_pending = True + cdef int _write_execute_message(self, WriteBuffer buf) except -1: """ Write the message for a full execute. @@ -85,6 +108,8 @@ cdef class ExecuteMessage(MessageWithData): exec_flags |= TNS_EXEC_FLAGS_DML_ROWCOUNTS if self.conn_impl.autocommit and not self.parse_only: options |= TNS_EXEC_OPTION_COMMIT + if self.cursor_impl.suspend_on_success: + self._handle_sessionless_suspend() # write body of message self._write_function_code(buf) @@ -202,6 +227,8 @@ cdef class ExecuteMessage(MessageWithData): if self.conn_impl.autocommit: options_2 |= TNS_EXEC_OPTION_COMMIT_REEXECUTE num_iters = self.num_execs + if self.cursor_impl.suspend_on_success: + self._handle_sessionless_suspend() self._write_function_code(buf) buf.write_ub4(stmt._cursor_id) diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index e6d95475..c63f55e3 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -176,6 +176,9 @@ from .base_impl cimport ( PY_TYPE_TIMEDELTA, TNS_LONG_LENGTH_INDICATOR, TNS_NULL_LENGTH_INDICATOR, + TPC_TXN_FLAGS_NEW, + TPC_TXN_FLAGS_RESUME, + TPC_TXN_FLAGS_SESSIONLESS, decode_uint16be, decode_uint32be, decode_date, diff --git a/src/oracledb/utils.py b/src/oracledb/utils.py index e58613b3..0f0129f3 100644 --- a/src/oracledb/utils.py +++ b/src/oracledb/utils.py @@ -36,6 +36,7 @@ from . import base_impl from . import driver_mode from . import errors +import uuid def enable_thin_mode(): @@ -70,6 +71,35 @@ def from_arrow(obj: Any) -> Union[DataFrame, ArrowArray]: raise ValueError(msg) +def normalize_sessionless_transaction_id( + value: bytes | str | None = None, +) -> bytes: + """ + Normalize and validate the transaction_id. + + - If `value` is a string, it's UTF-8 encoded. + - If `value` is None, a UUID4-based transaction_id is generated. + - If `value` is not str/bytes/None, raises TypeError. + - If transaction_id exceeds 64 bytes, raises ValueError. + + Returns: + bytes: Normalized transaction_id + """ + if value is None: + value = uuid.uuid4().bytes + elif isinstance(value, str): + value = value.encode("utf-8") + elif not isinstance(value, bytes): + raise TypeError("invalid transaction_id: must be str, bytes, or None") + + if len(value) > 64: + raise ValueError( + f"transaction_id size exceeds 64 bytes (got {len(value)})" + ) + + return value + + def params_initer(f): """ Decorator function which is used on the ConnectParams and PoolParams diff --git a/tests/ext/test_ext_2600_sessionless_transaction.py b/tests/ext/test_ext_2600_sessionless_transaction.py new file mode 100644 index 00000000..33d64c21 --- /dev/null +++ b/tests/ext/test_ext_2600_sessionless_transaction.py @@ -0,0 +1,93 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. 
+# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +E2600 - Module for testing timing of sessionless transactions. No special setup +is required but the tests here will only be run if the run_long_tests value is +enabled. +""" + +import time + +import test_env + + +@test_env.skip_unless_run_long_tests() +@test_env.skip_unless_sessionless_transactions_supported() +class TestCase(test_env.BaseTestCase): + def test_ext_2600(self): + "E2600 - test error conditions with client API" + self.cursor.execute("truncate table TestTempTable") + + transaction_id = "test_2600_transaction_id" + other_transaction_id = "test_2600_different_transaction_id" + with test_env.get_connection() as conn: + cursor = conn.cursor() + + # suspending a non-existent transaction will fail only in thin + # mode + if test_env.get_is_thin(): + with self.assertRaisesFullCode("DPY-3036"): + conn.suspend_sessionless_transaction() + + # start first sessionless transaction + conn.begin_sessionless_transaction( + transaction_id=transaction_id, timeout=5 + ) + + # starting another sessionless transaction will fail only in thin + # mode + if test_env.get_is_thin(): + with self.assertRaisesFullCode("DPY-3035"): + conn.begin_sessionless_transaction( + transaction_id=other_transaction_id, timeout=5 + ) + + cursor.execute( + """ + INSERT INTO TestTempTable(IntCol, StringCol1) + VALUES(:1, :2) + """, + (1, "test_row"), + ) + + # suspend using server API should fail + with self.assertRaisesFullCode("DPY-3034"): + cursor.callproc("dbms_transaction.suspend_transaction") + + # suspend using client API should succeed + conn.suspend_sessionless_transaction() + + # wait till it times out + time.sleep(10) + + # attmpting to resume the transaction should fail + with self.assertRaisesFullCode("ORA-26218"): + conn.resume_sessionless_transaction( + transaction_id=transaction_id + ) + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/ext/test_ext_2700_sessionless_transaction_async.py b/tests/ext/test_ext_2700_sessionless_transaction_async.py new file mode 100644 index 00000000..14927d58 --- /dev/null +++ b/tests/ext/test_ext_2700_sessionless_transaction_async.py @@ -0,0 +1,90 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. 
+# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +E2700 - Module for testing timing of sessionless transactions using asyncio. No +special setup is required but the tests here will only be run if the +run_long_tests value is enabled. +""" + +import asyncio + +import test_env + + +@test_env.skip_unless_thin_mode() +@test_env.skip_unless_run_long_tests() +@test_env.skip_unless_sessionless_transactions_supported() +class TestCase(test_env.BaseAsyncTestCase): + async def test_ext_2700(self): + "E2700 - test error conditions with client API" + await self.cursor.execute("truncate table TestTempTable") + + transaction_id = "test_2600_transaction_id" + other_transaction_id = "test_2600_different_transaction_id" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + + # suspending a non-existent transaction will fail + with self.assertRaisesFullCode("DPY-3036"): + await conn.suspend_sessionless_transaction() + + # start first sessionless transaction + await conn.begin_sessionless_transaction( + transaction_id=transaction_id, timeout=5 + ) + + # starting another sessionless transaction will fail + with self.assertRaisesFullCode("DPY-3035"): + await conn.begin_sessionless_transaction( + transaction_id=other_transaction_id, timeout=5 + ) + + await cursor.execute( + """ + INSERT INTO TestTempTable(IntCol, StringCol1) + VALUES(:1, :2) + """, + (1, "test_row"), + ) + + # suspend using server API should fail + with self.assertRaisesFullCode("DPY-3034"): + await cursor.callproc("dbms_transaction.suspend_transaction") + + # suspend using client API should succeed + await conn.suspend_sessionless_transaction() + + # wait till it times out + await asyncio.sleep(10) + + # attmpting to resume the transaction should fail + with self.assertRaisesFullCode("ORA-26218"): + await conn.resume_sessionless_transaction( + transaction_id=transaction_id + ) + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_8700_sessionless_transaction.py b/tests/test_8700_sessionless_transaction.py new file mode 100644 index 00000000..8ea9703d --- /dev/null +++ b/tests/test_8700_sessionless_transaction.py @@ -0,0 +1,658 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# test_8700_sessionless_transaction.py +# +# Tests for sessionless transactions using both client API and server-side +# procedures with DBMS_TRANSACTION package. +# ----------------------------------------------------------------------------- + +import test_env + + +@test_env.skip_unless_sessionless_transactions_supported() +class TestCase(test_env.BaseTestCase): + + transaction_id_client = b"test_8700_client" + transaction_id_server = b"test_8700_server" + + def _get_server_start_stmt(self, mode): + "Generate server-side transaction start statement" + return f""" + DECLARE + transaction_id RAW(128); + BEGIN + transaction_id := DBMS_TRANSACTION.START_TRANSACTION( + :transaction_id, + DBMS_TRANSACTION.TRANSACTION_TYPE_SESSIONLESS, + :timeout, + DBMS_TRANSACTION.TRANSACTION_{mode} + ); + END;""" + + def test_8700(self): + "8700 - test sessionless transaction using client API" + self.cursor.execute("truncate table TestTempTable") + + # create sessionless transaction in one connection + with test_env.get_connection() as conn: + + cursor = conn.cursor() + + # start sessionless transaction + conn.begin_sessionless_transaction( + transaction_id=self.transaction_id_client, + timeout=15, + defer_round_trip=True, + ) + + # insert data within transaction + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "row1"), + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) + + # suspend the sessionless transaction + conn.suspend_sessionless_transaction() + + # ensure data is not visible outside transaction + cursor.execute("select IntCol, StringCol1 from TestTempTable") + self.assertEqual(cursor.fetchall(), []) + + # resume the transaction in another connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + + conn.resume_sessionless_transaction( + transaction_id=self.transaction_id_client, + timeout=5, + defer_round_trip=True, + ) + + # suspend using suspend_on_success flag with executemany + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [(3, "row3")], + suspend_on_success=True, + ) + + # ensure data is not visible as the transaction is suspended + cursor.execute("select IntCol, StringCol1 from TestTempTable") + self.assertEqual(cursor.fetchall(), []) + + # resume the transaction and commit the changes + conn.resume_sessionless_transaction( + transaction_id=self.transaction_id_client + ) + conn.commit() + + # verify data after commit + cursor.execute("select IntCol, StringCol1 from TestTempTable") + self.assertEqual(len(cursor.fetchall()), 3) + + def test_8701(self): + "8701 - test sessionless transaction using server-side procedures" + self.cursor.execute("truncate table TestTempTable") + + # create sessionless transaction in one connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + 
self._get_server_start_stmt("NEW"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + + # insert data within transaction + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "row1"), + ) + + # Suspend on server + cursor.callproc("dbms_transaction.suspend_transaction") + + # verify data is not visible after suspend + cursor.execute("select IntCol, StringCol1 from TestTempTable") + self.assertEqual(cursor.fetchall(), []) + + # resume the transaction in another connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + self._get_server_start_stmt("RESUME"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) + conn.commit() + + # verify data after commit in original connection + self.cursor.execute("SELECT IntCol, StringCol1 FROM TestTempTable") + self.assertEqual(len(self.cursor.fetchall()), 2) + + def test_8702(self): + "8702 - test error conditions with server API sessionless transactions" + self.cursor.execute("truncate table TestTempTable") + + # start a transaction via the server; verify that suspension via the + # client fails but suspension via the server succeeds + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + self._get_server_start_stmt("NEW"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "server_row"), + ) + with self.assertRaisesFullCode("DPY-3034"): + conn.suspend_sessionless_transaction() + cursor.callproc("dbms_transaction.suspend_transaction") + + # resume on a second connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + self._get_server_start_stmt("RESUME"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "server_row2"), + ) + + # resuming on a different session should fail + with test_env.get_connection() as other_conn: + other_cursor = other_conn.cursor() + with self.assertRaisesFullCode("ORA-25351"): + other_cursor.execute( + self._get_server_start_stmt("RESUME"), + { + "transaction_id": self.transaction_id_server, + "timeout": 2, + }, + ) + + def test_8703(self): + "8703 - test rollback of sessionless transaction" + self.cursor.execute("truncate table TestTempTable") + + # start and work with sessionless transaction + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.begin_sessionless_transaction( + transaction_id=b"rollback_test", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "rollback_row"), + suspend_on_success=True, + ) + + # resume in new connection and rollback + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction( + transaction_id=b"rollback_test", timeout=5 + ) + conn.rollback() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + self.assertEqual(cursor.fetchall(), []) + + def test_8704(self): + "8704 - test multiple operations within same sessionless transaction" + self.cursor.execute("truncate table TestTempTable") + + # start transaction and perform multiple operations + with test_env.get_connection() as conn: + cursor = conn.cursor() + 
conn.begin_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "original"), + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "second"), + ) + cursor.execute( + """ + update TestTempTable set StringCol1 = :v1 where IntCol = 1 + """, + v1="updated", + ) + cursor.execute("delete from TestTempTable where IntCol = 2") + conn.suspend_sessionless_transaction() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + self.assertEqual(cursor.fetchall(), []) + + # resume and commit + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=5 + ) + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + self.assertEqual(cursor.fetchall(), [(1, "updated")]) + + def test_8705(self): + "8705 - test concurrent sessionless transactions" + self.cursor.execute("truncate table TestTempTable") + + # start first sessionless transaction + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.begin_sessionless_transaction( + transaction_id=b"concurrent_1", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "concurrent_1"), + suspend_on_success=True, + ) + + # start second sessionless transaction in another connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.begin_sessionless_transaction( + transaction_id=b"concurrent_2", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "concurrent_2"), + suspend_on_success=True, + ) + + # resume and commit both transactions + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(transaction_id=b"concurrent_1") + conn.commit() + conn.resume_sessionless_transaction(transaction_id=b"concurrent_2") + conn.commit() + + # verify data from both transactions is present + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + expected_data = [(1, "concurrent_1"), (2, "concurrent_2")] + self.assertEqual(cursor.fetchall(), expected_data) + + def test_8706(self): + "8706 - test sessionless transaction with large data" + self.cursor.execute("delete from TestAllTypes") + self.conn.commit() + + # start sessionless transaction and insert large data + large_string = "X" * 250_000 + with test_env.get_connection() as conn: + cursor = conn.cursor() + transaction_id = conn.begin_sessionless_transaction() + cursor.execute( + """ + insert into TestAllTypes (IntValue, ClobValue) + values (:1, :2) + """, + (1, large_string), + suspend_on_success=True, + ) + + # resume transaction and commit + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + conn.commit() + with test_env.DefaultsContextManager("fetch_lobs", False): + cursor.execute("select ClobValue from TestAllTypes") + (result,) = cursor.fetchone() + self.assertEqual(result, large_string) + + def test_8707(self): + "8707 - test sessionless transaction with multiple suspends/resumes" + self.cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [ + (1, "first_insert"), + (2, "second_insert"), + (3, "third_insert"), + ] + + # 
start sessionless transaction and suspend + transaction_id = self.conn.begin_sessionless_transaction() + self.cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume and insert second row + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) + + # resume and insert third row, then commit + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[2], + ) + conn.commit() + + # verify all data is present + self.cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + self.assertEqual(self.cursor.fetchall(), data) + + def test_8708(self): + "8708 - Test sessionless transaction with invalid resume attempts" + self.cursor.execute("truncate table TestTempTable") + + # start a sessionless transaction + transaction_id = self.conn.begin_sessionless_transaction() + + # try to resume with the wrong transaction id + if test_env.get_is_thin(): + with self.assertRaisesFullCode("DPY-3035"): + self.conn.resume_sessionless_transaction("wrong_id") + + # try to resume before suspend + if test_env.get_is_thin(): + with self.assertRaisesFullCode("DPY-3035"): + self.conn.resume_sessionless_transaction(transaction_id) + + # suspend and resume correctly + self.conn.suspend_sessionless_transaction() + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(transaction_id) + + def test_8709(self): + "8709 - test getting transaction ID of active sessionless transaction" + transaction_id = self.conn.begin_sessionless_transaction() + self.cursor.execute("select dbms_transaction.get_transaction_id()") + (server_transaction_id,) = self.cursor.fetchone() + self.assertEqual(server_transaction_id, transaction_id.hex().upper()) + self.conn.commit() + + def test_8710(self): + "8710 - test auto-generated transaction ID uniqueness" + + # start first transaction + transaction_id_1 = self.conn.begin_sessionless_transaction() + self.conn.suspend_sessionless_transaction() + + # start second transaction + with test_env.get_connection() as conn: + transaction_id_2 = conn.begin_sessionless_transaction() + conn.suspend_sessionless_transaction() + self.assertNotEqual(transaction_id_1, transaction_id_2) + conn.resume_sessionless_transaction(transaction_id_2) + conn.rollback() + + # cleanup + self.conn.resume_sessionless_transaction(transaction_id_1) + self.conn.rollback() + + def test_8711(self): + "8711 - test sessionless transactions with connection pool" + self.cursor.execute("truncate table TestTempTable") + + # initialization + data = [(1, "value 1"), (2, "value 2")] + pool = test_env.get_pool(min=2, max=5) + + # start transaction on first connection + with pool.acquire() as conn: + cursor = conn.cursor() + transaction_id = conn.begin_sessionless_transaction() + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume on second connection + with pool.acquire() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + cursor.execute( + """ + insert into TestTempTable (IntCol, 
StringCol1) + values (:1, :2) + """, + data[1], + ) + conn.commit() + + # verify data + with pool.acquire() as conn: + cursor = conn.cursor() + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + self.assertEqual(cursor.fetchall(), data) + + pool.close() + + def test_8712(self): + "8712 - Test sessionless transaction with special transaction ids" + self.cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [(1, "long_transaction_id"), (2, "special_chars")] + + # test with long transaction id + long_transaction_id = b"X" * 64 + self.conn.begin_sessionless_transaction(long_transaction_id) + self.cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume and commit in different connection + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(long_transaction_id) + conn.commit() + + # test with special characters in transaction id + special_transaction_id = b"SPECIAL@#$%^&*()_+" + self.conn.begin_sessionless_transaction(special_transaction_id) + self.cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) + + # resume and commit in different connection + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(special_transaction_id) + conn.commit() + + # verify both transactions committed + self.cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + self.assertEqual(self.cursor.fetchall(), data) + + def test_8713(self): + "8713 - duplicate transaction id across different connections" + transaction_id = "test_8713_transaction_id" + self.conn.begin_sessionless_transaction(transaction_id) + with test_env.get_connection() as conn: + with self.assertRaisesFullCode("ORA-26217"): + conn.begin_sessionless_transaction(transaction_id) + + def test_8714(self): + "8714 - zero timeout behaviour in resume" + transaction_id = self.conn.begin_sessionless_transaction() + with test_env.get_connection() as conn: + with self.assertRaisesFullCode("ORA-25351"): + conn.resume_sessionless_transaction(transaction_id, timeout=0) + + # suspend transaction on first session, and resume will now succeed + self.conn.suspend_sessionless_transaction() + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(transaction_id, timeout=0) + conn.rollback() + + def test_8715(self): + "8715 - transaction behaviour with DDL operations" + + # create temp table + temp_table_name = "temp_test_8715" + self.cursor.execute(f"drop table if exists {temp_table_name}") + self.cursor.execute( + f""" + create table {temp_table_name} ( + id number, + data varchar2(50) + )""" + ) + + # beging sessionless transaction and perform DDL which performs an + # implicit commit + self.conn.begin_sessionless_transaction() + self.cursor.execute( + f"alter table {temp_table_name} add temp_col varchar2(20)" + ) + + # further DML operations are part of a local transaction + local_data = (1, "LOCAL_TRANSACTION", "abc") + self.cursor.execute( + f"insert into {temp_table_name} values (:1, :2, :3)", + local_data, + ) + + # suspend will fail now as a local transaction is active and only + # sessionless transactions are suspendable + with self.assertRaisesFullCode("DPY-3036"): + self.cursor.execute( + f""" + insert into {temp_table_name} + values (2, 'LOCAL_TRANSACTION', 'def') + """, + 
suspend_on_success=True, + ) + + # verify data from local transaction is all that is present + self.cursor.execute(f"select * from {temp_table_name}") + self.assertEqual(self.cursor.fetchall(), [local_data]) + + # drop temp table + self.cursor.execute(f"drop table {temp_table_name} purge") + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_8800_sessionless_transaction_async.py b/tests/test_8800_sessionless_transaction_async.py new file mode 100644 index 00000000..2fa3a843 --- /dev/null +++ b/tests/test_8800_sessionless_transaction_async.py @@ -0,0 +1,683 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# test_8800_sessionless_transaction_async.py +# +# Tests for async sessionless transactions using both client API and +# server-side procedures with the DBMS_TRANSACTION package. 
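+#
+# The tests exercise explicit suspend/resume calls, the suspend_on_success
+# option of execute() and executemany(), transaction timeouts, and error
+# conditions such as resuming a transaction that is still active in another
+# session.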
+# ----------------------------------------------------------------------------- + +import test_env + + +@test_env.skip_unless_thin_mode() +@test_env.skip_unless_sessionless_transactions_supported() +class TestCase(test_env.BaseAsyncTestCase): + + transaction_id_client = b"test_8800_client" + transaction_id_server = b"test_8800_server" + + def _get_server_start_stmt(self, mode): + "Generate server-side transaction start statement" + return f""" + DECLARE + transaction_id RAW(128); + BEGIN + transaction_id := DBMS_TRANSACTION.START_TRANSACTION( + :transaction_id, + DBMS_TRANSACTION.TRANSACTION_TYPE_SESSIONLESS, + :timeout, + DBMS_TRANSACTION.TRANSACTION_{mode} + ); + END;""" + + async def test_8800(self): + "8800 - test sessionless transaction using client API" + await self.cursor.execute("truncate table TestTempTable") + + # create sessionless transaction in one connection + async with test_env.get_connection_async() as conn: + + cursor = conn.cursor() + + # start sessionless transaction + await conn.begin_sessionless_transaction( + transaction_id=self.transaction_id_client, + timeout=15, + defer_round_trip=True, + ) + + # insert data within transaction + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "row1"), + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) + + # suspend the sessionless transaction + await conn.suspend_sessionless_transaction() + + # ensure data is not visible outside transaction + await cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + self.assertEqual(await cursor.fetchall(), []) + + # resume the transaction in another connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + + await conn.resume_sessionless_transaction( + transaction_id=self.transaction_id_client, + timeout=5, + defer_round_trip=True, + ) + + # suspend using suspend_on_success flag with executemany + await cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [(3, "row3")], + suspend_on_success=True, + ) + + # ensure data is not visible as the transaction is suspended + await cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + self.assertEqual(await cursor.fetchall(), []) + + # resume the transaction and commit the changes + await conn.resume_sessionless_transaction( + transaction_id=self.transaction_id_client + ) + await conn.commit() + + # verify data after commit + await cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + self.assertEqual(len(await cursor.fetchall()), 3) + + async def test_8801(self): + "8801 - test sessionless transaction using server-side procedures" + await self.cursor.execute("truncate table TestTempTable") + + # create sessionless transaction in one connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + self._get_server_start_stmt("NEW"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + + # insert data within transaction + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "row1"), + ) + + # Suspend on server + await cursor.callproc("dbms_transaction.suspend_transaction") + + # verify data is not visible after suspend + await cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + self.assertEqual(await cursor.fetchall(), []) + + # resume the 
transaction in another connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + self._get_server_start_stmt("RESUME"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) + await conn.commit() + + # verify data after commit in original connection + await self.cursor.execute( + "SELECT IntCol, StringCol1 FROM TestTempTable" + ) + self.assertEqual(len(await self.cursor.fetchall()), 2) + + async def test_8802(self): + "8802 - test error conditions with server API sessionless transactions" + await self.cursor.execute("truncate table TestTempTable") + + # start a transaction via the server; verify that suspension via the + # client fails but suspension via the server succeeds + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + self._get_server_start_stmt("NEW"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "server_row"), + ) + with self.assertRaisesFullCode("DPY-3034"): + await conn.suspend_sessionless_transaction() + await cursor.callproc("dbms_transaction.suspend_transaction") + + # resume on a second connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + self._get_server_start_stmt("RESUME"), + {"transaction_id": self.transaction_id_server, "timeout": 5}, + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "server_row2"), + ) + + # resuming on a different session should fail + async with test_env.get_connection_async() as other_conn: + other_cursor = other_conn.cursor() + with self.assertRaisesFullCode("ORA-25351"): + await other_cursor.execute( + self._get_server_start_stmt("RESUME"), + { + "transaction_id": self.transaction_id_server, + "timeout": 2, + }, + ) + + async def test_8803(self): + "8803 - test rollback of sessionless transaction" + await self.cursor.execute("truncate table TestTempTable") + + # start and work with sessionless transaction + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"rollback_test", timeout=15 + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "rollback_row"), + suspend_on_success=True, + ) + + # resume in new connection and rollback + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction( + transaction_id=b"rollback_test", timeout=5 + ) + await conn.rollback() + await cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + self.assertEqual(await cursor.fetchall(), []) + + async def test_8804(self): + "8804 - test multiple operations within same sessionless transaction" + await self.cursor.execute("truncate table TestTempTable") + + # start transaction and perform multiple operations + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=15 + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "original"), + ) + await cursor.execute( + """ + insert into 
TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "second"), + ) + await cursor.execute( + """ + update TestTempTable set StringCol1 = :v1 where IntCol = 1 + """, + v1="updated", + ) + await cursor.execute("delete from TestTempTable where IntCol = 2") + await conn.suspend_sessionless_transaction() + await cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + self.assertEqual(await cursor.fetchall(), []) + + # resume and commit + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=5 + ) + await conn.commit() + await cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + self.assertEqual(await cursor.fetchall(), [(1, "updated")]) + + async def test_8805(self): + "8805 - test concurrent sessionless transactions" + await self.cursor.execute("truncate table TestTempTable") + + # start first sessionless transaction + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"concurrent_1", timeout=15 + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "concurrent_1"), + suspend_on_success=True, + ) + + # start second sessionless transaction in another connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"concurrent_2", timeout=15 + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "concurrent_2"), + suspend_on_success=True, + ) + + # resume and commit both transactions + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction( + transaction_id=b"concurrent_1" + ) + await conn.commit() + await conn.resume_sessionless_transaction( + transaction_id=b"concurrent_2" + ) + await conn.commit() + + # verify data from both transactions is present + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + expected_data = [(1, "concurrent_1"), (2, "concurrent_2")] + self.assertEqual(await cursor.fetchall(), expected_data) + + async def test_8806(self): + "8806 - test sessionless transaction with large data" + await self.cursor.execute("delete from TestAllTypes") + await self.conn.commit() + + # start sessionless transaction and insert large data + large_string = "X" * 250_000 + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + transaction_id = await conn.begin_sessionless_transaction() + await cursor.execute( + """ + insert into TestAllTypes (IntValue, ClobValue) + values (:1, :2) + """, + (1, large_string), + suspend_on_success=True, + ) + + # resume transaction and commit + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await conn.commit() + with test_env.DefaultsContextManager("fetch_lobs", False): + await cursor.execute("select ClobValue from TestAllTypes") + (result,) = await cursor.fetchone() + self.assertEqual(result, large_string) + + async def test_8807(self): + "8807 - test sessionless transaction with multiple suspends/resumes" + await self.cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [ + (1, "first_insert"), + (2, 
"second_insert"), + (3, "third_insert"), + ] + + # start sessionless transaction and suspend + transaction_id = await self.conn.begin_sessionless_transaction() + await self.cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume and insert second row + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) + + # resume and insert third row, then commit + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[2], + ) + await conn.commit() + + # verify all data is present + await self.cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + self.assertEqual(await self.cursor.fetchall(), data) + + async def test_8808(self): + "8808 - Test sessionless transaction with invalid resume attempts" + await self.cursor.execute("truncate table TestTempTable") + + # start a sessionless transaction + transaction_id = await self.conn.begin_sessionless_transaction() + + # try to resume with the wrong transaction id + with self.assertRaisesFullCode("DPY-3035"): + await self.conn.resume_sessionless_transaction("wrong_id") + + # try to resume before suspend + with self.assertRaisesFullCode("DPY-3035"): + await self.conn.resume_sessionless_transaction(transaction_id) + + # suspend and resume correctly + await self.conn.suspend_sessionless_transaction() + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction(transaction_id) + + async def test_8809(self): + "8809 - test getting transaction ID of active sessionless transaction" + transaction_id = await self.conn.begin_sessionless_transaction() + await self.cursor.execute( + "select dbms_transaction.get_transaction_id()" + ) + (server_transaction_id,) = await self.cursor.fetchone() + self.assertEqual(server_transaction_id, transaction_id.hex().upper()) + await self.conn.commit() + + async def test_8810(self): + "8810 - test auto-generated transaction ID uniqueness" + + # start first transaction + transaction_id_1 = await self.conn.begin_sessionless_transaction() + await self.conn.suspend_sessionless_transaction() + + # start second transaction + async with test_env.get_connection_async() as conn: + transaction_id_2 = await conn.begin_sessionless_transaction() + await conn.suspend_sessionless_transaction() + self.assertNotEqual(transaction_id_1, transaction_id_2) + await conn.resume_sessionless_transaction(transaction_id_2) + await conn.rollback() + + # cleanup + await self.conn.resume_sessionless_transaction(transaction_id_1) + await self.conn.rollback() + + async def test_8811(self): + "8811 - test sessionless transactions with connection pool" + await self.cursor.execute("truncate table TestTempTable") + + # initialization + data = [(1, "value 1"), (2, "value 2")] + pool = test_env.get_pool_async(min=2, max=5) + + # start transaction on first connection + async with pool.acquire() as conn: + cursor = conn.cursor() + transaction_id = await conn.begin_sessionless_transaction() + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) 
+ """, + data[0], + suspend_on_success=True, + ) + + # resume on second connection + async with pool.acquire() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + ) + await conn.commit() + + # verify data + async with pool.acquire() as conn: + cursor = conn.cursor() + await cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + self.assertEqual(await cursor.fetchall(), data) + + await pool.close() + + async def test_8812(self): + "8812 - Test sessionless transaction with special transaction ids" + await self.cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [(1, "long_transaction_id"), (2, "special_chars")] + + # test with long transaction id + long_transaction_id = b"X" * 64 + await self.conn.begin_sessionless_transaction(long_transaction_id) + await self.cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume and commit in different connection + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction(long_transaction_id) + await conn.commit() + + # test with special characters in transaction id + special_transaction_id = b"SPECIAL@#$%^&*()_+" + await self.conn.begin_sessionless_transaction(special_transaction_id) + await self.cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) + + # resume and commit in different connection + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction(special_transaction_id) + await conn.commit() + + # verify both transactions committed + await self.cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + self.assertEqual(await self.cursor.fetchall(), data) + + async def test_8813(self): + "8813 - duplicate transaction id across different connections" + transaction_id = "test_8813_transaction_id" + await self.conn.begin_sessionless_transaction(transaction_id) + async with test_env.get_connection_async() as conn: + with self.assertRaisesFullCode("ORA-26217"): + await conn.begin_sessionless_transaction(transaction_id) + + async def test_8814(self): + "8814 - zero timeout behaviour in resume" + transaction_id = await self.conn.begin_sessionless_transaction() + async with test_env.get_connection_async() as conn: + with self.assertRaisesFullCode("ORA-25351"): + await conn.resume_sessionless_transaction( + transaction_id, timeout=0 + ) + + # suspend transaction on first session, and resume will now succeed + await self.conn.suspend_sessionless_transaction() + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction( + transaction_id, timeout=0 + ) + await conn.rollback() + + async def test_8815(self): + "8815 - transaction behaviour with DDL operations" + + # create temp table + temp_table_name = "temp_test_8815" + await self.cursor.execute(f"drop table if exists {temp_table_name}") + await self.cursor.execute( + f""" + create table {temp_table_name} ( + id number, + data varchar2(50) + )""" + ) + + # beging sessionless transaction and perform DDL which performs an + # implicit commit + await self.conn.begin_sessionless_transaction() + await self.cursor.execute( + f"alter table 
{temp_table_name} add temp_col varchar2(20)" + ) + + # further DML operations are part of a local transaction + local_data = (1, "LOCAL_TRANSACTION", "abc") + await self.cursor.execute( + f"insert into {temp_table_name} values (:1, :2, :3)", + local_data, + ) + + # suspend will fail now as a local transaction is active and only + # sessionless transactions are suspendable + with self.assertRaisesFullCode("DPY-3036"): + await self.cursor.execute( + f""" + insert into {temp_table_name} + values (2, 'LOCAL_TRANSACTION', 'def') + """, + suspend_on_success=True, + ) + + # verify data from local transaction is all that is present + await self.cursor.execute(f"select * from {temp_table_name}") + self.assertEqual(await self.cursor.fetchall(), [local_data]) + + # drop temp table + await self.cursor.execute(f"drop table {temp_table_name} purge") + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_env.py b/tests/test_env.py index 9ccda834..e0ab89d5 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -550,6 +550,13 @@ def skip_unless_pool_timed_wait_supported(): return unittest.skipUnless(supported, "no pool timed wait support") +def skip_unless_sessionless_transactions_supported(): + return unittest.skipUnless( + has_client_version(23, 6) and has_server_version(23, 6), + "no sessionless transactions support", + ) + + def skip_unless_sparse_vectors_supported(): supported = has_client_version(23, 7) and has_server_version(23, 7) return unittest.skipUnless(supported, "no sparse vector support") diff --git a/utils/templates/connection.py b/utils/templates/connection.py index f4818c96..80deeb74 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -40,7 +40,7 @@ from . import __name__ as MODULE_NAME -from . import base_impl, constants, driver_mode, errors, thick_impl, thin_impl +from . import base_impl, driver_mode, errors, thick_impl, thin_impl from . import pool as pool_module from .aq import AsyncQueue, Queue, MessageProperties from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType @@ -51,6 +51,7 @@ from .pipeline import Pipeline from .soda import SodaDatabase from .subscr import Subscription +from .utils import normalize_sessionless_transaction_id # named tuple used for representing global transactions Xid = collections.namedtuple( @@ -119,6 +120,38 @@ def autocommit(self, value: bool) -> None: self._verify_connected() self._impl.autocommit = value + def begin_sessionless_transaction( + self, + transaction_id: Optional[Union[str, bytes]] = None, + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Begins a new sessionless transaction. + + Parameters: + transaction_id (str or bytes, optional): A Transaction Identifier. + If None, a random transaction_id will be generated. + timeout (int, optional): Timeout value in seconds. + Must be a positive integer. Defaults to 60 if not provided. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. + + Returns: + bytes: The normalized transaction_id used for the transaction. 
+ """ + self._verify_connected() + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not isinstance(timeout, int) or timeout <= 0: + raise TypeError("timeout must be a positive integer") + + self._impl.begin_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + @property def call_timeout(self) -> int: """ @@ -472,6 +505,44 @@ def outputtypehandler(self, value: Callable) -> None: self._verify_connected() self._impl.outputtypehandler = value + def resume_sessionless_transaction( + self, + transaction_id: Union[str, bytes], + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Resumes an existing sessionless transaction using the given + transaction_id. + + Parameters: + transaction_id (str or bytes): A Transaction Identifier that + uniquely identifies the sessionless transaction to be + resumed. This parameter is mandatory. + timeout (int, optional): Timeout in seconds for the resumed + transaction. Must be a positive integer. Defaults to 60. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. + + Returns: + bytes: The normalized transaction_id used to resume the + sessionless transaction. + """ + self._verify_connected() + if transaction_id is None: + raise ValueError("transaction_id is required for resuming") + + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not (isinstance(timeout, int) and timeout >= 0): + raise TypeError("timeout must be a non-negative integer") + + self._impl.resume_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + @property def sdu(self) -> int: """ @@ -934,24 +1005,24 @@ def startup( def subscribe( self, - namespace: int = constants.SUBSCR_NAMESPACE_DBCHANGE, - protocol: int = constants.SUBSCR_PROTO_CALLBACK, + namespace: int = oracledb.SUBSCR_NAMESPACE_DBCHANGE, + protocol: int = oracledb.SUBSCR_PROTO_CALLBACK, callback: Optional[Callable] = None, timeout: int = 0, - operations: int = constants.OPCODE_ALLOPS, + operations: int = oracledb.OPCODE_ALLOPS, port: int = 0, - qos: int = constants.SUBSCR_QOS_DEFAULT, + qos: int = oracledb.SUBSCR_QOS_DEFAULT, ip_address: Optional[str] = None, - grouping_class: int = constants.SUBSCR_GROUPING_CLASS_NONE, + grouping_class: int = oracledb.SUBSCR_GROUPING_CLASS_NONE, grouping_value: int = 0, - grouping_type: int = constants.SUBSCR_GROUPING_TYPE_SUMMARY, + grouping_type: int = oracledb.SUBSCR_GROUPING_TYPE_SUMMARY, name: Optional[str] = None, client_initiated: bool = False, *, ipAddress: Optional[str] = None, - groupingClass: int = constants.SUBSCR_GROUPING_CLASS_NONE, + groupingClass: int = oracledb.SUBSCR_GROUPING_CLASS_NONE, groupingValue: int = 0, - groupingType: int = constants.SUBSCR_GROUPING_TYPE_SUMMARY, + groupingType: int = oracledb.SUBSCR_GROUPING_TYPE_SUMMARY, clientInitiated: bool = False, ) -> Subscription: """ @@ -1025,8 +1096,8 @@ def subscribe( new_name="ip_address", ) ip_address = ipAddress - if groupingClass != constants.SUBSCR_GROUPING_CLASS_NONE: - if grouping_class != constants.SUBSCR_GROUPING_CLASS_NONE: + if groupingClass != oracledb.SUBSCR_GROUPING_CLASS_NONE: + if grouping_class != oracledb.SUBSCR_GROUPING_CLASS_NONE: errors._raise_err( errors.ERR_DUPLICATED_PARAMETER, deprecated_name="groupingClass", @@ -1041,8 +1112,8 @@ def subscribe( new_name="grouping_value", ) grouping_value = groupingValue - if groupingType != 
constants.SUBSCR_GROUPING_TYPE_SUMMARY: - if grouping_type != constants.SUBSCR_GROUPING_TYPE_SUMMARY: + if groupingType != oracledb.SUBSCR_GROUPING_TYPE_SUMMARY: + if grouping_type != oracledb.SUBSCR_GROUPING_TYPE_SUMMARY: errors._raise_err( errors.ERR_DUPLICATED_PARAMETER, deprecated_name="groupingType", @@ -1077,6 +1148,19 @@ def subscribe( impl.subscribe(subscr, self._impl) return subscr + def suspend_sessionless_transaction(self) -> None: + """ + Suspends the currently active sessionless transaction. + + This temporarily detaches the transaction from the session, + allowing it to be resumed later using its transaction_id. + + Returns: + None + """ + self._verify_connected() + self._impl.suspend_sessionless_transaction() + @property def tag(self) -> str: """ @@ -1100,7 +1184,7 @@ def tag(self, value: str) -> None: self._impl.tag = value def tpc_begin( - self, xid: Xid, flags: int = constants.TPC_BEGIN_NEW, timeout: int = 0 + self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ Begins a TPC (two-phase commit) transaction with the given transaction @@ -1110,10 +1194,10 @@ def tpc_begin( self._verify_connected() self._verify_xid(xid) if flags not in ( - constants.TPC_BEGIN_NEW, - constants.TPC_BEGIN_JOIN, - constants.TPC_BEGIN_RESUME, - constants.TPC_BEGIN_PROMOTE, + oracledb.TPC_BEGIN_NEW, + oracledb.TPC_BEGIN_JOIN, + oracledb.TPC_BEGIN_RESUME, + oracledb.TPC_BEGIN_PROMOTE, ): errors._raise_err(errors.ERR_INVALID_TPC_BEGIN_FLAGS) self._impl.tpc_begin(xid, flags, timeout) @@ -1141,7 +1225,7 @@ def tpc_commit( self._impl.tpc_commit(xid, one_phase) def tpc_end( - self, xid: Optional[Xid] = None, flags: int = constants.TPC_END_NORMAL + self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ Ends (detaches from) a TPC (two-phase commit) transaction. @@ -1149,7 +1233,7 @@ def tpc_end( self._verify_connected() if xid is not None: self._verify_xid(xid) - if flags not in (constants.TPC_END_NORMAL, constants.TPC_END_SUSPEND): + if flags not in (oracledb.TPC_END_NORMAL, oracledb.TPC_END_SUSPEND): errors._raise_err(errors.ERR_INVALID_TPC_END_FLAGS) self._impl.tpc_end(xid, flags) @@ -1447,6 +1531,38 @@ def _verify_can_execute( errors._raise_err(errors.ERR_WRONG_EXECUTE_PARAMETERS_TYPE) return parameters + async def begin_sessionless_transaction( + self, + transaction_id: Optional[Union[str, bytes]] = None, + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Begins a new sessionless transaction. + + Parameters: + transaction_id (str or bytes, optional): A Transaction Identifier. + If None, a random transaction_id will be generated. + timeout (int, optional): Timeout value in seconds. + Must be a positive integer. Defaults to 60 if not provided. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. + + Returns: + bytes: The normalized transaction_id used for the transaction. 
+ """ + self._verify_connected() + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not isinstance(timeout, int) or timeout <= 0: + raise TypeError("timeout must be a positive integer") + + await self._impl.begin_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + async def callfunc( self, name: str, @@ -1668,6 +1784,44 @@ async def ping(self) -> None: self._verify_connected() await self._impl.ping() + async def resume_sessionless_transaction( + self, + transaction_id: Union[str, bytes], + timeout: int = 60, + defer_round_trip: bool = False, + ) -> bytes: + """ + Resumes an existing sessionless transaction using the given + transaction_id. + + Parameters: + transaction_id (str or bytes): A Transaction Identifier that + uniquely identifies the sessionless transaction to be + resumed. This parameter is mandatory. + timeout (int, optional): Timeout in seconds for the resumed + transaction. Must be a positive integer. Defaults to 60. + defer_round_trip (bool, optional): + If True, the request is not sent immediately but included + with the next database operation. + + Returns: + bytes: The normalized transaction_id used to resume the + sessionless transaction. + """ + self._verify_connected() + if transaction_id is None: + raise ValueError("transaction_id is required for resuming") + + normalized_txnid = normalize_sessionless_transaction_id(transaction_id) + + if not (isinstance(timeout, int) and timeout >= 0): + raise TypeError("timeout must be a non-negative integer") + + await self._impl.resume_sessionless_transaction( + normalized_txnid, timeout, defer_round_trip + ) + return normalized_txnid + async def rollback(self) -> None: """ Rolls back any pending transaction. @@ -1704,8 +1858,21 @@ async def run_pipeline( ) return results + async def suspend_sessionless_transaction(self) -> None: + """ + Suspends the currently active sessionless transaction. + + This temporarily detaches the transaction from the session, + allowing it to be resumed later using its transaction_id. + + Returns: + None + """ + self._verify_connected() + await self._impl.suspend_sessionless_transaction() + async def tpc_begin( - self, xid: Xid, flags: int = constants.TPC_BEGIN_NEW, timeout: int = 0 + self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ Begins a TPC (two-phase commit) transaction with the given transaction @@ -1715,10 +1882,10 @@ async def tpc_begin( self._verify_connected() self._verify_xid(xid) if flags not in ( - constants.TPC_BEGIN_NEW, - constants.TPC_BEGIN_JOIN, - constants.TPC_BEGIN_RESUME, - constants.TPC_BEGIN_PROMOTE, + oracledb.TPC_BEGIN_NEW, + oracledb.TPC_BEGIN_JOIN, + oracledb.TPC_BEGIN_RESUME, + oracledb.TPC_BEGIN_PROMOTE, ): errors._raise_err(errors.ERR_INVALID_TPC_BEGIN_FLAGS) await self._impl.tpc_begin(xid, flags, timeout) @@ -1746,7 +1913,7 @@ async def tpc_commit( await self._impl.tpc_commit(xid, one_phase) async def tpc_end( - self, xid: Optional[Xid] = None, flags: int = constants.TPC_END_NORMAL + self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ Ends (detaches from) a TPC (two-phase commit) transaction. 
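Taken together, the sessionless transaction methods added to this template
support handing a transaction off between connections: begin it on one
connection, suspend it (explicitly or via the ``suspend_on_success`` execute
option used in the new tests), and resume it elsewhere with the returned
transaction id. A minimal illustrative sketch of the asyncio flow follows; it
is not part of the generated connection.py, and the connect parameters and the
MyTable table are placeholders::

    import asyncio

    import oracledb

    async def main():
        # begin a sessionless transaction and suspend it once the insert succeeds
        async with oracledb.connect_async(
            user="hr", password="hr_password", dsn="localhost/orclpdb1"
        ) as conn:
            transaction_id = await conn.begin_sessionless_transaction(timeout=30)
            cursor = conn.cursor()
            await cursor.execute(
                "insert into MyTable (IntCol) values (:1)",
                [1],
                suspend_on_success=True,
            )

        # resume the suspended transaction on another connection and commit it
        async with oracledb.connect_async(
            user="hr", password="hr_password", dsn="localhost/orclpdb1"
        ) as conn:
            await conn.resume_sessionless_transaction(transaction_id, timeout=5)
            await conn.commit()

    asyncio.run(main())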
@@ -1754,7 +1921,7 @@ async def tpc_end( self._verify_connected() if xid is not None: self._verify_xid(xid) - if flags not in (constants.TPC_END_NORMAL, constants.TPC_END_SUSPEND): + if flags not in (oracledb.TPC_END_NORMAL, oracledb.TPC_END_SUSPEND): errors._raise_err(errors.ERR_INVALID_TPC_END_FLAGS) await self._impl.tpc_end(xid, flags) From 0e779c37932a533a7a1c335dd1477b8beffdb357 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:53:11 -0600 Subject: [PATCH 146/239] Update container info. --- doc/src/user_guide/installation.rst | 32 +++++++++++++++++++++++++++++ samples/README.md | 5 +++-- samples/containers/README.md | 19 +++++++++++++++++ 3 files changed, 54 insertions(+), 2 deletions(-) create mode 100644 samples/containers/README.md diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 9325d1e7..9cbead34 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -1030,6 +1030,38 @@ Python versions. python -m pip install oracledb-3.1.0-cp312-cp312-macosx_10_13_universal2.whl +.. _docker: + +Using python-oracledb Containers +================================ + +Dockerfiles showing installation of Python and python-oracledb on Oracle Linux +are available from +`github.com/oracle/docker-images/tree/main/OracleLinuxDevelopers +`__. + +Containers built from these Dockerfiles can be pulled from the GitHub Container +Registry: + +- `oraclelinux9-python `__ +- `oraclelinux8-python `__ + +For example, you can pull a container for Python 3.12 on Oracle Linux 9 using:: + + docker pull ghcr.io/oracle/oraclelinux9-python:3.12-oracledb + +Or use it in a Dockerfile like:: + + FROM ghcr.io/oracle/oraclelinux9-python:3.12-oracledb + +**Sample Containers** + +There are two python-oracledb containers with samples located in +`/samples/containers +`__. + .. _configprovidermodules: Installing Centralized Configuration Provider Modules for python-oracledb diff --git a/samples/README.md b/samples/README.md index c387bc5a..ef0499f1 100644 --- a/samples/README.md +++ b/samples/README.md @@ -28,8 +28,9 @@ This directory contains samples for python-oracledb. ### Examples in a Container -The [sample_container](./sample_container) directory has a Dockerfile that will -build a container with the samples and a running Oracle Database. +The [containers](./containers) directory has Dockerfiles for building a +container with the samples and a running Oracle Database, and for creating a +development environment. ### Notebooks diff --git a/samples/containers/README.md b/samples/containers/README.md new file mode 100644 index 00000000..deda40ae --- /dev/null +++ b/samples/containers/README.md @@ -0,0 +1,19 @@ +# python-oracledb Containers + +## Samples + +This directory contains Dockerfiles for creating sample containers: + +- [Samples and Oracle Database](./samples_and_db) - a container with a running database, python-oracledb, and all the samples +- [Application development](./app_dev) - a container with python-oracledb and the Apache web server with WSGI. This is useful for creating your own applications + +## Container images for Linux Developers + +Additional Dockerfiles showing installation of Python and python-oracle on +Oracle Linux are available from +[github.com/oracle/docker-images/tree/main/OracleLinuxDevelopers](https://github.com/oracle/docker-images/tree/main/OracleLinuxDevelopers). 
+ +Containers can be pulled from the GitHub Container Registry: + +- [oraclelinux9-python](https://github.com/oracle/docker-images/pkgs/container/oraclelinux9-python) +- [oraclelinux8-python](https://github.com/oracle/docker-images/pkgs/container/oraclelinux8-python) From e1fc085c3496fb9fede8471b63de267041a9bb22 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:54:01 -0600 Subject: [PATCH 147/239] Simplify code by making use of new function available in nanoarrow 0.7.0. --- src/oracledb/impl/arrow/array.pyx | 2 +- src/oracledb/impl/arrow/utils.pyx | 2 +- src/oracledb/impl/base/converters.pyx | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 6fd3f385..149cbb5d 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -382,7 +382,7 @@ cdef class ArrowArrayImpl: ArrowDecimalSetBytes(&decimal, ptr + index * 16) ArrowBufferInit(&buf) try: - _check_nanoarrow(ArrowDecimalAppendDigitsToBuffer( + _check_nanoarrow(ArrowDecimalAppendStringToBuffer( &decimal, &buf )) return buf.data[:buf.size_bytes] diff --git a/src/oracledb/impl/arrow/utils.pyx b/src/oracledb/impl/arrow/utils.pyx index 1d5857b6..6f55da86 100644 --- a/src/oracledb/impl/arrow/utils.pyx +++ b/src/oracledb/impl/arrow/utils.pyx @@ -118,7 +118,7 @@ cdef extern from "nanoarrow.c": void *private_data) void ArrowBufferInit(ArrowBuffer* buffer) void ArrowBufferReset(ArrowBuffer* buffer) - ArrowErrorCode ArrowDecimalAppendDigitsToBuffer(const ArrowDecimal* decimal, + ArrowErrorCode ArrowDecimalAppendStringToBuffer(const ArrowDecimal* decimal, ArrowBuffer* buffer) void ArrowDecimalInit(ArrowDecimal* decimal, int32_t bitwidth, int32_t precision, int32_t scale) diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index fdd62f76..5f2b3a04 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -83,8 +83,6 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, elif arrow_type == NANOARROW_TYPE_DECIMAL128: temp_bytes = arrow_array.get_decimal(array_index, &data.is_null) if not data.is_null: - temp_bytes = temp_bytes[:-arrow_array.scale] + b"." + \ - temp_bytes[-arrow_array.scale:] convert_bytes_to_oracle_data(&data.buffer, temp_bytes) return temp_bytes elif arrow_type in (NANOARROW_TYPE_LIST, NANOARROW_TYPE_FIXED_SIZE_LIST): From 767be685b20a029323d97fa8b47d208ec3b08b7a Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:54:33 -0600 Subject: [PATCH 148/239] Simplify code in test suite. --- tests/test_8000_dataframe.py | 35 +++++++++++++++++++----------- tests/test_8100_dataframe_async.py | 32 +++++++++++++++++---------- 2 files changed, 43 insertions(+), 24 deletions(-) diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 7ecd57a7..6f75deb3 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -235,6 +235,21 @@ ), ] +QUERY_SQL = """ + select + Id, + FirstName, + LastName, + City, + Country, + DateOfBirth, + Salary, + CreditScore, + LastUpdated + from TestDataFrame + order by id +""" + class TestCase(test_env.BaseTestCase): @@ -341,7 +356,7 @@ def __populate_table(self, data): """ Populate the test table with the given data. 
""" - self.cursor.execute("truncate table TestDataframe") + self.cursor.execute("delete from TestDataframe") types = [None] * len(data[0]) types[8] = oracledb.DB_TYPE_TIMESTAMP self.cursor.setinputsizes(*types) @@ -366,8 +381,7 @@ def __test_df_interop(self, data): """ self.__check_interop() self.__populate_table(data) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) + ora_df = self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, data) def __test_df_batches_interop(self, data, batch_size, num_batches): @@ -377,8 +391,7 @@ def __test_df_batches_interop(self, data, batch_size, num_batches): """ self.__check_interop() self.__populate_table(data) - statement = "select * from TestDataFrame order by Id" - batches = list(self.conn.fetch_df_batches(statement, size=batch_size)) + batches = list(self.conn.fetch_df_batches(QUERY_SQL, size=batch_size)) self.assertEqual(len(batches), num_batches) if num_batches == 1: self.__validate_df(batches[0], data) @@ -402,8 +415,7 @@ def __validate_df(self, ora_df, data): def test_8000(self): "8000 - test basic fetch of data frame" self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) + ora_df = self.conn.fetch_df_all(QUERY_SQL) self.assertEqual(ora_df.num_rows(), len(DATASET_1)) self.assertEqual(ora_df.num_columns(), len(DATASET_1[0])) @@ -452,8 +464,7 @@ def test_8010(self): "8010 - verify passing Arrow arrays twice works" self.__check_interop() self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) + ora_df = self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, DATASET_1) self.__validate_df(ora_df, DATASET_1) @@ -474,8 +485,7 @@ def test_8012(self): def test_8013(self): "8013 - negative checks on attributes" self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) + ora_df = self.conn.fetch_df_all(QUERY_SQL) with self.assertRaises(IndexError): ora_df.get_column(121) with self.assertRaises(IndexError): @@ -499,8 +509,7 @@ def test_8016(self): "8016 - verify get_column() returns the correct value" self.__check_interop() self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) + ora_df = self.conn.fetch_df_all(QUERY_SQL) array = pyarrow.array(ora_df.get_column(1)) self.assertEqual(array.to_pylist(), ["John", "Big"]) diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 9679e83e..bc3070fd 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -235,6 +235,21 @@ ), ] +QUERY_SQL = """ + select + Id, + FirstName, + LastName, + City, + Country, + DateOfBirth, + Salary, + CreditScore, + LastUpdated + from TestDataFrame + order by id +""" + @test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): @@ -342,7 +357,7 @@ async def __populate_table(self, data): """ Populate the test table with the given data. 
""" - await self.cursor.execute("truncate table TestDataframe") + await self.cursor.execute("delete from TestDataframe") types = [None] * len(data[0]) types[8] = oracledb.DB_TYPE_TIMESTAMP self.cursor.setinputsizes(*types) @@ -367,8 +382,7 @@ async def __test_df_interop(self, data): """ self.__check_interop() await self.__populate_table(data) - statement = "select * from TestDataFrame order by Id" - ora_df = await self.conn.fetch_df_all(statement) + ora_df = await self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, data) async def __test_df_batches_interop(self, data, batch_size, num_batches): @@ -378,11 +392,10 @@ async def __test_df_batches_interop(self, data, batch_size, num_batches): """ self.__check_interop() await self.__populate_table(data) - statement = "select * from TestDataFrame order by Id" batches = [ df async for df in self.conn.fetch_df_batches( - statement, size=batch_size + QUERY_SQL, size=batch_size ) ] self.assertEqual(len(batches), num_batches) @@ -408,8 +421,7 @@ def __validate_df(self, ora_df, data): async def test_8100(self): "8100 - test basic fetch of data frame" await self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = await self.conn.fetch_df_all(statement) + ora_df = await self.conn.fetch_df_all(QUERY_SQL) self.assertEqual(ora_df.num_rows(), len(DATASET_1)) self.assertEqual(ora_df.num_columns(), len(DATASET_1[0])) @@ -460,8 +472,7 @@ async def test_8110(self): "8110 - verify passing Arrow arrays twice works" self.__check_interop() await self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = await self.conn.fetch_df_all(statement) + ora_df = await self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, DATASET_1) self.__validate_df(ora_df, DATASET_1) @@ -482,8 +493,7 @@ async def test_8112(self): async def test_8113(self): "8113 - negative checks on attributes" await self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame order by Id" - ora_df = await self.conn.fetch_df_all(statement) + ora_df = await self.conn.fetch_df_all(QUERY_SQL) with self.assertRaises(IndexError): ora_df.get_column(121) with self.assertRaises(IndexError): From f402975e23520d226716b1975b93206f688d484d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:54:57 -0600 Subject: [PATCH 149/239] Simplified code and fixed bug when fetching numeric data that has no decimal point but the Arrow array has scale > 0. --- doc/src/release_notes.rst | 2 + src/oracledb/impl/base/converters.pyx | 76 +++++++++++++-------------- 2 files changed, 39 insertions(+), 39 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 613c5356..51b7618e 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -60,6 +60,8 @@ Common Changes by the consumer. This avoids a segfault seen in some circumstances. - Fixed bug when deciding Arrow datatype for numeric expressions (`issue 510 `__) + - Fixed bug when fetching numeric data that has no decimal point but the + Arrow array has scale > 0 Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version. 
diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 5f2b3a04..c277c67a 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -159,50 +159,48 @@ cdef int convert_number_to_arrow_decimal(ArrowArrayImpl arrow_array, Converts a NUMBER value stored in the buffer to Arrow DECIMAL128. """ cdef: - char_type c - bint has_sign = 0 - char_type digits[39] # 38 digits + sign OracleNumber *value = &buffer.as_number - uint8_t num_chars = 0, decimal_point_index = 0, allowed_max_chars = 0 - int64_t actual_scale = 0 + uint8_t num_digits, allowed_max_chars + char_type digits[40] + uint8_t actual_scale + + # determine if the number can be represented as an Arrow decimal128 value + # only 38 decimal digits are permitted (excluding the sign and decimal + # point) + allowed_max_chars = 38 + if value.chars[0] == b'-': + allowed_max_chars += 1 + if not value.is_integer: + allowed_max_chars += 1 + if value.is_max_negative_value or value.num_chars > allowed_max_chars: + raise ValueError("Value cannot be represented as Arrow Decimal128") - if value.chars[0] == 45: # minus sign - has_sign = True + # integers can be handled directly + if value.is_integer and arrow_array.scale == 0: + return arrow_array.append_decimal(value.chars, value.num_chars) + # Arrow expects a string of digits without the decimal point; if the number + # does not contain at least the number of digits after the decimal point + # required by the scale of the Arrow array, zeros are appended if value.is_integer: - if has_sign: - allowed_max_chars = 39 - else: - allowed_max_chars = 38 - else: # decimal point - if has_sign: - allowed_max_chars = 40 - else: - allowed_max_chars = 39 - - # Arrow Decimal128 can only represent values with 38 decimal digits - if value.is_max_negative_value or value.num_chars > allowed_max_chars: - raise ValueError("Value cannot be represented as " - "Arrow Decimal128") - if value.is_integer: - arrow_array.append_decimal(value.chars, value.num_chars) + actual_scale = 0 + num_digits = value.num_chars else: - for i in range(value.num_chars): - c = value.chars[i] - # count all characters except the decimal point - if c != 46: - digits[num_chars] = c - num_chars += 1 - else: - decimal_point_index = i - - # Append any trailing zeros. - actual_scale = num_chars - decimal_point_index - for i in range(abs(arrow_array.scale) - actual_scale): - digits[num_chars] = b'0' - num_chars += 1 - arrow_array.append_decimal(digits, num_chars) - + actual_scale = 0 + while True: + num_digits = value.num_chars - actual_scale - 1 + if value.chars[num_digits] == b'.': + break + actual_scale += 1 + memcpy(digits, value.chars, num_digits) + if actual_scale > 0: + memcpy(&digits[num_digits], &value.chars[num_digits + 1], actual_scale) + num_digits += actual_scale + while actual_scale < arrow_array.scale: + digits[num_digits] = b'0' + num_digits += 1 + actual_scale += 1 + arrow_array.append_decimal(digits, num_digits) cdef int convert_number_to_arrow_double(ArrowArrayImpl arrow_array, From a6d99ca889959594fbdf59eeeccbf30cda617c87 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:55:27 -0600 Subject: [PATCH 150/239] Fixed bug when fetching dates that are in the year 2038 or later. 
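The idea behind the fix is easiest to see in plain Python (a sketch only, assuming the Arrow column stores microseconds since the Unix epoch): the raw value is split into days, seconds, and microseconds so that no single timedelta component has to exceed the range of a 32-bit C integer, which is what previously overflowed for dates from 2038 onwards.

```python
# Pure-Python sketch of the conversion strategy (not the Cython code itself).
import datetime

EPOCH = datetime.datetime(1970, 1, 1)

def timestamp_us_to_datetime(value: int) -> datetime.datetime:
    # split the microsecond count into days, seconds and microseconds
    seconds, useconds = divmod(value, 1_000_000)
    days, seconds = divmod(seconds, 24 * 60 * 60)
    return EPOCH + datetime.timedelta(
        days=days, seconds=seconds, microseconds=useconds
    )

# one second past the signed 32-bit rollover (2038-01-19 03:14:08 UTC)
print(timestamp_us_to_datetime(2_147_483_648 * 1_000_000))
```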
--- doc/src/release_notes.rst | 1 + src/oracledb/impl/base/converters.pyx | 8 +++++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 51b7618e..38b5fc14 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -62,6 +62,7 @@ Common Changes (`issue 510 `__) - Fixed bug when fetching numeric data that has no decimal point but the Arrow array has scale > 0 + - Fixed bug when fetching dates that are in the year 2038 or later Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version. diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index c277c67a..1a9cdeee 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -37,10 +37,9 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, Converts the value stored in Arrow format to an OracleData structure. """ cdef: + int64_t int64_value, days, seconds, useconds SparseVectorImpl sparse_impl - int seconds, useconds ArrowType arrow_type - int64_t int64_value OracleRawBytes* rb tuple sparse_info bytes temp_bytes @@ -75,11 +74,14 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, if not data.is_null: seconds = int64_value // arrow_array.time_factor useconds = int64_value % arrow_array.time_factor + days = seconds // (24 * 60 * 60) + seconds = seconds % (24 * 60 * 60) if arrow_array.time_factor == 1_000: useconds *= 1_000 elif arrow_array.time_factor == 1_000_000_000: useconds //= 1_000 - return EPOCH_DATE + cydatetime.timedelta_new(0, seconds, useconds) + return EPOCH_DATE + \ + cydatetime.timedelta_new(days, seconds, useconds) elif arrow_type == NANOARROW_TYPE_DECIMAL128: temp_bytes = arrow_array.get_decimal(array_index, &data.is_null) if not data.is_null: From d11d1faf20f6b6fa11838f7de75ca2224aea1143 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:56:39 -0600 Subject: [PATCH 151/239] Added support for ingestion of vectors in thick mode. --- src/oracledb/impl/thick/var.pyx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 39823a3b..16d09fc3 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -412,6 +412,9 @@ cdef class ThickVarImpl(BaseVarImpl): timestamp.second = cydatetime.PyDateTime_DATE_GET_SECOND(value) timestamp.fsecond = \ cydatetime.PyDateTime_DATE_GET_MICROSECOND(value) * 1000 + elif ora_type_num == DPI_ORACLE_TYPE_VECTOR: + _convert_from_python(value, self.metadata, &data.value, + None) cdef int _transform_element_to_arrow(self, uint32_t pos): """ From aed092f57cb50cdc43ab9b5b8653f1c49db6d75b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:57:11 -0600 Subject: [PATCH 152/239] Added tests for ingestion of data frames. 
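In outline, the feature these tests exercise looks like this from application code (a sketch only: the connection details are placeholders and the table comes from the test schema; the behaviour under test is ``executemany()`` accepting a pyarrow Table directly):

```python
# Sketch of DataFrame ingestion: executemany() is passed a pyarrow Table.
import oracledb
import pyarrow

df = pyarrow.table(
    [
        pyarrow.array([1, 2, 3], pyarrow.int64()),
        pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()),
    ],
    names=["Id", "FirstName"],
)

with oracledb.connect(user="hr", password="hr_pw", dsn="localhost/orclpdb1") as conn:
    with conn.cursor() as cursor:
        cursor.executemany(
            "insert into TestDataFrame (Id, FirstName) values (:1, :2)", df
        )
    conn.commit()
```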
--- tests/sql/create_schema.sql | 8 +- tests/test_8900_dataframe_ingestion.py | 871 ++++++++++++++++++ tests/test_9000_dataframe_ingestion_async.py | 872 +++++++++++++++++++ 3 files changed, 1750 insertions(+), 1 deletion(-) create mode 100644 tests/test_8900_dataframe_ingestion.py create mode 100644 tests/test_9000_dataframe_ingestion_async.py diff --git a/tests/sql/create_schema.sql b/tests/sql/create_schema.sql index dd90ab74..fe6d1ce0 100644 --- a/tests/sql/create_schema.sql +++ b/tests/sql/create_schema.sql @@ -388,7 +388,13 @@ create table &main_user..TestDataframe ( DateOfBirth date, Salary number(9, 2), CreditScore number(3, 0), - LastUpdated timestamp + LastUpdated timestamp, + DecimalData number(15, 4), + FloatData binary_float, + DoubleData binary_double, + RawData raw(100), + LongData clob, + LongRawData blob ) / diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py new file mode 100644 index 00000000..1ad9a52e --- /dev/null +++ b/tests/test_8900_dataframe_ingestion.py @@ -0,0 +1,871 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +""" +Module for testing DataFrame ingestion +""" + +import datetime +import decimal +import unittest + +try: + import pyarrow + + HAS_INTEROP = True +except ImportError: + HAS_INTEROP = False + +import test_env + +SPARSE_VECTOR_FIELDS_FLOAT32 = [ + ("num_dimensions", pyarrow.int64()), + ("indices", pyarrow.list_(pyarrow.uint32())), + ("values", pyarrow.list_(pyarrow.float32())), +] + +SPARSE_VECTOR_FIELDS_FLOAT64 = [ + ("num_dimensions", pyarrow.int64()), + ("indices", pyarrow.list_(pyarrow.uint32())), + ("values", pyarrow.list_(pyarrow.float64())), +] + +SPARSE_VECTOR_FIELDS_INT8 = [ + ("num_dimensions", pyarrow.int64()), + ("indices", pyarrow.list_(pyarrow.uint32())), + ("values", pyarrow.list_(pyarrow.int8())), +] + + +@unittest.skipUnless(HAS_INTEROP, "missing pyarrow module") +class TestCase(test_env.BaseTestCase): + + def test_8900(self): + "8900 - test basic ingestion of data frame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + datetime.datetime(2021, 2, 2), + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", "DateOfBirth"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8901(self): + "8901 - test ingestion with null values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", None, "Bob"], pyarrow.string()), + pyarrow.array([None, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + None, + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", "DateOfBirth"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8902(self): + "8902 - test ingestion with single column" + arrays = [pyarrow.array([1, 2, 3], pyarrow.int64())] + names = ["Id"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + "insert into TestDataFrame (Id) values (:1)", df + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select Id as "Id" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8903(self): + "8903 - test ingestion with large data types" + long_str = "X" * 32_768 + long_raw = b"Y" * 32_768 + arrays = [ + pyarrow.array([1], pyarrow.int64()), + 
pyarrow.array([long_str], pyarrow.large_string()), + pyarrow.array([long_raw], pyarrow.large_binary()), + ] + names = ["Id", "LongData", "LongRawData"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8904(self): + "8904 - test ingestion with decimal values" + arrays = [ + pyarrow.array( + [ + decimal.Decimal("1"), + decimal.Decimal("2"), + decimal.Decimal("3"), + ], + pyarrow.decimal128(9, 0), + ), + pyarrow.array( + [ + decimal.Decimal("1234567890.1234"), + decimal.Decimal("-9876543210.9876"), + decimal.Decimal("0.0001"), + ], + pyarrow.decimal128(15, 4), + ), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame + (Id, DecimalData) + values (:1, :2) + """, + df, + ) + self.conn.commit() + with test_env.DefaultsContextManager("fetch_decimals", True): + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + @test_env.skip_unless_native_boolean_supported() + def test_8905(self): + "8905 - test ingestion with boolean values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array([True, False, True], pyarrow.bool_()), + pyarrow.array([False, True, None], pyarrow.bool_()), + ] + names = ["IntCol", "BooleanCol1", "BooleanCol2"] + df = pyarrow.table(arrays, names) + self.cursor.execute("truncate table TestBooleans") + self.cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + IntCol as "IntCol", + BooleanCol1 as "BooleanCol1", + BooleanCol2 as "BooleanCol2" + from TestBooleans + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8906(self): + "8906 - test ingestion with timestamp values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1, 0, 0, 0), + datetime.datetime(2021, 2, 2, 12, 34, 56), + datetime.datetime(2022, 3, 3, 23, 59, 59), + ], + pyarrow.timestamp("us"), + ), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame + (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8907(self): + "8907 - test ingestion with mismatched column count" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + ] + names = ["ID", "NAME"] + df = pyarrow.table(arrays, names) + with self.assertRaisesFullCode("DPY-4009", "ORA-01008"): + self.cursor.executemany( + """ + insert into 
TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + + def test_8908(self): + "8908 - test ingestion with invalid data type" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [["a", "b"], ["c"], ["d", "e", "f"]], + pyarrow.list_(pyarrow.string()), + ), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with self.assertRaisesFullCode("DPY-3033"): + self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + + def test_8909(self): + "8909 - test execute() with DataFrame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Sue"], pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with self.assertRaisesFullCode("DPY-2003"): + self.cursor.execute( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + + def test_8910(self): + "8910 - test consecutive executemany() calls with same dataframe" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + for i in range(3): + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8911(self): + "8911 - test nulls/None for all datatypes" + arrays = [ + pyarrow.array([1], pyarrow.int64()), + pyarrow.array([None], pyarrow.float32()), + pyarrow.array([None], pyarrow.float64()), + pyarrow.array([None], pyarrow.string()), + pyarrow.array([None], pyarrow.timestamp("s")), + pyarrow.array([None], pyarrow.binary()), + ] + names = [ + "Id", + "FloatData", + "DoubleData", + "FirstName", + "DateOfBirth", + "RawData", + ] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + bind_names = ",".join(names) + bind_values = ",".join(f":{i + 1}" for i in range(len(names))) + self.cursor.executemany( + f""" + insert into TestDataFrame ({bind_names}) + values ({bind_values}) + """, + df, + ) + self.conn.commit() + query_values = ",".join(f'{name} as "{name}"' for name in names) + odf = self.conn.fetch_df_all( + f""" + select {query_values} + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8912(self): + "8912 - test LOB sizes around 32K boundary using DataFrame ingestion" + test_sizes = [32766, 32767, 32768, 32769, 32770] + arrays = [ + pyarrow.array(range(1, len(test_sizes) + 1), pyarrow.int64()), + pyarrow.array( + ["X" * s for s in test_sizes], pyarrow.large_string() + ), + pyarrow.array( + [b"Y" * s for s in test_sizes], pyarrow.large_binary() + ), + ] + names = ["Id", "LongData", "LongRawData"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + 
from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8913(self): + "8913 - test ingestion with mixed characters using DataFrame" + if test_env.get_charset() != "AL32UTF8": + self.skipTest("Database character set must be AL32UTF8") + + test_data = [ + "ASCII: Hello World", # Pure ASCII + "Latin: café España", # Latin-1 Supplement + "Cyrillic: русский текст", # Actual Cyrillic + "Chinese: 中文测试", # Actual Chinese + "Emoji: 👍😊❤️", # Emojis + "Special: ~!@#$%^&*()_+{}|:\"<>?`-=[]\\;',./", # ASCII symbols + "Mixed: 你好, world! café? 123@# русский 👍", # Mixed characters + ] + arrays = [ + pyarrow.array(range(1, len(test_data) + 1), pyarrow.int64()), + pyarrow.array(test_data, pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8914(self): + "8914 - test various numeric values" + test_data = [ + decimal.Decimal(0), + decimal.Decimal(1), + decimal.Decimal(-1), + decimal.Decimal("99999999999.9999"), + decimal.Decimal("-99999999999.9999"), + decimal.Decimal("10000000000.0001"), + decimal.Decimal("-10000000000.0001"), + decimal.Decimal(".0001"), + decimal.Decimal("-.0001"), + decimal.Decimal(".9"), + decimal.Decimal("-.9"), + decimal.Decimal(".09"), + decimal.Decimal("-.09"), + decimal.Decimal(".009"), + decimal.Decimal("-.009"), + ] + ids = [decimal.Decimal(i) for i in range(len(test_data))] + arrays = [ + pyarrow.array(ids, pyarrow.decimal128(9, 0)), + pyarrow.array(test_data, pyarrow.decimal128(15, 4)), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, DecimalData) + values (:1, :2) + """, + df, + ) + self.conn.commit() + with test_env.DefaultsContextManager("fetch_decimals", True): + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8915(self): + "8915 - test various timestamp values" + test_data = [ + datetime.datetime(2056, 2, 29), + datetime.datetime(2020, 2, 29), + datetime.datetime(1900, 1, 1), + datetime.datetime(2000, 1, 1), + datetime.datetime(1970, 1, 1), + datetime.datetime(2020, 2, 29, 23, 59, 59, 123456), + datetime.datetime(2023, 12, 31, 23, 59, 59, 567890), + datetime.datetime(2024, 1, 1, 0, 0, 0, 789012), + ] + ids = list(range(len(test_data))) + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(test_data, pyarrow.timestamp("us")), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + def test_8916(self): + "8916 - test insertion with large 
data volumes" + num_rows = 10_000 + ids = list(range(1, num_rows + 1)) + names = [f"Employee-{i}" for i in ids] + salaries = [i * 100.25 for i in ids] + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(names, pyarrow.string()), + pyarrow.array(salaries, pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + self.conn.commit() + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + @test_env.skip_unless_sparse_vectors_supported() + def test_8917(self): + "8917 - test ingestion of sparse vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, + dict(num_dimensions=16, indices=[1, 3], values=[1, -1]), + dict(num_dimensions=16, indices=[5, 10], values=[2, -2]), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_INT8), + ), + pyarrow.array( + [ + dict( + num_dimensions=16, indices=[1, 3], values=[1.1, -1.1] + ), + None, + dict( + num_dimensions=16, indices=[5, 10], values=[2.2, -2.2] + ), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT32), + ), + pyarrow.array( + [ + dict( + num_dimensions=16, indices=[1, 3], values=[1.25, -1.25] + ), + dict( + num_dimensions=16, indices=[5, 10], values=[2.5, -2.5] + ), + None, + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT64), + ), + ] + names = [ + "IntCol", + "SparseVector8Col", + "SparseVector32Col", + "SparseVector64Col", + ] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestSparseVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + self.cursor.executemany( + f""" + insert into TestSparseVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + self.conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = self.conn.fetch_df_all( + f""" + select {query_names} + from TestSparseVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + @test_env.skip_unless_vectors_supported() + def test_8918(self): + "8918 - test ingestion of dense vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, + [ + -127, + -100, + -5, + -1, + 0, + 0, + 0, + 0, + 1, + 5, + 7, + 25, + 13, + 0, + 10, + 127, + ], + [ + -25, + 25, + -15, + 15, + -5, + 5, + 0, + 0, + -127, + 127, + -25, + 25, + -105, + 105, + -1, + 1, + ], + ], + pyarrow.list_(pyarrow.int8()), + ), + pyarrow.array( + [ + None, + [ + -12.5, + -578.625, + -100.25, + -87.5, + 0, + 25, + 0, + 0, + 1, + 1.25, + 1.75, + 2.5, + 1.75, + 0, + 5889.125, + 6500.375, + ], + [ + -25.5, + 25.5, + -15.25, + 15.25, + -5.3, + 5.3, + 0, + 0, + -127.8, + 127.8, + -15.222, + 15.222, + -105.333, + 105.333, + -1, + 1, + ], + ], + pyarrow.list_(pyarrow.float32()), + ), + pyarrow.array( + [ + None, + [ + -22.5, + -278.625, + -200.25, + -77.5, + 0, + 35, + 0, + 0, + 1, + 8.25, + 9.75, + 3.5, + 4.75, + 0, + 6889.125, + 7500.375, + ], + [ + -35.5, + 35.5, + -25.25, + 25.25, + -8.3, + 8.3, + 0, + 0, + -227.8, + 227.8, + -215.222, + 415.222, + -505.333, + 605.333, + -1, + 1, + ], + ], + pyarrow.list_(pyarrow.float64()), + ), + ] + names = [ + "IntCol", + "Vector8Col", + "Vector32Col", + 
"Vector64Col", + ] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + self.cursor.executemany( + f""" + insert into TestVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + self.conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = self.conn.fetch_df_all( + f""" + select {query_names} + from TestVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py new file mode 100644 index 00000000..4db88240 --- /dev/null +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -0,0 +1,872 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +""" +Module for testing DataFrame ingestion with asyncio +""" + +import datetime +import decimal +import unittest + +try: + import pyarrow + + HAS_INTEROP = True +except ImportError: + HAS_INTEROP = False + +import test_env + +SPARSE_VECTOR_FIELDS_FLOAT32 = [ + ("num_dimensions", pyarrow.int64()), + ("indices", pyarrow.list_(pyarrow.uint32())), + ("values", pyarrow.list_(pyarrow.float32())), +] + +SPARSE_VECTOR_FIELDS_FLOAT64 = [ + ("num_dimensions", pyarrow.int64()), + ("indices", pyarrow.list_(pyarrow.uint32())), + ("values", pyarrow.list_(pyarrow.float64())), +] + +SPARSE_VECTOR_FIELDS_INT8 = [ + ("num_dimensions", pyarrow.int64()), + ("indices", pyarrow.list_(pyarrow.uint32())), + ("values", pyarrow.list_(pyarrow.int8())), +] + + +@test_env.skip_unless_thin_mode() +@unittest.skipUnless(HAS_INTEROP, "missing pyarrow module") +class TestCase(test_env.BaseAsyncTestCase): + + async def test_9000(self): + "9000 - test basic ingestion of data frame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + datetime.datetime(2021, 2, 2), + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", "DateOfBirth"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9001(self): + "9001 - test ingestion with null values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", None, "Bob"], pyarrow.string()), + pyarrow.array([None, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + None, + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", "DateOfBirth"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9002(self): + "9002 - test ingestion with single column" + arrays = [pyarrow.array([1, 2, 3], pyarrow.int64())] + names = ["Id"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + "insert into TestDataFrame (Id) values (:1)", df + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select Id as "Id" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9003(self): + "9003 - test 
ingestion with large data types" + long_str = "X" * 32_768 + long_raw = b"Y" * 32_768 + arrays = [ + pyarrow.array([1], pyarrow.int64()), + pyarrow.array([long_str], pyarrow.large_string()), + pyarrow.array([long_raw], pyarrow.large_binary()), + ] + names = ["Id", "LongData", "LongRawData"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9004(self): + "9004 - test ingestion with decimal values" + arrays = [ + pyarrow.array( + [ + decimal.Decimal("1"), + decimal.Decimal("2"), + decimal.Decimal("3"), + ], + pyarrow.decimal128(9, 0), + ), + pyarrow.array( + [ + decimal.Decimal("1234567890.1234"), + decimal.Decimal("-9876543210.9876"), + decimal.Decimal("0.0001"), + ], + pyarrow.decimal128(15, 4), + ), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame + (Id, DecimalData) + values (:1, :2) + """, + df, + ) + await self.conn.commit() + with test_env.DefaultsContextManager("fetch_decimals", True): + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + @test_env.skip_unless_native_boolean_supported() + async def test_9005(self): + "9005 - test ingestion with boolean values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array([True, False, True], pyarrow.bool_()), + pyarrow.array([False, True, None], pyarrow.bool_()), + ] + names = ["IntCol", "BooleanCol1", "BooleanCol2"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("truncate table TestBooleans") + await self.cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + IntCol as "IntCol", + BooleanCol1 as "BooleanCol1", + BooleanCol2 as "BooleanCol2" + from TestBooleans + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9006(self): + "9006 - test ingestion with timestamp values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1, 0, 0, 0), + datetime.datetime(2021, 2, 2, 12, 34, 56), + datetime.datetime(2022, 3, 3, 23, 59, 59), + ], + pyarrow.timestamp("us"), + ), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame + (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9007(self): + "9007 - test ingestion with mismatched column count" + arrays = [ + pyarrow.array([1, 
2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + ] + names = ["ID", "NAME"] + df = pyarrow.table(arrays, names) + with self.assertRaisesFullCode("DPY-4009", "ORA-01008"): + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + + async def test_9008(self): + "9008 - test ingestion with invalid data type" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [["a", "b"], ["c"], ["d", "e", "f"]], + pyarrow.list_(pyarrow.string()), + ), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with self.assertRaisesFullCode("DPY-3033"): + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + + async def test_9009(self): + "9009 - test execute() with DataFrame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Sue"], pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with self.assertRaisesFullCode("DPY-2003"): + await self.cursor.execute( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + + async def test_9010(self): + "9010 - test consecutive executemany() calls with same dataframe" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + for i in range(3): + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9011(self): + "9011 - test nulls/None for all datatypes" + arrays = [ + pyarrow.array([1], pyarrow.int64()), + pyarrow.array([None], pyarrow.float32()), + pyarrow.array([None], pyarrow.float64()), + pyarrow.array([None], pyarrow.string()), + pyarrow.array([None], pyarrow.timestamp("s")), + pyarrow.array([None], pyarrow.binary()), + ] + names = [ + "Id", + "FloatData", + "DoubleData", + "FirstName", + "DateOfBirth", + "RawData", + ] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + bind_names = ",".join(names) + bind_values = ",".join(f":{i + 1}" for i in range(len(names))) + await self.cursor.executemany( + f""" + insert into TestDataFrame ({bind_names}) + values ({bind_values}) + """, + df, + ) + await self.conn.commit() + query_values = ",".join(f'{name} as "{name}"' for name in names) + odf = await self.conn.fetch_df_all( + f""" + select {query_values} + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9012(self): + "9012 - test LOB sizes around 32K boundary using DataFrame ingestion" + test_sizes = [32766, 32767, 32768, 32769, 32770] + arrays = [ + pyarrow.array(range(1, len(test_sizes) + 1), pyarrow.int64()), + pyarrow.array( + ["X" * s for s in test_sizes], pyarrow.large_string() + ), + pyarrow.array( + [b"Y" * s for s in test_sizes], pyarrow.large_binary() + ), + ] + names = ["Id", "LongData", "LongRawData"] + df 
= pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9013(self): + "9013 - test ingestion with mixed characters using DataFrame" + if test_env.get_charset() != "AL32UTF8": + self.skipTest("Database character set must be AL32UTF8") + + test_data = [ + "ASCII: Hello World", # Pure ASCII + "Latin: café España", # Latin-1 Supplement + "Cyrillic: русский текст", # Actual Cyrillic + "Chinese: 中文测试", # Actual Chinese + "Emoji: 👍😊❤️", # Emojis + "Special: ~!@#$%^&*()_+{}|:\"<>?`-=[]\\;',./", # ASCII symbols + "Mixed: 你好, world! café? 123@# русский 👍", # Mixed characters + ] + arrays = [ + pyarrow.array(range(1, len(test_data) + 1), pyarrow.int64()), + pyarrow.array(test_data, pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9014(self): + "9014 - test various numeric values" + test_data = [ + decimal.Decimal(0), + decimal.Decimal(1), + decimal.Decimal(-1), + decimal.Decimal("99999999999.9999"), + decimal.Decimal("-99999999999.9999"), + decimal.Decimal("10000000000.0001"), + decimal.Decimal("-10000000000.0001"), + decimal.Decimal(".0001"), + decimal.Decimal("-.0001"), + decimal.Decimal(".9"), + decimal.Decimal("-.9"), + decimal.Decimal(".09"), + decimal.Decimal("-.09"), + decimal.Decimal(".009"), + decimal.Decimal("-.009"), + ] + ids = [decimal.Decimal(i) for i in range(len(test_data))] + arrays = [ + pyarrow.array(ids, pyarrow.decimal128(9, 0)), + pyarrow.array(test_data, pyarrow.decimal128(15, 4)), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, DecimalData) + values (:1, :2) + """, + df, + ) + await self.conn.commit() + with test_env.DefaultsContextManager("fetch_decimals", True): + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9015(self): + "9015 - test various timestamp values" + test_data = [ + datetime.datetime(2056, 2, 29), + datetime.datetime(2020, 2, 29), + datetime.datetime(1900, 1, 1), + datetime.datetime(2000, 1, 1), + datetime.datetime(1970, 1, 1), + datetime.datetime(2020, 2, 29, 23, 59, 59, 123456), + datetime.datetime(2023, 12, 31, 23, 59, 59, 567890), + datetime.datetime(2024, 1, 1, 0, 0, 0, 789012), + ] + ids = list(range(len(test_data))) + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(test_data, pyarrow.timestamp("us")), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) + await 
self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + async def test_9016(self): + "9016 - test insertion with large data volumes" + num_rows = 10_000 + ids = list(range(1, num_rows + 1)) + names = [f"Employee-{i}" for i in ids] + salaries = [i * 100.25 for i in ids] + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(names, pyarrow.string()), + pyarrow.array(salaries, pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + await self.conn.commit() + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + @test_env.skip_unless_sparse_vectors_supported() + async def test_9017(self): + "9017 - test ingestion of sparse vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, + dict(num_dimensions=16, indices=[1, 3], values=[1, -1]), + dict(num_dimensions=16, indices=[5, 10], values=[2, -2]), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_INT8), + ), + pyarrow.array( + [ + dict( + num_dimensions=16, indices=[1, 3], values=[1.1, -1.1] + ), + None, + dict( + num_dimensions=16, indices=[5, 10], values=[2.2, -2.2] + ), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT32), + ), + pyarrow.array( + [ + dict( + num_dimensions=16, indices=[1, 3], values=[1.25, -1.25] + ), + dict( + num_dimensions=16, indices=[5, 10], values=[2.5, -2.5] + ), + None, + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT64), + ), + ] + names = [ + "IntCol", + "SparseVector8Col", + "SparseVector32Col", + "SparseVector64Col", + ] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestSparseVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + await self.cursor.executemany( + f""" + insert into TestSparseVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + await self.conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = await self.conn.fetch_df_all( + f""" + select {query_names} + from TestSparseVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + @test_env.skip_unless_vectors_supported() + async def test_9018(self): + "9018 - test ingestion of dense vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, + [ + -127, + -100, + -5, + -1, + 0, + 0, + 0, + 0, + 1, + 5, + 7, + 25, + 13, + 0, + 10, + 127, + ], + [ + -25, + 25, + -15, + 15, + -5, + 5, + 0, + 0, + -127, + 127, + -25, + 25, + -105, + 105, + -1, + 1, + ], + ], + pyarrow.list_(pyarrow.int8()), + ), + pyarrow.array( + [ + None, + [ + -12.5, + -578.625, + -100.25, + -87.5, + 0, + 25, + 0, + 0, + 1, + 1.25, + 1.75, + 2.5, + 1.75, + 0, + 5889.125, + 6500.375, + ], + [ + -25.5, + 25.5, + -15.25, + 15.25, + -5.3, + 5.3, + 0, + 0, + 
-127.8, + 127.8, + -15.222, + 15.222, + -105.333, + 105.333, + -1, + 1, + ], + ], + pyarrow.list_(pyarrow.float32()), + ), + pyarrow.array( + [ + None, + [ + -22.5, + -278.625, + -200.25, + -77.5, + 0, + 35, + 0, + 0, + 1, + 8.25, + 9.75, + 3.5, + 4.75, + 0, + 6889.125, + 7500.375, + ], + [ + -35.5, + 35.5, + -25.25, + 25.25, + -8.3, + 8.3, + 0, + 0, + -227.8, + 227.8, + -215.222, + 415.222, + -505.333, + 605.333, + -1, + 1, + ], + ], + pyarrow.list_(pyarrow.float64()), + ), + ] + names = [ + "IntCol", + "Vector8Col", + "Vector32Col", + "Vector64Col", + ] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + await self.cursor.executemany( + f""" + insert into TestVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + await self.conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = await self.conn.fetch_df_all( + f""" + select {query_names} + from TestVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) + + +if __name__ == "__main__": + test_env.run_test_cases() From ecd44d3b8a93cc7f0d4461fdfcd440acd2169daf Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:57:30 -0600 Subject: [PATCH 153/239] Avoid syntax that isn't supported by Python 3.9. --- src/oracledb/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/oracledb/utils.py b/src/oracledb/utils.py index 0f0129f3..134935aa 100644 --- a/src/oracledb/utils.py +++ b/src/oracledb/utils.py @@ -28,7 +28,7 @@ # Contains utility classes and methods. # ----------------------------------------------------------------------------- -from typing import Any, Callable, Union +from typing import Any, Callable, Optional, Union from .arrow_array import ArrowArray from .dataframe import DataFrame @@ -72,7 +72,7 @@ def from_arrow(obj: Any) -> Union[DataFrame, ArrowArray]: def normalize_sessionless_transaction_id( - value: bytes | str | None = None, + value: Optional[Union[bytes, str]] = None, ) -> bytes: """ Normalize and validate the transaction_id. From f7e129557e82437d0ff992c1fb459eb45fa31447 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:58:08 -0600 Subject: [PATCH 154/239] Further doc improvements. --- doc/src/release_notes.rst | 19 +++++-------------- doc/src/user_guide/appendix_a.rst | 2 +- doc/src/user_guide/ha.rst | 13 ++++++------- 3 files changed, 12 insertions(+), 22 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 38b5fc14..8c8156fa 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -2852,12 +2852,8 @@ cx_Oracle 6.0 beta 1 (April 2017) to the ``SessionPool.release`` method in order to support session tagging. #) Added parameter edition to the ``cx_Oracle.SessionPool()`` method. -#) Added support for - `universal rowids `__. -#) Added support for `DML Returning of multiple rows - `__. +#) Added support for universal rowids. +#) Added support for DML Returning of multiple rows. #) Added attributes :attr:`Variable.actualElements` and :attr:`Variable.values` to variables. #) Added parameters region, sharding_key and super_sharding_key to the @@ -2885,16 +2881,11 @@ cx_Oracle 6.0 beta 1 (April 2017) to the application. #) Dropped deprecated parameters action, module and clientinfo from the ``cx_Oracle.connect()`` method. 
The appcontext parameter should be used - instead as shown in this `sample `__. + instead. #) Dropped deprecated attribute numbersAsString from - :ref:`cursor objects `. Use an output type handler instead as - shown in this `sample `__. + :ref:`cursor objects `. Use an output type handler instead. #) Dropped deprecated attributes cqqos and rowids from - :ref:`subscription objects `. Use the qos attribute instead as - shown in this `sample `__. + :ref:`subscription objects `. Use the qos attribute instead. #) Dropped deprecated parameters cqqos and rowids from the :meth:`Connection.subscribe()` method. Use the qos parameter instead as shown in this `sample `__) + * - Connection Pool Runtime Load Balancing (RLB) (see `Runtime Connection Load Balancing `__) - No - Yes - Yes diff --git a/doc/src/user_guide/ha.rst b/doc/src/user_guide/ha.rst index e8ce25d8..b6d916d4 100644 --- a/doc/src/user_guide/ha.rst +++ b/doc/src/user_guide/ha.rst @@ -157,13 +157,12 @@ information. Transaction Guard ----------------- -Python-oracledb supports `Transaction Guard -`__ which enables Python -application to verify the success or failure of the last transaction in the -event of an unplanned outage. This feature requires Oracle Database 12.1 or -higher. When using python-oracledb Thick mode, Oracle Client 12.1 or higher is -additionally required. +Python-oracledb supports `Transaction Guard `__ which +enables Python application to verify the success or failure of the last +transaction in the event of an unplanned outage. This feature requires Oracle +Database 12.1 or higher. When using python-oracledb Thick mode, Oracle Client +12.1 or higher is additionally required. Using Transaction Guard helps to: From a6089226fafa07649f6296e2ae6061cee4d7b9b6 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 09:58:57 -0600 Subject: [PATCH 155/239] Added support for Python 3.14. --- .github/workflows/build.yaml | 5 +++-- README.md | 2 +- doc/src/release_notes.rst | 3 +++ doc/src/user_guide/installation.rst | 4 ++-- doc/src/user_guide/introduction.rst | 6 +++--- pyproject.toml | 2 +- setup.cfg | 1 + tox.ini | 6 +++--- 8 files changed, 17 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 4052c596..239f531f 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -68,6 +68,7 @@ jobs: echo /opt/python/cp311-cp311/bin/python3.11 -m build >> build.sh echo /opt/python/cp312-cp312/bin/python3.12 -m build >> build.sh echo /opt/python/cp313-cp313/bin/python3.13 -m build >> build.sh + echo /opt/python/cp314-cp314/bin/python3.14 -m build >> build.sh echo cd dist >> build.sh echo auditwheel repair *.whl >> build.sh echo rm *.whl >> build.sh @@ -96,7 +97,7 @@ jobs: strategy: matrix: os: [macos-latest] - python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', '3.14'] arch: ['x86', ''] exclude: - os: macos-latest @@ -133,7 +134,7 @@ jobs: strategy: matrix: os: [windows-latest] - python-version: ['3.9', '3.10', '3.11', '3.12', '3.13.5'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13.5', '3.14'] arch: ['x86', ''] steps: diff --git a/README.md b/README.md index f867079f..a80ec708 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ with oracledb.connect(user=un, password=pw, dsn=cs) as connection: ## Dependencies and Interoperability -- Python versions 3.9 through 3.13. +- Python versions 3.9 through 3.14. 
Pre-built packages are available on [PyPI][pypi] and other repositories. diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 8c8156fa..0165e760 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -40,6 +40,8 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Pre-built binaries are now being created for Python 3.14. Note this Python + version is currently in release candidate phase. #) Added support for Oracle Database 23ai :ref:`Sessionless Transactions `. #) Changes to :ref:`data frame ` support: @@ -67,6 +69,7 @@ Common Changes Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version. + oracledb `3.2.0 `__ (June 2025) -------------------------------------------------------------------------------------------------- diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 9cbead34..32e97c23 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -36,7 +36,7 @@ Python-oracledb is typically installed from Python's package repository 1. Install `Python 3 `__ if it is not already available. - Use any version from Python 3.9 through 3.13. + Use any version from Python 3.9 through 3.14. Previous versions of python-oracledb supported older Python versions. @@ -157,7 +157,7 @@ Installation Requirements To use python-oracledb, you need: -- Python 3.9, 3.10, 3.11, 3.12 or 3.13 +- Python 3.9, 3.10, 3.11, 3.12, 3.13, or 3.14 - The Python cryptography package. This package is automatically installed as a dependency of python-oracledb. It is strongly recommended that you keep the diff --git a/doc/src/user_guide/introduction.rst b/doc/src/user_guide/introduction.rst index a7f5236b..0124e8f6 100644 --- a/doc/src/user_guide/introduction.rst +++ b/doc/src/user_guide/introduction.rst @@ -18,9 +18,9 @@ The module is available from standard package repositories including `PyPI hosted at `github.com/oracle/python-oracledb `__. -This module is currently tested with Python 3.9, 3.10, 3.11, 3.12, and 3.13 -against Oracle Database 23ai, 21c, 19c, 18c, 12c, and 11gR2. Previous versions -of python-oracledb supported older Python versions. +This module is currently tested with Python 3.9, 3.10, 3.11, 3.12, 3.13, and +3.14 against Oracle Database 23ai, 21c, 19c, 18c, 12c, and 11gR2. Previous +versions of python-oracledb supported older Python versions. Changes in python-oracledb releases can be found in the :ref:`release notes `. 
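For orientation, a minimal illustrative sketch (not taken from the patch itself)
of the data frame API whose changes are noted in the release notes above and
exercised by the test updates later in this series. The connection details are
hypothetical and the table comes from the test schema:

import oracledb
import pyarrow

# hypothetical credentials and service name
conn = oracledb.connect(user="hr", password="secret", dsn="localhost/orclpdb1")

# fetch the full result set as an Arrow-backed data frame
odf = conn.fetch_df_all("select Id, FirstName from TestDataFrame order by Id")

# convert to a PyArrow table, as the updated tests do
table = pyarrow.table(odf)
print(table.num_rows, table.column_names)
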
diff --git a/pyproject.toml b/pyproject.toml index 6c46d797..a39f6814 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [tool.black] line-length = 79 -target-version = ["py39", "py310", "py311", "py312", "py313"] +target-version = ["py39", "py310", "py311", "py312", "py313", "py314"] required-version = 24 [tool.ruff] diff --git a/setup.cfg b/setup.cfg index 39864d29..2a48af48 100644 --- a/setup.cfg +++ b/setup.cfg @@ -31,6 +31,7 @@ classifiers = Programming Language :: Python :: 3.11 Programming Language :: Python :: 3.12 Programming Language :: Python :: 3.13 + Programming Language :: Python :: 3.14 Programming Language :: Python :: Implementation :: CPython Programming Language :: Cython Topic :: Database diff --git a/tox.ini b/tox.ini index efe3aa71..63c60476 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py{39,310,311,312,313}-{thin,thick} +envlist = py{39,310,311,312,313,314}-{thin,thick} [testenv] commands = {envpython} -m unittest discover -v -s tests @@ -18,10 +18,10 @@ passenv = DPI_DEBUG_LEVEL ORACLE_HOME -[testenv:py{39,310,311,312,313}-thick] +[testenv:py{39,310,311,312,313,314}-thick] setenv = PYO_TEST_DRIVER_MODE=thick -[testenv:py{39,310,311,312,313}-thin] +[testenv:py{39,310,311,312,313,314}-thin] setenv = PYO_TEST_DRIVER_MODE=thin From 2d9f577d75edaaba9a2a44d8f795457f423a3c64 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 10:09:46 -0600 Subject: [PATCH 156/239] Migrated build configuration completely to pyproject.toml and added test dependencies; upgraded Black to version 25.1. --- .pre-commit-config.yaml | 2 +- doc/src/release_notes.rst | 3 + pyproject.toml | 75 +++++++++++++++++++- setup.cfg | 55 -------------- src/oracledb/connection.py | 4 +- src/oracledb/pool.py | 4 +- tests/test_8000_dataframe.py | 40 +---------- tests/test_8100_dataframe_async.py | 29 +------- tests/test_8900_dataframe_ingestion.py | 9 +-- tests/test_9000_dataframe_ingestion_async.py | 9 +-- tox.ini | 1 + utils/templates/connection.py | 4 +- utils/templates/pool.py | 4 +- 13 files changed, 93 insertions(+), 146 deletions(-) delete mode 100644 setup.cfg diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8d193308..80da1bc9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ repos: - id: check-yaml - id: check-added-large-files - repo: https://github.com/psf/black-pre-commit-mirror - rev: 24.4.2 + rev: 25.1.0 hooks: - id: black - repo: https://github.com/astral-sh/ruff-pre-commit diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 0165e760..e6a0c4e9 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -68,6 +68,9 @@ Common Changes Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version. +#) Internal change: migrated build configuration completely to pyproject.toml, + which allows for optional dependencies for the test suite on the numpy, + pandas and pyarrow modules. 
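As an aside on the bullet above: numpy, pandas and pyarrow are the
interoperability libraries that the data frame tests import directly, which is
why they are grouped into the optional "test" extra. A hedged sketch of the
kind of conversion they are used for, assuming an existing connection ``conn``
and the test schema's TestDataFrame table:

import pyarrow

# stream the result in Arrow-backed batches and convert each one to pandas,
# mirroring what the reworked tests do
for batch in conn.fetch_df_batches("select * from TestDataFrame", size=100):
    pdf = pyarrow.table(batch).to_pandas()
    print(len(pdf))
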
oracledb `3.2.0 `__ (June 2025) diff --git a/pyproject.toml b/pyproject.toml index a39f6814..f7196a35 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,13 +1,82 @@ [build-system] -requires = ["setuptools >= 77.0.0", "wheel", "cython"] +requires = [ + "setuptools >= 77.0.0", + "wheel", + "cython == 3.1", +] build-backend = "setuptools.build_meta" +[project] +name = "oracledb" +description = "Python interface to Oracle Database" +keywords = ["Oracle", "database"] +authors = [{name = "Anthony Tuininga", email = "anthony.tuininga@oracle.com"}] +license = "UPL-1.0 OR Apache-2.0" +license-files = [ + "LICENSE.txt", + "THIRD_PARTY_LICENSES.txt", + "NOTICE.txt", +] +classifiers = [ + "Development Status :: 5 - Production/Stable", + "Intended Audience :: Developers", + "Natural Language :: English", + "Operating System :: OS Independent", + "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Cython", + "Topic :: Database", +] +requires-python = ">=3.9" +dependencies = ["cryptography>=3.2.1"] +dynamic = ["version"] + +[project.readme] +file = "README.md" +content-type = "text/markdown" + +[project.urls] +Homepage = "https://oracle.github.io/python-oracledb" +Installation = "https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html" +Samples = "https://github.com/oracle/python-oracledb/tree/main/samples" +Documentation = "http://python-oracledb.readthedocs.io" +"Release Notes" = "https://python-oracledb.readthedocs.io/en/latest/release_notes.html" +Issues = "https://github.com/oracle/python-oracledb/issues" +Source = "https://github.com/oracle/python-oracledb" + +[tool.setuptools] +zip-safe = false +packages = [ + "oracledb", + "oracledb.plugins", +] +package-dir = {"" = "src"} +include-package-data = false + +[tool.setuptools.package-data] +"*" = ["py.typed"] + +[tool.setuptools.dynamic] +version = {attr = "oracledb.version.__version__"} + [tool.black] line-length = 79 -target-version = ["py39", "py310", "py311", "py312", "py313", "py314"] -required-version = 24 +required-version = 25 [tool.ruff] line-length = 79 target-version = "py39" exclude = ["templates"] + +[project.optional-dependencies] +test = [ + "numpy", + "pandas", + "pyarrow", +] diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 2a48af48..00000000 --- a/setup.cfg +++ /dev/null @@ -1,55 +0,0 @@ -[metadata] -name = oracledb -version = attr: oracledb.version.__version__ -description = Python interface to Oracle Database -long_description = file: README.md -long_description_content_type = text/markdown -keywords = Oracle, database -author = Anthony Tuininga -author_email = anthony.tuininga@oracle.com -url = https://oracle.github.io/python-oracledb -project_urls = - Installation = https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html - Samples = https://github.com/oracle/python-oracledb/tree/main/samples - Documentation = http://python-oracledb.readthedocs.io - Release Notes = https://python-oracledb.readthedocs.io/en/latest/release_notes.html - Issues = https://github.com/oracle/python-oracledb/issues - Source = 
https://github.com/oracle/python-oracledb -license = UPL-1.0 OR Apache-2.0 -license_files = - LICENSE.txt - THIRD_PARTY_LICENSES.txt - NOTICE.txt -classifiers = - Development Status :: 5 - Production/Stable - Intended Audience :: Developers - Natural Language :: English - Operating System :: OS Independent - Programming Language :: Python :: 3 :: Only - Programming Language :: Python :: 3.9 - Programming Language :: Python :: 3.10 - Programming Language :: Python :: 3.11 - Programming Language :: Python :: 3.12 - Programming Language :: Python :: 3.13 - Programming Language :: Python :: 3.14 - Programming Language :: Python :: Implementation :: CPython - Programming Language :: Cython - Topic :: Database - -[options] -zip_safe = false -python_requires = >=3.9 -setup_requires = cython>=3.0.10 -install_requires = cryptography>=3.2.1 -test_suite = tests -packages = - oracledb - oracledb.plugins -package_dir = - =src - -[options.packages.find] -where = src - -[options.package_data] -* = py.typed diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index f6ae6629..7c018cbc 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1310,7 +1310,7 @@ def unsubscribe(self, subscr: Subscription) -> None: def _connection_factory( - f: Callable[..., Connection] + f: Callable[..., Connection], ) -> Callable[..., Connection]: """ Decorator which checks the validity of the supplied keyword parameters by @@ -2238,7 +2238,7 @@ async def tpc_rollback(self, xid: Optional[Xid] = None) -> None: def _async_connection_factory( - f: Callable[..., AsyncConnection] + f: Callable[..., AsyncConnection], ) -> Callable[..., AsyncConnection]: """ Decorator which checks the validity of the supplied keyword parameters by diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index 77601581..d848d131 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -572,7 +572,7 @@ def reconfigure( def _pool_factory( - f: Callable[..., ConnectionPool] + f: Callable[..., ConnectionPool], ) -> Callable[..., ConnectionPool]: """ Decorator which checks the validity of the supplied keyword parameters by @@ -1107,7 +1107,7 @@ async def release( def _async_pool_factory( - f: Callable[..., AsyncConnectionPool] + f: Callable[..., AsyncConnectionPool], ) -> Callable[..., AsyncConnectionPool]: """ Decorator which checks the validity of the supplied keyword parameters by diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 6f75deb3..bb8b1c11 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -31,16 +31,9 @@ import decimal import oracledb - -try: - import numpy - import pandas - import pyarrow - - HAS_INTEROP = True -except ImportError: - HAS_INTEROP = False - +import numpy +import pandas +import pyarrow import test_env # basic @@ -253,13 +246,6 @@ class TestCase(test_env.BaseTestCase): - def __check_interop(self): - """ - Checks to see if the pyarrow and pandas modules are available. - """ - if not HAS_INTEROP: - self.skipTest("missing numpy, pandas or pyarrow modules") - def __convert_date(self, value): """ Converts a date to the format required by Arrow. @@ -379,7 +365,6 @@ def __test_df_interop(self, data): Tests interoperability with external data frames using the data set provided. 
""" - self.__check_interop() self.__populate_table(data) ora_df = self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, data) @@ -389,7 +374,6 @@ def __test_df_batches_interop(self, data, batch_size, num_batches): Tests interoperability with external data frames using the data set provided. """ - self.__check_interop() self.__populate_table(data) batches = list(self.conn.fetch_df_batches(QUERY_SQL, size=batch_size)) self.assertEqual(len(batches), num_batches) @@ -462,7 +446,6 @@ def test_8009(self): def test_8010(self): "8010 - verify passing Arrow arrays twice works" - self.__check_interop() self.__populate_table(DATASET_1) ora_df = self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, DATASET_1) @@ -507,7 +490,6 @@ def test_8015(self): def test_8016(self): "8016 - verify get_column() returns the correct value" - self.__check_interop() self.__populate_table(DATASET_1) ora_df = self.conn.fetch_df_all(QUERY_SQL) array = pyarrow.array(ora_df.get_column(1)) @@ -520,7 +502,6 @@ def test_8017(self): def test_8018(self): "8018 - fetch_decimals without precision and scale specified" data = [(1.0,)] - self.__check_interop() with test_env.DefaultsContextManager("fetch_decimals", True): ora_df = self.conn.fetch_df_all("select 1.0 from dual") fetched_tab = pyarrow.Table.from_arrays( @@ -533,7 +514,6 @@ def test_8018(self): def test_8019(self): "8019 - fetch clob" data = [("test_8023",)] - self.__check_interop() ora_df = self.conn.fetch_df_all( "select to_clob('test_8023') from dual" ) @@ -544,7 +524,6 @@ def test_8019(self): def test_8020(self): "8020 - fetch blob" data = [(b"test_8024",)] - self.__check_interop() ora_df = self.conn.fetch_df_all( "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" ) @@ -555,7 +534,6 @@ def test_8020(self): def test_8021(self): "8021 - fetch raw" data = [(b"test_8025",)] - self.__check_interop() ora_df = self.conn.fetch_df_all( "select utl_raw.cast_to_raw('test_8025') from dual" ) @@ -567,7 +545,6 @@ def test_8021(self): def test_8022(self): "8022 - fetch boolean" data = [(True,), (False,), (False,), (True,), (True,)] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ select true @@ -587,7 +564,6 @@ def test_8022(self): def test_8023(self): "8023 - fetch data with multiple rows containing null values" - self.__check_interop() ora_df = self.conn.fetch_df_all( """ select to_date('2025-06-12', 'YYYY-MM-DD') as data from dual @@ -638,7 +614,6 @@ def test_8024(self): (array.array("f", [34.6, 77.8]).tolist(),), (array.array("f", [34.6, 77.8, 55.9]).tolist(),), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) @@ -658,7 +633,6 @@ def test_8025(self): ([34.6, 77.8],), ([34.6, 77.8, 55.9],), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) @@ -678,7 +652,6 @@ def test_8026(self): ([34, -77],), ([34, 77, 55],), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ SELECT TO_VECTOR('[34, -77]', 2, INT8) @@ -698,7 +671,6 @@ def test_8027(self): ([3, 2, 3],), ([3, 2],), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ SELECT TO_VECTOR('[3, 2, 3]', 24, BINARY) @@ -719,7 +691,6 @@ def test_8028(self): (array.array("f", [34.6, 77.8, 55.9]).tolist(),), (None,), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) @@ -751,7 +722,6 @@ def test_8029(self): ([34.6, 77.8],), ([34.6, 77.8],), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ 
SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) @@ -803,7 +773,6 @@ def test_8030(self): }, ), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ SELECT TO_VECTOR( @@ -845,7 +814,6 @@ def test_8031(self): }, ), ] - self.__check_interop() ora_df = self.conn.fetch_df_all( """ SELECT TO_VECTOR( @@ -883,7 +851,6 @@ def test_8032(self): @test_env.skip_unless_sparse_vectors_supported() def test_8033(self): "8033 - DPY-4007 -fetch sparse vectors with flexible dimensions" - self.__check_interop() with self.assertRaisesFullCode("DPY-2065"): self.conn.fetch_df_all( """ @@ -914,7 +881,6 @@ def test_8034(self): (56.25,), (91.25,), ] - self.__check_interop() self.__populate_table(dataset) # Use numeric expression involving a column diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index bc3070fd..9f17bf1b 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -31,15 +31,9 @@ import decimal import oracledb - -try: - import numpy - import pandas - import pyarrow - - HAS_INTEROP = True -except ImportError: - HAS_INTEROP = False +import numpy +import pandas +import pyarrow import test_env @@ -254,13 +248,6 @@ @test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): - def __check_interop(self): - """ - Checks to see if the pyarrow and pandas modules are available. - """ - if not HAS_INTEROP: - self.skipTest("missing pandas or pyarrow modules") - def __convert_date(self, value): """ Converts a date to the format required by Arrow. @@ -380,7 +367,6 @@ async def __test_df_interop(self, data): Tests interoperability with external data frames using the data set provided. """ - self.__check_interop() await self.__populate_table(data) ora_df = await self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, data) @@ -390,7 +376,6 @@ async def __test_df_batches_interop(self, data, batch_size, num_batches): Tests interoperability with external data frames using the data set provided. 
""" - self.__check_interop() await self.__populate_table(data) batches = [ df @@ -470,7 +455,6 @@ async def test_8109(self): async def test_8110(self): "8110 - verify passing Arrow arrays twice works" - self.__check_interop() await self.__populate_table(DATASET_1) ora_df = await self.conn.fetch_df_all(QUERY_SQL) self.__validate_df(ora_df, DATASET_1) @@ -522,7 +506,6 @@ async def test_8116(self): async def test_8117(self): "8117 - fetch_decimals without precision and scale specified" data = [(1.0,)] - self.__check_interop() with test_env.DefaultsContextManager("fetch_decimals", True): ora_df = await self.conn.fetch_df_all("select 1.0 from dual") fetched_df = pyarrow.table(ora_df).to_pandas() @@ -532,7 +515,6 @@ async def test_8117(self): async def test_8118(self): "8118 - fetch clob" data = [("test_8023",)] - self.__check_interop() ora_df = await self.conn.fetch_df_all( "select to_clob('test_8023') from dual" ) @@ -543,7 +525,6 @@ async def test_8118(self): async def test_8119(self): "8119 - fetch blob" data = [(b"test_8024",)] - self.__check_interop() ora_df = await self.conn.fetch_df_all( "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" ) @@ -555,7 +536,6 @@ async def test_8119(self): async def test_8120(self): "8120 - fetch boolean" data = [(True,), (False,), (False,), (True,), (True,)] - self.__check_interop() ora_df = await self.conn.fetch_df_all( """ select true @@ -580,7 +560,6 @@ async def test_8121(self): (array.array("f", [34.6, 77.8]).tolist(),), (array.array("f", [34.6, 77.8, 55.9]).tolist(),), ] - self.__check_interop() ora_df = await self.conn.fetch_df_all( """ SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) @@ -612,7 +591,6 @@ async def test_8122(self): }, ), ] - self.__check_interop() ora_df = await self.conn.fetch_df_all( """ SELECT TO_VECTOR( @@ -637,7 +615,6 @@ async def test_8122(self): async def test_8123(self): "8123 - fetch data with multiple rows containing null values" - self.__check_interop() ora_df = await self.conn.fetch_df_all( """ select to_date('2025-06-12', 'YYYY-MM-DD') as data from dual diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py index 1ad9a52e..9c4b99aa 100644 --- a/tests/test_8900_dataframe_ingestion.py +++ b/tests/test_8900_dataframe_ingestion.py @@ -28,14 +28,8 @@ import datetime import decimal -import unittest -try: - import pyarrow - - HAS_INTEROP = True -except ImportError: - HAS_INTEROP = False +import pyarrow import test_env @@ -58,7 +52,6 @@ ] -@unittest.skipUnless(HAS_INTEROP, "missing pyarrow module") class TestCase(test_env.BaseTestCase): def test_8900(self): diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py index 4db88240..f0170e7d 100644 --- a/tests/test_9000_dataframe_ingestion_async.py +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -28,14 +28,8 @@ import datetime import decimal -import unittest -try: - import pyarrow - - HAS_INTEROP = True -except ImportError: - HAS_INTEROP = False +import pyarrow import test_env @@ -59,7 +53,6 @@ @test_env.skip_unless_thin_mode() -@unittest.skipUnless(HAS_INTEROP, "missing pyarrow module") class TestCase(test_env.BaseAsyncTestCase): async def test_9000(self): diff --git a/tox.ini b/tox.ini index 63c60476..495fb1d2 100644 --- a/tox.ini +++ b/tox.ini @@ -3,6 +3,7 @@ envlist = py{39,310,311,312,313,314}-{thin,thick} [testenv] commands = {envpython} -m unittest discover -v -s tests +extras = test passenv = PYO_TEST_MAIN_USER PYO_TEST_MAIN_PASSWORD diff --git a/utils/templates/connection.py 
b/utils/templates/connection.py index 80deeb74..fb4e4317 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -1308,7 +1308,7 @@ def unsubscribe(self, subscr: Subscription) -> None: def _connection_factory( - f: Callable[..., Connection] + f: Callable[..., Connection], ) -> Callable[..., Connection]: """ Decorator which checks the validity of the supplied keyword parameters by @@ -1984,7 +1984,7 @@ async def tpc_rollback(self, xid: Optional[Xid] = None) -> None: def _async_connection_factory( - f: Callable[..., AsyncConnection] + f: Callable[..., AsyncConnection], ) -> Callable[..., AsyncConnection]: """ Decorator which checks the validity of the supplied keyword parameters by diff --git a/utils/templates/pool.py b/utils/templates/pool.py index b9839b38..975cb001 100644 --- a/utils/templates/pool.py +++ b/utils/templates/pool.py @@ -570,7 +570,7 @@ def reconfigure( def _pool_factory( - f: Callable[..., ConnectionPool] + f: Callable[..., ConnectionPool], ) -> Callable[..., ConnectionPool]: """ Decorator which checks the validity of the supplied keyword parameters by @@ -784,7 +784,7 @@ async def release( def _async_pool_factory( - f: Callable[..., AsyncConnectionPool] + f: Callable[..., AsyncConnectionPool], ) -> Callable[..., AsyncConnectionPool]: """ Decorator which checks the validity of the supplied keyword parameters by From ba350dc5dfeeddacd724d1abd0ca4cf7c7ba5fb1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 10:10:17 -0600 Subject: [PATCH 157/239] Fixed bug with connect strings containing "SOURCE_ROUTE=YES" where the second host is unresolvable by the client. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/base/connect_params.pyx | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e6a0c4e9..55f1a088 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -29,6 +29,8 @@ Thin Mode Changes #) Fixed bug with execution of a PL/SQL block containing at least one output bind variable immediately following a query that returned multiple duplicate rows. +#) Fixed bug with connect strings containing ``SOURCE_ROUTE=YES`` where the + second host is unresolvable by the client. Thick Mode Changes ++++++++++++++++++ diff --git a/src/oracledb/impl/base/connect_params.pyx b/src/oracledb/impl/base/connect_params.pyx index b8868c74..eb06d3f3 100644 --- a/src/oracledb/impl/base/connect_params.pyx +++ b/src/oracledb/impl/base/connect_params.pyx @@ -772,9 +772,10 @@ cdef class AddressList(ConnectParamsNode): cdef: list addresses = [] Address address - for address in children: + ConnectParamsNode._set_active_children(self, children) + for address in self.active_children: addresses.extend(address.resolve_host_name()) - ConnectParamsNode._set_active_children(self, addresses) + self.active_children = addresses cdef bint _uses_tcps(self): """ From ffb6407f51090a2b5d2a71a06a2f8e26ce25444e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 10:13:07 -0600 Subject: [PATCH 158/239] Update ODPI-C. 
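Illustrative note on the SOURCE_ROUTE fix in the preceding patch, not part of
this commit: with source routing the client only opens a connection to the
first address, typically an Oracle Connection Manager, which then routes on to
the next address, so the second host name may not be resolvable from the
client at all. A hedged sketch of such a descriptor, with hypothetical hosts,
service name and credentials:

import oracledb

dsn = """(DESCRIPTION=
           (SOURCE_ROUTE=YES)
           (ADDRESS=(PROTOCOL=TCP)(HOST=cman.example.com)(PORT=1521))
           (ADDRESS=(PROTOCOL=TCP)(HOST=db-internal.invalid)(PORT=1521))
           (CONNECT_DATA=(SERVICE_NAME=orclpdb1)))"""

# the fix above lets Thin mode connect even though the client cannot
# resolve the second (internal) host name
conn = oracledb.connect(user="hr", password="secret", dsn=dsn)
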
--- src/oracledb/impl/thick/odpi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index 33fc2109..447b25b3 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit 33fc2109c6bcece63a59dbfe3d700ddfa99af1ef +Subproject commit 447b25b306b930a5d065ba2577eb5afcd74c65f3 From 1a5bea2da5ad238dc4a60c6147a693e4968bb422 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 24 Jul 2025 20:58:20 -0600 Subject: [PATCH 159/239] Simplify build action. --- .github/workflows/build.yaml | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 239f531f..d75527f8 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -98,10 +98,7 @@ jobs: matrix: os: [macos-latest] python-version: ['3.9', '3.10', '3.11', '3.12', '3.13', '3.14'] - arch: ['x86', ''] - exclude: - - os: macos-latest - arch: x86 + arch: [''] steps: - uses: actions/checkout@v4 From 6b15d7be32831deca5d3991a41bd788f5d48aa93 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 29 Jul 2025 15:48:55 -0600 Subject: [PATCH 160/239] Update ODPI-C to released 5.6.2. --- src/oracledb/impl/thick/odpi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index 447b25b3..8bba6229 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit 447b25b306b930a5d065ba2577eb5afcd74c65f3 +Subproject commit 8bba6229f1a186395ad7d2cb475d6f87972bbe14 From 1c747ee3f9e4195dfe58d5a4f5dfe91e9ff221cb Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 29 Jul 2025 15:51:54 -0600 Subject: [PATCH 161/239] Fixed bug when fetching numeric data with precision that exceeds 38 as decimal data. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/base/metadata.pyx | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 55f1a088..37937979 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -67,6 +67,8 @@ Common Changes - Fixed bug when fetching numeric data that has no decimal point but the Arrow array has scale > 0 - Fixed bug when fetching dates that are in the year 2038 or later + - Fixed bug when fetching numeric data with precision that exceeds 38 as + decimal data Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version. diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 45c647a3..28b2b03e 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -62,7 +62,8 @@ cdef class OracleMetadata: uint8_t py_type_num = self._py_type_num uint32_t db_type_num = self.dbtype.num if db_type_num == DB_TYPE_NUM_NUMBER: - if py_type_num == PY_TYPE_NUM_DECIMAL and self.precision > 0: + if py_type_num == PY_TYPE_NUM_DECIMAL \ + and self.precision > 0 and self.precision <= 38: self._arrow_type = NANOARROW_TYPE_DECIMAL128 elif py_type_num == PY_TYPE_NUM_STR: self._arrow_type = NANOARROW_TYPE_STRING From cb464ab293fb7880d5998fd2d2ca790f127752f1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 29 Jul 2025 15:53:30 -0600 Subject: [PATCH 162/239] Test improvements. 
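Illustrative note, not part of the original commit message: among the test
additions below are metadata checks on the Arrow types produced when
``oracledb.defaults.fetch_decimals`` is enabled. A minimal sketch of the
behaviour they assert, assuming an existing connection ``conn`` and the test
schema's TestAllTypes table:

import pyarrow
import oracledb

oracledb.defaults.fetch_decimals = True
odf = conn.fetch_df_all("select IntValue, DecimalValue from TestAllTypes")

# integer columns map to decimal128(38, 0) and NUMBER(20, 6) to
# decimal128(20, 6); the fix in the previous patch stops using decimal128
# when a declared precision exceeds 38
print([pyarrow.array(a).type for a in odf.column_arrays()])
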
--- tests/sql/create_schema.sql | 70 ++ tests/sql/create_schema_23_4.sql | 3 +- tests/test_2300_object_var.py | 76 ++ tests/test_3100_boolean_var.py | 6 +- tests/test_5600_dbobject_async.py | 1 + tests/test_8000_dataframe.py | 1003 +++++++++++++++++++- tests/test_8100_dataframe_async.py | 1019 ++++++++++++++++++++- tests/test_9100_dataframe_vector.py | 369 ++++++++ tests/test_9200_dataframe_vector_async.py | 369 ++++++++ 9 files changed, 2885 insertions(+), 31 deletions(-) create mode 100644 tests/test_9100_dataframe_vector.py create mode 100644 tests/test_9200_dataframe_vector_async.py diff --git a/tests/sql/create_schema.sql b/tests/sql/create_schema.sql index fe6d1ce0..6ffc4076 100644 --- a/tests/sql/create_schema.sql +++ b/tests/sql/create_schema.sql @@ -153,6 +153,25 @@ create or replace type &main_user..udt_XmlTypeArray as table of sys.xmltype; / +create or replace type &main_user..udt_TableOfNumber as table of number; +/ + +create or replace type &main_user..udt_TableOfTableOfNumber +as table of &main_user..udt_TableOfNumber; +/ + +create or replace type &main_user..udt_VarrayOfTableOfNumber +as varray(5) of &main_user..udt_TableOfNumber; +/ + +create or replace type &main_user..udt_VarrayOfNumber +as varray(5) of number; +/ + +create or replace type &main_user..udt_TableOfVarrayOfNumber +as table of &main_user..udt_VarrayOfNumber; +/ + -- create tables create table &main_user..TestNumbers ( IntCol number(9) not null, @@ -355,6 +374,7 @@ create table &main_user..TestAllTypes ( IntValue integer, SmallIntValue smallint, RealValue real, + DecimalValue number(20, 6), DoublePrecisionValue double precision, FloatValue float, BinaryFloatValue binary_float, @@ -398,6 +418,16 @@ create table &main_user..TestDataframe ( ) / +create table &main_user..NestedCollectionTests ( + Id number(9), + TableCol &main_user..udt_TableOfTableOfNumber, + VarrayCol &main_user..udt_VarrayOfTableOfNumber +) +nested table TableCol store as NestedCollectionTests_nt ( + nested table column_value store as NestedCollectionTests_nti +) +/ + -- create queue table and queues for testing advanced queuing begin @@ -1363,6 +1393,46 @@ create or replace package body &main_user..pkg_TestNestedRecords as end; / +create or replace package &main_user..pkg_NestedTable as + + function GetTableOfNumber + return udt_TableOfNumber; + + function GetTableOfVarrayOfNumber + return udt_TableOfVarrayOfNumber; + + function GetVarrayOfNumber + return udt_VarrayOfNumber; + +end; +/ + +create or replace package body &main_user..pkg_NestedTable as + + function GetTableOfNumber + return udt_TableOfNumber is + begin + return udt_TableOfNumber(15, 25, 35, 45); + end; + + function GetTableOfVarrayOfNumber + return udt_TableOfVarrayOfNumber is + begin + return udt_TableOfVarrayOfNumber( + udt_VarrayOfNumber(10, 20), + udt_VarrayOfNumber(30, 40) + ); + end; + + function GetVarrayOfNumber + return udt_VarrayOfNumber is + begin + return udt_VarrayOfNumber(10, 20, 30); + end; + +end; +/ + create or replace package &main_user..pkg_SessionCallback as procedure TheCallback ( diff --git a/tests/sql/create_schema_23_4.sql b/tests/sql/create_schema_23_4.sql index 50a182c0..45c99dcd 100644 --- a/tests/sql/create_schema_23_4.sql +++ b/tests/sql/create_schema_23_4.sql @@ -75,6 +75,7 @@ store as (compress high) create table &main_user..TestBooleans ( IntCol number(9) not null, BooleanCol1 boolean not null, - BooleanCol2 boolean + BooleanCol2 boolean, + BooleanCol3 boolean ) / diff --git a/tests/test_2300_object_var.py b/tests/test_2300_object_var.py index 
919fe3bf..7f87bdbd 100644 --- a/tests/test_2300_object_var.py +++ b/tests/test_2300_object_var.py @@ -760,6 +760,7 @@ def test_2337(self): ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), + ("DECIMALVALUE", oracledb.DB_TYPE_NUMBER, 20, 6, None), ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), ( @@ -863,6 +864,81 @@ def test_2342(self): self.assertIsNone(obj.INNER2.ATTR1) self.assertEqual(obj.INNER2.ATTR2, value2) + def test_2343(self): + "2343 - test varray of numbers" + obj_type = self.conn.gettype("UDT_VARRAYOFNUMBER") + obj = self.cursor.callfunc( + "pkg_NestedTable.GetVarrayOfNumber", obj_type + ) + self.assertEqual(obj.aslist(), [10, 20, 30]) + + def test_2344(self): + "2344 - test table of numbers" + obj_type = self.conn.gettype("UDT_TABLEOFNUMBER") + obj = self.cursor.callfunc( + "pkg_NestedTable.GetTableOfNumber", obj_type + ) + self.assertEqual(obj.aslist(), [15, 25, 35, 45]) + + def test_2345(self): + "2345 - test table of varray of numbers" + obj_type = self.conn.gettype("UDT_TABLEOFVARRAYOFNUMBER") + obj = self.cursor.callfunc( + "pkg_NestedTable.GetTableOfVarrayOfNumber", obj_type + ) + plain_obj = self.get_db_object_as_plain_object(obj) + self.assertEqual(plain_obj, [[10, 20], [30, 40]]) + + def test_2346(self): + "2346 - test nested table of nested tables" + num_tab_type = self.conn.gettype("UDT_TABLEOFNUMBER") + tab_num_tab_type = self.conn.gettype("UDT_TABLEOFTABLEOFNUMBER") + + num_tab_1 = num_tab_type.newobject([1, 2]) + num_tab_2 = num_tab_type.newobject([3, 4, 5]) + num_tab_3 = num_tab_type.newobject([6, 7, 8, 9, 10]) + tab_num_tab = tab_num_tab_type.newobject( + [num_tab_1, None, num_tab_2, None, num_tab_3] + ) + + self.cursor.execute( + """ + insert into NestedCollectionTests (Id, TableCol) + values (:1, :2) + """, + [1, tab_num_tab], + ) + self.cursor.execute("select TableCol from NestedCollectionTests") + (obj,) = self.cursor.fetchone() + plain_obj = self.get_db_object_as_plain_object(obj) + expected_data = [[1, 2], None, [3, 4, 5], None, [6, 7, 8, 9, 10]] + self.assertEqual(plain_obj, expected_data) + + def test_2347(self): + "2347 - test nested table of varrays" + num_tab_type = self.conn.gettype("UDT_TABLEOFNUMBER") + arr_num_tab_type = self.conn.gettype("UDT_VARRAYOFTABLEOFNUMBER") + + num_tab_1 = num_tab_type.newobject([4, 8]) + num_tab_2 = num_tab_type.newobject([1, 3, 5]) + num_tab_3 = num_tab_type.newobject([2, 6, 10, 7, 9]) + tab_num_tab = arr_num_tab_type.newobject( + [num_tab_1, None, num_tab_2, None, num_tab_3] + ) + + self.cursor.execute( + """ + insert into NestedCollectionTests (Id, VarrayCol) + values (:1, :2) + """, + [1, tab_num_tab], + ) + self.cursor.execute("select VarrayCol from NestedCollectionTests") + (obj,) = self.cursor.fetchone() + plain_obj = self.get_db_object_as_plain_object(obj) + expected_data = [[4, 8], None, [1, 3, 5], None, [2, 6, 10, 7, 9]] + self.assertEqual(plain_obj, expected_data) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_3100_boolean_var.py b/tests/test_3100_boolean_var.py index c9ee0998..ed60514e 100644 --- a/tests/test_3100_boolean_var.py +++ b/tests/test_3100_boolean_var.py @@ -115,7 +115,8 @@ def test_3110(self): self.cursor.execute("truncate table TestBooleans") true_values = ["true", "yes", "on", "1", "t", "y"] self.cursor.executemany( - "insert into TestBooleans values (:1, :2, :3)", + 
"""insert into TestBooleans (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3)""", [(i, v, v) for i, v in enumerate(true_values)], ) self.cursor.execute( @@ -130,7 +131,8 @@ def test_3111(self): self.cursor.execute("truncate table TestBooleans") false_values = ["false", "no", "off", "0", "f", "n"] self.cursor.executemany( - "insert into TestBooleans values (:1, :2, :3)", + """insert into TestBooleans (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3)""", [(i, v, v) for i, v in enumerate(false_values)], ) self.cursor.execute( diff --git a/tests/test_5600_dbobject_async.py b/tests/test_5600_dbobject_async.py index 0a4edcad..0222abe3 100644 --- a/tests/test_5600_dbobject_async.py +++ b/tests/test_5600_dbobject_async.py @@ -563,6 +563,7 @@ async def test_5616(self): ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), + ("DECIMALVALUE", oracledb.DB_TYPE_NUMBER, 20, 6, None), ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), ( diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index bb8b1c11..d8c7fbec 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -228,7 +228,7 @@ ), ] -QUERY_SQL = """ +QUERY_SQL_WITH_WHERE_CLAUSE = """ select Id, FirstName, @@ -240,17 +240,26 @@ CreditScore, LastUpdated from TestDataFrame + {where_clause} order by id """ +QUERY_SQL = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause="") + class TestCase(test_env.BaseTestCase): - def __convert_date(self, value): + def __convert_date(self, typ, value): """ Converts a date to the format required by Arrow. """ - return (value - datetime.datetime(1970, 1, 1)).total_seconds() + if value is not None: + if typ.unit == "s": + value = datetime.datetime(value.year, value.month, value.day) + ts = (value - datetime.datetime(1970, 1, 1)).total_seconds() + if typ.unit != "s": + ts *= 1_000_000 + return ts def __convert_to_array(self, data, typ): """ @@ -262,15 +271,7 @@ def __convert_to_array(self, data, typ): for value in data ] elif isinstance(typ, pyarrow.TimestampType): - if typ.unit == "s": - data = [ - self.__convert_date( - datetime.datetime(v.year, v.month, v.day) - ) - for v in data - ] - else: - data = [self.__convert_date(value) * 1000000 for value in data] + data = [self.__convert_date(typ, v) for v in data] mask = [value is None for value in data] return pyarrow.array(data, typ, mask=mask) @@ -877,6 +878,7 @@ def test_8034(self): (1, None, None, None, None, None, None, 225, None), (2, None, None, None, None, None, None, 365, None), ] + data = [ (56.25,), (91.25,), @@ -889,6 +891,983 @@ def test_8034(self): fetched_df = pyarrow.table(ora_df).to_pandas() self.assertEqual(data, self.__get_data_from_df(fetched_df)) + def test_8035(self): + "8035 - test metadata of all data types" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + ("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.float64()), + ("SMALLINTVALUE", 127, pyarrow.float64()), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.float64()), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, 
pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + self.cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + self.cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + self.conn.commit() + sql = f"select {column_names} from TestAllTypes" + ora_df = self.conn.fetch_df_all(sql) + expected_types = [t for n, v, t in data] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + self.assertEqual(actual_types, expected_types) + + def test_8036(self): + "8036 - test metadata of all data types with fetch_decimals = True" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + ("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.decimal128(38, 0)), + ("SMALLINTVALUE", 127, pyarrow.decimal128(38, 0)), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.decimal128(20, 6)), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + self.cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + self.cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + self.conn.commit() + with test_env.DefaultsContextManager("fetch_decimals", True): + sql = f"select {column_names} from TestAllTypes" + ora_df = self.conn.fetch_df_all(sql) + expected_types = [t for n, v, t in data] + actual_types = [ + pyarrow.array(a).type for a in ora_df.column_arrays() + ] + self.assertEqual(actual_types, expected_types) + + @test_env.skip_unless_native_boolean_supported() + def test_8037(self): + "8037 - test metadata with boolean type" + self.cursor.execute("delete from TestBooleans") + data = [(1, True, False, None), (2, False, True, True)] + self.cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2, BooleanCol3) + values (:1, :2, :3, :4) + """, + data, + ) + self.conn.commit() + + sql = "select * from TestBooleans order by IntCol" + ora_df = self.conn.fetch_df_all(sql) + expected_types = [ + 
pyarrow.int64(), + pyarrow.bool_(), + pyarrow.bool_(), + pyarrow.bool_(), + ] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + self.assertEqual(actual_types, expected_types) + + def test_8038(self): + "8038 - test NULL rows with all null values" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + ] + self.__test_df_interop(data) + + def test_8039(self): + "8039 - test repeated pyarrow table construction" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000.50, + 100, + datetime.datetime.now(), + ) + ] + self.__populate_table(data) + ora_df = self.conn.fetch_df_all(QUERY_SQL) + table1 = pyarrow.table(ora_df) + table2 = pyarrow.table(ora_df) + self.assertEqual(table1.schema, table2.schema) + self.assertEqual(table1.to_pydict(), table2.to_pydict()) + + def test_8040(self): + "8040 - test dataframe query with multiple bind variables" + self.__populate_table(DATASET_2) + statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( + where_clause="where Id between :min_id and :max_id" + ) + ora_df = self.conn.fetch_df_all(statement, {"min_id": 2, "max_id": 3}) + self.assertEqual(ora_df.num_rows(), 2) + + expected_data = [row for row in DATASET_2 if row[0] in (2, 3)] + raw_df = self.__convert_to_df(expected_data) + raw_data = self.__get_data_from_df(raw_df) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, raw_data) + + def test_8041(self): + "8041 - test error handling with invalid SQL in fetch_df_batches()" + with self.assertRaisesFullCode("ORA-00942"): + for batch in self.conn.fetch_df_batches( + "select * from NonExistentTable" + ): + pass + + def test_8042(self): + "8042 - test partial batch (last batch smaller than batch size)" + test_data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + datetime.date(2000, 1, 1), + i * 100, + i % 800, + datetime.datetime.now(), + ) + for i in range(1, 8) # 7 rows + ] + self.__test_df_batches_interop(test_data, batch_size=3, num_batches=3) + + def test_8043(self): + "8043 - test with date functions" + self.__populate_table(DATASET_1) + ora_df = self.conn.fetch_df_all( + """ + select + Id, + extract(year from DateOfBirth) as birth_year, + to_char(DateOfBirth, 'YYYY-MM') as birth_month + from TestDataFrame + order by Id + """ + ) + self.assertEqual(ora_df.num_rows(), len(DATASET_1)) + year_col = ora_df.get_column_by_name("BIRTH_YEAR") + array = pyarrow.array(year_col) + self.assertEqual(array.to_pylist(), [1955, 1955]) + + def test_8044(self): + "8044 - test column access by index bounds" + self.__populate_table(DATASET_1) + ora_df = self.conn.fetch_df_all(QUERY_SQL) + with self.assertRaises(IndexError): + ora_df.get_column(ora_df.num_columns()) + + def test_8045(self): + "8045 - test with different batch sizes" + self.__test_df_batches_interop(DATASET_4, batch_size=1, num_batches=6) + self.__test_df_batches_interop(DATASET_4, batch_size=2, num_batches=3) + + def test_8046(self): + "8046 - test with very large batch size" + self.__test_df_batches_interop( + DATASET_1, batch_size=1000, num_batches=1 + ) + + def test_8047(self): + "8047 - test error handling with invalid SQL" + with self.assertRaisesFullCode("ORA-00942"): + self.conn.fetch_df_all("select * from NonExistentTable") + + def test_8048(self): + "8048 - test error handling with invalid bind variable" + self.__populate_table(DATASET_1) + with self.assertRaisesFullCode("DPY-4010", 
"ORA-01008"): + self.conn.fetch_df_all( + "select * from TestDataFrame where Id = :missing_bind" + ) + + def test_8049(self): + "8049 - test with single row result" + self.__populate_table(DATASET_1) + statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( + where_clause="where Id = 1" + ) + ora_df = self.conn.fetch_df_all(statement) + self.assertEqual(ora_df.num_rows(), 1) + self.__validate_df(ora_df, [DATASET_1[0]]) + + def test_8050(self): + "8050 - test with calculated columns" + self.__populate_table(DATASET_1) + now = datetime.datetime.now().replace(microsecond=0) + ora_df = self.conn.fetch_df_all( + """ + select + Id, + FirstName || ' ' || LastName as full_name, + Salary * 12 as annual_salary, + :now as current_date + from TestDataFrame + order by Id + """, + [now], + ) + self.assertEqual(ora_df.num_rows(), len(DATASET_1)) + self.assertEqual(ora_df.num_columns(), 4) + + expected_data = [] + for row in DATASET_1: + expected_row = ( + row[0], # Id + f"{row[1]} {row[2]}", # full_name + float(str(row[6] * 12)), # annual_salary + now, + ) + expected_data.append(expected_row) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, expected_data) + + def test_8051(self): + "8051 - test fetch_df_batches with bind variables" + batch_size = 2 + self.__populate_table(DATASET_4) + where_clause = "where Id >= :min_id" + sql = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause=where_clause) + batches = self.conn.fetch_df_batches( + sql, {"min_id": 3}, size=batch_size + ) + expected_data = [row for row in DATASET_4 if row[0] >= 3] + offset = 0 + for batch in batches: + self.__validate_df( + batch, expected_data[offset : offset + batch_size] + ) + offset += batch_size + + def test_8052(self): + "8052 - test with large data" + data = [ + (1, "A" * 41_000, b"Very long description " * 5_000), + (2, "B" * 35_000, b"Another long text " * 10_000), + (3, "C" * 72_000, b"Even longer content " * 20_000), + ] + + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame + (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + data, + ) + self.conn.commit() + + ora_df = self.conn.fetch_df_all( + """ + select Id, LongData, LongRawData + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + + def test_8053(self): + "8053 - test fetching from an empty table with fetch_df_batches" + self.cursor.execute("delete from TestDataFrame") + batches = list(self.conn.fetch_df_batches(QUERY_SQL, size=10)) + self.assertEqual(len(batches), 1) + self.assertEqual(batches[0].num_rows(), 0) + + def test_8054(self): + "8054 - fetch clob in batches" + self.cursor.execute("delete from TestDataFrame") + test_string = "A" * 10000 + data = [(test_string,)] * 3 + self.cursor.executemany( + """ + insert into TestDataFrame (LongData) + values (:1) + """, + data, + ) + self.conn.commit() + + offset = 0 + batch_size = 2 + sql = "select LongData from TestDataFrame" + for batch in self.conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data[offset : offset + batch_size]) + offset += batch_size + + def test_8055(self): + "8055 - fetch blob in batches" + self.cursor.execute("delete from TestDataFrame") + test_string = b"B" * 10000 + data = [(test_string,)] * 4 + 
self.cursor.executemany( + """ + insert into TestDataFrame (LongRawData) + values (:1) + """, + data, + ) + self.conn.commit() + + offset = 0 + batch_size = 3 + sql = "select LongRawData from TestDataFrame" + for batch in self.conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data[offset : offset + batch_size]) + offset += batch_size + + def test_8056(self): + "8056 - test with empty strings" + data = [ + ( + 1, + "", + "", + "City", + "Country", + datetime.datetime(2000, 1, 1), + 1000.0, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First", + "Last", + "", + "", + datetime.datetime(2000, 1, 1), + 2000.0, + 200, + datetime.datetime.now(), + ), + ] + self.__populate_table(data) + expected_data = [ + tuple(None if v == "" else v for v in row) for row in data + ] + ora_df = self.conn.fetch_df_all(QUERY_SQL) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, expected_data) + + def test_8057(self): + "8057 - test with unicode characters" + data = [ + ( + 1, + "Jöhn", + "Döe", + "München", + "Deutschland", + datetime.date(1980, 5, 15), + 5000, + 300, + datetime.datetime.now(), + ), + ( + 2, + "?", + "?", + "??", + "??", + datetime.date(1990, 8, 20), + 8000, + 400, + datetime.datetime.now(), + ), + ] + self.__test_df_interop(data) + + def test_8058(self): + "8072 - test with very old dates" + data = [ + ( + 1, + "Ancient", + "One", + "Babylon", + "Mesopotamia", + datetime.date(1, 1, 1), + 0, + 0, + datetime.datetime.now(), + ), + ( + 2, + "Medieval", + "Person", + "London", + "England", + datetime.date(1200, 6, 15), + 10, + 50, + datetime.datetime.now(), + ), + ] + self.__test_df_interop(data) + + def test_8059(self): + "8059 - test with future dates" + data = [ + ( + 1, + "Future", + "Person", + "Mars", + "Solar System", + datetime.date(3000, 1, 1), + 100000, + 900, + datetime.datetime.now(), + ), + ( + 2, + "Distant", + "Future", + "Andromeda", + "Galaxy", + datetime.date(9999, 12, 31), + 999999, + 999, + datetime.datetime.now(), + ), + ] + self.__test_df_interop(data) + + def test_8060(self): + "8060 - test with exactly arraysize rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, self.cursor.arraysize + 1) + ] + self.__test_df_interop(data) + + def test_8061(self): + "8061 - test with arraysize+1 rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, self.cursor.arraysize + 2) + ] + self.__test_df_interop(data) + + def test_8062(self): + "8062 - test with odd arraysize" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, 48) + ] + self.__test_df_interop(data) + + def test_8063(self): + "8063 - test with single row" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000, + 100, + datetime.datetime.now(), + ) + ] + self.__test_df_interop(data) + + def test_8064(self): + "8064 - test multiple rows with NULL values in different columns" + now = datetime.datetime.now() + 
test_date = datetime.datetime(2000, 1, 1) + data = [ + (1, None, "Last1", "City1", "Country1", None, None, 100, None), + (2, "First2", None, None, "Country2", test_date, 2000, None, None), + (3, "First3", "Last3", None, None, None, 3000, 300, now), + (4, None, None, None, None, None, None, None, None), + ] + self.__test_df_interop(data) + + def test_8065(self): + "8065 - test single column with all NULL values" + data = [ + ( + 1, + None, + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + ( + 3, + None, + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + datetime.datetime.now(), + ), + ] + self.__test_df_interop(data) + + def test_8066(self): + "8066 - test last column NULL in each row" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + None, + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + None, + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + None, + ), + ] + self.__test_df_interop(data) + + def test_8067(self): + "8067 - test alternating NULL/non-NULL values in a column" + data = [ + ( + 1, + "First1", + None, + "City1", + None, + datetime.date(2000, 1, 1), + None, + 100, + datetime.datetime.now(), + ), + (2, "First2", "Last2", None, "Country2", None, 2000, None, None), + ( + 3, + "First3", + None, + "City3", + None, + datetime.date(2002, 1, 1), + None, + 300, + datetime.datetime.now(), + ), + (4, "First4", "Last4", None, "Country4", None, 4000, None, None), + ] + self.__test_df_interop(data) + + def test_8068(self): + "8068 - test all columns NULL except one" + now = datetime.datetime.now() + test_date = datetime.date(2001, 1, 1) + data = [ + (1, None, None, None, None, None, None, None, now), + (2, None, None, None, None, test_date, None, None, None), + (3, "First3", None, None, None, None, None, None, None), + (4, None, None, None, "Country4", None, None, None, None), + ] + self.__test_df_interop(data) + + def test_8069(self): + "8069 - test all date columns with all NULL values" + data = [ + (1, "First1", "Last1", "City1", "Country1", None, 1000, 100, None), + (2, "First2", "Last2", "City2", "Country2", None, 2000, 200, None), + (3, "First3", "Last3", "City3", "Country3", None, 3000, 300, None), + ] + self.__test_df_interop(data) + + def test_8070(self): + "8070 - test NULL values in numeric columns" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + None, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + None, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + None, + None, + datetime.datetime.now(), + ), + ] + self.__test_df_interop(data) + + def test_8071(self): + "8071 - test multiple consecutive NULL rows" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2000, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + ] + self.__test_df_interop(data) + + def test_8072(self): + "8072 - test 
NULL rows interspersed with data rows" + data = [ + (1, None, None, None, None, None, None, None, None), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2003, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + (5, None, None, None, None, None, None, None, None), + ] + self.__test_df_interop(data) + + def test_8073(self): + "8073 - test multiple NULL rows with different NULL columns" + data = [ + (1, None, "Last1", "City1", "Country1", None, 1000, 100, None), + ( + 2, + "First2", + None, + "City2", + "Country2", + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + None, + "City3", + "Country3", + None, + None, + 300, + datetime.datetime.now(), + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + self.__test_df_interop(data) + + def test_8074(self): + "8074 - test NULL rows with alternating NULL patterns" + data = [ + ( + 1, + None, + "Last1", + None, + "Country1", + None, + 1000, + None, + datetime.datetime.now(), + ), + ( + 2, + "First2", + None, + "City2", + None, + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + "Last3", + None, + "Country3", + None, + 3000, + None, + datetime.datetime.now(), + ), + ( + 4, + "First4", + None, + "City4", + None, + datetime.date(2003, 1, 1), + None, + 400, + None, + ), + ] + self.__test_df_interop(data) + + def test_8075(self): + "8075 - test multiple NULL rows with partial NULL groups" + data = [ + ( + 1, + None, + None, + "City1", + "Country1", + None, + None, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + None, + "City2", + "Country2", + None, + None, + 200, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + None, + None, + datetime.date(2002, 1, 1), + 3000, + None, + None, + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + self.__test_df_interop(data) + + def test_8076(self): + "8076 - test multiple NULL rows with varying NULL counts" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, "First2", None, "City2", None, None, 2000, None, None), + ( + 3, + None, + "Last3", + None, + "Country3", + datetime.date(2002, 1, 1), + None, + 300, + None, + ), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + None, + 4000, + 400, + datetime.datetime.now(), + ), + ] + self.__test_df_interop(data) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 9f17bf1b..62bb8c17 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -229,7 +229,7 @@ ), ] -QUERY_SQL = """ +QUERY_SQL_WITH_WHERE_CLAUSE = """ select Id, FirstName, @@ -241,18 +241,27 @@ CreditScore, LastUpdated from TestDataFrame + {where_clause} order by id """ +QUERY_SQL = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause="") + @test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): - def __convert_date(self, value): + def __convert_date(self, typ, value): """ Converts a date to the format required by Arrow. 
""" - return (value - datetime.datetime(1970, 1, 1)).total_seconds() + if value is not None: + if typ.unit == "s": + value = datetime.datetime(value.year, value.month, value.day) + ts = (value - datetime.datetime(1970, 1, 1)).total_seconds() + if typ.unit != "s": + ts *= 1_000_000 + return ts def __convert_to_array(self, data, typ): """ @@ -264,15 +273,7 @@ def __convert_to_array(self, data, typ): for value in data ] elif isinstance(typ, pyarrow.TimestampType): - if typ.unit == "s": - data = [ - self.__convert_date( - datetime.datetime(v.year, v.month, v.day) - ) - for v in data - ] - else: - data = [self.__convert_date(value) * 1000000 for value in data] + data = [self.__convert_date(typ, v) for v in data] mask = [value is None for value in data] return pyarrow.array(data, typ, mask=mask) @@ -514,9 +515,9 @@ async def test_8117(self): async def test_8118(self): "8118 - fetch clob" - data = [("test_8023",)] + data = [("test_8123",)] ora_df = await self.conn.fetch_df_all( - "select to_clob('test_8023') from dual" + "select to_clob('test_8123') from dual" ) fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) @@ -524,9 +525,9 @@ async def test_8118(self): async def test_8119(self): "8119 - fetch blob" - data = [(b"test_8024",)] + data = [(b"test_8124",)] ora_df = await self.conn.fetch_df_all( - "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" + "select to_blob(utl_raw.cast_to_raw('test_8124')) from dual" ) fetched_df = pyarrow.table(ora_df).to_pandas() fetched_data = self.__get_data_from_df(fetched_df) @@ -651,6 +652,992 @@ async def test_8123(self): fetched_data = self.__get_data_from_df(fetched_df) self.assertEqual(fetched_data, data) + async def test_8124(self): + "8124 - test metadata of all data types" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + ("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.float64()), + ("SMALLINTVALUE", 127, pyarrow.float64()), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.float64()), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + await self.cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + await self.cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + await self.conn.commit() + sql = f"select {column_names} from TestAllTypes" + ora_df = await self.conn.fetch_df_all(sql) + expected_types = [t for n, v, t in data] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + self.assertEqual(actual_types, expected_types) + + async def 
test_8125(self): + "8125 - test metadata of all data types with fetch_decimals = True" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + ("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.decimal128(38, 0)), + ("SMALLINTVALUE", 127, pyarrow.decimal128(38, 0)), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.decimal128(20, 6)), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + await self.cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + await self.cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + await self.conn.commit() + with test_env.DefaultsContextManager("fetch_decimals", True): + sql = f"select {column_names} from TestAllTypes" + ora_df = await self.conn.fetch_df_all(sql) + expected_types = [t for n, v, t in data] + actual_types = [ + pyarrow.array(a).type for a in ora_df.column_arrays() + ] + self.assertEqual(actual_types, expected_types) + + @test_env.skip_unless_native_boolean_supported() + async def test_8126(self): + "8126 - test metadata with boolean type" + await self.cursor.execute("delete from TestBooleans") + data = [(1, True, False, None), (2, False, True, True)] + await self.cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2, BooleanCol3) + values (:1, :2, :3, :4) + """, + data, + ) + await self.conn.commit() + + sql = "select * from TestBooleans order by IntCol" + ora_df = await self.conn.fetch_df_all(sql) + expected_types = [ + pyarrow.int64(), + pyarrow.bool_(), + pyarrow.bool_(), + pyarrow.bool_(), + ] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + self.assertEqual(actual_types, expected_types) + + async def test_8127(self): + "8127 - test NULL rows with all null values" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + ] + await self.__test_df_interop(data) + + async def test_8128(self): + "8128 - test repeated pyarrow table construction" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000.50, + 100, + datetime.datetime.now(), + ) + ] + await self.__populate_table(data) + ora_df = await self.conn.fetch_df_all(QUERY_SQL) + table1 = pyarrow.table(ora_df) + table2 = pyarrow.table(ora_df) + self.assertEqual(table1.schema, table2.schema) + self.assertEqual(table1.to_pydict(), table2.to_pydict()) + + async def test_8129(self): + "8129 - test dataframe query with multiple bind variables" + await self.__populate_table(DATASET_2) + statement = 
QUERY_SQL_WITH_WHERE_CLAUSE.format( + where_clause="where Id between :min_id and :max_id" + ) + ora_df = await self.conn.fetch_df_all( + statement, {"min_id": 2, "max_id": 3} + ) + self.assertEqual(ora_df.num_rows(), 2) + + expected_data = [row for row in DATASET_2 if row[0] in (2, 3)] + raw_df = self.__convert_to_df(expected_data) + raw_data = self.__get_data_from_df(raw_df) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, raw_data) + + async def test_8130(self): + "8130 - test error handling with invalid SQL in fetch_df_batches()" + with self.assertRaisesFullCode("ORA-00942"): + async for batch in self.conn.fetch_df_batches( + "select * from NonExistentTable" + ): + pass + + async def test_8131(self): + "8131 - test partial batch (last batch smaller than batch size)" + test_data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + datetime.date(2000, 1, 1), + i * 100, + i % 800, + datetime.datetime.now(), + ) + for i in range(1, 8) # 7 rows + ] + await self.__test_df_batches_interop( + test_data, batch_size=3, num_batches=3 + ) + + async def test_8132(self): + "8132 - test with date functions" + await self.__populate_table(DATASET_1) + ora_df = await self.conn.fetch_df_all( + """ + select + Id, + extract(year from DateOfBirth) as birth_year, + to_char(DateOfBirth, 'YYYY-MM') as birth_month + from TestDataFrame + order by Id + """ + ) + self.assertEqual(ora_df.num_rows(), len(DATASET_1)) + year_col = ora_df.get_column_by_name("BIRTH_YEAR") + array = pyarrow.array(year_col) + self.assertEqual(array.to_pylist(), [1955, 1955]) + + async def test_8133(self): + "8133 - test column access by index bounds" + await self.__populate_table(DATASET_1) + ora_df = await self.conn.fetch_df_all(QUERY_SQL) + with self.assertRaises(IndexError): + ora_df.get_column(ora_df.num_columns()) + + async def test_8134(self): + "8134 - test with different batch sizes" + await self.__test_df_batches_interop( + DATASET_4, batch_size=1, num_batches=6 + ) + await self.__test_df_batches_interop( + DATASET_4, batch_size=2, num_batches=3 + ) + + async def test_8135(self): + "8135 - test with very large batch size" + await self.__test_df_batches_interop( + DATASET_1, batch_size=1000, num_batches=1 + ) + + async def test_8136(self): + "8136 - test error handling with invalid SQL" + with self.assertRaisesFullCode("ORA-00942"): + await self.conn.fetch_df_all("select * from NonExistentTable") + + async def test_8137(self): + "8137 - test error handling with invalid bind variable" + await self.__populate_table(DATASET_1) + with self.assertRaisesFullCode("DPY-4010", "ORA-01008"): + await self.conn.fetch_df_all( + "select * from TestDataFrame where Id = :missing_bind" + ) + + async def test_8138(self): + "8138 - test with single row result" + await self.__populate_table(DATASET_1) + statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( + where_clause="where Id = 1" + ) + ora_df = await self.conn.fetch_df_all(statement) + self.assertEqual(ora_df.num_rows(), 1) + self.__validate_df(ora_df, [DATASET_1[0]]) + + async def test_8139(self): + "8139 - test with calculated columns" + await self.__populate_table(DATASET_1) + now = datetime.datetime.now().replace(microsecond=0) + ora_df = await self.conn.fetch_df_all( + """ + select + Id, + FirstName || ' ' || LastName as full_name, + Salary * 12 as annual_salary, + :now as current_date + from TestDataFrame + order by Id + """, + [now], + ) + self.assertEqual(ora_df.num_rows(), len(DATASET_1)) + 
self.assertEqual(ora_df.num_columns(), 4) + + expected_data = [] + for row in DATASET_1: + expected_row = ( + row[0], # Id + f"{row[1]} {row[2]}", # full_name + float(str(row[6] * 12)), # annual_salary + now, + ) + expected_data.append(expected_row) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, expected_data) + + async def test_8140(self): + "8140 - test fetch_df_batches with bind variables" + batch_size = 2 + await self.__populate_table(DATASET_4) + where_clause = "where Id >= :min_id" + sql = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause=where_clause) + expected_data = [row for row in DATASET_4 if row[0] >= 3] + offset = 0 + async for batch in self.conn.fetch_df_batches( + sql, {"min_id": 3}, size=batch_size + ): + self.__validate_df( + batch, expected_data[offset : offset + batch_size] + ) + offset += batch_size + + async def test_8141(self): + "8141 - test with large data" + data = [ + (1, "A" * 41_000, b"Very long description " * 5_000), + (2, "B" * 35_000, b"Another long text " * 10_000), + (3, "C" * 72_000, b"Even longer content " * 20_000), + ] + + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame + (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + data, + ) + await self.conn.commit() + + ora_df = await self.conn.fetch_df_all( + """ + select Id, LongData, LongRawData + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data) + + async def test_8142(self): + "8142 - test fetching from an empty table with fetch_df_batches" + await self.cursor.execute("delete from TestDataFrame") + batches = [ + b async for b in self.conn.fetch_df_batches(QUERY_SQL, size=10) + ] + self.assertEqual(len(batches), 1) + self.assertEqual(batches[0].num_rows(), 0) + + async def test_8143(self): + "8143 - fetch clob in batches" + await self.cursor.execute("delete from TestDataFrame") + test_string = "A" * 10000 + data = [(test_string,)] * 3 + await self.cursor.executemany( + """ + insert into TestDataFrame (LongData) + values (:1) + """, + data, + ) + await self.conn.commit() + + offset = 0 + batch_size = 2 + sql = "select LongData from TestDataFrame" + async for batch in self.conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data[offset : offset + batch_size]) + offset += batch_size + + async def test_8144(self): + "8144 - fetch blob in batches" + await self.cursor.execute("delete from TestDataFrame") + test_string = b"B" * 10000 + data = [(test_string,)] * 4 + await self.cursor.executemany( + """ + insert into TestDataFrame (LongRawData) + values (:1) + """, + data, + ) + await self.conn.commit() + + offset = 0 + batch_size = 3 + sql = "select LongRawData from TestDataFrame" + async for batch in self.conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, data[offset : offset + batch_size]) + offset += batch_size + + async def test_8145(self): + "8145 - test with empty strings" + data = [ + ( + 1, + "", + "", + "City", + "Country", + datetime.datetime(2000, 1, 1), + 1000.0, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First", + "Last", + "", + "", + 
datetime.datetime(2000, 1, 1), + 2000.0, + 200, + datetime.datetime.now(), + ), + ] + await self.__populate_table(data) + expected_data = [ + tuple(None if v == "" else v for v in row) for row in data + ] + ora_df = await self.conn.fetch_df_all(QUERY_SQL) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = self.__get_data_from_df(fetched_df) + self.assertEqual(fetched_data, expected_data) + + async def test_8146(self): + "8146 - test with unicode characters" + data = [ + ( + 1, + "Jöhn", + "Döe", + "München", + "Deutschland", + datetime.date(1980, 5, 15), + 5000, + 300, + datetime.datetime.now(), + ), + ( + 2, + "?", + "?", + "??", + "??", + datetime.date(1990, 8, 20), + 8000, + 400, + datetime.datetime.now(), + ), + ] + await self.__test_df_interop(data) + + async def test_8147(self): + "8147 - test with very old dates" + data = [ + ( + 1, + "Ancient", + "One", + "Babylon", + "Mesopotamia", + datetime.date(1, 1, 1), + 0, + 0, + datetime.datetime.now(), + ), + ( + 2, + "Medieval", + "Person", + "London", + "England", + datetime.date(1200, 6, 15), + 10, + 50, + datetime.datetime.now(), + ), + ] + await self.__test_df_interop(data) + + async def test_8148(self): + "8148 - test with future dates" + data = [ + ( + 1, + "Future", + "Person", + "Mars", + "Solar System", + datetime.date(3000, 1, 1), + 100000, + 900, + datetime.datetime.now(), + ), + ( + 2, + "Distant", + "Future", + "Andromeda", + "Galaxy", + datetime.date(9999, 12, 31), + 999999, + 999, + datetime.datetime.now(), + ), + ] + await self.__test_df_interop(data) + + async def test_8149(self): + "8149 - test with exactly arraysize rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, self.cursor.arraysize + 1) + ] + await self.__test_df_interop(data) + + async def test_8150(self): + "8150 - test with arraysize+1 rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, self.cursor.arraysize + 2) + ] + await self.__test_df_interop(data) + + async def test_8151(self): + "8151 - test with odd arraysize" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, 48) + ] + await self.__test_df_interop(data) + + async def test_8152(self): + "8152 - test with single row" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000, + 100, + datetime.datetime.now(), + ) + ] + await self.__test_df_interop(data) + + async def test_8153(self): + "8153 - test multiple rows with NULL values in different columns" + now = datetime.datetime.now() + test_date = datetime.datetime(2000, 1, 1) + data = [ + (1, None, "Last1", "City1", "Country1", None, None, 100, None), + (2, "First2", None, None, "Country2", test_date, 2000, None, None), + (3, "First3", "Last3", None, None, None, 3000, 300, now), + (4, None, None, None, None, None, None, None, None), + ] + await self.__test_df_interop(data) + + async def test_8154(self): + "8154 - test single column with all NULL values" + data = [ + ( + 1, + None, + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + "Last2", + "City2", + 
"Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + ( + 3, + None, + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + datetime.datetime.now(), + ), + ] + await self.__test_df_interop(data) + + async def test_8155(self): + "8155 - test last column NULL in each row" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + None, + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + None, + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + None, + ), + ] + await self.__test_df_interop(data) + + async def test_8156(self): + "8156 - test alternating NULL/non-NULL values in a column" + data = [ + ( + 1, + "First1", + None, + "City1", + None, + datetime.date(2000, 1, 1), + None, + 100, + datetime.datetime.now(), + ), + (2, "First2", "Last2", None, "Country2", None, 2000, None, None), + ( + 3, + "First3", + None, + "City3", + None, + datetime.date(2002, 1, 1), + None, + 300, + datetime.datetime.now(), + ), + (4, "First4", "Last4", None, "Country4", None, 4000, None, None), + ] + await self.__test_df_interop(data) + + async def test_8157(self): + "8157 - test all columns NULL except one" + now = datetime.datetime.now() + test_date = datetime.date(2001, 1, 1) + data = [ + (1, None, None, None, None, None, None, None, now), + (2, None, None, None, None, test_date, None, None, None), + (3, "First3", None, None, None, None, None, None, None), + (4, None, None, None, "Country4", None, None, None, None), + ] + await self.__test_df_interop(data) + + async def test_8158(self): + "8158 - test all date columns with all NULL values" + data = [ + (1, "First1", "Last1", "City1", "Country1", None, 1000, 100, None), + (2, "First2", "Last2", "City2", "Country2", None, 2000, 200, None), + (3, "First3", "Last3", "City3", "Country3", None, 3000, 300, None), + ] + await self.__test_df_interop(data) + + async def test_8159(self): + "8159 - test NULL values in numeric columns" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + None, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + None, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + None, + None, + datetime.datetime.now(), + ), + ] + await self.__test_df_interop(data) + + async def test_8160(self): + "8160 - test multiple consecutive NULL rows" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2000, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + ] + await self.__test_df_interop(data) + + async def test_8161(self): + "8161 - test NULL rows interspersed with data rows" + data = [ + (1, None, None, None, None, None, None, None, None), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2003, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + (5, None, None, None, None, None, None, None, None), + ] + await 
self.__test_df_interop(data) + + async def test_8162(self): + "8162 - test multiple NULL rows with different NULL columns" + data = [ + (1, None, "Last1", "City1", "Country1", None, 1000, 100, None), + ( + 2, + "First2", + None, + "City2", + "Country2", + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + None, + "City3", + "Country3", + None, + None, + 300, + datetime.datetime.now(), + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + await self.__test_df_interop(data) + + async def test_8163(self): + "8163 - test NULL rows with alternating NULL patterns" + data = [ + ( + 1, + None, + "Last1", + None, + "Country1", + None, + 1000, + None, + datetime.datetime.now(), + ), + ( + 2, + "First2", + None, + "City2", + None, + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + "Last3", + None, + "Country3", + None, + 3000, + None, + datetime.datetime.now(), + ), + ( + 4, + "First4", + None, + "City4", + None, + datetime.date(2003, 1, 1), + None, + 400, + None, + ), + ] + await self.__test_df_interop(data) + + async def test_8164(self): + "8164 - test multiple NULL rows with partial NULL groups" + data = [ + ( + 1, + None, + None, + "City1", + "Country1", + None, + None, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + None, + "City2", + "Country2", + None, + None, + 200, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + None, + None, + datetime.date(2002, 1, 1), + 3000, + None, + None, + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + await self.__test_df_interop(data) + + async def test_8165(self): + "8165 - test multiple NULL rows with varying NULL counts" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, "First2", None, "City2", None, None, 2000, None, None), + ( + 3, + None, + "Last3", + None, + "Country3", + datetime.date(2002, 1, 1), + None, + 300, + None, + ), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + None, + 4000, + 400, + datetime.datetime.now(), + ), + ] + await self.__test_df_interop(data) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_9100_dataframe_vector.py b/tests/test_9100_dataframe_vector.py new file mode 100644 index 00000000..f63bbe2f --- /dev/null +++ b/tests/test_9100_dataframe_vector.py @@ -0,0 +1,369 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +""" +Module for testing dataframe with vector support +""" + +import array + +import numpy +import pandas +import pyarrow + +import test_env + + +@test_env.skip_unless_vectors_supported() +class TestCase(test_env.BaseTestCase): + + def __convert_df_value(self, df_val): + """ + This method converts a dataframe cell value to use with assertEqual() + For e.g. NaN and np.array cannot be compared directly. Values are + converted according to the following rules: + - NaN -> None + - np.array -> np.array.tolist() (Python list) + """ + if isinstance(df_val, numpy.ndarray): + return df_val.tolist() + elif pandas.isna(df_val): + return None + elif isinstance(df_val, dict): + return {k: self.__convert_df_value(v) for k, v in df_val.items()} + else: + return df_val + + def __get_data_from_df(self, df): + """ + Returns data from the data frame in a normalized fashion suitable for + comparison. In particular, NaN values cannot be compared to one another + so they are converted to the value None for comparison purposes. + """ + return [ + tuple(self.__convert_df_value(v) for v in row) + for row in df.itertuples(index=False, name=None) + ] + + def test_9100(self): + "9100 - fetch float32 vector" + + # float32 is a special case while comparing dataframe values + # Converting Dataframe cell value of type numpy.ndarray[float32] + # using .tolist() converts each value to Python float. Python + # float uses 64-bit precision causing mismatches in assertEqual. + # As a workaround we use array.array('f', src).tolist() on the + # source data + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (None,), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float32) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float32) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + def test_9101(self): + "9101 - fetch float64 vector" + data = [ + ([34.6, 77.8],), + (None,), + ([34.6, 77.8, 55.9],), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float64) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + def test_9102(self): + "9102 - fetch int8 vector" + data = [ + ([34, -77],), + (None,), + ([34, 77, 55],), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[34, -77]', 2, int8) + union all + select null + union all + select to_vector('[34, 77, 55]', 3, int8) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_binary_vectors_supported() + def test_9103(self): + "9103 - fetch binary vector" + data = [ + ([3, 2, 3],), + (None,), + ([3, 2],), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[3, 2, 3]', 24, binary) + union all + select null + union all + select to_vector('[3, 2]', 16, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + def test_9104(self): + "9104 - fetch duplicate float64 vectors" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 
77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_sparse_vectors_supported() + def test_9105(self): + "9105 - fetch float32 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 77.8]).tolist(), + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 9.1]).tolist(), + }, + ), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float32, sparse) + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float32, sparse) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_sparse_vectors_supported() + def test_9106(self): + "9106 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + (None,), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float64, sparse) + union all + select null + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float64, sparse) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + def test_9107(self): + "9107 - DPY-3031 - Unsupported flexible vector formats" + with self.assertRaisesFullCode("DPY-3031"): + self.conn.fetch_df_all( + """ + select to_vector('[44, 55, 89]', 3, int8) as flex_col + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float32) + """ + ) + + def test_9108(self): + "9108 - test vector operations with different dimensions" + data = [([1, 0, 3],), ([0, 5, -12.25, 0],), ([5.5, -6.25, 7, 8, 9],)] + + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[1, 0, 3]', 3, float64) from dual + union all + select to_vector('[0, 5, -12.25, 0]', 4, float64) from dual + union all + select to_vector('[5.5, -6.25, 7, 8, 9]', 5, float64) from dual + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + def test_9109(self): + "9109 - test vector operations with large arrays" + large_array = list(range(1, 1001)) + data = [(large_array,), (large_array,)] + str_value = ",".join(str(i) for i in large_array) + ora_df = self.conn.fetch_df_all( + f""" + select to_vector('[{str_value}]', {len(large_array)}, float64) + union all + select to_vector('[{str_value}]', {len(large_array)}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + 
self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_sparse_vectors_supported() + def test_9110(self): + "9110 - test sparse vector operations with different dimensions" + with self.assertRaisesFullCode("DPY-2065"): + self.conn.fetch_df_all( + """ + select to_vector('[10, [1, 3], [2, 4]]', 10, float64, sparse) + union all + select to_vector('[5, [1, 3], [2, 4]]', 5, float64, sparse) + """ + ) + + def test_9111(self): + "9111 - test mixed vector types in a single dataframe" + data = [ + ([1.5, 2.5, 3.5], [1, 2, 3]), + ([4.25, 5.25, 6.25], [4, 5, 6]), + ] + ora_df = self.conn.fetch_df_all( + """ + select + to_vector('[1.5, 2.5, 3.5]', 3, float64) as float_vec, + to_vector('[1, 2, 3]', 3, int8) as int_vec + union all + select + to_vector('[4.25, 5.25, 6.25]', 3, float64), + to_vector('[4, 5, 6]', 3, int8) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + def test_9112(self): + "9112 - test vectors with very large dimensions" + large_dim = 800 + large_vector = [2.25] * large_dim + large_vector[12] = 1.5 + large_vector[-25] = 8.5 + data = [(large_vector,)] + vector_str = ",".join(str(i) for i in large_vector) + ora_df = self.conn.fetch_df_all( + f""" + select to_vector('[{vector_str}]', {large_dim}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_binary_vectors_supported() + def test_9113(self): + "9113 - test binary vector edge case - max value" + data = [ + ([255, 255, 255],), + ([255, 0, 255],), + ] + ora_df = self.conn.fetch_df_all( + """ + select to_vector('[255, 255, 255]', 24, binary) + union all + select to_vector('[255, 0, 255]', 24, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + +if __name__ == "__main__": + test_env.run_test_cases() diff --git a/tests/test_9200_dataframe_vector_async.py b/tests/test_9200_dataframe_vector_async.py new file mode 100644 index 00000000..db012360 --- /dev/null +++ b/tests/test_9200_dataframe_vector_async.py @@ -0,0 +1,369 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +""" +Module for testing dataframe with vector support with asyncio +""" + +import array + +import numpy +import pandas +import pyarrow + +import test_env + + +@test_env.skip_unless_thin_mode() +@test_env.skip_unless_vectors_supported() +class TestCase(test_env.BaseAsyncTestCase): + + def __convert_df_value(self, df_val): + """ + This method converts a dataframe cell value to use with assertEqual() + For e.g. NaN and np.array cannot be compared directly. Values are + converted according to the following rules: + - NaN -> None + - np.array -> np.array.tolist() (Python list) + """ + if isinstance(df_val, numpy.ndarray): + return df_val.tolist() + elif pandas.isna(df_val): + return None + elif isinstance(df_val, dict): + return {k: self.__convert_df_value(v) for k, v in df_val.items()} + else: + return df_val + + def __get_data_from_df(self, df): + """ + Returns data from the data frame in a normalized fashion suitable for + comparison. In particular, NaN values cannot be compared to one another + so they are converted to the value None for comparison purposes. + """ + return [ + tuple(self.__convert_df_value(v) for v in row) + for row in df.itertuples(index=False, name=None) + ] + + async def test_9200(self): + "9200 - fetch float32 vector" + + # float32 is a special case while comparing dataframe values + # Converting Dataframe cell value of type numpy.ndarray[float32] + # using .tolist() converts each value to Python float. Python + # float uses 64-bit precision causing mismatches in assertEqual. + # As a workaround we use array.array('f', src).tolist() on the + # source data + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (None,), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float32) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float32) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + async def test_9201(self): + "9201 - fetch float64 vector" + data = [ + ([34.6, 77.8],), + (None,), + ([34.6, 77.8, 55.9],), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float64) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + async def test_9202(self): + "9202 - fetch int8 vector" + data = [ + ([34, -77],), + (None,), + ([34, 77, 55],), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[34, -77]', 2, int8) + union all + select null + union all + select to_vector('[34, 77, 55]', 3, int8) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_binary_vectors_supported() + async def test_9203(self): + "9203 - fetch binary vector" + data = [ + ([3, 2, 3],), + (None,), + ([3, 2],), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[3, 2, 3]', 24, binary) + union all + select null + union all + select to_vector('[3, 2]', 16, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + async def test_9204(self): + "9204 - fetch duplicate float64 vectors" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8],), + 
([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_sparse_vectors_supported() + async def test_9205(self): + "9205 - fetch float32 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 77.8]).tolist(), + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 9.1]).tolist(), + }, + ), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float32, sparse) + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float32, sparse) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_sparse_vectors_supported() + async def test_9206(self): + "9206 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + (None,), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float64, sparse) + union all + select null + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float64, sparse) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + async def test_9207(self): + "9207 - DPY-3031 - Unsupported flexible vector formats" + with self.assertRaisesFullCode("DPY-3031"): + await self.conn.fetch_df_all( + """ + select to_vector('[44, 55, 89]', 3, int8) as flex_col + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float32) + """ + ) + + async def test_9208(self): + "9208 - test vector operations with different dimensions" + data = [([1, 0, 3],), ([0, 5, -12.25, 0],), ([5.5, -6.25, 7, 8, 9],)] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[1, 0, 3]', 3, float64) from dual + union all + select to_vector('[0, 5, -12.25, 0]', 4, float64) from dual + union all + select to_vector('[5.5, -6.25, 7, 8, 9]', 5, float64) from dual + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + async def test_9209(self): + "9209 - test vector operations with large arrays" + large_array = list(range(1, 1001)) + data = [(large_array,), (large_array,)] + str_value = ",".join(str(i) for i in large_array) + ora_df = await self.conn.fetch_df_all( + f""" + select to_vector('[{str_value}]', 
{len(large_array)}, float64) + union all + select to_vector('[{str_value}]', {len(large_array)}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_sparse_vectors_supported() + async def test_9210(self): + "9210 - test sparse vector operations with different dimensions" + with self.assertRaisesFullCode("DPY-2065"): + await self.conn.fetch_df_all( + """ + select to_vector('[10, [1, 3], [2, 4]]', 10, float64, sparse) + union all + select to_vector('[5, [1, 3], [2, 4]]', 5, float64, sparse) + """ + ) + + async def test_9211(self): + "9211 - test mixed vector types in a single dataframe" + data = [ + ([1.5, 2.5, 3.5], [1, 2, 3]), + ([4.25, 5.25, 6.25], [4, 5, 6]), + ] + ora_df = await self.conn.fetch_df_all( + """ + select + to_vector('[1.5, 2.5, 3.5]', 3, float64) as float_vec, + to_vector('[1, 2, 3]', 3, int8) as int_vec + union all + select + to_vector('[4.25, 5.25, 6.25]', 3, float64), + to_vector('[4, 5, 6]', 3, int8) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + async def test_9212(self): + "9212 - test vectors with very large dimensions" + large_dim = 800 + large_vector = [2.25] * large_dim + large_vector[12] = 1.5 + large_vector[-25] = 8.5 + data = [(large_vector,)] + vector_str = ",".join(str(i) for i in large_vector) + ora_df = await self.conn.fetch_df_all( + f""" + select to_vector('[{vector_str}]', {large_dim}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + @test_env.skip_unless_binary_vectors_supported() + async def test_9213(self): + "9213 - test binary vector edge case - max value" + data = [ + ([255, 255, 255],), + ([255, 0, 255],), + ] + ora_df = await self.conn.fetch_df_all( + """ + select to_vector('[255, 255, 255]', 24, binary) + union all + select to_vector('[255, 0, 255]', 24, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual(data, self.__get_data_from_df(fetched_df)) + + +if __name__ == "__main__": + test_env.run_test_cases() From fc40c5265d6bc0f0cef91a126206d9320c68af6e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 29 Jul 2025 15:54:46 -0600 Subject: [PATCH 163/239] Make it easier to use Instant Client for Mac & Windows testing. --- tests/test_env.py | 16 +++++++++++++++- tox.ini | 1 + 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/test_env.py b/tests/test_env.py index e0ab89d5..6d610110 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -43,11 +43,17 @@ # PYO_TEST_EXTERNAL_USER: user for testing external authentication # PYO_TEST_EDITION_NAME: name of edition for editioning tests # PYO_TEST_PLUGINS: list of plugins to import before running tests +# PYO_TEST_ORACLE_CLIENT_PATH: Oracle Client or Instant Client library dir # # PYO_TEST_CONNECT_STRING can be set to an Easy Connect string, or a # Net Service Name from a tnsnames.ora file or external naming service, # or it can be the name of a local Oracle database instance. # +# On Windows set PYO_TEST_ORACLE_CLIENT_PATH if Oracle libraries are not in +# PATH. On macOS set the variable to the Instant Client directory. On Linux do +# not set the variable; instead set LD_LIBRARY_PATH or configure ldconfig +# before running Python. +# # If oracledb is using Instant Client, then an Easy Connect string is generally # appropriate. 
The syntax is: # @@ -68,6 +74,7 @@ import getpass import importlib import os +import platform import secrets import sys import string @@ -95,7 +102,7 @@ def _initialize(): if PARAMETERS.get("INITIALIZED"): return if not get_is_thin() and oracledb.is_thin_mode(): - oracledb.init_oracle_client() + oracledb.init_oracle_client(lib_dir=get_oracle_client()) oracledb.defaults.thick_mode_dsn_passthrough = False plugin_names = os.environ.get("PYO_TEST_PLUGINS") if plugin_names is not None: @@ -232,6 +239,11 @@ def get_client_version(): return value +def get_oracle_client(): + if platform.system() == "Darwin" or platform.system() == "Windows": + return get_value("ORACLE_CLIENT_PATH", "Oracle Instant Client Path") + + def get_connect_params(): wallet_location = get_wallet_location() return oracledb.ConnectParams( @@ -487,6 +499,8 @@ def skip_soda_tests(): return True if has_server_version(20, 1) and not has_client_version(20, 1): return True + if has_client_version(23, 3) and platform.system() == "Darwin": + return True return False diff --git a/tox.ini b/tox.ini index 495fb1d2..363a61d6 100644 --- a/tox.ini +++ b/tox.ini @@ -16,6 +16,7 @@ passenv = PYO_TEST_WALLET_PASSWORD PYO_TEST_EXTERNAL_USER PYO_TEST_EDITION_NAME + PYO_TEST_ORACLE_CLIENT_PATH DPI_DEBUG_LEVEL ORACLE_HOME From a43a812307936de5e2ab5f2fa29bfb655487dd2e Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 29 Jul 2025 15:55:33 -0600 Subject: [PATCH 164/239] Doc and sample improvements. --- doc/src/api_manual/async_connection.rst | 40 +++--- doc/src/api_manual/connection.rst | 43 ++++--- doc/src/api_manual/module.rst | 13 +- doc/src/release_notes.rst | 19 +-- doc/src/user_guide/dataframes.rst | 3 +- doc/src/user_guide/txn_management.rst | 11 +- samples/sessionless_transactions.py | 157 ++++++++++++++++++++++++ 7 files changed, 226 insertions(+), 60 deletions(-) create mode 100644 samples/sessionless_transactions.py diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 5fd6f486..b24a2fce 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -44,9 +44,8 @@ AsyncConnection Methods .. method:: AsyncConnection.begin_sessionless_transaction(transaction_id=None, \ timeout=60, defer_round_trip=False) - Begins a new sessionless transaction using the specified transaction - identifier. This method returns the transaction identifier specified by the - user or generated by python-oracledb. + Begins a new sessionless transaction. This method returns the transaction + identifier specified by the user or generated by python-oracledb. The ``transaction_id`` parameter should be of type string or bytes. If specified, it represents a unique identifier for the transaction. If a @@ -59,7 +58,11 @@ AsyncConnection Methods exceed 64 bytes in length. The ``timeout`` parameter is the number of seconds that this transaction - can be resumed by a connection the next time that it is suspended. The + can stay suspended when + :meth:`AsyncConnection.suspend_sessionless_transaction()` is later called, + or if the transaction is automatically suspended when the + ``suspend_on_success`` parameter is set to to *True* in + :meth:`AsyncCursor.execute()` or :meth:`AsyncCursor.executemany()`. The default value is *60* seconds. If a transaction is not resumed within this specified duration, the transaction will be rolled back. 
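A minimal sketch of the flow described above, assuming an asyncio driver
program with placeholder credentials, connect string, and a ``mytab`` table
(all illustrative only, not taken from the patch)::

    import asyncio

    import oracledb


    async def main():
        # placeholder credentials and connect string for this sketch
        conn = await oracledb.connect_async(
            user="hr", password="hr_password", dsn="localhost/freepdb1"
        )

        # begin a sessionless transaction; once suspended it may stay
        # suspended for up to 30 seconds before being rolled back
        txn_id = await conn.begin_sessionless_transaction(timeout=30)

        with conn.cursor() as cursor:
            # suspend_on_success suspends the transaction as soon as the
            # statement completes, avoiding a separate suspend round-trip
            await cursor.execute(
                "insert into mytab (id, data) values (:1, :2)",
                [1, "first row"],
                suspend_on_success=True,
            )

        # resume on this (or another) connection and complete the work
        await conn.resume_sessionless_transaction(txn_id)
        await conn.commit()


    asyncio.run(main())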
@@ -337,24 +340,20 @@ AsyncConnection Methods The ``timeout`` parameter is the number of seconds that the current connection waits to resume a transaction if another connection is using it. - This timeout is only effective when the transaction is in use by another - connection. In this case, the current connection waits for the transaction - to be suspended within this timeout period. When ``defer_round_trip`` is - set to *False*, the wait happens in the + When ``defer_round_trip`` is set to *False*, the wait happens in the ``resume_sessionless_transaction()`` call itself, and the function blocks - until the transaction becomes available or the timeout expires. - When ``defer_round_trip`` is set to *True*, the resume is deferred and the - wait occurs at the time of the next database operation instead. At the - start of the wait period, if the transaction is not in use by any other - connection, the resume happens immediately. If the transaction remains in - use by the other connection after the timeout period, the error `ORA-25351 + until the transaction becomes available or the timeout expires. When + ``defer_round_trip`` is set to *True*, the resume is deferred and the wait + occurs at the time of the next database operation instead. At the start of + the wait period, if the transaction is not in use by any other connection, + the resume happens immediately. If the transaction remains in use by the + other connection after the timeout period, the error `ORA-25351 `__ is raised. If another connection completes the transaction, the error `ORA-24756 `__ is raised. These error messages are only thrown for non-RAC instances. For information on - using Oracle RAC, see - :ref:`Sessionless Transactions with Oracle RAC `. - The default value is *60* seconds. + using Oracle RAC, see :ref:`Sessionless Transactions with Oracle RAC + `. The default value is *60* seconds. The ``defer_round_trip`` parameter is a boolean that determines whether the request to resume a transaction is to be sent immediately or with the @@ -404,9 +403,10 @@ AsyncConnection Methods This detaches the transaction from the connection, allowing it to be resumed later with the transaction identifier that was specified during - creation of the sessionless transaction. Also, the timeout value defined in - :meth:`AsyncConnection.begin_sessionless_transaction()` comes into effect - and determines how long the transaction can stay suspended. + creation of the sessionless transaction. The ``timeout`` previously passed + to :meth:`AsyncConnection.begin_sessionless_transaction()` determines how + long the transaction can stay suspended before it is automatically rolled + back. See :ref:`sessionlesstxns`. diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index debf85a3..2da9ee68 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -49,9 +49,8 @@ Connection Methods .. method:: Connection.begin_sessionless_transaction(transaction_id=None, \ timeout=60, defer_round_trip=False) - Begins a new sessionless transaction using the specified transaction - identifier. This method returns the transaction identifier specified by the - user or generated by python-oracledb. + Begins a new sessionless transaction. This method returns the transaction + identifier specified by the user or generated by python-oracledb. The ``transaction_id`` parameter should be of type string or bytes. If specified, it represents a unique identifier for the transaction. 
If a @@ -64,9 +63,13 @@ Connection Methods 64 bytes in length. The ``timeout`` parameter is the number of seconds that this transaction - can be resumed by a connection the next time that it is suspended. The - default value is *60* seconds. If a transaction is not resumed within this - specified duration, the transaction will be rolled back. + can stay suspended when + :meth:`Connection.suspend_sessionless_transaction()` is later called, or if + the transaction is automatically suspended when the ``suspend_on_success`` + parameter is set to to *True* in :meth:`Cursor.execute()` or + :meth:`Cursor.executemany()`. The default value is *60* seconds. If a + transaction is not resumed within this specified duration, the transaction + will be rolled back. The ``defer_round_trip`` parameter is a boolean that determines whether the request to start a transaction is to be sent immediately or with the @@ -322,24 +325,20 @@ Connection Methods The ``timeout`` parameter is the number of seconds that the current connection waits to resume a transaction if another connection is using it. - This timeout is only effective when the transaction is in use by another - connection. In this case, the current connection waits for the transaction - to be suspended within this timeout period. When ``defer_round_trip`` is - set to *False*, the wait happens in the + When ``defer_round_trip`` is set to *False*, the wait happens in the ``resume_sessionless_transaction()`` call itself, and the function blocks - until the transaction becomes available or the timeout expires. - When ``defer_round_trip`` is set to *True*, the resume is deferred and the - wait occurs at the time of the next database operation instead. At the - start of the wait period, if the transaction is not in use by any other - connection, the resume happens immediately. If the transaction remains in - use by the other connection after the timeout period, the error `ORA-25351 + until the transaction becomes available or the timeout expires. When + ``defer_round_trip`` is set to *True*, the resume is deferred and the wait + occurs at the time of the next database operation instead. At the start of + the wait period, if the transaction is not in use by any other connection, + the resume happens immediately. If the transaction remains in use by the + other connection after the timeout period, the error `ORA-25351 `__ is raised. If another connection completes the transaction, the error `ORA-24756 `__ is raised. These error messages are only thrown for non-RAC instances. For information on - using Oracle RAC, see - :ref:`Sessionless Transactions with Oracle RAC `. - The default value is *60* seconds. + using Oracle RAC, see :ref:`Sessionless Transactions with Oracle RAC + `. The default value is *60* seconds. The ``defer_round_trip`` parameter is a boolean that determines whether the request to resume a transaction is to be sent immediately or with the @@ -499,9 +498,9 @@ Connection Methods This detaches the transaction from the connection, allowing it to be resumed later with the transaction identifier that was specified during - creation of the sessionless transaction. Also, the timeout value defined in - :meth:`Connection.begin_sessionless_transaction()` comes into effect and - determines how long the transaction can stay suspended. + creation of the sessionless transaction. 
The ``timeout`` previously passed + to :meth:`Connection.begin_sessionless_transaction()` determines how long + the transaction can stay suspended before it is automatically rolled back. See :ref:`sessionlesstxns`. diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 52a80bdb..5913cc0e 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -2322,14 +2322,15 @@ Oracledb Methods .. function:: is_thin_mode() - Returns a boolean indicating if Thin mode is in use. + Returns a boolean indicating if python-oracledb is in Thin mode. Immediately after python-oracledb is imported, this function will return - *True* indicating that python-oracledb defaults to Thin mode. If - :func:`oracledb.init_oracle_client()` is called, then a subsequent call to - ``is_thin_mode()`` will return False indicating that Thick mode is - enabled. Once the first standalone connection or connection pool is - created, or a call to ``oracledb.init_oracle_client()`` is made, then + *True* indicating that python-oracledb defaults to Thin mode. If a call to + :func:`oracledb.init_oracle_client()` returns successfully, then a + subsequent call to ``is_thin_mode()`` will return False indicating that + Thick mode is enabled. Once the first standalone connection or connection + pool is created, or a successful call to ``oracledb.init_oracle_client()`` + is made, or :meth:`oracledb.enable_thin_mode()` is called, then python-oracledb’s mode is fixed and the value returned by ``is_thin_mode()`` will never change for the lifetime of the process. diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 37937979..209f4f61 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -30,7 +30,7 @@ Thin Mode Changes bind variable immediately following a query that returned multiple duplicate rows. #) Fixed bug with connect strings containing ``SOURCE_ROUTE=YES`` where the - second host is unresolvable by the client. + second host is unresolvable by the host running python-oracledb. Thick Mode Changes ++++++++++++++++++ @@ -53,22 +53,27 @@ Common Changes :ref:`dfinsert`. - Added internal support for the ArrowArrayStream PyCapsule interface to simplify :ref:`DataFrame ` use. - - Remove use of the DataFrame Interchange Protocol in python-oracledb + - Removed use of the DataFrame Interchange Protocol in python-oracledb :ref:`DataFrame ` objects. + - Removed the prefix "Oracle" from the data frame object names. They are + now called :ref:`DataFrame ` and :ref:`ArrowArray + `. - Documentation on methods and attributes of the :ref:`DataFrame ` and :ref:`ArrowArray ` objects is now available when using IDE introspection. - Upgraded Arrow C Data (nanoarrow) API version to 0.7.0. - - Ensure that the GIL is held when releasing references to :ref:`ArrowArray + - Ensured that the `Python GIL + `__ + is held when releasing references to :ref:`ArrowArray ` objects when exported Arrow buffers are released by the consumer. This avoids a segfault seen in some circumstances. - - Fixed bug when deciding Arrow datatype for numeric expressions + - Fixed bug when deciding Arrow datatype for numeric expressions. (`issue 510 `__) - Fixed bug when fetching numeric data that has no decimal point but the - Arrow array has scale > 0 - - Fixed bug when fetching dates that are in the year 2038 or later + Arrow array has scale > 0. + - Fixed bug when fetching dates that are in the year 2038 or later. 
    - Fixed bug when fetching numeric data with precision that exceeds 38 as
-      decimal data
+      decimal data.
 
 Note the data frame support in python-oracledb 3.3 is a pre-release, and may
 change in a future version.
diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst
index 995ba768..73620b07 100644
--- a/doc/src/user_guide/dataframes.rst
+++ b/doc/src/user_guide/dataframes.rst
@@ -664,7 +664,8 @@ For general information about fast data ingestion, and discussion of
 :meth:`Cursor.executemany()` and :meth:`AsyncCursor.executemany()` options,
 see :ref:`batchstmnt`.
 
-**Explicit Conversion to DataFrame or ArrowArray**
+Explicit Conversion to DataFrame or ArrowArray
+==============================================
 
 Data frames that support the Apache Arrow PyCapsule Interface can be
 explicitly converted to :ref:`DataFrame ` and :ref:`ArrowArray
diff --git a/doc/src/user_guide/txn_management.rst b/doc/src/user_guide/txn_management.rst
index a63ddc66..5826b285 100644
--- a/doc/src/user_guide/txn_management.rst
+++ b/doc/src/user_guide/txn_management.rst
@@ -196,10 +196,13 @@ You can pass the following parameters to
 :meth:`~Connection.begin_sessionless_transaction`.
   An example is "36b8f84d-df4e-4d49-b662-bcde71a8764f".
 
-- ``timeout``: This parameter determines the duration that this transaction
-  can be resumed by a connection the next time that it is suspended. The
-  default value is *60* seconds. If the transaction is not resumed within
-  the specified duration, the transaction will be rolled back.
+- ``timeout``: This parameter is the number of seconds this transaction can
+  stay suspended when :meth:`Connection.suspend_sessionless_transaction()` is
+  later called, or if the transaction is automatically suspended when the
+  ``suspend_on_success`` parameter is set to *True* in
+  :meth:`Cursor.execute()` or :meth:`Cursor.executemany()`. The default value
+  is *60* seconds. If the transaction is not resumed within the specified
+  duration, the transaction will be rolled back.
 
 - ``defer_round_trip``: This parameter determines whether the request to
   start a sessionless transaction should be sent immediately or with the next
diff --git a/samples/sessionless_transactions.py b/samples/sessionless_transactions.py
new file mode 100644
index 00000000..c0b31b07
--- /dev/null
+++ b/samples/sessionless_transactions.py
@@ -0,0 +1,157 @@
+# -----------------------------------------------------------------------------
+# Copyright (c) 2025 Oracle and/or its affiliates.
+#
+# This software is dual-licensed to you under the Universal Permissive License
+# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License
+# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose
+# either license.
+#
+# If you elect to accept the software under the Apache License, Version 2.0,
+# the following applies:
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# sessionless_transactions.py +# +# Show Oracle Database 23ai Sessionless Transactions +# ----------------------------------------------------------------------------- + +import sys + +import oracledb +import sample_env + +# determine whether to use python-oracledb thin mode or thick mode +if not sample_env.get_is_thin(): + oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) + +# this script only works with Oracle Database 23.6 or later +if sample_env.get_server_version() < (23, 6): + sys.exit("This example requires Oracle Database 23.6 or later.") + +# this script works with thin mode, or with thick mode using Oracle Client +# 23.6 or later +if not oracledb.is_thin_mode() and oracledb.clientversion()[:2] < (23, 6): + sys.exit( + "This example requires python-oracledb thin mode, or Oracle Client" + " 23.6 or later" + ) + +TXN_ID = b"my_transaction_id" + +pool = oracledb.create_pool( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + params=sample_env.get_pool_params(), +) + +# ----------------------------------------------------------------------------- +# Basic Sessionless Transaction example + +print("Example 1:") + +# Start and suspend a transaction +with pool.acquire() as connection1: + + # Immediately begin the transaction + connection1.begin_sessionless_transaction(transaction_id=TXN_ID) + + with connection1.cursor() as cursor1: + cursor1.execute( + "insert into mytab(id, data) values (:i, :d)", [1, "Sessionless 1"] + ) + connection1.suspend_sessionless_transaction() + + # Since the transaction is suspended, there will be no rows + print("1st query") + with connection1.cursor() as cursor1b: + for r in cursor1b.execute("select * from mytab"): + print(r) + +# Resume and complete the transaction in a different connection +with pool.acquire() as connection2: + + # Immediately resume the transaction + connection2.resume_sessionless_transaction(transaction_id=TXN_ID) + + with connection2.cursor() as cursor2: + cursor2.execute( + "insert into mytab(id, data) values (:i, :d)", [2, "Sessionless 2"] + ) + + # The query will show both rows inserted + print("2nd query") + for r in cursor2.execute("select * from mytab order by id"): + print(r) + + # Rollback so the example can be run multiple times. 
+ # This concludes the Sessionless Transaction + connection2.rollback() + +# ----------------------------------------------------------------------------- +# Sessionless Transaction example with custom timeouts and round-trip +# optimizations + +print("Example 2:") + +# Start and suspend a transaction +with pool.acquire() as connection3: + + connection3.begin_sessionless_transaction( + transaction_id=TXN_ID, + # The transaction can only ever be suspended for 15 seconds before it + # is automatically rolled back + timeout=15, + # Only start the transaction when the next DB operation is performed + defer_round_trip=True, + ) + with connection3.cursor() as cursor3: + cursor3.execute( + "insert into mytab(id, data) values (:i, :d)", + [3, "Sessionless 3"], + suspend_on_success=True, # automatically suspend on success + ) + + # Since the transaction is suspended, there will be no rows + print("1st query") + with connection3.cursor() as cursor3b: + for r in cursor3b.execute("select * from mytab"): + print(r) + +# Resume and complete the transaction in a different connection +with pool.acquire() as connection4: + connection4.resume_sessionless_transaction( + transaction_id=TXN_ID, + # Only wait 20 seconds if someone else is using the transaction + timeout=20, + # Only initiate resuming the transaction when the next DB operation is + # performed + defer_round_trip=True, + ) + + with connection4.cursor() as cursor4: + cursor4.execute( + "insert into mytab(id, data) values (:i, :d)", [4, "Sessionless 4"] + ) + + # The query will show both rows inserted + print("2nd query") + for r in cursor4.execute("select * from mytab order by id"): + print(r) + + # Rollback so the example can be run multiple times. + # This concludes the Sessionless Transaction + connection4.rollback() From 71cc3d4bb74f03ac4c12a69ffa5d83be20632322 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 29 Jul 2025 15:55:54 -0600 Subject: [PATCH 165/239] Correct issue with fetching data frames that contain multiple packets with asyncio. --- doc/src/release_notes.rst | 2 ++ src/oracledb/impl/thin/messages/base.pyx | 41 ++++++++++++++++++++++++ src/oracledb/impl/thin/protocol.pyx | 1 + src/oracledb/impl/thin/var.pyx | 1 + 4 files changed, 45 insertions(+) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 209f4f61..75d1a652 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -74,6 +74,8 @@ Common Changes - Fixed bug when fetching dates that are in the year 2038 or later. - Fixed bug when fetching numeric data with precision that exceeds 38 as decimal data. + - Fixed bug when fetching large amounts of data in one round-trip when + using asyncio with Oracle Database versions before 23ai. Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version. diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 3bfd8fee..80dce97e 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -734,6 +734,13 @@ cdef class Message: buf.write_ub8(state | TNS_SESSION_STATE_EXPLICIT_BOUNDARY) self.conn_impl._session_state_desired = 0 + cdef int on_out_of_packets(self) except -1: + """ + Called when an OufOfPackets exception is raised indicating that further + packets are required to continue processing of this message. 
+ """ + pass + cdef int postprocess(self) except -1: pass @@ -1307,6 +1314,7 @@ cdef class MessageWithData(Message): self.cursor_impl._last_row_index = self.row_index - 1 self.cursor_impl._buffer_rowcount = self.row_index self.bit_vector = NULL + self.on_row_completed() cdef int _process_row_header(self, ReadBuffer buf) except -1: cdef uint32_t num_bytes @@ -1511,6 +1519,39 @@ cdef class MessageWithData(Message): continue self._write_bind_params_column(buf, var_impl, pos + offset) + cdef int on_out_of_packets(self) except -1: + """ + Called when an OufOfPackets exception is raised indicating that further + packets are required to continue processing of this message. + """ + cdef ThinVarImpl var_impl + + # when fetching Arrow data, if the column has already been processed + # and no saved array already exists, the array is saved so that + # subsequent processing will not append to the array further; once the + # complete row has been processed, the saved arrays are restored and + # processing continues + if self.cursor_impl.fetching_arrow: + for var_impl in self.cursor_impl.fetch_var_impls: + if var_impl._saved_arrow_array is not None: + continue + elif var_impl._arrow_array.arrow_array.length > self.row_index: + var_impl._saved_arrow_array = var_impl._arrow_array + var_impl._arrow_array = None + var_impl._create_arrow_array() + + cdef int on_row_completed(self) except -1: + """ + Called when a row has been successfully completed. This allows for any + saved Arrow arrays to be restored. + """ + cdef ThinVarImpl var_impl + if self.cursor_impl.fetching_arrow: + for var_impl in self.cursor_impl.fetch_var_impls: + if var_impl._saved_arrow_array is not None: + var_impl._arrow_array = var_impl._saved_arrow_array + var_impl._saved_arrow_array = None + cdef int postprocess(self) except -1: """ Run any variable out converter functions on all non-null values that diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index ceefcd6b..df36f47a 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -844,6 +844,7 @@ cdef class BaseAsyncProtocol(BaseProtocol): break except OutOfPackets: await self._receive_packet(message) + message.on_out_of_packets() self._read_buf.restore_point() async def _process_single_message(self, Message message): diff --git a/src/oracledb/impl/thin/var.pyx b/src/oracledb/impl/thin/var.pyx index db005a97..72eacdd4 100644 --- a/src/oracledb/impl/thin/var.pyx +++ b/src/oracledb/impl/thin/var.pyx @@ -33,6 +33,7 @@ cdef class ThinVarImpl(BaseVarImpl): cdef: object _last_raw_value ArrowArrayImpl _last_arrow_array + ArrowArrayImpl _saved_arrow_array list _coroutine_indexes cdef int _bind(self, object conn, BaseCursorImpl cursor_impl, From 94a94408e728273a4ab04b104e8df22f22573a0b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 29 Jul 2025 15:56:33 -0600 Subject: [PATCH 166/239] Preparing to release python-oracledb 3.3. --- doc/src/release_notes.rst | 2 +- src/oracledb/version.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 75d1a652..84d8a9cd 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,7 +11,7 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. 
-oracledb `3.3.0 `__ (TBD) +oracledb `3.3.0 `__ (July 2025) -------------------------------------------------------------------------------------------------- Thin Mode Changes diff --git a/src/oracledb/version.py b/src/oracledb/version.py index 9a33e6af..3c593abb 100644 --- a/src/oracledb/version.py +++ b/src/oracledb/version.py @@ -30,4 +30,4 @@ # file doc/src/conf.py both reference this file directly. # ----------------------------------------------------------------------------- -__version__ = "3.3.0b1" +__version__ = "3.3.0" From 4b7be110ec3298b9980de18f45994d30ea80cc1b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 2 Aug 2025 11:04:19 -0600 Subject: [PATCH 167/239] Bump version in preparation for new changes. --- doc/src/release_notes.rst | 13 +++++++++++++ src/oracledb/version.py | 2 +- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 84d8a9cd..fcdd360a 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -11,6 +11,19 @@ Release changes are listed as affecting Thin Mode (the default runtime behavior of python-oracledb), as affecting the optional :ref:`Thick Mode `, or as being 'Common' for changes that impact both modes. +oracledb `3.4.0 `__ (TBD) +-------------------------------------------------------------------------------------------- + +Thin Mode Changes ++++++++++++++++++ + +Thick Mode Changes +++++++++++++++++++ + +Common Changes +++++++++++++++ + + oracledb `3.3.0 `__ (July 2025) -------------------------------------------------------------------------------------------------- diff --git a/src/oracledb/version.py b/src/oracledb/version.py index 3c593abb..e7cc192d 100644 --- a/src/oracledb/version.py +++ b/src/oracledb/version.py @@ -30,4 +30,4 @@ # file doc/src/conf.py both reference this file directly. # ----------------------------------------------------------------------------- -__version__ = "3.3.0" +__version__ = "3.4.0b1" From 8af7d76a494da126865f3970612a70d297878ea6 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 2 Aug 2025 11:04:48 -0600 Subject: [PATCH 168/239] Start process of migrating API documentation to source code. --- doc/src/api_manual/async_connection.rst | 679 ++---------- doc/src/api_manual/async_cursor.rst | 527 +-------- doc/src/api_manual/connection.rst | 840 ++------------ doc/src/api_manual/cursor.rst | 551 +--------- doc/src/api_manual/module.rst | 7 - doc/src/conf.py | 8 + doc/src/release_notes.rst | 2 + src/oracledb/connection.py | 1321 +++++++++++++++-------- src/oracledb/cursor.py | 851 +++++++++------ utils/templates/connection.py | 1321 +++++++++++++++-------- 10 files changed, 2603 insertions(+), 3504 deletions(-) diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index b24a2fce..58d1c4df 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -4,105 +4,56 @@ API: AsyncConnection Objects **************************** -An AsyncConnection object can be created with :meth:`oracledb.connect_async()` -or with :meth:`AsyncConnectionPool.acquire()`. AsyncConnections support use of -concurrent programming with `asyncio `__. Unless explicitly noted as synchronous, the AsyncConnection -methods should be used with ``await``. +.. currentmodule:: oracledb -.. dbapiobjectextension:: +.. autoclass:: AsyncConnection -.. 
versionadded:: 2.0.0 + An AsyncConnection object should be created with + :meth:`oracledb.connect_async()` or with + :meth:`AsyncConnectionPool.acquire()`. AsyncConnections support use of + concurrent programming with + `asyncio `__. -.. note:: + .. dbapiobjectextension:: - AsyncConnection objects are only supported in the python-oracledb Thin - mode. + .. versionadded:: 2.0.0 -.. note:: + .. note:: + + AsyncConnection objects are only supported in the python-oracledb Thin + mode. + + .. note:: - Any outstanding database transaction will be rolled back when the - connection object is destroyed or closed. You must perform a - :meth:`commit ` first if you want data to - persist in the database, see :ref:`txnasync`. + Any outstanding database transaction will be rolled back when the + connection object is destroyed or closed. You must perform a + :meth:`commit ` first if you want data to persist in the + database, see :ref:`txnasync`. .. _asyncconnmeth: AsyncConnection Methods ======================= -.. method:: AsyncConnection.__aenter__() - - The entry point for the asynchronous connection as a context manager. It - returns itself. - -.. method:: AsyncConnection.__aexit__() - - The exit point for the asynchronous connection as a context manager. This - will close the connection and roll back any uncommitted transaction. - -.. method:: AsyncConnection.begin_sessionless_transaction(transaction_id=None, \ - timeout=60, defer_round_trip=False) +.. automethod:: AsyncConnection.__aenter__ - Begins a new sessionless transaction. This method returns the transaction - identifier specified by the user or generated by python-oracledb. +.. automethod:: AsyncConnection.__aexit__ - The ``transaction_id`` parameter should be of type string or bytes. If - specified, it represents a unique identifier for the transaction. If a - string is passed, then it will be UTF-8 encoded to bytes. If this value is - not specified, then python-oracledb generates a a random - `universally-unique identifier (UUID) `__ value when - ``AsyncConnection.begin_sessionless_transaction()`` is called. An example - is "36b8f84d-df4e-4d49-b662-bcde71a8764f". The user-chosen value cannot - exceed 64 bytes in length. - - The ``timeout`` parameter is the number of seconds that this transaction - can stay suspended when - :meth:`AsyncConnection.suspend_sessionless_transaction()` is later called, - or if the transaction is automatically suspended when the - ``suspend_on_success`` parameter is set to to *True* in - :meth:`AsyncCursor.execute()` or :meth:`AsyncCursor.executemany()`. The - default value is *60* seconds. If a transaction is not resumed within this - specified duration, the transaction will be rolled back. - - The ``defer_round_trip`` parameter is a boolean that determines whether - the request to start a transaction is to be sent immediately or with the - next database operation. If set to *False*, the request is sent - immediately. If set to *True*, the request is included with the next - database operation on the connection. The default value is *False*. +.. automethod:: AsyncConnection.begin_sessionless_transaction See :ref:`sessionlesstxns`. .. versionadded:: 3.3.0 -.. method:: AsyncConnection.callfunc(name, return_type, parameters=None, \ - keyword_parameters=None) - - Calls a PL/SQL function with the given name. - - This is a shortcut for calling :meth:`AsyncConnection.cursor()`, - :meth:`AsyncCursor.callfunc()`, and then :meth:`AsyncCursor.close()`. - -.. 
method:: AsyncConnection.callproc(name, parameters=None, \ - keyword_parameters=None) - - Calls a PL/SQL procedure with the given name. - - This is a shortcut for calling :meth:`AsyncConnection.cursor()`, - :meth:`AsyncCursor.callproc()`, and then :meth:`AsyncCursor.close()`. - -.. method:: AsyncConnection.cancel() +.. automethod:: AsyncConnection.callfunc - A synchronous method that breaks a long-running statement. +.. automethod:: AsyncConnection.callproc -.. method:: AsyncConnection.changepassword(old_password, new_password) +.. automethod:: AsyncConnection.cancel - Changes the password for the user to which the connection is connected. +.. automethod:: AsyncConnection.changepassword -.. method:: AsyncConnection.close() - - Closes the connection. +.. automethod:: AsyncConnection.close .. note:: @@ -113,79 +64,27 @@ AsyncConnection Methods `__ ``with`` block. -.. method:: AsyncConnection.commit() - - Commits any pending transaction to the database. - -.. method:: AsyncConnection.createlob(lob_type) - - Creates and returns a new temporary LOB of the specified type. +.. automethod:: AsyncConnection.commit -.. method:: AsyncConnection.cursor(scrollable=False) +.. automethod:: AsyncConnection.createlob(lob_type) - A synchronous method that returns an :ref:`AsyncCursor object - ` associated with the connection. +.. automethod:: AsyncConnection.cursor -.. method:: AsyncConnection.decode_oson(data) - - A synchronous method that decodes `OSON-encoded - `__ - bytes and returns the object encoded in those bytes. This is useful for - fetching columns which have the check constraint ``IS JSON FORMAT OSON`` - enabled. +.. automethod:: AsyncConnection.decode_oson .. versionadded:: 2.1.0 -.. method:: AsyncConnection.encode_oson(value) - - A synchronous method that encodes a Python value into `OSON-encoded - `__ - bytes and returns them. This is useful for inserting into columns which - have the check constraint ``IS JSON FORMAT OSON`` enabled. +.. automethod:: AsyncConnection.encode_oson .. versionadded:: 2.1.0 -.. method:: AsyncConnection.execute(statement, parameters=None) - - Executes a statement against the database. - - This is a shortcut for calling :meth:`AsyncConnection.cursor()`, - :meth:`AsyncCursor.execute()`, and then :meth:`AsyncCursor.close()` +.. automethod:: AsyncConnection.execute -.. method:: AsyncConnection.executemany(statement, parameters) +.. automethod:: AsyncConnection.executemany - Executes a SQL statement once using all bind value mappings or sequences - found in the sequence parameters. This can be used to insert, update, or - delete multiple rows in a table with a single python-oracledb call. It can - also invoke a PL/SQL procedure multiple times. +.. automethod:: AsyncConnection.fetchall - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one bind variable placeholder in ``statement``. It can also be a - list of dictionaries, where the keys match the bind variable placeholder - names in ``statement``. If there are no bind values, or values have - previously been bound, the ``parameters`` value can be an integer - specifying the number of iterations. - - This is a shortcut for calling :meth:`AsyncConnection.cursor()`, - :meth:`AsyncCursor.executemany()`, and then :meth:`AsyncCursor.close()`. - -.. method:: AsyncConnection.fetch_df_all(statement, parameters=None, \ - arraysize=None) - - Fetches all rows of the SQL query ``statement``, returning them in a - :ref:`DataFrame ` object. 
An empty DataFrame is - returned if there are no rows available. - - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one :ref:`bind variable placeholder ` in ``statement``. It - can also be a list of dictionaries, where the keys match the bind variable - placeholder names in ``statement``. - - The ``arraysize`` parameter can be specified to tune performance of fetching - data across the network. It defaults to :attr:`defaults.arraysize`. - Internally, the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is - always set to the value of the explicit or default ``arraysize`` parameter - value. +.. automethod:: AsyncConnection.fetch_df_all See :ref:`dataframeformat` for the supported data types and examples. @@ -196,23 +95,7 @@ AsyncConnection Methods .. versionadded:: 3.0.0 -.. method:: AsyncConnection.fetch_df_batches(statement, parameters=None, \ - size=None) - - This returns an iterator yielding the next ``size`` rows of the SQL query - ``statement`` in each iteration as a :ref:`DataFrame ` - object. An empty DataFrame is returned if there are no rows available. - - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one :ref:`bind variable placeholder ` in ``statement``. It - can also be a list of dictionaries, where the keys match the bind variable - placeholder names in ``statement``. - - The ``size`` parameter controls the number of records fetched in each - batch. It defaults to :attr:`defaults.arraysize`. Internally, the - ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and - :attr:`Cursor.prefetchrows` sizes are always set to the value of the - explicit or default ``size`` parameter value. +.. automethod:: AsyncConnection.fetch_df_batches See :ref:`dataframeformat` for the supported data types and examples. @@ -223,165 +106,33 @@ AsyncConnection Methods .. versionadded:: 3.0.0 -.. method:: AsyncConnection.fetchall(statement, parameters=None, \ - arraysize=None, rowfactory=None) +.. automethod:: AsyncConnection.fetchmany - Executes a query and returns all of the rows. +.. automethod:: AsyncConnection.fetchone - The default value for ``arraysize`` is :attr:`defaults.arraysize`. +.. automethod:: AsyncConnection.gettype - Internally, this method's :attr:`Cursor.prefetchrows` size is set to the - value of the explicit or default ``arraysize`` parameter value. +.. automethod:: AsyncConnection.is_healthy - This is a shortcut for calling :meth:`AsyncConnection.cursor()`, - :meth:`AsyncCursor.fetchall()`, and then :meth:`AsyncCursor.close()`. - -.. method:: AsyncConnection.fetchmany(statement, parameters=None, \ - num_rows=None, rowfactory=None) - - Executes a query and returns up to the specified number of rows. - - The default value for ``num_rows`` is the value of - :attr:`defaults.arraysize`. - - Internally, this method's :attr:`Cursor.prefetchrows` size is set to the - value of the explicit or default ``num_rows`` parameter, allowing all rows - to be fetched in one :ref:`round-trip ` - - Since only one fetch is performed for a query, consider adding a ``FETCH - NEXT`` clause to the statement to prevent the database processing rows that - will never be fetched, see :ref:`rowlimit`. - - This a shortcut for calling :meth:`AsyncConnection.cursor()`, - :meth:`AsyncCursor.fetchmany()`, and then :meth:`AsyncCursor.close()`. - -.. 
method:: AsyncConnection.fetchone(statement, parameters=None, \ - rowfactory=None) - - Executes a query and returns the first row of the result set if one exists - (or *None* if no rows exist). - - Internally, this method's :attr:`Cursor.prefetchrows` and - :attr:`Cursor.arraysize` sizes will be set to *1*. - - Since only one fetch is performed for a query, consider adding a ``WHERE`` - condition or using a ``FETCH NEXT`` clause in the statement to prevent the - database processing rows that will never be fetched, see :ref:`rowlimit`. - - This a shortcut for calling :meth:`AsyncConnection.cursor()`, - :meth:`AsyncCursor.fetchone()`, and then :meth:`AsyncCursor.close()`. - -.. method:: AsyncConnection.gettype(name) - - Returns a :ref:`type object ` given its name. This can then - be used to create objects which can be bound to cursors created by this - connection. - -.. method:: AsyncConnection.is_healthy() - - A synchronous method that returns a boolean indicating the health status - of a connection. - - Connections may become unusable in several cases, such as, if the network - socket is broken, if an Oracle error indicates the connection is unusable, - or after receiving a planned down notification from the database. - - This function is best used before starting a new database request on an - existing standalone connection. Pooled connections internally perform this - check before returning a connection to the application. - - If this function returns *False*, the connection should be not be used by - the application and a new connection should be established instead. - - This function performs a local check. To fully check a connection's health, - use :meth:`AsyncConnection.ping()` which performs a :ref:`round-trip - ` to the database. - -.. method:: AsyncConnection.msgproperties(payload, correlation, delay, exceptionq, expiration, priority) - - Creates and returns a message properties object that contains the - properties of messages used in advanced queuing. See - :ref:`asyncmsgproperties` for more information. - - Each of the parameters are optional. If specified, they act as a shortcut - for setting each of the equivalently named properties. +.. automethod:: AsyncConnection.msgproperties .. versionadded:: 3.1.0 -.. method:: AsyncConnection.ping() - - Pings the database to verify if the connection is valid. - -.. method:: AsyncConnection.queue(name, payload_type=None) +.. automethod:: AsyncConnection.ping() - Creates a :ref:`queue ` which is used to enqueue and dequeue - messages in Advanced Queuing. - - The ``name`` parameter is expected to be a string identifying the queue in - which messages are to be enqueued or dequeued. - - The ``payload_type`` parameter, if specified, is expected to be an - :ref:`object type ` that identifies the type of payload the - queue expects. If the string "JSON" is specified, JSON data is enqueued and - dequeued. If not specified, RAW data is enqueued and dequeued. +.. automethod:: AsyncConnection.queue .. versionadded:: 3.1.0 -.. method:: AsyncConnection.resume_sessionless_transaction(transaction_id, \ - timeout=60, defer_round_trip=False) - - Resumes an existing sessionless transaction using the specified - transaction identifier. This method returns the transaction identifier - used to resume the sessionless transaction. - - The ``transaction_id`` parameter should be a string or bytes value that - uniquely identifies an existing sessionless transaction that is to be - resumed. 
- - The ``timeout`` parameter is the number of seconds that the current - connection waits to resume a transaction if another connection is using it. - When ``defer_round_trip`` is set to *False*, the wait happens in the - ``resume_sessionless_transaction()`` call itself, and the function blocks - until the transaction becomes available or the timeout expires. When - ``defer_round_trip`` is set to *True*, the resume is deferred and the wait - occurs at the time of the next database operation instead. At the start of - the wait period, if the transaction is not in use by any other connection, - the resume happens immediately. If the transaction remains in use by the - other connection after the timeout period, the error `ORA-25351 - `__ is raised. If - another connection completes the transaction, the error `ORA-24756 - `__ is raised. These - error messages are only thrown for non-RAC instances. For information on - using Oracle RAC, see :ref:`Sessionless Transactions with Oracle RAC - `. The default value is *60* seconds. - - The ``defer_round_trip`` parameter is a boolean that determines whether - the request to resume a transaction is to be sent immediately or with the - next database operation. If set to *False*, the request is sent - immediately. If set to *True*, the request is included with the next - database operation on the connection. The default value is *False*. +.. automethod:: AsyncConnection.resume_sessionless_transaction See :ref:`sessionlesstxns`. .. versionadded:: 3.3.0 -.. method:: AsyncConnection.rollback() - - Rolls back any pending transaction. - -.. method:: AsyncConnection.run_pipeline(pipeline, continue_on_error=False) +.. automethod:: AsyncConnection.rollback - Runs all of the operations in the :ref:`pipeline ` and returns - a list of :ref:`PipelineOpResult Objects `, each - entry corresponding to an operation executed in the pipeline. - - The ``continue_on_error`` parameter determines whether operations should - continue to run after an error has occurred. If this parameter is set to - *True*, then the :attr:`PipelineOpResult.error` attribute will be populated - with an :ref:`_Error ` instance which identifies the error - that occurred. If this parameter is set to *False*, then an exception will - be raised as soon as an error is detected and all subsequent operations - will be terminated. The default value is *False*. +.. automethod:: AsyncConnection.run_pipeline See :ref:`pipelining` for more information. @@ -397,43 +148,13 @@ AsyncConnection Methods .. versionadded:: 2.4.0 -.. method:: AsyncConnection.suspend_sessionless_transaction() - - Suspends the currently active sessionless transaction immediately. - - This detaches the transaction from the connection, allowing it to be - resumed later with the transaction identifier that was specified during - creation of the sessionless transaction. The ``timeout`` previously passed - to :meth:`AsyncConnection.begin_sessionless_transaction()` determines how - long the transaction can stay suspended before it is automatically rolled - back. +.. automethod:: AsyncConnection.suspend_sessionless_transaction See :ref:`sessionlesstxns`. .. versionadded:: 3.3.0 -.. method:: AsyncConnection.tpc_begin(xid, flags, timeout) - - Begins a Two-Phase Commit (TPC) on a global transaction using the specified - transaction identifier (xid). - - The ``xid`` parameter should be an object returned by the - :meth:`~Connection.xid()` method. 
- - The ``flags`` parameter is one of the constants - :data:`oracledb.TPC_BEGIN_JOIN`, :data:`oracledb.TPC_BEGIN_NEW`, - :data:`oracledb.TPC_BEGIN_PROMOTE`, or :data:`oracledb.TPC_BEGIN_RESUME`. - The default is :data:`oracledb.TPC_BEGIN_NEW`. - - The ``timeout`` parameter is the number of seconds to wait for a - transaction to become available for resumption when - :data:`~oracledb.TPC_BEGIN_RESUME` is specified in the ``flags`` parameter. - When :data:`~oracledb.TPC_BEGIN_NEW` is specified in the ``flags`` - parameter, the ``timeout`` parameter indicates the number of seconds the - transaction can be inactive before it is automatically terminated by the - system. A transaction is inactive between the time it is detached with - :meth:`AsyncConnection.tpc_end()` and the time it is resumed with - :meth:`AsyncConnection.tpc_begin()`.The default is *0* seconds. +.. automethod:: AsyncConnection.tpc_begin The following code sample demonstrates the ``tpc_begin()`` function:: @@ -444,27 +165,7 @@ AsyncConnection Methods .. versionadded:: 2.3.0 -.. method:: AsyncConnection.tpc_commit(xid, one_phase) - - Commits a global transaction. When called with no arguments, this method - commits a transaction previously prepared with - :meth:`~AsyncConnection.tpc_begin()` and optionally prepared with - :meth:`~AsyncConnection.tpc_prepare()`. If - :meth:`~AsyncConnection.tpc_prepare()` is not called, a single phase commit - is performed. A transaction manager may choose to do this if only a single - resource is participating in the global transaction. - - If an ``xid`` parameter is passed, then an object should be returned by the - :meth:`~Connection.xid()` function. This form should be called outside of a - transaction and is intended for use in recovery. - - The ``one_phase`` parameter is a boolean identifying whether to perform a - one-phase or two-phase commit. If ``one_phase`` parameter is *True*, a - single-phase commit is performed. The default value is *False*. This - parameter is only examined if a value is provided for the ``xid`` - parameter. Otherwise, the driver already knows whether - :meth:`~AsyncConnection.tpc_prepare()` was called for the transaction and - whether a one-phase or two-phase commit is required. +.. automethod:: AsyncConnection.tpc_commit The following code sample demonstrates the ``tpc_commit()`` function:: @@ -475,23 +176,7 @@ AsyncConnection Methods .. versionadded:: 2.3.0 -.. method:: AsyncConnection.tpc_end(xid, flags) - - Ends or suspends work on a global transaction. This function is only - intended for use by transaction managers. - - If an ``xid`` parameter is passed, then an object should be returned by the - :meth:`~Connection.xid()` function. If no xid parameter is passed, then the - transaction identifier used by the previous :meth:`~Connection.tpc_begin()` - is used. - - The ``flags`` parameter is one of the constants - :data:`oracledb.TPC_END_NORMAL` or :data:`oracledb.TPC_END_SUSPEND`. The - default is :data:`oracledb.TPC_END_NORMAL`. - - If the flag is :data:`oracledb.TPC_END_SUSPEND` then the transaction may be - resumed later by calling :meth:`AsyncConnection.tpc_begin()` with the flag - :data:`oracledb.TPC_BEGIN_RESUME`. +.. automethod:: AsyncConnection.tpc_end The following code sample demonstrates the ``tpc_end()`` function:: @@ -502,13 +187,7 @@ AsyncConnection Methods .. versionadded:: 2.3.0 -.. method:: AsyncConnection.tpc_forget(xid) - - Causes the database to forget a heuristically completed TPC transaction. 
- This function is only intended to be called by transaction managers. - - The ``xid`` parameter is mandatory and should be an object should be - returned by the :meth:`~Connection.xid()` function. +.. automethod:: AsyncConnection.tpc_forget The following code sample demonstrates the ``tpc_forget()`` function:: @@ -519,21 +198,7 @@ AsyncConnection Methods .. versionadded:: 2.3.0 -.. method:: AsyncConnection.tpc_prepare(xid) - - Prepares a two-phase transaction for commit. After this function is called, - no further activity should take place on this connection until either - :meth:`~AsyncConnection.tpc_commit()` or - :meth:`~AsyncConnection.tpc_rollback()` have been called. - - Returns a boolean indicating whether a commit is needed or not. If you - attempt to commit when not needed, then it results in the error - ``ORA-24756: transaction does not exist``. - - If an ``xid`` parameter is passed, then an object should be returned by the - :meth:`~Connection.xid()` function. If an ``xid`` parameter is not passed, - then the transaction identifier used by the previous - :meth:`~AsyncConnection.tpc_begin()` is used. +.. automethod:: AsyncConnection.tpc_prepare The following code sample demonstrates the ``tpc_prepare()`` function:: @@ -544,16 +209,7 @@ AsyncConnection Methods .. versionadded:: 2.3.0 -.. method:: AsyncConnection.tpc_recover() - - Returns a list of pending transaction identifiers that require recovery. - Objects of type ``Xid`` (as returned by the :meth:`~Connection.xid()` - function) are returned and these can be passed to - :meth:`~AsyncConnection.tpc_commit()` or - :meth:`~AsyncConnection.tpc_rollback()` as needed. - - This function queries the view ``DBA_PENDING_TRANSACTIONS`` and requires - ``SELECT`` privilege on that view. +.. automethod:: AsyncConnection.tpc_recover The following code sample demonstrates the ``tpc_recover()`` function:: @@ -563,17 +219,7 @@ AsyncConnection Methods .. versionadded:: 2.3.0 -.. method:: AsyncConnection.tpc_rollback(xid) - - Rolls back a global transaction. - - If an ``xid`` parameter is not passed, then it rolls back the transaction - that was previously started with :meth:`~AsyncConnection.tpc_begin()`. - - If an ``xid`` parameter is passed, then an object should be returned by - :meth:`~Connection.xid()` and the specified transaction is rolled back. - This form should be called outside of a transaction and is intended for use - in recovery. +.. automethod:: AsyncConnection.tpc_rollback The following code sample demonstrates the ``tpc_rollback()`` function:: @@ -584,230 +230,79 @@ AsyncConnection Methods .. versionadded:: 2.3.0 +.. automethod:: AsyncConnection.xid + .. _asynconnattr: AsyncConnection Attributes ========================== -.. attribute:: AsyncConnection.action - - This write-only attribute sets the ACTION column in the V$SESSION view. It - is a string attribute but the value *None* is accepted and treated as an - empty string. - -.. attribute:: AsyncConnection.autocommit - - This read-write attribute determines whether autocommit mode is on or off. - When autocommit mode is on, all statements are committed as soon as they - have completed executing. - -.. attribute:: AsyncConnection.call_timeout - - This read-write attribute specifies the amount of time (in milliseconds) - that a single round-trip to the database may take before a timeout will - occur. A value of *0* means that no timeout will take place. - - If a timeout occurs, the error ``DPI-1067`` will be returned if the - connection is still usable. 
Alternatively, the error ``DPI-1080`` will be - returned if the connection has become invalid and can no longer be used. - -.. attribute:: AsyncConnection.client_identifier - - This write-only attribute sets the CLIENT_IDENTIFIER column in the - V$SESSION view. - -.. attribute:: AsyncConnection.clientinfo +.. autoproperty:: AsyncConnection.action - This write-only attribute sets the CLIENT_INFO column in the V$SESSION - view. +.. autoproperty:: AsyncConnection.autocommit -.. attribute:: AsyncConnection.current_schema +.. autoproperty:: AsyncConnection.call_timeout - This read-write attribute sets the current schema attribute for the - session. Setting this value is the same as executing the SQL statement - ``ALTER SESSION SET CURRENT_SCHEMA``. The attribute is set (and verified) on - the next call that does a round trip to the server. The value is placed - before unqualified database objects in SQL statements you then execute. +.. autoproperty:: AsyncConnection.client_identifier -.. attribute:: AsyncConnection.db_domain +.. autoproperty:: AsyncConnection.clientinfo - This read-only attribute specifies the Oracle Database domain name - associated with the connection. It is the same value returned by the SQL - ``SELECT value FROM V$PARAMETER WHERE NAME = 'db_domain'``. +.. autoproperty:: AsyncConnection.current_schema -.. attribute:: AsyncConnection.db_name +.. autoproperty:: AsyncConnection.db_domain - This read-only attribute specifies the Oracle Database name associated with - the connection. It is the same value returned by the SQL - ``SELECT NAME FROM V$DATABASE``. +.. autoproperty:: AsyncConnection.db_name -.. attribute:: AsyncConnection.dbop +.. autoproperty:: AsyncConnection.dbop - This write-only attribute sets the database operation that is to be - monitored. This can be viewed in the DBOP_NAME column of the - V$SQL_MONITOR view. +.. autoproperty:: AsyncConnection.dsn -.. attribute:: AsyncConnection.dsn +.. autoproperty:: AsyncConnection.econtext_id - This read-only attribute returns the TNS entry of the database to which a - connection has been established. +.. autoproperty:: AsyncConnection.edition -.. attribute:: AsyncConnection.econtext_id +.. autoproperty:: AsyncConnection.external_name - This write-only attribute specifies the execution context id. This value - can be found as the ECID column in the V$SESSION view and ECONTEXT_ID in - the auditing tables. The maximum length is 64 bytes. +.. autoproperty:: AsyncConnection.inputtypehandler -.. attribute:: AsyncConnection.edition +.. autoproperty:: AsyncConnection.instance_name - This read-only attribute gets the session edition and is only available - with Oracle Database 11.2, or later. +.. autoproperty:: AsyncConnection.internal_name -.. attribute:: AsyncConnection.external_name +.. autoproperty:: AsyncConnection.ltxid - This read-write attribute specifies the external name that is used by the - connection when logging distributed transactions. - -.. attribute:: AsyncConnection.inputtypehandler - - This read-write attribute specifies a method called for each value that is - bound to a statement executed on any cursor associated with this - connection. The method signature is handler(cursor, value, arraysize) and - the return value is expected to be a variable object or *None* in which - case a default variable object will be created. If this attribute is - *None*, the default behavior will take place for all values bound to - statements. - -.. 
attribute:: AsyncConnection.instance_name - - This read-only attribute specifies the Oracle Database instance name - associated with the connection. It is the same value as the SQL expression - ``sys_context('userenv', 'instance_name')``. - -.. attribute:: AsyncConnection.internal_name - - This read-write attribute specifies the internal name that is used by the - connection when logging distributed transactions. - -.. attribute:: AsyncConnection.ltxid - - This read-only attribute returns the logical transaction id for the - connection. It is used within Oracle Transaction Guard as a means of - ensuring that transactions are not duplicated. See the Oracle documentation - and the provided sample for more information. - - .. note: - - This attribute is only available with Oracle Database 12.1 or later. - -.. attribute:: AsyncConnection.max_identifier_length - - This read-only attribute specifies the maximum database identifier length - in bytes supported by the database to which the connection has been - established. See `Database Object Naming Rules - `__. +.. autoproperty:: AsyncConnection.max_identifier_length .. versionadded:: 2.5.0 -.. attribute:: AsyncConnection.max_open_cursors +.. autoproperty:: AsyncConnection.max_open_cursors - This read-only attribute specifies the maximum number of cursors that the - database can have open concurrently. It is the same value returned by the - SQL ``SELECT VALUE FROM V$PARAMETER WHERE NAME = 'open_cursors'``. +.. autoproperty:: AsyncConnection.module -.. attribute:: AsyncConnection.module - - This write-only attribute sets the MODULE column in the V$SESSION view. - The maximum length for this string is 48 and if you exceed this length you - will get ``ORA-24960``. - -.. attribute:: AsyncConnection.outputtypehandler - - This read-write attribute specifies a method called for each column that is - going to be fetched from any cursor associated with this connection. The - method signature is ``handler(cursor, metadata)`` and the return value is - expected to be a :ref:`variable object ` or *None* in which case a - default variable object will be created. If this attribute is *None*, the - default behavior will take place for all columns fetched from cursors. +.. autoproperty:: AsyncConnection.outputtypehandler See :ref:`outputtypehandlers`. -.. attribute:: AsyncConnection.sdu - - This read-only attribute specifies the size of the Session Data Unit (SDU) - that is being used by the connection. The value will be the lesser of the - requested python-oracledb size and the maximum size allowed by the database - network configuration. +.. autoproperty:: AsyncConnection.sdu -.. attribute:: AsyncConnection.serial_num - - This read-only attribute specifies the session serial number associated with - the connection. It is the same value returned by the SQL - ``SELECT SERIAL# FROM V$SESSION``. - - It is available only in python-oracledb Thin mode. - - For applications using :ref:`drcp`, the ``serial_num`` attribute may not - contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a round-trip. - - .. dbapiattributeextension:: +.. autoproperty:: AsyncConnection.serial_num .. versionadded:: 2.5.0 -.. attribute:: AsyncConnection.service_name - - This read-only attribute specifies the Oracle Database service name - associated with the connection. 
This is the same value returned by the SQL - ``SELECT SYS_CONTEXT('USERENV', 'SERVICE_NAME') FROM DUAL``. - -.. attribute:: AsyncConnection.session_id - - This read-only attribute specifies the session identifier associated with - the connection. It is the same value returned by the SQL - ``SELECT SID FROM V$SESSION``. - - It is available only in python-oracledb Thin mode. +.. autoproperty:: AsyncConnection.service_name - For applications using :ref:`drcp`, the ``session_id`` attribute may - not contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a - round-trip. - - .. dbapiattributeextension:: +.. autoproperty:: AsyncConnection.session_id .. versionadded:: 2.5.0 -.. attribute:: AsyncConnection.stmtcachesize - - This read-write attribute specifies the size of the statement cache. This - value can make a significant difference in performance if you have a small - number of statements that you execute repeatedly. - - The default value is *20*. +.. autoproperty:: AsyncConnection.stmtcachesize See :ref:`Statement Caching ` for more information. -.. attribute:: AsyncConnection.thin - - This read-only attribute returns a boolean indicating if the connection was - established with the python-oracledb Thin mode (*True*) or python-oracledb - Thick mode (*False*). - -.. attribute:: AsyncConnection.transaction_in_progress - - This read-only attribute specifies whether a transaction is currently in - progress on the database associated with the connection. - -.. attribute:: AsyncConnection.username +.. autoproperty:: AsyncConnection.thin - This read-only attribute returns the name of the user which established the - connection to the database. +.. autoproperty:: AsyncConnection.transaction_in_progress -.. attribute:: AsyncConnection.version +.. autoproperty:: AsyncConnection.username - This read-only attribute returns the version of the database to which a - connection has been established. +.. autoproperty:: AsyncConnection.version diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index d0e345de..a00b656a 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -4,111 +4,55 @@ API: AsyncCursor Objects ************************ -An AsyncCursor object can be created with :meth:`AsyncConnection.cursor()`. -Unless explicitly noted as synchronous, the AsyncCursor methods should be used -with ``await``. +.. currentmodule:: oracledb -.. dbapiobjectextension:: +.. autoclass:: AsyncCursor -.. versionadded:: 2.0.0 + An AsyncCursor object should be created with + :meth:`AsyncConnection.cursor()`. -.. note:: + .. dbapiobjectextension:: - AsyncCursor objects are only supported in the python-oracledb Thin mode. + .. versionadded:: 2.0.0 + + .. note:: + + AsyncCursor objects are only supported in the python-oracledb Thin + mode. .. _asynccursormeth: AsyncCursor Methods =================== -.. method:: AsyncCursor.__aiter__() - - Returns the cursor itself to be used as an asynchronous iterator. - -.. method:: AsyncCursor.__enter__() - - The entry point for the cursor as a context manager. It returns itself. - -.. method:: AsyncCursor.__exit__() - - The exit point for the cursor as a context manager. It closes the cursor. +.. automethod:: AsyncCursor.__aiter__ -.. method:: AsyncCursor.arrayvar(typ, value, [size]) +.. 
automethod:: AsyncCursor.__aenter__ - A synchronous method that creates an array variable associated with the - cursor of the given type and size and returns a - :ref:`variable object `. The value is either an integer specifying - the number of elements to allocate or it is a list and the number of - elements allocated is drawn from the size of the list. If the value is a - list, the variable is also set with the contents of the list. If the size - is not specified and the type is a string or binary, 4000 bytes is - allocated. This is needed for passing arrays to PL/SQL (in cases where - the list might be empty and the type cannot be determined automatically) or - returning arrays from PL/SQL. +.. automethod:: AsyncCursor.__aexit__ - Array variables can only be used for PL/SQL associative arrays with - contiguous keys. For PL/SQL associative arrays with sparsely populated keys - or for varrays and nested tables, the approach shown in this - `example `__ needs to be used. +.. automethod:: AsyncCursor.arrayvar -.. method:: AsyncCursor.bindnames() +.. automethod:: AsyncCursor.bindnames - A synchronous method that returns the list of bind variable names bound to - the statement. Note that a statement must have been prepared first. - -.. method:: AsyncCursor.callfunc(name, return_type, parameters=None, \ - keyword_parameters=None) - - Calls a PL/SQL function with the given name and returns its value. - - The ``return_type`` parameter for :meth:`~AsyncCursor.callfunc()` is - expected to be a Python type, one of the :ref:`oracledb types ` or - an :ref:`Object Type `. - - The sequence of parameters must contain one entry for each parameter that - the PL/SQL function expects. Any keyword parameters will be included after - the positional parameters. - - Use :meth:`AsyncCursor.var()` to define any OUT or IN OUT parameters, if - necessary. +.. automethod:: AsyncCursor.callfunc See :ref:`plsqlfunc` for examples. .. note:: In line with the Python DB API, it is not recommended to call - :meth:`AsyncCursor.setinputsizes()` prior to calling - :meth:`~AsyncCursor.callfunc()`. Use :meth:`AsyncCursor.var()` instead. - In existing code that calls :meth:`~AsyncCursor.setinputsizes()`, the - first item in the :meth:`~AsyncCursor.setinputsizes()` parameter list - refers to the return value of the PL/SQL function. - -.. method:: AsyncCursor.callproc(name, parameters=None, keyword_parameters=None) - - Calls a PL/SQL procedure with the given name. + :meth:`setinputsizes()` prior to calling this function. + Use :meth:`AsyncCursor.var()` instead. In existing code that calls + :meth:`~AsyncCursor.setinputsizes()`, the first item in the + :meth:`~AsyncCursor.setinputsizes()` parameter list refers to the + return value of the PL/SQL function. - The sequence of parameters must contain one entry for each parameter that - the procedure expects. The result of the call is a modified copy of the - input sequence. Input parameters are left untouched; output and - input/output parameters are replaced with possibly new values. Keyword - parameters will be included after the positional parameters and are not - returned as part of the output sequence. - - Use :meth:`AsyncCursor.var()` to define any OUT or IN OUT parameters if - necessary. - - No query result set is returned by :meth:`~AsyncCursor.callproc()`. - Instead, use :ref:`REF CURSOR ` parameters or :ref:`Implicit - Results `. +.. automethod:: AsyncCursor.callproc See :ref:`plsqlproc` for an example. -.. 
method:: AsyncCursor.close() - - A synchronous method that closes the cursor now. The cursor will be - unusable from this point forward. An Error exception will be raised if any - operation is attempted with the cursor. +.. automethod:: AsyncCursor.close .. note:: @@ -119,238 +63,50 @@ AsyncCursor Methods `__ ``with`` block. -.. method:: AsyncCursor.execute(statement, parameters=None, \ - suspend_on_success=False, ** keyword_parameters) - - Executes a statement against the database. See :ref:`sqlexecution`. - - Parameters may be passed as a dictionary or sequence or as keyword - parameters. If the parameters are a dictionary, the values will be bound by - name and if the parameters are a sequence the values will be bound by - position. Note that if the values are bound by position, the order of the - variables is from left to right as they are encountered in the statement - and SQL statements are processed differently than PL/SQL statements. For - this reason, it is generally recommended to bind parameters by name instead - of by position. - - Parameters passed as a dictionary are name and value pairs. The name maps - to the bind variable name used by the statement and the value maps to the - Python value you wish bound to that bind variable. - - A reference to the statement will be retained by the cursor. If *None* or - the same string object is passed in again, the cursor will execute that - statement again without performing a prepare or rebinding and redefining. - This is most effective for algorithms where the same statement is used, but - different parameters are bound to it (many times). Note that parameters - that are not passed in during subsequent executions will retain the value - passed in during the last execution that contained them. - - The ``suspend_on_success`` parameter is specific to :ref:`sessionless - transactions `. When set to *True*, the active sessionless - transaction will be suspended when ``execute()`` completes successfully. - See :ref:`suspendtxns`. - - For maximum efficiency when reusing a statement, it is best to use the - :meth:`AsyncCursor.setinputsizes()` method to specify the parameter types and - sizes ahead of time; in particular, *None* is assumed to be a string of - length 1 so any values that are later bound as numbers or dates will raise - a TypeError exception. - - If the statement is a query, the cursor is returned as a convenience to the - caller (so it can be used directly as an iterator over the rows in the - cursor); otherwise, *None* is returned. +.. automethod:: AsyncCursor.execute .. versionchanged:: 3.3.0 The ``suspend_on_success`` parameter was added. -.. method:: AsyncCursor.executemany(statement, parameters, batcherrors=False, \ - arraydmlrowcounts=False, suspend_on_success=False) - - Executes a SQL statement once using all bind value mappings or sequences - found in the sequence parameters. This can be used to insert, update, or - delete multiple rows in a table with a single python-oracledb call. It can - also invoke a PL/SQL procedure multiple times. See :ref:`batchstmnt`. - - The ``statement`` parameter is managed in the same way as the - :meth:`AsyncCursor.execute()` method manages it. - - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one bind variable placeholder in ``statement``. It can also be a - list of dictionaries, where the keys match the bind variable placeholder - names in ``statement``. 
If there are no bind values, or values have - previously been bound, the ``parameters`` value can be an integer - specifying the number of iterations. The ``parameters`` parameter can also - be a :ref:`DataFrame `, or a third-party data frame - that supports the `Apache Arrow PyCapsule `__ Interface. - - In python-oracledb Thick mode, if the size of the buffers allocated for any - of the parameters exceeds 2 GB, you will receive the error ``DPI-1015: - array size of is too large``. If you receive this error, decrease the - number of rows being inserted. - - When True, the ``batcherrors`` parameter enables batch error support within - Oracle and ensures that the call succeeds even if an exception takes place - in one or more of the sequence of parameters. The errors can then be - retrieved using :meth:`AsyncCursor.getbatcherrors()`. - - When True, the ``arraydmlrowcounts`` parameter enables DML row counts to be - retrieved from Oracle after the method has completed. The row counts can - then be retrieved using :meth:`AsyncCursor.getarraydmlrowcounts()`. - - Both the ``batcherrors`` parameter and the ``arraydmlrowcounts`` parameter - can only be True when executing an insert, update, delete, or merge - statement. In all other cases, an error will be raised. - - The ``suspend_on_success`` parameter is specific to :ref:`sessionless - transactions `. When set to *True*, the active sessionless - transaction will be suspended when ``executemany()`` completes - successfully. See :ref:`suspendtxns`. - - For maximum efficiency, it is best to use the - :meth:`AsyncCursor.setinputsizes()` method to specify the parameter types - and sizes ahead of time. In particular, the value *None* is assumed to be a - string of length 1 so any values that are later bound as numbers or dates - will raise a TypeError exception. +.. automethod:: AsyncCursor.executemany .. versionchanged:: 3.3.0 Added support for passing data frames in the ``parameters`` parameter. - - .. versionadded:: 3.3.0 - The ``suspend_on_success`` parameter was added. -.. method:: AsyncCursor.fetchall() - - Fetches all (remaining) rows of a query result, returning them as a list of - tuples. An empty list is returned if no more rows are available. An - exception is raised if the previous call to :meth:`AsyncCursor.execute()` - did not produce any result set or no call was issued yet. - - Note that the cursor's :attr:`~AsyncCursor.arraysize` attribute can affect - the performance of this operation, as internally data is fetched in batches - of that size from the database. +.. automethod:: AsyncCursor.fetchall -.. method:: AsyncCursor.fetchmany(size=cursor.arraysize) +.. automethod:: AsyncCursor.fetchmany - Fetches the next set of rows of a query result, returning a list of tuples. - An empty list is returned if no more rows are available. Note that the - cursor's :attr:`~AsyncCursor.arraysize` attribute can affect the - performance of this operation. +.. automethod:: AsyncCursor.fetchone - The number of rows to fetch is specified by the parameter. If it is not - given, the cursor's :attr:`~AsyncCursor.arraysize` attribute determines the - number of rows to be fetched. If the number of rows available to be fetched - is fewer than the amount requested, fewer rows will be returned. +.. automethod:: AsyncCursor.getarraydmlrowcounts - An exception is raised if the previous call to - :meth:`AsyncCursor.execute()` did not produce any result set or no call - was issued yet. +.. automethod:: AsyncCursor.getbatcherrors -.. 
method:: AsyncCursor.fetchone() - - Fetches the next row of a query result set, returning a single tuple or - *None* when no more data is available. - - An exception is raised if the previous call to - :meth:`AsyncCursor.execute()` did not produce any result set or no call - was issued yet. - -.. method:: AsyncCursor.getarraydmlrowcounts() - - A synchronous method that retrieves the DML row counts after a call to - :meth:`AsyncCursor.executemany()` with ``arraydmlrowcounts`` enabled. This - will return a list of integers corresponding to the number of rows - affected by the DML statement for each element of the array passed to - :meth:`AsyncCursor.executemany()`. +.. automethod:: AsyncCursor.getimplicitresults .. note:: - This method is only available for Oracle 12.1 and later. - -.. method:: AsyncCursor.getbatcherrors() - - A synchronous method that retrieves the exceptions that took place after a - call to :meth:`AsyncCursor.executemany()` with ``batcherrors`` enabled. - This will return a list of Error objects, one error for each iteration that - failed. The offset can be determined by looking at the offset attribute of - the error object. - -.. method:: AsyncCursor.getimplicitresults() + It is most like the DB API method nextset(), but unlike that method + (which requires that the next result set overwrite the current result + set), this method returns cursors which can be fetched independently of + each other. - A synchronous method that returns a list of cursors which correspond to - implicit results made available from a PL/SQL block or procedure without - the use of OUT ref cursor parameters. The PL/SQL block or procedure opens - the cursors and marks them for return to the driver using the procedure - dbms_sql.return_result. Cursors returned in this fashion should not be - closed. They will be closed automatically by the parent cursor when it is - closed. Closing the parent cursor will invalidate the cursors returned by - this method. - - .. note:: - - This method is only available with Oracle Database 12.1 or later. It is - most like the DB API method nextset(), but unlike that method (which - requires that the next result set overwrite the current result set), - this method returns cursors which can be fetched independently of each - other. - -.. method:: AsyncCursor.parse(statement) - - This can be used to parse a statement without actually executing it - (parsing step is done automatically by Oracle when a statement is - :meth:`executed `). +.. automethod:: AsyncCursor.parse .. note:: You can parse any DML or DDL statement. DDL statements are executed immediately and an implied commit takes place. -.. method:: AsyncCursor.prepare(statement, tag, cache_statement=True) - - A synchronous method that can be used before a call to - :meth:`AsyncCursor.execute()` to define the statement that will be - executed. When this is done, the prepare phase will not be performed when - the call to :meth:`AsyncCursor.execute()` is made with *None* or the same - string object as the statement. - - If the ``tag`` parameter is specified and the ``cache_statement`` parameter - is *True*, the statement will be returned to the statement cache with the - given tag. - - If the ``cache_statement`` parameter is *False*, the statement will be - removed from the statement cache (if it was found there) or will simply not - be cached. +.. automethod:: AsyncCursor.prepare See :ref:`Statement Caching ` for more information. -.. 
method:: AsyncCursor.setinputsizes(*args, **keywordArgs) - - A synchronous method that can be used before a call to - :meth:`AsyncCursor.execute()` and :meth:`AsyncCursor.executemany()` to - predefine memory areas for the operation's parameters. Each parameter - should be a type object corresponding to the data that will be used for a - :ref:`bind variable placeholder ` in the SQL or PL/SQL statement. - Alternatively, it can be an integer specifying the maximum length of a - string bind variable value. - - Use keyword parameters when :ref:`binding by name `. Use - positional parameters when :ref:`binding by position `. The - parameter value can be *None* to indicate that python-oracledb should - determine the required space from the data value provided. - - The parameters or keyword names correspond to the bind variable - placeholders used in the SQL or PL/SQL statement. Note this means that for - use with :meth:`AsyncCursor.executemany()` it does not correspond to the - number of bind value mappings or sequences being passed. - - When repeated calls to :meth:`AsyncCursor.execute()` or - :meth:`AsyncCursor.executemany()` are made binding different string data - lengths, using :meth:`~AsyncCursor.setinputsizes()` can help reduce the - database's SQL "version count" for the statement. See :ref:`Reducing the - SQL Version Count `. +.. automethod:: AsyncCursor.setinputsizes .. note:: @@ -362,216 +118,43 @@ AsyncCursor Methods :meth:`AsyncCursor.callfunc()`, the first parameter in the list refers to the return value of the PL/SQL function. -.. method:: AsyncCursor.scroll(value=0, mode="relative") - - Scrolls the cursor in the result set to a new position according to the - mode. - - If mode is *relative* (the default value), the value is taken as an offset - to the current position in the result set. If set to *absolute*, value - states an absolute target position. If set to *first*, the cursor is - positioned at the first row and if set to *last*, the cursor is set to the - last row in the result set. - - An error is raised if the mode is *relative* or *absolute* and the scroll - operation would position the cursor outside of the result set. - -.. method:: AsyncCursor.setoutputsize(size, [column]) - - This method does nothing and is retained solely for compatibility with the - DB API. Python-oracledb automatically allocates as much space as needed to - fetch LONG and LONG RAW columns, and also to fetch CLOB as string and BLOB - as bytes. - -.. method:: AsyncCursor.var(typ, [size, arraysize, inconverter, outconverter, \ - typename, encoding_errors, bypass_decode, convert_nulls]) - - A synchronous method that creates a :ref:`variable object ` with - the specified characteristics. This method can be used for binding to - PL/SQL IN and OUT parameters where the length or type cannot be determined - automatically from the Python variable being bound. It can also be used in - :ref:`input ` and :ref:`output ` - type handlers. - - The ``typ`` parameter specifies the type of data that should be stored in the - variable. This should be one of the :ref:`database type constants - `, :ref:`DB API constants `, an object type returned from - the method :meth:`AsyncConnection.gettype()` or one of the following Python - types: - - .. list-table-with-summary:: - :header-rows: 1 - :class: wy-table-responsive - :align: center - :summary: The first column is the Python Type. The second column is the corresponding Database Type. 
- - * - Python Type - - Database Type - * - bool - - :attr:`oracledb.DB_TYPE_BOOLEAN` - * - bytes - - :attr:`oracledb.DB_TYPE_RAW` - * - datetime.date - - :attr:`oracledb.DB_TYPE_DATE` - * - datetime.datetime - - :attr:`oracledb.DB_TYPE_DATE` - * - datetime.timedelta - - :attr:`oracledb.DB_TYPE_INTERVAL_DS` - * - decimal.Decimal - - :attr:`oracledb.DB_TYPE_NUMBER` - * - float - - :attr:`oracledb.DB_TYPE_NUMBER` - * - int - - :attr:`oracledb.DB_TYPE_NUMBER` - * - str - - :attr:`oracledb.DB_TYPE_VARCHAR` - - The ``size`` parameter specifies the length of string and raw variables and is - ignored in all other cases. If not specified for string and raw variables, - the value *4000* is used. - - The ``arraysize`` parameter specifies the number of elements the variable will - have. If not specified the bind array size (usually *1*) is used. When a - variable is created in an output type handler this parameter should be set - to the cursor's array size. - - The ``inconverter`` and ``outconverter`` parameters specify methods used for - converting values to/from the database. More information can be found in - the section on :ref:`variable objects`. - - The ``typename`` parameter specifies the name of a SQL object type and must be - specified when using type :data:`oracledb.OBJECT` unless the type object - was passed directly as the first parameter. - - The ``encoding_errors`` parameter specifies what should happen when decoding - byte strings fetched from the database into strings. It should be one of - the values noted in the builtin - `decode `__ - function. - - The ``bypass_decode`` parameter, if specified, should be passed as a - boolean value. Passing a `True` value causes values of database types - :data:`~oracledb.DB_TYPE_VARCHAR`, :data:`~oracledb.DB_TYPE_CHAR`, - :data:`~oracledb.DB_TYPE_NVARCHAR`, :data:`~oracledb.DB_TYPE_NCHAR` and - :data:`~oracledb.DB_TYPE_LONG` to be returned as `bytes` instead of `str`, - meaning that python-oracledb does not do any decoding. See :ref:`Fetching raw - data ` for more information. - - The ``convert_nulls`` parameter, if specified, should be passed as a boolean - value. Passing the value *True* causes the ``outconverter`` to be called - when a null value is fetched from the database; otherwise, the - ``outconverter`` is only called when non-null values are fetched from the - database. +.. automethod:: AsyncCursor.scroll + +.. automethod:: AsyncCursor.setoutputsize + +.. automethod:: AsyncCursor.var .. _asynccursorattr: AsyncCursor Attributes ====================== -.. attribute:: AsyncCursor.arraysize - - This read-write attribute can be used to tune the number of rows internally - fetched and buffered by internal calls to the database when fetching rows - from SELECT statements and REF CURSORS. The value can drastically affect - the performance of a query since it directly affects the number of network - round trips between Python and the database. For methods like - :meth:`AsyncCursor.fetchone()` and :meth:`AsyncCursor.fetchall()` it - affects internal behavior but does not change how many rows are returned to - the application. For :meth:`AsyncCursor.fetchmany()` it is the default - number of rows to fetch. - - The attribute is only used for tuning row and SODA document fetches from - the database. It does not affect data inserts. - - Due to the performance benefits, the default ``arraysize`` is *100* instead - of the *1* that the Python DB API recommends. +.. 
autoproperty:: AsyncCursor.arraysize See :ref:`Tuning Fetch Performance ` for more information. -.. attribute:: AsyncCursor.bindvars - - This read-only attribute provides the bind variables used for the last - execute. The value will be either a list or a dictionary depending on - whether binding was done by position or name. Care should be taken when - referencing this attribute. In particular, elements should not be removed - or replaced. - -.. attribute:: AsyncCursor.description - - This read-only attribute is a sequence of :ref:`FetchInfo` - objects. This attribute will be *None* for operations that do not return - rows or if the cursor has not had an operation invoked via the - :meth:`AsyncCursor.execute()` method yet. - -.. attribute:: AsyncCursor.fetchvars +.. autoproperty:: AsyncCursor.bindvars - This read-only attribute specifies the list of variables created for the - last query that was executed on the cursor. Care should be taken when - referencing this attribute. In particular, elements should not be removed - or replaced. +.. autoproperty:: AsyncCursor.description -.. attribute:: AsyncCursor.inputtypehandler +.. autoproperty:: AsyncCursor.fetchvars - This read-write attribute specifies a method called for each value that is - bound to a statement executed on the cursor and overrides the attribute - with the same name on the connection if specified. The method signature is - handler(cursor, value, arraysize) and the return value is expected to be a - variable object or *None* in which case a default variable object will be - created. If this attribute is *None*, the default behavior will take place - for all values bound to the statements. +.. autoproperty:: AsyncCursor.inputtypehandler -.. attribute:: AsyncCursor.lastrowid +.. autoproperty:: AsyncCursor.lastrowid - This read-only attribute returns the rowid of the last row modified by the - cursor. If no row was modified by the last operation performed on the - cursor, the value *None* is returned. - -.. attribute:: AsyncCursor.outputtypehandler - - This read-write attribute specifies a method called for each column that is - to be fetched from this cursor. The method signature is - handler(cursor, metadata) and the return value is expected to be a - :ref:`variable object` or *None* in which case a default variable - object will be created. If this attribute is *None*, then the default - behavior will take place for all columns fetched from this cursor. +.. autoproperty:: AsyncCursor.outputtypehandler See :ref:`outputtypehandlers`. -.. attribute:: AsyncCursor.prefetchrows - - This read-write attribute can be used to tune the number of rows that the - python-oracledb fetches when a SELECT statement is executed. This value can - reduce the number of round-trips to the database that are required to fetch - rows but at the cost of additional memory. Setting this value to *0* can be - useful when the timing of fetches must be explicitly controlled. - - The attribute is only used for tuning row fetches from the database. It - does not affect data inserts. +.. autoproperty:: AsyncCursor.prefetchrows See :ref:`Tuning Fetch Performance ` for more information. -.. attribute:: AsyncCursor.rowcount +.. autoproperty:: AsyncCursor.rowcount - This read-only attribute specifies the number of rows that have currently - been fetched from the cursor (for select statements) or that have been - affected by the operation (for insert, update, delete and merge - statements). For all other statements the value is always *0*. 
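The ``arraysize`` and ``prefetchrows`` attributes described above can be set on a cursor before a query is executed. A hedged illustration (the query text and sizes are arbitrary)::

    cursor = connection.cursor()
    cursor.prefetchrows = 1000   # rows returned with the execute round-trip
    cursor.arraysize = 1000      # rows fetched per subsequent round-trip
    await cursor.execute("select id, name from big_table")
    rows = await cursor.fetchall()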
If the - cursor or connection is closed, the value returned is *-1*. - -.. attribute:: AsyncCursor.rowfactory - - This read-write attribute specifies a method to call for each row that is - retrieved from the database. Ordinarily, a tuple is returned for each row - but if this attribute is set, the method is called with the tuple that - would normally be returned, and the result of the method is returned - instead. +.. autoproperty:: AsyncCursor.rowfactory See :ref:`rowfactories`. -.. attribute:: AsyncCursor.scrollable - - This read-write boolean attribute specifies whether the cursor can be - scrolled or not. By default, cursors are not scrollable, as the server - resources and response times are greater than nonscrollable cursors. This - attribute is checked and the corresponding mode set in Oracle when calling - the method :meth:`AsyncCursor.execute()`. +.. autoproperty:: AsyncCursor.scrollable diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 2da9ee68..7439540a 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -4,124 +4,60 @@ API: Connection Objects *********************** -A Connection object can be created with :meth:`oracledb.connect()` or with -:meth:`ConnectionPool.acquire()`. +.. currentmodule:: oracledb -.. note:: +Connection Class +================ - Any outstanding database transaction will be rolled back when the - connection object is destroyed or closed. You must perform a commit first - if you want data to persist in the database, see :ref:`txnmgmnt`. +.. autoclass:: Connection -Connection Methods -================== - -.. method:: Connection.__enter__() + A connection object should be created with :meth:`oracledb.connect()` or + with :meth:`ConnectionPool.acquire()`. - The entry point for the connection as a context manager. It returns itself. + .. note:: - .. dbapimethodextension:: + Any outstanding database transaction will be rolled back when the + connection object is destroyed or closed. You must perform a commit + first if you want data to persist in the database, see :ref:`txnmgmnt`. -.. method:: Connection.__exit__() +Connection Methods +================== - The exit point for the connection as a context manager. This will close - the connection and roll back any uncommitted transaction. +.. automethod:: Connection.__enter__ .. dbapimethodextension:: -.. method:: Connection.begin([formatId, transactionId, branchId]) +.. automethod:: Connection.__exit__ - Explicitly begins a new transaction. Without parameters, this explicitly - begins a local transaction; otherwise, this explicitly begins a distributed - (global) transaction with the given parameters. See the Oracle - documentation for more details. + .. dbapimethodextension:: - Note that in order to make use of global (distributed) transactions, the - :attr:`~Connection.internal_name` and :attr:`~Connection.external_name` - attributes must be set. +.. automethod:: Connection.begin .. deprecated:: 1.0 - Use the method :meth:`Connection.tpc_begin()` instead. + Use the method :meth:`tpc_begin()` instead. .. dbapimethodextension:: -.. method:: Connection.begin_sessionless_transaction(transaction_id=None, \ - timeout=60, defer_round_trip=False) - - Begins a new sessionless transaction. This method returns the transaction - identifier specified by the user or generated by python-oracledb. - - The ``transaction_id`` parameter should be of type string or bytes. If - specified, it represents a unique identifier for the transaction. 
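The context-manager protocol described for ``__enter__()`` and ``__exit__()`` can be sketched as follows; the credentials and DSN are placeholders, and any uncommitted work is rolled back when the block exits::

    import oracledb

    with oracledb.connect(user="hr", password="hr_pwd",
                          dsn="localhost/orclpdb1") as connection:
        cursor = connection.cursor()
        cursor.execute("select sysdate from dual")
        print(cursor.fetchone())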
If a - string is passed, then it will be UTF-8 encoded to bytes. If this value is - not specified, then python-oracledb generates a a random - `universally-unique identifier (UUID) `__ value when - ``Connection.begin_sessionless_transaction()`` is called. An example is - "36b8f84d-df4e-4d49-b662-bcde71a8764f". The user-chosen value cannot exceed - 64 bytes in length. - - The ``timeout`` parameter is the number of seconds that this transaction - can stay suspended when - :meth:`Connection.suspend_sessionless_transaction()` is later called, or if - the transaction is automatically suspended when the ``suspend_on_success`` - parameter is set to to *True* in :meth:`Cursor.execute()` or - :meth:`Cursor.executemany()`. The default value is *60* seconds. If a - transaction is not resumed within this specified duration, the transaction - will be rolled back. - - The ``defer_round_trip`` parameter is a boolean that determines whether - the request to start a transaction is to be sent immediately or with the - next database operation. If set to *False*, the request is sent - immediately. If set to *True*, the request is included with the next - database operation on the connection. The default value is *False*. +.. automethod:: Connection.begin_sessionless_transaction See :ref:`sessionlesstxns`. .. versionadded:: 3.3.0 -.. method:: Connection.cancel() - - Breaks a long-running statement. +.. automethod:: Connection.cancel .. dbapimethodextension:: -.. method:: Connection.changepassword(oldpassword, newpassword) - - Changes the password for the user to which the connection is - connected. +.. automethod:: Connection.changepassword .. dbapimethodextension:: -.. method:: Connection.close() - - Closes the connection now and makes it unusable for further operations. - An Error exception will be raised if any operation is attempted with this - connection after this method is completed successfully. - - All open cursors and LOBs created by the connection will be closed and will - also no longer be usable. - - Internally, references to the connection are held by cursor objects, - LOB objects, subscription objects, etc. Once all of these references are - released, the connection itself will be closed automatically. Either - control references to these related objects carefully or explicitly close - connections in order to ensure sufficient resources are available. +.. automethod:: Connection.close -.. method:: Connection.commit() +.. automethod:: Connection.commit - Commits any pending transactions to the database. - -.. method:: Connection.createlob(lob_type, data=None) - - Creates and returns a new temporary :ref:`LOB object ` of the - specified type. The ``lob_type`` parameter should be one of - :data:`oracledb.DB_TYPE_CLOB`, :data:`oracledb.DB_TYPE_BLOB`, or - :data:`oracledb.DB_TYPE_NCLOB`. - - If data is supplied, it will be written to the temporary LOB before it is - returned. +.. automethod:: Connection.createlob .. versionchanged:: 2.0 @@ -129,47 +65,21 @@ Connection Methods .. dbapimethodextension:: -.. method:: Connection.cursor(scrollable=False) - - Returns a new :ref:`cursor object ` using the connection. +.. automethod:: Connection.cursor -.. method:: Connection.decode_oson(data) - - Decodes `OSON-encoded `__ bytes and returns the - object encoded in those bytes. This is useful for fetching columns which - have the check constraint ``IS JSON FORMAT OSON`` enabled. +.. automethod:: Connection.decode_oson .. versionadded:: 2.1.0 -.. method:: Connection.encode_oson(value) + .. 
dbapimethodextension:: - Encodes a Python value into `OSON-encoded `__ - bytes and returns them. This is useful for inserting into columns which - have the check constraint ``IS JSON FORMAT OSON`` enabled. +.. automethod:: Connection.encode_oson .. versionadded:: 2.1.0 -.. method:: Connection.fetch_df_all(statement, parameters=None, \ - arraysize=None) - - Fetches all rows of the SQL query ``statement``, returning them in a - :ref:`DataFrame ` object. An empty DataFrame is - returned if there are no rows available. - - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one :ref:`bind variable placeholder ` in ``statement``. It - can also be a list of dictionaries, where the keys match the bind variable - placeholder names in ``statement``. - - The ``arraysize`` parameter can be specified to tune performance of - fetching data across the network. It defaults to - :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s - :attr:`Cursor.prefetchrows` size is always set to the value of the explicit - or default ``arraysize`` parameter value. + .. dbapimethodextension:: - Any LOB fetched must be less than 1 GB. +.. automethod:: Connection.fetch_df_all See :ref:`dataframeformat` for the supported data types and examples. @@ -182,25 +92,7 @@ Connection Methods .. versionadded:: 3.0.0 -.. method:: Connection.fetch_df_batches(statement, parameters=None, \ - size=None) - - This returns an iterator yielding the next ``size`` rows of the SQL query - ``statement`` in each iteration as a :ref:`DataFrame ` - object. An empty DataFrame is returned if there are no rows available. - - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one :ref:`bind variable placeholder ` in ``statement``. It - can also be a list of dictionaries, where the keys match the bind variable - placeholder names in ``statement``. - - The ``size`` parameter controls the number of records fetched in each - batch. It defaults to :attr:`defaults.arraysize`. Internally, the - ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and - :attr:`Cursor.prefetchrows` sizes are always set to the value of the - explicit or default ``size`` parameter value. - - Any LOB fetched must be less than 1 GB. +.. automethod:: Connection.fetch_df_batches See :ref:`dataframeformat` for the supported data types and examples. @@ -213,321 +105,77 @@ Connection Methods .. versionadded:: 3.0.0 -.. method:: Connection.getSodaDatabase() - - Returns a :ref:`SodaDatabase ` object for Simple Oracle Document - Access (SODA). All SODA operations are performed either on the returned - SodaDatabase object or from objects created by the returned SodaDatabase - object. See `here `__ for - additional information on SODA. +.. automethod:: Connection.getSodaDatabase .. dbapimethodextension:: -.. method:: Connection.gettype(name) - - Returns a :ref:`type object ` given its name. This can then - be used to create objects which can be bound to cursors created by this - connection. +.. automethod:: Connection.gettype .. dbapimethodextension:: -.. method:: Connection.is_healthy() - - This function returns a boolean indicating the health status of a - connection. - - Connections may become unusable in several cases, such as, if the network - socket is broken, if an Oracle error indicates the connection is unusable, - or, after receiving a planned down notification from the database. - - This function is best used before starting a new database request on an - existing :ref:`standalone connections `. 
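A hedged sketch of ``fetch_df_all()``; the table name is illustrative, and the ``num_rows()`` call on the returned DataFrame is shown only as an assumed convenience for demonstration::

    odf = connection.fetch_df_all(
        statement="select product_id, amount from sales",
        arraysize=1000,
    )
    # the result can typically be handed to Arrow-aware libraries
    print(odf.num_rows())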
For pooled - connections, the :meth:`ConnectionPool.acquire()` method internally - performs this check before returning a connection to the application, see - :ref:`poolhealth`. - - If this function returns *False*, the connection should be not be used by - the application and a new connection should be established instead. - - This function performs a local check. To fully check a connection's health, - use :meth:`Connection.ping()` which performs a round-trip to the database. +.. automethod:: Connection.is_healthy .. dbapimethodextension:: -.. method:: Connection.msgproperties(payload, correlation, delay, exceptionq, expiration, priority) - - Returns an object specifying the properties of messages used in advanced - queuing. See :ref:`msgproperties` for more information. - - Each of the parameters are optional. If specified, they act as a shortcut - for setting each of the equivalently named properties. +.. automethod:: Connection.msgproperties .. dbapimethodextension:: -.. method:: Connection.ping() - - Pings the database to verify if the connection is valid. An exception is - thrown if it is not, in which case the connection should not be used by the - application and a new connection should be established instead. - - This function performs a :ref:`round-trip ` to the database, so - it should not be used unnecessarily. - - Note connection pools will perform the same health check automatically, - based on configuration settings. See :ref:`poolhealth`. - - Also, see :meth:`Connection.is_healthy()` for a lightweight alternative. +.. automethod:: Connection.ping .. dbapimethodextension:: -.. method:: Connection.prepare() - - Prepares the distributed (global) transaction for commit. Returns a boolean - indicating if a transaction was actually prepared in order to avoid the - error ``ORA-24756 (transaction does not exist)``. +.. automethod:: Connection.prepare - .. deprecated:: python-oracledb 1.0 - - Use the method :meth:`Connection.tpc_prepare()` instead. + .. deprecated:: 1.0. Use the method :meth:`tpc_prepare()` instead. .. dbapimethodextension:: -.. method:: Connection.queue(name, payload_type=None) - - Creates a :ref:`queue ` which is used to enqueue and dequeue - messages in Advanced Queuing. - - The ``name`` parameter is expected to be a string identifying the queue in - which messages are to be enqueued or dequeued. - - The ``payload_type`` parameter, if specified, is expected to be an - :ref:`object type ` that identifies the type of payload the - queue expects. If the string "JSON" is specified, JSON data is enqueued and - dequeued. If not specified, RAW data is enqueued and dequeued. - - For consistency and compliance with the PEP 8 naming style, the - parameter ``payloadType`` was renamed to ``payload_type``. The old name - will continue to work as a keyword parameter for a period of time. +.. automethod:: Connection.queue .. dbapimethodextension:: -.. method:: Connection.resume_sessionless_transaction(transaction_id, \ - timeout=60, defer_round_trip=False) - - Resumes an existing sessionless transaction using the specified - transaction identifier. This method returns the transaction identifier - used to resume the sessionless transaction. - - The ``transaction_id`` parameter should be a string or bytes value that - uniquely identifies an existing sessionless transaction that is to be - resumed. - - The ``timeout`` parameter is the number of seconds that the current - connection waits to resume a transaction if another connection is using it. 
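Tying ``queue()`` and ``msgproperties()`` together, a minimal RAW enqueue could look like the following sketch; the queue name is illustrative, the queue must already exist in the database, and ``enqone()`` on the returned queue object is used to send the message::

    queue = connection.queue("DEMO_RAW_QUEUE")
    props = connection.msgproperties(payload=b"hello from python-oracledb")
    queue.enqone(props)
    connection.commit()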
- When ``defer_round_trip`` is set to *False*, the wait happens in the - ``resume_sessionless_transaction()`` call itself, and the function blocks - until the transaction becomes available or the timeout expires. When - ``defer_round_trip`` is set to *True*, the resume is deferred and the wait - occurs at the time of the next database operation instead. At the start of - the wait period, if the transaction is not in use by any other connection, - the resume happens immediately. If the transaction remains in use by the - other connection after the timeout period, the error `ORA-25351 - `__ is raised. If - another connection completes the transaction, the error `ORA-24756 - `__ is raised. These - error messages are only thrown for non-RAC instances. For information on - using Oracle RAC, see :ref:`Sessionless Transactions with Oracle RAC - `. The default value is *60* seconds. - - The ``defer_round_trip`` parameter is a boolean that determines whether - the request to resume a transaction is to be sent immediately or with the - next database operation. If set to *False*, the request is sent - immediately. If set to *True*, the request is included with the next - database operation on the connection. The default value is *False*. +.. automethod:: Connection.resume_sessionless_transaction See :ref:`sessionlesstxns`. .. versionadded:: 3.3.0 -.. method:: Connection.rollback() - - Rolls back any pending transactions. +.. automethod:: Connection.rollback -.. method:: Connection.shutdown([mode]) +.. automethod:: Connection.shutdown - Shuts down the database. In order to do this the connection must be - connected as :data:`~oracledb.SYSDBA` or :data:`~oracledb.SYSOPER`. Two - calls must be made unless the mode specified is - :data:`~oracledb.DBSHUTDOWN_ABORT`. - An example is shown below: - - :: - - import oracledb - - connection = oracledb.connect(mode = oracledb.SYSDBA) - connection.shutdown(mode = oracledb.DBSHUTDOWN_IMMEDIATE) - cursor = connection.cursor() - cursor.execute("alter database close normal") - cursor.execute("alter database dismount") - connection.shutdown(mode = oracledb.DBSHUTDOWN_FINAL) + See :ref:`startup`. .. dbapimethodextension:: +.. automethod:: Connection.startup -.. method:: Connection.startup(force=False, restrict=False, pfile=None) - - Starts up the database. This is equivalent to the SQL\*Plus command - ``startup nomount``. The connection must be connected as - :data:`~oracledb.SYSDBA` or :data:`~oracledb.SYSOPER` with the - :data:`~oracledb.PRELIM_AUTH` option specified for this to work. - - The ``pfile`` parameter, if specified, is expected to be a string - identifying the location of the parameter file (PFILE) which will be used - instead of the stored parameter file (SPFILE). - - An example is shown below: - - :: - - import oracledb - - connection = oracledb.connect( - mode=oracledb.SYSDBA | oracledb.PRELIM_AUTH) - connection.startup() - connection = oracledb.connect(mode=oracledb.SYSDBA) - cursor = connection.cursor() - cursor.execute("alter database mount") - cursor.execute("alter database open") + See :ref:`startup`. .. dbapimethodextension:: -.. 
method:: Connection.subscribe(namespace=oracledb.SUBSCR_NAMESPACE_DBCHANGE, \ - protocol=oracledb.SUBSCR_PROTO_OCI, callback=None, timeout=0, \ - operations=OPCODE_ALLOPS, port=0, qos=0, ip_address=None, grouping_class=0, \ - grouping_value=0, grouping_type=oracledb.SUBSCR_GROUPING_TYPE_SUMMARY, \ - name=None, client_initiated=False) - - Returns a new :ref:`subscription object ` that receives - notifications for events that take place in the database that match the - given parameters. - - The ``namespace`` parameter specifies the namespace the subscription uses. - It can be one of :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` or - :data:`oracledb.SUBSCR_NAMESPACE_AQ`. - - The ``protocol`` parameter specifies the protocol to use when notifications - are sent. Currently the only valid value is - :data:`oracledb.SUBSCR_PROTO_OCI`. - - The ``callback`` is expected to be a callable that accepts a single - parameter. A :ref:`message object ` is passed to this callback - whenever a notification is received. - - The ``timeout`` value specifies that the subscription expires after the - given time in seconds. The default value of *0* indicates that the - subscription never expires. - - The ``operations`` parameter enables filtering of the messages that are - sent (insert, update, delete). The default value will send notifications - for all operations. This parameter is only used when the namespace is set - to :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE`. - - The ``port`` parameter specifies the listening port for callback - notifications from the database server. If not specified, an unused port - will be selected by the Oracle Client libraries. - - The ``qos`` parameter specifies quality of service options. It should be - one or more of the following flags, OR'ed together: - :data:`oracledb.SUBSCR_QOS_RELIABLE`, - :data:`oracledb.SUBSCR_QOS_DEREG_NFY`, - :data:`oracledb.SUBSCR_QOS_ROWIDS`, - :data:`oracledb.SUBSCR_QOS_QUERY`, - :data:`oracledb.SUBSCR_QOS_BEST_EFFORT`. - - The ``ip_address`` parameter specifies the IP address (*IPv4* or *IPv6*) in - standard string notation to bind for callback notifications from the - database server. If not specified, the client IP address will be determined - by the Oracle Client libraries. - - The ``grouping_class`` parameter specifies what type of grouping of - notifications should take place. Currently, if set, this value can only be - set to the value :data:`oracledb.SUBSCR_GROUPING_CLASS_TIME`, which - will group notifications by the number of seconds specified in the - ``grouping_value`` parameter. The ``grouping_type`` parameter should be one - of the values :data:`oracledb.SUBSCR_GROUPING_TYPE_SUMMARY` (the default) - or :data:`oracledb.SUBSCR_GROUPING_TYPE_LAST`. - - The ``name`` parameter is used to identify the subscription and is - specific to the selected namespace. If the namespace parameter is - :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` then the name is optional and - can be any value. If the namespace parameter is - :data:`oracledb.SUBSCR_NAMESPACE_AQ`, however, the name must be in the - format '' for single consumer queues and - ':' for multiple consumer queues, and identifies - the queue that will be monitored for messages. The queue name may include - the schema, if needed. - - The ``client_initiated`` parameter is used to determine if client initiated - connections or server initiated connections (the default) will be - established. Client initiated connections are only available in Oracle - Client 19.4 and Oracle Database 19.4 and higher. 
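As a hedged sketch of the parameters above (the callback body, operations mask, and registered query are illustrative; ``registerquery()`` on the returned subscription object is assumed for change-notification use)::

    def on_change(message):
        print("notification received, type:", message.type)

    subscription = connection.subscribe(
        namespace=oracledb.SUBSCR_NAMESPACE_DBCHANGE,
        callback=on_change,
        operations=oracledb.OPCODE_INSERT | oracledb.OPCODE_UPDATE,
        qos=oracledb.SUBSCR_QOS_ROWIDS,
    )
    subscription.registerquery("select * from regions")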
- - For consistency and compliance with the PEP 8 naming style, the - parameter ``ipAddress`` was renamed to ``ip_address``, the parameter - ``groupingClass`` was renamed to ``grouping_class``, the parameter - ``groupingValue`` was renamed to ``grouping_value``, the parameter - ``groupingType`` was renamed to ``grouping_type`` and the parameter - ``clientInitiated`` was renamed to ``client_initiated``. The old names will - continue to work as keyword parameters for a period of time. +.. automethod:: Connection.subscribe .. dbapimethodextension:: .. note:: The subscription can be deregistered in the database by calling the - function :meth:`~Connection.unsubscribe()`. If this method is not - called and the connection that was used to create the subscription is - explicitly closed using the function :meth:`~Connection.close()`, the - subscription will not be deregistered in the database. - -.. method:: Connection.suspend_sessionless_transaction() + function :meth:`unsubscribe()`. If this method is not called and the + connection that was used to create the subscription is explicitly + closed using the function :meth:`close()`, the subscription will not be + deregistered in the database. - Suspends the currently active sessionless transaction immediately. - - This detaches the transaction from the connection, allowing it to be - resumed later with the transaction identifier that was specified during - creation of the sessionless transaction. The ``timeout`` previously passed - to :meth:`Connection.begin_sessionless_transaction()` determines how long - the transaction can stay suspended before it is automatically rolled back. +.. automethod:: Connection.suspend_sessionless_transaction See :ref:`sessionlesstxns`. .. versionadded:: 3.3.0 -.. method:: Connection.tpc_begin(xid, flags, timeout) - - Begins a Two-Phase Commit (TPC) on a global transaction using the specified - transaction identifier (xid). - - The ``xid`` parameter should be an object returned by the - :meth:`~Connection.xid()` method. - - The ``flags`` parameter is one of the constants - :data:`oracledb.TPC_BEGIN_JOIN`, :data:`oracledb.TPC_BEGIN_NEW`, - :data:`oracledb.TPC_BEGIN_PROMOTE`, or :data:`oracledb.TPC_BEGIN_RESUME`. - The default is :data:`oracledb.TPC_BEGIN_NEW`. + .. dbapimethodextension:: - The ``timeout`` parameter is the number of seconds to wait for a - transaction to become available for resumption when - :data:`~oracledb.TPC_BEGIN_RESUME` is specified in the ``flags`` parameter. - When :data:`~oracledb.TPC_BEGIN_NEW` is specified in the ``flags`` - parameter, the ``timeout`` parameter indicates the number of seconds the - transaction can be inactive before it is automatically terminated by the - system. A transaction is inactive between the time it is detached with - :meth:`Connection.tpc_end()` and the time it is resumed with - :meth:`Connection.tpc_begin()`.The default is *0* seconds. +.. automethod:: Connection.tpc_begin The following code sample demonstrates the ``tpc_begin()`` function:: @@ -536,27 +184,7 @@ Connection Methods See :ref:`tpc` for information on TPC. -.. method:: Connection.tpc_commit(xid, one_phase) - - Commits a global transaction. When called with no arguments, this method - commits a transaction previously prepared with - :meth:`~Connection.tpc_begin()` and optionally prepared with - :meth:`~Connection.tpc_prepare()`. If :meth:`~Connection.tpc_prepare()` - is not called, a single phase commit is performed. 
A transaction manager - may choose to do this if only a single resource is participating in the - global transaction. - - If an ``xid`` parameter is passed, then an object should be returned by the - :meth:`~Connection.xid()` function. This form should be called outside of a - transaction and is intended for use in recovery. - - The ``one_phase`` parameter is a boolean identifying whether to perform a - one-phase or two-phase commit. If ``one_phase`` parameter is *True*, a - single-phase commit is performed. The default value is *False*. This - parameter is only examined if a value is provided for the ``xid`` - parameter. Otherwise, the driver already knows whether - :meth:`~Connection.tpc_prepare()` was called for the transaction and - whether a one-phase or two-phase commit is required. +.. automethod:: Connection.tpc_commit The following code sample demonstrates the ``tpc_commit()`` function:: @@ -565,23 +193,7 @@ Connection Methods See :ref:`tpc` for information on TPC. -.. method:: Connection.tpc_end(xid, flags) - - Ends or suspends work on a global transaction. This function is only - intended for use by transaction managers. - - If an ``xid`` parameter is passed, then an object should be returned by the - :meth:`~Connection.xid()` function. If no xid parameter is passed, then the - transaction identifier used by the previous :meth:`~Connection.tpc_begin()` - is used. - - The ``flags`` parameter is one of the constants - :data:`oracledb.TPC_END_NORMAL` or :data:`oracledb.TPC_END_SUSPEND`. The - default is :data:`oracledb.TPC_END_NORMAL`. - - If the flag is :data:`oracledb.TPC_END_SUSPEND` then the transaction may be - resumed later by calling :meth:`Connection.tpc_begin()` with the flag - :data:`oracledb.TPC_BEGIN_RESUME`. +.. automethod:: Connection.tpc_end The following code sample demonstrates the ``tpc_end()`` function:: @@ -590,13 +202,9 @@ Connection Methods See :ref:`tpc` for information on TPC. -.. method:: Connection.tpc_forget(xid) - - Causes the database to forget a heuristically completed TPC transaction. - This function is only intended to be called by transaction managers. + .. dbapimethodextension:: - The ``xid`` parameter is mandatory and should be an object should be - returned by the :meth:`~Connection.xid()` function. +.. automethod:: Connection.tpc_forget The following code sample demonstrates the ``tpc_forget()`` function:: @@ -605,21 +213,9 @@ Connection Methods See :ref:`tpc` for information on TPC. -.. method:: Connection.tpc_prepare(xid) - - Prepares a two-phase transaction for commit. After this function is called, - no further activity should take place on this connection until either - :meth:`~Connection.tpc_commit()` or :meth:`~Connection.tpc_rollback()` have - been called. - - Returns a boolean indicating whether a commit is needed or not. If you - attempt to commit when not needed, then it results in the error - ``ORA-24756: transaction does not exist``. + .. dbapimethodextension:: - If an ``xid`` parameter is passed, then an object should be returned by - the :meth:`~Connection.xid()` function. If an ``xid`` parameter is not - passed, then the transaction identifier used by the previous - :meth:`~Connection.tpc_begin()` is used. +.. automethod:: Connection.tpc_prepare The following code sample demonstrates the ``tpc_prepare()`` function:: @@ -628,35 +224,15 @@ Connection Methods See :ref:`tpc` for information on TPC. -.. method:: Connection.tpc_recover() - - Returns a list of pending transaction identifiers that require recovery. 
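Drawing the TPC methods in this section together, a simple one-branch flow might look like this sketch; the format identifier, transaction ids, and table name are illustrative only::

    xid = connection.xid(1000, "txn-0001", "branch-0001")
    connection.tpc_begin(xid)
    cursor = connection.cursor()
    cursor.execute("insert into demo_tab (id) values (:1)", [1])
    commit_needed = connection.tpc_prepare()
    if commit_needed:
        connection.tpc_commit()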
- Objects of type ``Xid`` (as returned by the :meth:`~Connection.xid()` - function) are returned and these can be passed to - :meth:`~Connection.tpc_commit()` or :meth:`~Connection.tpc_rollback()` as - needed. - - This function queries the DBA_PENDING_TRANSACTIONS view and requires - "SELECT" privilege on that view. +.. automethod:: Connection.tpc_recover The following code sample demonstrates the ``tpc_recover()`` function:: connection.tpc_recover() - See :ref:`tpc` for information on TPC. -.. method:: Connection.tpc_rollback(xid) - - Rolls back a global transaction. - - If an ``xid`` parameter is not passed, then it rolls back the transaction - that was previously started with :meth:`~Connection.tpc_begin()`. - - If an ``xid`` parameter is passed, then an object should be returned by - :meth:`~Connection.xid()` and the specified transaction is rolled back. - This form should be called outside of a transaction and is intended for - use in recovery. +.. automethod:: Connection.tpc_rollback The following code sample demonstrates the ``tpc_rollback()`` function:: @@ -665,32 +241,11 @@ Connection Methods See :ref:`tpc` for information on TPC. -.. method:: Connection.unsubscribe(subscr) - - Unsubscribe from events in the database that were originally subscribed to - using :meth:`~Connection.subscribe()`. The connection used to unsubscribe - should be the same one used to create the subscription, or should access - the same database and be connected as the same user name. - -.. method:: Connection.xid (format_id, global_transaction_id, branch_qualifier) - - Returns a global transaction identifier (xid) that can be used with the - Two-Phase Commit (TPC) functions. - - The ``xid`` contains a format identifier, a global transaction identifier, and - a branch identifier. There are no checks performed at the Python level. The - values are checked by ODPI-C when they are passed to the relevant functions. - .. When this functionality is also supported in the thin driver the checks will be performed at the Python level as well. +.. automethod:: Connection.unsubscribe - The ``format_id`` parameter should be a positive 32-bit integer. This - value identifies the format of the ``global_transaction_id`` and - ``branch_qualifier`` parameters and the value is determined by the - Transaction Manager (TM), if one is in use. + .. dbapimethodextension:: - The ``global_transaction_id`` and ``branch_qualifier`` parameters should - be of type bytes or string. If a value of type string is passed, then - this value will be UTF-8 encoded to bytes. The values cannot exceed 64 - bytes in length. +.. automethod:: Connection.xid The following code sample demonstrates the ``xid()`` function:: @@ -698,177 +253,90 @@ Connection Methods See :ref:`tpc` for information on TPC. + .. dbapimethodextension:: + .. _connattrs: Connection Attributes ===================== -.. attribute:: Connection.action - - This write-only attribute sets the ACTION column in the V$SESSION view. It - is a string attribute but the value *None* is accepted and treated as an - empty string. +.. autoproperty:: Connection.action .. dbapiattributeextension:: -.. attribute:: Connection.autocommit - - This read-write attribute determines whether autocommit mode is on or off. - When autocommit mode is on, all statements are committed as soon as they - have completed executing. +.. autoproperty:: Connection.autocommit .. dbapiattributeextension:: -.. 
attribute:: Connection.call_timeout - - This read-write attribute specifies the amount of time (in milliseconds) - that a single round-trip to the database may take before a timeout will - occur. A value of *0* means that no timeout will take place. - - In python-oracledb Thick mode, this attribute is only available in Oracle - Client 18c or later. - - If a timeout occurs, the error ``DPI-1067`` will be returned if the - connection is still usable. Alternatively the error ``DPI-1080`` will be - returned if the connection has become invalid and can no longer be used. - - For consistency and compliance with the PEP 8 naming style, the - attribute ``callTimeout`` was renamed to ``call_timeout``. The old name - will continue to work for a period of time. The error ``DPI-1080`` was - also introduced in this release. +.. autoproperty:: Connection.call_timeout .. dbapiattributeextension:: -.. attribute:: Connection.client_identifier - - This write-only attribute sets the CLIENT_IDENTIFIER column in the - V$SESSION view. +.. autoproperty:: Connection.client_identifier .. dbapiattributeextension:: -.. attribute:: Connection.clientinfo - - This write-only attribute sets the CLIENT_INFO column in the V$SESSION - view. +.. autoproperty:: Connection.clientinfo .. dbapiattributeextension:: -.. attribute:: Connection.current_schema - - This read-write attribute sets the current schema attribute for the - session. Setting this value is the same as executing the SQL statement - ``ALTER SESSION SET CURRENT_SCHEMA``. The attribute is set (and verified) on - the next call that does a round trip to the server. The value is placed - before unqualified database objects in SQL statements you then execute. +.. autoproperty:: Connection.current_schema .. dbapiattributeextension:: -.. attribute:: Connection.db_domain - - This read-only attribute specifies the Oracle Database domain name - associated with the connection. It is the same value returned by the SQL - ``SELECT value FROM V$PARAMETER WHERE NAME = 'db_domain'``. +.. autoproperty:: Connection.db_domain .. dbapiattributeextension:: .. versionadded:: 2.0.0 -.. attribute:: Connection.db_name - - This read-only attribute specifies the Oracle Database name associated with - the connection. It is the same value returned by the SQL - ``SELECT NAME FROM V$DATABASE``. +.. autoproperty:: Connection.db_name .. dbapiattributeextension:: .. versionadded:: 2.0.0 -.. attribute:: Connection.dbop - - This write-only attribute sets the database operation that is to be - monitored. This can be viewed in the DBOP_NAME column of the - V$SQL_MONITOR view. +.. autoproperty:: Connection.dbop .. dbapiattributeextension:: -.. attribute:: Connection.dsn - - This read-only attribute returns the TNS entry of the database to which a - connection has been established. +.. autoproperty:: Connection.dsn .. dbapiattributeextension:: -.. attribute:: Connection.econtext_id - - This write-only attribute specifies the execution context id. This value - can be found as the ECID column in the V$SESSION view and ECONTEXT_ID in - the auditing tables. The maximum length is 64 bytes. - -.. attribute:: Connection.edition - - This read-only attribute gets the session edition and is only available - with Oracle Database 11.2, or later. +.. autoproperty:: Connection.econtext_id .. dbapiattributeextension:: -.. attribute:: Connection.external_name - - This read-write attribute specifies the external name that is used by the - connection when logging distributed transactions. +.. 
autoproperty:: Connection.edition .. dbapiattributeextension:: -.. attribute:: Connection.handle +.. autoproperty:: Connection.external_name - This read-only attribute returns the Oracle Call Interface (OCI) service - context handle for the connection. It is primarily provided to facilitate - testing the creation of a connection using the OCI service context handle. + .. dbapiattributeextension:: - This property is only relevant in the python-oracledb Thick mode. +.. autoproperty:: Connection.handle .. dbapiattributeextension:: -.. attribute:: Connection.inputtypehandler - - This read-write attribute specifies a method called for each value that is - bound to a statement executed on any cursor associated with this - connection. The method signature is handler(cursor, value, arraysize) and - the return value is expected to be a variable object or *None* in which - case a default variable object will be created. If this attribute is - *None*, the default behavior will take place for all values bound to - statements. +.. autoproperty:: Connection.inputtypehandler See :ref:`inputtypehandlers`. .. dbapiattributeextension:: -.. attribute:: Connection.instance_name - - This read-only attribute specifies the Oracle Database instance name - associated with the connection. It is the same value as the SQL expression - ``sys_context('userenv', 'instance_name')``. +.. autoproperty:: Connection.instance_name .. dbapiattributeextension:: .. versionadded:: 1.4.0 -.. attribute:: Connection.internal_name - - This read-write attribute specifies the internal name that is used by the - connection when logging distributed transactions. +.. autoproperty:: Connection.internal_name .. dbapiattributeextension:: -.. attribute:: Connection.ltxid - - This read-only attribute returns the logical transaction id for the - connection. It is used within Oracle Transaction Guard as a means of - ensuring that transactions are not duplicated. See :ref:`tg` for more - information. - - This is only available with Oracle Database 12.1 or later. In - python-oracledb Thick mode, it also requires Oracle Client libraries 12.1 - or later. +.. autoproperty:: Connection.ltxid .. dbapiattributeextension:: @@ -876,48 +344,23 @@ Connection Attributes This attribute was added to python-oracledb Thin mode. -.. attribute:: Connection.max_identifier_length +.. autoproperty:: Connection.max_identifier_length - This read-only attribute specifies the maximum database identifier length - in bytes supported by the database to which the connection has been - established. See `Database Object Naming Rules - `__. The value may be - *None*, *30*, or *128*. The value *None* indicates the size cannot be - reliably determined by python-oracledb, which occurs when using Thick mode - with Oracle Client libraries 12.1 (or older) to connect to Oracle Database - 12.2, or later. + .. dbapiattributeextension:: .. versionadded:: 2.5.0 -.. attribute:: Connection.max_open_cursors - - This read-only attribute specifies the maximum number of cursors that the - database can have open concurrently. It is the same value returned by the - SQL ``SELECT VALUE FROM V$PARAMETER WHERE NAME = 'open_cursors'``. When - using python-oracledb Thick mode, Oracle Client libraries 12.1 (or later) - are required. +.. autoproperty:: Connection.max_open_cursors .. dbapiattributeextension:: .. versionadded:: 2.0.0 -.. attribute:: Connection.module - - This write-only attribute sets the MODULE column in the V$SESSION view. 
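A brief, hedged aside showing the end-to-end tracing attributes from this section being set (the values are arbitrary and are sent to the database on the next round-trip)::

    connection.module = "inventory_app"
    connection.action = "nightly_load"
    connection.client_identifier = "batch_user_42"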
- The maximum length for this string is 48 and if you exceed this length you - will get ``ORA-24960``. +.. autoproperty:: Connection.module .. dbapiattributeextension:: -.. attribute:: Connection.outputtypehandler - - This read-write attribute specifies a method called for each column that is - going to be fetched from any cursor associated with this connection. The - method signature is ``handler(cursor, metadata)`` and the return value is - expected to be a :ref:`variable object` or *None* in which case a - default variable object will be created. If this attribute is *None*, the - default behavior will take place for all columns fetched from cursors. +.. autoproperty:: Connection.outputtypehandler See :ref:`outputtypehandlers`. @@ -929,122 +372,64 @@ Connection Attributes .. dbapiattributeextension:: -.. attribute:: Connection.proxy_user - - This read-only attribute returns the name of the user which was used as a - proxy when creating the connection to the database. +.. autoproperty:: Connection.proxy_user .. dbapiattributeextension:: .. versionadded:: 2.0.0 -.. attribute:: Connection.sdu - - This read-only attribute specifies the size of the Session Data Unit (SDU) - that is being used by the connection. The value will be the lesser of the - requested python-oracledb size and the maximum size allowed by the database - network configuration. It is available only in the python-oracledb Thin - mode. +.. autoproperty:: Connection.sdu .. dbapiattributeextension:: .. versionadded:: 2.0.0 -.. attribute:: Connection.serial_num - - This read-only attribute specifies the session serial number associated - with the connection. It is the same value returned by the SQL - ``SELECT SERIAL# FROM V$SESSION WHERE SID=SYS_CONTEXT('USERENV', 'SID')``. - It is available only in python-oracledb Thin mode. - - - For applications using :ref:`drcp`, the ``serial_num`` attribute may not - contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a round-trip. +.. autoproperty:: Connection.serial_num .. dbapiattributeextension:: .. versionadded:: 2.5.0 -.. attribute:: Connection.service_name - - This read-only attribute specifies the Oracle Database service name - associated with the connection. This is the same value returned by the SQL - ``SELECT SYS_CONTEXT('USERENV', 'SERVICE_NAME') FROM DUAL``. +.. autoproperty:: Connection.service_name .. dbapiattributeextension:: .. versionadded:: 2.0.0 -.. attribute:: Connection.session_id - - This read-only attribute specifies the session identifier associated with - the connection. It is the same value returned by the SQL - ``SELECT SYS_CONTEXT('USERENV', 'SID') FROM DUAL``. It is available - only in python-oracledb Thin mode. - - For applications using :ref:`drcp`, the ``session_id`` attribute may not - contain the current session state until a round-trip is made to the - database after acquiring a session. It is recommended to not use this - attribute if your application uses DRCP but may not perform a round-trip. +.. autoproperty:: Connection.session_id .. dbapiattributeextension:: .. versionadded:: 2.5.0 -.. attribute:: Connection.stmtcachesize - - This read-write attribute specifies the size of the statement cache. This - value can make a significant difference in performance if you have a small - number of statements that you execute repeatedly. - - The default value is *20*. +.. 
autoproperty:: Connection.stmtcachesize See :ref:`Statement Caching ` for more information. .. dbapiattributeextension:: -.. attribute:: Connection.tag - - This read-write attribute initially contains the actual tag of the session - that was acquired from a pool by :meth:`ConnectionPool.acquire()`. If the - connection was not acquired from a pool or no tagging parameters were - specified (``tag`` and ``matchanytag``) when the connection was acquired - from the pool, this value will be None. If the value is changed, it must - be a string containing name=value pairs like "k1=v1;k2=v2". - - If this value is not *None* when the connection is released back to the - pool it will be used to retag the session. This value can be overridden in - the call to :meth:`ConnectionPool.release()`. +.. autoproperty:: Connection.tag .. dbapiattributeextension:: -.. attribute:: Connection.thin - - This read-only attribute returns a boolean indicating if the connection was - established with the python-oracledb Thin mode (*True*) or python-oracledb - Thick mode (*False*). +.. autoproperty:: Connection.thin .. dbapiattributeextension:: -.. attribute:: Connection.transaction_in_progress - - This read-only attribute specifies whether a transaction is currently in - progress on the database associated with the connection. +.. autoproperty:: Connection.transaction_in_progress .. dbapiattributeextension:: .. versionadded:: 2.0.0 -.. attribute:: Connection.username +.. autoproperty:: Connection.username This read-only attribute returns the name of the user which established the connection to the database. .. dbapiattributeextension:: -.. attribute:: Connection.version +.. autoproperty:: Connection.version This read-only attribute returns the version of the database to which a connection has been established. @@ -1058,30 +443,7 @@ Connection Attributes receive the base version (such as 18.0.0.0.0) instead of the full version (such as 18.3.0.0.0). -.. attribute:: Connection.warning - - This read-only attribute provides an :ref:`oracledb._Error` - object giving information about any database warnings (such as the password - being in the grace period, or the pool being created with a smaller than - requested size due to database resource restrictions) that were generated - during connection establishment or by :meth:`oracledb.create_pool()`. The - attribute will be present if there was a warning, but creation otherwise - completed successfully. The connection will be usable despite the warning. - - For :ref:`standalone connections `, - ``Connection.warning`` will be present for the lifetime of the connection. - - For :ref:`pooled connections `, ``Connection.warning`` will be - cleared when a connection is released to the pool such as with - :meth:`ConnectionPool.release()`. - - In python-oracledb Thick mode, warnings may be generated during pool - creation itself. These warnings will be placed on new connections created - by the pool, provided no warnings were generated by the individual - connection creations, in which case those connection warnings will be - returned. - - If no warning was generated the value *None* is returned. +.. autoproperty:: Connection.warning .. dbapiattributeextension:: diff --git a/doc/src/api_manual/cursor.rst b/doc/src/api_manual/cursor.rst index edd15dec..8f0fe0ba 100644 --- a/doc/src/api_manual/cursor.rst +++ b/doc/src/api_manual/cursor.rst @@ -4,40 +4,32 @@ API: Cursor Objects ******************* -A cursor object can be created with :meth:`Connection.cursor()`. +.. 
currentmodule:: oracledb -Cursor Methods -============== +Cursor Class +============ -.. method:: Cursor.__enter__() +.. autoclass:: Cursor - The entry point for the cursor as a context manager. It returns itself. - - .. dbapimethodextension:: + A cursor object should be created with :meth:`Connection.cursor()`. -.. method:: Cursor.__exit__() +Cursor Methods +============== - The exit point for the cursor as a context manager. It closes the cursor. +.. automethod:: Cursor.__enter__ .. dbapimethodextension:: -.. method:: Cursor.__iter__() +.. automethod:: Cursor.__exit__ - Returns the cursor itself to be used as an iterator. + .. dbapimethodextension:: - .. dbapimethodextension:: It is mentioned in PEP 249 as an optional extension. +.. automethod:: Cursor.__iter__ -.. method:: Cursor.arrayvar(typ, value, [size]) + .. dbapimethodextension:: + It is mentioned in PEP 249 as an optional extension. - Creates an array variable associated with the cursor of the given type and - size and returns a :ref:`variable object `. The value is either an - integer specifying the number of elements to allocate or it is a list and - the number of elements allocated is drawn from the size of the list. If the - value is a list, the variable is also set with the contents of the list. If - the size is not specified and the type is a string or binary, 4000 bytes - is allocated. This is needed for passing arrays to PL/SQL (in cases where - the list might be empty and the type cannot be determined automatically) or - returning arrays from PL/SQL. +.. automethod:: Cursor.arrayvar Array variables can only be used for PL/SQL associative arrays with contiguous keys. For PL/SQL associative arrays with sparsely populated keys @@ -47,120 +39,35 @@ Cursor Methods .. dbapimethodextension:: -.. method:: Cursor.bindnames() - - Returns the list of bind variable names bound to the statement. Note that a - statement must have been prepared first. +.. automethod:: Cursor.bindnames .. dbapimethodextension:: -.. method:: Cursor.callfunc(name, return_type, parameters=[], \ - keyword_parameters={}) - - Calls a PL/SQL function with the given name and returns its value. - - The ``return_type`` parameter for :meth:`~Cursor.callfunc()` is expected to - be a Python type, one of the :ref:`oracledb types ` or an - :ref:`Object Type `. - - The sequence of parameters must contain one entry for each parameter that - the PL/SQL function expects. Any keyword parameters will be included after - the positional parameters. - - Use :meth:`Cursor.var()` to define any OUT or IN OUT parameters, if - necessary. +.. automethod:: Cursor.callfunc See :ref:`plsqlfunc` for examples. - For consistency and compliance with the PEP 8 naming style, the parameter - ``keywordParameters`` was renamed to ``keyword_parameters``. The old name - will continue to work for a period of time. - .. dbapimethodextension:: .. note:: In line with the Python DB API, it is not recommended to call - :meth:`Cursor.setinputsizes()` prior to calling - :meth:`~Cursor.callfunc()`. Use :meth:`Cursor.var()` instead. In - existing code that calls :meth:`~Cursor.setinputsizes()`, the first - item in the :meth:`~Cursor.setinputsizes()` parameter list refers to - the return value of the PL/SQL function. - -.. method:: Cursor.callproc(name, parameters=[], keyword_parameters={}) - - Calls a PL/SQL procedure with the given name. + :meth:`setinputsizes()` prior to calling this function. Use + :meth:`var()` instead. 
In existing code that calls + :meth:`setinputsizes()`, the first item in the :meth:`setinputsizes()` + parameter list refers to the return value of the PL/SQL function. - The sequence of parameters must contain one entry for each parameter that - the procedure expects. The result of the call is a modified copy of the - input sequence. Input parameters are left untouched; output and - input/output parameters are replaced with possibly new values. Keyword - parameters will be included after the positional parameters and are not - returned as part of the output sequence. - - Use :meth:`Cursor.var()` to define any OUT or IN OUT parameters if - necessary. - - No query result set is returned by :meth:`~Cursor.callproc()`. Instead, use - :ref:`REF CURSOR ` parameters or :ref:`Implicit Results - `. +.. automethod:: Cursor.callproc See :ref:`plsqlproc` for an example. - For consistency and compliance with the PEP 8 naming style, the parameter - ``keywordParameters`` was renamed to ``keyword_parameters``. The old name - will continue to work for a period of time. - .. note:: The DB API definition does not allow for keyword parameters. -.. method:: Cursor.close() - - Closes the cursor now, rather than whenever __del__ is called. The cursor - will be unusable from this point forward; an Error exception will be raised - if any operation is attempted with the cursor. - -.. method:: Cursor.execute(statement, parameters=[], suspend_on_success=False, \ - ** keyword_parameters) - - Executes a statement against the database. See :ref:`sqlexecution`. - - Parameters may be passed as a dictionary or sequence or as keyword - parameters. If the parameters are a dictionary, the values will be bound by - name and if the parameters are a sequence the values will be bound by - position. Note that if the values are bound by position, the order of the - variables is from left to right as they are encountered in the statement - and SQL statements are processed differently than PL/SQL statements. For - this reason, it is generally recommended to bind parameters by name instead - of by position. - - Parameters passed as a dictionary are name and value pairs. The name maps - to the bind variable name used by the statement and the value maps to the - Python value you wish bound to that bind variable. - - A reference to the statement will be retained by the cursor. If *None* or - the same string object is passed in again, the cursor will execute that - statement again without performing a prepare or rebinding and redefining. - This is most effective for algorithms where the same statement is used, but - different parameters are bound to it (many times). Note that parameters - that are not passed in during subsequent executions will retain the value - passed in during the last execution that contained them. - - The ``suspend_on_success`` parameter is specific to :ref:`sessionless - transactions `. When set to *True*, the active sessionless - transaction will be suspended when ``execute()`` completes successfully. - See :ref:`suspendtxns`. - - For maximum efficiency when reusing a statement, it is best to use the - :meth:`Cursor.setinputsizes()` method to specify the parameter types and - sizes ahead of time; in particular, *None* is assumed to be a string of - length 1 so any values that are later bound as numbers or dates will raise - a TypeError exception. 
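As a minimal sketch of the statement reuse described above (the table, column, and bind
names are hypothetical, and an already-open ``connection`` is assumed), binding by name
and predefining the bind type with ``setinputsizes()`` lets the same string object be
re-executed without a new prepare:

.. code-block:: python

    import oracledb  # assumes python-oracledb is installed and "connection" is open

    cursor = connection.cursor()
    cursor.setinputsizes(dept_id=oracledb.DB_TYPE_NUMBER)
    sql = "select department_name from departments where department_id = :dept_id"
    for dept in (10, 20, 30):
        # passing the same string object avoids a re-prepare; only the bind
        # value changes between executions
        cursor.execute(sql, dept_id=dept)
        print(cursor.fetchone())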
- - If the statement is a query, the cursor is returned as a convenience to the - caller (so it can be used directly as an iterator over the rows in the - cursor); otherwise, *None* is returned. +.. automethod:: Cursor.close + +.. automethod:: Cursor.execute .. versionchanged:: 3.3.0 @@ -170,135 +77,34 @@ Cursor Methods The DB API definition does not define the return value of this method. -.. method:: Cursor.executemany(statement, parameters, batcherrors=False, \ - arraydmlrowcounts=False, suspend_on_success=False) - - Executes a SQL statement once using all bind value mappings or sequences - found in the sequence parameters. This can be used to insert, update, or - delete multiple rows in a table with a single python-oracledb call. It can - also invoke a PL/SQL procedure multiple times. See :ref:`batchstmnt`. - - The ``statement`` parameter is managed in the same way as the - :meth:`Cursor.execute()` method manages it. - - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one bind variable placeholder in ``statement``. It can also be a - list of dictionaries, where the keys match the bind variable placeholder - names in ``statement``. If there are no bind values, or values have - previously been bound, the ``parameters`` value can be an integer - specifying the number of iterations. The ``parameters`` parameter can also - be a :ref:`DataFrame `, or a third-party data frame - that supports the `Apache Arrow PyCapsule `__ Interface. - - In python-oracledb Thick mode, if the size of the buffers allocated for any - of the parameters exceeds 2 GB, you will receive the error ``DPI-1015: - array size of is too large``. If you receive this error, decrease the - number of rows being inserted. - - When *True*, the ``batcherrors`` parameter enables batch error support - within Oracle Database and ensures that the call succeeds even if an - exception takes place in one or more of the sequence of bind values. The - errors can then be retrieved using :meth:`Cursor.getbatcherrors()`. - - When *True*, the ``arraydmlrowcounts`` parameter enables DML row counts to - be retrieved from Oracle after the method has completed. The row counts can - then be retrieved using :meth:`Cursor.getarraydmlrowcounts()`. - - Both the ``batcherrors`` parameter and the ``arraydmlrowcounts`` parameter - can only be *True* when executing an insert, update, delete, or merge - statement; in all other cases an error will be raised. - - The ``suspend_on_success`` parameter is specific to :ref:`sessionless - transactions `. When set to *True*, the active sessionless - transaction will be suspended when ``executemany()`` completes - successfully. See :ref:`suspendtxns`. - - For maximum efficiency, it is best to use the - :meth:`Cursor.setinputsizes()` method to specify the bind value types and - sizes. In particular, if the type is not explicitly specified, the value - *None* is assumed to be a string of length 1 so any values that are later - bound as numbers or dates will raise a TypeError exception. +.. automethod:: Cursor.executemany .. versionchanged:: 3.3.0 Added support for passing data frames in the ``parameters`` parameter. The ``suspend_on_success`` parameter was added. -.. method:: Cursor.fetchall() - - Fetches all (remaining) rows of a query result, returning them as a list of - tuples. An empty list is returned if no more rows are available. An - exception is raised if the previous call to :meth:`Cursor.execute()` did - not produce any result set or no call was issued yet. 
- - Note that the cursor's :attr:`~Cursor.arraysize` attribute can affect the - performance of this operation, as internally data is fetched in batches of - that size from the database. See :ref:`Tuning Fetch Performance - `. +.. automethod:: Cursor.fetchall See :ref:`fetching` for an example. -.. method:: Cursor.fetchmany(size=cursor.arraysize) - - Fetches the next set of rows of a query result, returning a list of tuples. - An empty list is returned if no more rows are available. Note that the - cursor's :attr:`~Cursor.arraysize` attribute can affect the performance of - this operation. - - The number of rows to fetch is specified by the parameter. If it is not - given, the cursor's :attr:`~Cursor.arraysize` attribute determines the - number of rows to be fetched. If the number of rows available to be fetched - is fewer than the amount requested, fewer rows will be returned. - - An exception is raised if the previous call to :meth:`Cursor.execute()` - did not produce any result set or no call was issued yet. +.. automethod:: Cursor.fetchmany See :ref:`fetching` for an example. -.. method:: Cursor.fetchone() - - Fetches the next row of a query result set, returning a single tuple or - *None* when no more data is available. - - An exception is raised if the previous call to :meth:`Cursor.execute()` - did not produce any result set or no call was issued yet. +.. automethod:: Cursor.fetchone See :ref:`fetching` for an example. -.. method:: Cursor.getarraydmlrowcounts() - - Retrieves the DML row counts after a call to :meth:`Cursor.executemany()` - with ``arraydmlrowcounts`` enabled. This will return a list of integers - corresponding to the number of rows affected by the DML statement for each - element of the array passed to :meth:`Cursor.executemany()`. - - This method is only available for Oracle Database 12.1 and later. +.. automethod:: Cursor.getarraydmlrowcounts .. dbapimethodextension:: -.. method:: Cursor.getbatcherrors() - - Retrieves the exceptions that took place after a call to - :meth:`Cursor.executemany()` with ``batcherrors`` enabled. This will return a - list of Error objects, one error for each iteration that failed. The offset - can be determined by looking at the offset attribute of the error object. +.. automethod:: Cursor.getbatcherrors .. dbapimethodextension:: -.. method:: Cursor.getimplicitresults() - - Returns a list of cursors which correspond to implicit results made - available from a PL/SQL block or procedure without the use of OUT ref - cursor parameters. The PL/SQL block or procedure opens the cursors and - marks them for return to the client using the procedure - dbms_sql.return_result. In python-oracledb Thick mode, closing the parent - cursor will result in the automatic closure of the implicit result set - cursors. See :ref:`implicitresults`. - - This method is only available for Oracle Database 12.1 (or later). For - python-oracledb :ref:`Thick ` mode, Oracle Client 12.1 (or - later) is additionally required. +.. automethod:: Cursor.getimplicitresults .. dbapimethodextension:: @@ -307,11 +113,7 @@ Cursor Methods set), this method returns cursors which can be fetched independently of each other. -.. method:: Cursor.parse(statement) - - This can be used to parse a statement without actually executing it - (parsing step is done automatically by Oracle when a statement is - :meth:`executed `). +.. automethod:: Cursor.parse .. dbapimethodextension:: @@ -321,166 +123,30 @@ Cursor Methods immediately and an implied commit takes place. 
You can also parse PL/SQL statements. -.. method:: Cursor.prepare(statement, tag, cache_statement=True) - - This can be used before a call to :meth:`Cursor.execute()` or - :meth:`Cursor.executemany()` to define the statement that will be - executed. When this is done, the prepare phase will not be performed when - the call to :meth:`Cursor.execute()` or :meth:`Cursor.executemany()` is - made with *None* or the same string object as the statement. - - If the ``tag`` parameter is specified and the ``cache_statement`` parameter - is *True*, the statement will be returned to the statement cache with the - given tag. - - If the ``cache_statement`` parameter is *False*, the statement will be - removed from the statement cache (if it was found there) or will simply not - be cached. +.. automethod:: Cursor.prepare See :ref:`Statement Caching ` for more information. .. dbapimethodextension:: -.. method:: Cursor.scroll(value=0, mode="relative") - - Scrolls the cursor in the result set to a new position according to the - mode. - - If mode is *relative* (the default value), the value is taken as an offset - to the current position in the result set. If set to *absolute*, value - states an absolute target position. If set to *first*, the cursor is - positioned at the first row and if set to *last*, the cursor is set to the - last row in the result set. - - An error is raised if the mode is *relative* or *absolute* and the scroll - operation would position the cursor outside of the result set. +.. automethod:: Cursor.scroll - .. dbapimethodextension:: It is mentioned in PEP 249 as an optional extension. - -.. method:: Cursor.setinputsizes(*args, **keywordArgs) + .. dbapimethodextension:: + It is mentioned in PEP 249 as an optional extension. - This can be used before calls to :meth:`Cursor.execute()` or - :meth:`Cursor.executemany()` to predefine memory areas used for - :ref:`bind variables `. Each parameter should be a type object - corresponding to the data that will be used for a bind variable placeholder - in the SQL or PL/SQL statement. Alternatively, it can be an integer - specifying the maximum length of a string bind variable value. +.. automethod:: Cursor.setinputsizes - Use keyword parameters when :ref:`binding by name `. Use - positional parameters when :ref:`binding by position `. The - parameter value can be *None* to indicate that python-oracledb should - determine the required space from the data value provided. + .. note:: - The parameters or keyword names correspond to the bind variable - placeholders used in the SQL or PL/SQL statement. Note this means that for - use with :meth:`Cursor.executemany()` it does not correspond to the number - of bind value mappings or sequences being passed. + This function should not be used for bind variables passed to + :meth:`callfunc()` or :meth:`callproc()`. Instead, use :meth:`var()`. - When repeated calls to :meth:`Cursor.execute()` or - :meth:`Cursor.executemany()` are made binding different string data - lengths, using :meth:`~Cursor.setinputsizes()` can help reduce the - database's SQL "version count" for the statement. See :ref:`Reducing the - SQL Version Count `. + If this function is used with :meth:`callfunc()`, the first parameter + in the list refers to the return value of the PL/SQL function. - .. note:: +.. automethod:: Cursor.setoutputsize - :meth:`Cursor.setinputsizes()` should not be used for bind variables - passed to :meth:`Cursor.callfunc()` or - :meth:`Cursor.callproc()`. Instead, use :meth:`Cursor.var()`. 
- - If :meth:`Cursor.setinputsizes()` is used with - :meth:`Cursor.callfunc()`, the first parameter in the list refers to - the return value of the PL/SQL function. - -.. method:: Cursor.setoutputsize(size, [column]) - - This method does nothing and is retained solely for compatibility with the - DB API. Python-oracledb automatically allocates as much space as needed to - fetch LONG and LONG RAW columns, and also to fetch CLOB as string and BLOB - as bytes. - -.. method:: Cursor.var(typ, [size, arraysize, inconverter, outconverter, \ - typename, encoding_errors, bypass_decode, convert_nulls]) - - Creates a :ref:`variable object ` with the specified - characteristics. This method can be used for binding to PL/SQL IN and OUT - parameters where the length or type cannot be determined automatically from - the Python variable being bound. It can also be used in :ref:`input - ` and :ref:`output ` type handlers. - - The ``typ`` parameter specifies the type of data that should be stored in the - variable. This should be one of the :ref:`database type constants - `, :ref:`DB API constants `, an object type returned from - the method :meth:`Connection.gettype()` or one of the following Python - types: - - .. list-table-with-summary:: - :header-rows: 1 - :class: wy-table-responsive - :align: center - :summary: The first column is the Python Type. The second column is the corresponding Database Type. - - * - Python Type - - Database Type - * - bool - - :attr:`oracledb.DB_TYPE_BOOLEAN` - * - bytes - - :attr:`oracledb.DB_TYPE_RAW` - * - datetime.date - - :attr:`oracledb.DB_TYPE_DATE` - * - datetime.datetime - - :attr:`oracledb.DB_TYPE_DATE` - * - datetime.timedelta - - :attr:`oracledb.DB_TYPE_INTERVAL_DS` - * - decimal.Decimal - - :attr:`oracledb.DB_TYPE_NUMBER` - * - float - - :attr:`oracledb.DB_TYPE_NUMBER` - * - int - - :attr:`oracledb.DB_TYPE_NUMBER` - * - str - - :attr:`oracledb.DB_TYPE_VARCHAR` - - The ``size`` parameter specifies the length of string and raw variables and is - ignored in all other cases. If not specified for string and raw variables, - the value *4000* is used. - - The ``arraysize`` parameter specifies the number of elements the variable will - have. If not specified the bind array size (usually *1*) is used. When a - variable is created in an output type handler this parameter should be set - to the cursor's array size. - - The ``inconverter`` and ``outconverter`` parameters specify methods used for - converting values to/from the database. More information can be found in - the section on :ref:`variable objects`. - - The ``typename`` parameter specifies the name of a SQL object type and must be - specified when using type :data:`oracledb.OBJECT` unless the type object - was passed directly as the first parameter. - - The ``encoding_errors`` parameter specifies what should happen when decoding - byte strings fetched from the database into strings. It should be one of - the values noted in the builtin - `decode `__ - function. - - The ``bypass_decode`` parameter, if specified, should be passed as a - boolean value. Passing a *True* value causes values of database types - :data:`~oracledb.DB_TYPE_VARCHAR`, :data:`~oracledb.DB_TYPE_CHAR`, - :data:`~oracledb.DB_TYPE_NVARCHAR`, :data:`~oracledb.DB_TYPE_NCHAR` and - :data:`~oracledb.DB_TYPE_LONG` to be returned as bytes instead of str, - meaning that python-oracledb does not do any decoding. See :ref:`Fetching raw - data ` for more information. - - The ``convert_nulls`` parameter, if specified, should be passed as a boolean - value. 
Passing the value *True* causes the ``outconverter`` to be called - when a null value is fetched from the database; otherwise, the - ``outconverter`` is only called when non-null values are fetched from the - database. - - For consistency and compliance with the PEP 8 naming style, the parameter - ``encodingErrors`` was renamed to ``encoding_errors``. The old name will - continue to work as a keyword parameter for a period of time. +.. automethod:: Cursor.var .. versionchanged:: 1.4.0 @@ -491,50 +157,20 @@ Cursor Methods Cursor Attributes ================= -.. attribute:: Cursor.arraysize - - This read-write attribute can be used to tune the number of rows internally - fetched and buffered by internal calls to the database when fetching rows - from SELECT statements and REF CURSORS. The value can drastically affect - the performance of a query since it directly affects the number of network - round trips between Python and the database. For methods like - :meth:`Cursor.fetchone()` and :meth:`Cursor.fetchall()` it affects internal - behavior but does not change how many rows are returned to the - application. For :meth:`Cursor.fetchmany()` it is the default number of - rows to fetch. - - The attribute is only used for tuning row and SODA document fetches from - the database. It does not affect data inserts. - - Due to the performance benefits, the default ``arraysize`` is *100* instead - of the *1* that the Python DB API recommends. +.. autoproperty:: Cursor.arraysize See :ref:`Tuning Fetch Performance ` for more information. -.. attribute:: Cursor.bindvars - - This read-only attribute provides the bind variables used for the last - statement that was executed on the cursor. The value will be either a list - or a dictionary, depending on whether binding was done by position or - name. Care should be taken when referencing this attribute. In particular, - elements should not be removed or replaced. +.. autoproperty:: Cursor.bindvars .. dbapiattributeextension:: -.. attribute:: Cursor.connection +.. autoproperty:: Cursor.connection - This read-only attribute returns a reference to the connection object on - which the cursor was created. - - .. dbapimethodextension:: It is mentioned in PEP 249 as an optional extension. - -.. attribute:: Cursor.description + .. dbapimethodextension:: + It is mentioned in PEP 249 as an optional extension. - This read-only attribute contains information about the columns used in a - query. It is a sequence of :ref:`FetchInfo ` objects, one per - column. This attribute will be *None* for statements that are not SELECT or - WITH statements, or if the cursor has not had :meth:`Cursor.execute()` - invoked yet. +.. autoproperty:: Cursor.description .. versionchanged:: 1.4.0 @@ -542,43 +178,19 @@ Cursor Attributes tuples contained information describing one query column: "(name, type, display_size, internal_size, precision, scale, null_ok)". -.. attribute:: Cursor.fetchvars - - This read-only attribute specifies the list of variables created for the - last query that was executed on the cursor. Care should be taken when - referencing this attribute. In particular, elements should not be removed - or replaced. +.. autoproperty:: Cursor.fetchvars .. dbapiattributeextension:: -.. attribute:: Cursor.inputtypehandler - - This read-write attribute specifies a method called for each value that is - bound to a statement executed on the cursor and overrides the attribute - with the same name on the connection if specified. 
The method signature is - handler(cursor, value, arraysize) and the return value is expected to be a - variable object or *None* in which case a default variable object will be - created. If this attribute is *None*, the default behavior will take place - for all values bound to the statements. +.. autoproperty:: Cursor.inputtypehandler See :ref:`inputtypehandlers`. .. dbapiattributeextension:: -.. attribute:: Cursor.lastrowid - - This read-only attribute returns the rowid of the last row modified by the - cursor. If no row was modified by the last operation performed on the - cursor, the value *None* is returned. +.. autoproperty:: Cursor.lastrowid -.. attribute:: Cursor.outputtypehandler - - This read-write attribute specifies a method called for each column that is - to be fetched from this cursor. The method signature is - handler(cursor, metadata) and the return value is expected to be a - :ref:`variable object ` or *None* in which case a default variable - object will be created. If this attribute is *None*, then the default - behavior will take place for all columns fetched from this cursor. +.. autoproperty:: Cursor.outputtypehandler See :ref:`outputtypehandlers`. @@ -590,72 +202,29 @@ Cursor Attributes handler(cursor, name, default_type, length, precision, scale) will still work but is deprecated and will be removed in a future version. -.. attribute:: Cursor.prefetchrows - - This read-write attribute can be used to tune the number of rows that the - Oracle Client library fetches when a SELECT statement is executed. This - value can reduce the number of round-trips to the database that are - required to fetch rows but at the cost of additional memory. Setting this - value to *0* can be useful when the timing of fetches must be explicitly - controlled. - - The attribute is only used for tuning row fetches from the database. It - does not affect data inserts. - - Queries that return LOBs and similar types will never prefetch rows, so the - ``prefetchrows`` value is ignored in those cases. +.. autoproperty:: Cursor.prefetchrows See :ref:`Tuning Fetch Performance ` for more information. .. dbapimethodextension:: -.. attribute:: Cursor.rowcount - - This read-only attribute specifies the number of rows that have currently - been fetched from the cursor (for select statements) or that have been - affected by the operation (for insert, update, delete, and merge - statements). For all other statements the value is always *0*. If the - cursor or connection is closed, the value returned is *-1*. - -.. attribute:: Cursor.rowfactory +.. autoproperty:: Cursor.rowcount - This read-write attribute specifies a method to call for each row that is - retrieved from the database. Ordinarily, a tuple is returned for each row - but if this attribute is set, the method is called with the tuple that - would normally be returned, and the result of the method is returned - instead. +.. autoproperty:: Cursor.rowfactory See :ref:`rowfactories`. .. dbapiattributeextension:: -.. attribute:: Cursor.scrollable - - This read-write boolean attribute specifies whether the cursor can be - scrolled or not. By default, cursors are not scrollable, as the server - resources and response times are greater than nonscrollable cursors. This - attribute is checked and the corresponding mode set in Oracle when calling - the method :meth:`Cursor.execute()`. +.. autoproperty:: Cursor.scrollable .. dbapiattributeextension:: -.. 
attribute:: Cursor.statement - - This read-only attribute provides the string object that was previously - prepared with :meth:`Cursor.prepare()` or executed with - :meth:`Cursor.execute()`. +.. autoproperty:: Cursor.statement .. dbapiattributeextension:: -.. attribute:: Cursor.warning - - This read-only attribute provides an :ref:`oracledb._Error` - object giving information about any database warnings (such as PL/SQL - compilation warnings) that were generated during the last call to - :meth:`Cursor.execute()` or :meth:`Cursor.executemany()`. This value is - automatically cleared on the next call to :meth:`Cursor.execute()` or - :meth:`Cursor.executemany()`. If no warning was generated the value - *None* is returned. +.. autoproperty:: Cursor.warning See :ref:`plsqlwarning` for more information. diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 5913cc0e..50e1e2b8 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -2154,13 +2154,6 @@ Oracledb Methods The ``connection_id_prefix`` parameter was added. -.. function:: Cursor(connection) - - Constructor for creating a cursor. Returns a new - :ref:`cursor object ` using the connection. - - .. dbapimethodextension:: - .. function:: Date(year, month, day) Constructs an object holding a date value. diff --git a/doc/src/conf.py b/doc/src/conf.py index 534ccf3a..ddc4fef6 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -12,11 +12,15 @@ # serve to show the default value. import os +import pathlib import sys # If your extensions are in another directory, add it here. sys.path.append(os.path.abspath("_ext")) +# include the path of the source so that autodoc willfunction +sys.path.insert(0, str(pathlib.Path("..", "src").resolve())) + # General configuration # --------------------- @@ -27,8 +31,12 @@ "oracle_deprecated", "dbapi_extension", "sphinx_rtd_theme", + "sphinx.ext.autodoc", ] +# preserve defaults in function signatures +autodoc_preserve_defaults = True + # Add any paths that contain templates here, relative to this directory. templates_path = [".templates"] diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index fcdd360a..40c199a1 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -23,6 +23,8 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) API documentation is now generated from the source code. + oracledb `3.3.0 `__ (July 2025) -------------------------------------------------------------------------------------------------- diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 7c018cbc..a9da0ebd 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -36,7 +36,7 @@ import collections import functools import ssl -from typing import Any, Callable, Type, Optional, Union +from typing import Any, Callable, Iterator, Type, Optional, Union import oracledb @@ -48,6 +48,7 @@ from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType from .connect_params import ConnectParams from .cursor import AsyncCursor, Cursor +from .dataframe import DataFrame from .dbobject import DbObjectType, DbObject from .lob import AsyncLOB, LOB from .pipeline import Pipeline @@ -94,25 +95,25 @@ def _verify_xid(self, xid: Xid) -> None: raise TypeError(message) @property - def action(self) -> None: + def action(self) -> str: + """ + This write-only attribute sets the ACTION column in the V$SESSION view. + It is a string attribute but the value *None* is accepted and treated + as an empty string. 
+ """ raise AttributeError("action is not readable") @action.setter def action(self, value: str) -> None: - """ - Specifies the action column in the v$session table. It is a string - attribute but the value None is also accepted and treated as an empty - string. - """ self._verify_connected() self._impl.set_action(value) @property def autocommit(self) -> bool: """ - Specifies whether autocommit mode is on or off. When autocommit mode is - on, all statements are committed as soon as they have completed - executing successfully. + This read-write attribute determines whether autocommit mode is on or + off. When autocommit mode is on, all statements are committed as soon + as they have completed executing. """ self._verify_connected() return self._impl.autocommit @@ -129,19 +130,33 @@ def begin_sessionless_transaction( defer_round_trip: bool = False, ) -> bytes: """ - Begins a new sessionless transaction. - - Parameters: - transaction_id (str or bytes, optional): A Transaction Identifier. - If None, a random transaction_id will be generated. - timeout (int, optional): Timeout value in seconds. - Must be a positive integer. Defaults to 60 if not provided. - defer_round_trip (bool, optional): - If True, the request is not sent immediately but included - with the next database operation. - - Returns: - bytes: The normalized transaction_id used for the transaction. + Begins a new sessionless transaction. This method returns the + transaction identifier specified by the user or generated by + python-oracledb. + + The ``transaction_id`` parameter should be of type string or bytes. If + specified, it represents a unique identifier for the transaction. If a + string is passed, then it will be UTF-8 encoded to bytes. If this value + is not specified, then python-oracledb generates a a random + `universally-unique identifier (UUID) + `__ value. An example is + "36b8f84d-df4e-4d49-b662-bcde71a8764f". Any user-chosen value cannot + exceed 64 bytes in length. + + The ``timeout`` parameter is the number of seconds that this + transaction can stay suspended when + :meth:`suspend_sessionless_transaction()` is later called, + or if the transaction is automatically suspended when the + ``suspend_on_success`` parameter is set to to *True* in + :meth:`Cursor.execute()` or :meth:`Cursor.executemany()`. The default + value is *60* seconds. If a transaction is not resumed within this + specified duration, the transaction will be rolled back. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to start a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. """ self._verify_connected() normalized_txnid = normalize_sessionless_transaction_id(transaction_id) @@ -157,9 +172,23 @@ def begin_sessionless_transaction( @property def call_timeout(self) -> int: """ - Specifies the amount of time (in milliseconds) that a single round-trip - to the database may take before a timeout will occur. A value of 0 - means that no timeout will take place. + This read-write attribute specifies the amount of time (in + milliseconds) that a single round-trip to the database may take before + a timeout will occur. A value of *0* means that no timeout will take + place. + + In python-oracledb Thick mode, this attribute is only available in + Oracle Client 18c or later. 
+ + If a timeout occurs, the error ``DPI-1067`` will be returned if the + connection is still usable. Alternatively the error ``DPI-1080`` will + be returned if the connection has become invalid and can no longer be + used. + + For consistency and compliance with the PEP 8 naming style, the + attribute ``callTimeout`` was renamed to ``call_timeout``. The old name + will continue to work for a period of time. The error ``DPI-1080`` was + also introduced in this release. """ self._verify_connected() return self._impl.get_call_timeout() @@ -171,25 +200,30 @@ def call_timeout(self, value: int) -> None: def cancel(self) -> None: """ - Break a long-running transaction. + Breaks a long-running statement. """ self._verify_connected() self._impl.cancel() @property - def client_identifier(self) -> None: + def client_identifier(self) -> str: + """ + This write-only attribute sets the CLIENT_IDENTIFIER column in the + V$SESSION view. + """ raise AttributeError("client_identifier is not readable") @client_identifier.setter def client_identifier(self, value: str) -> None: - """ - Specifies the client_identifier column in the v$session table. - """ self._verify_connected() self._impl.set_client_identifier(value) @property - def clientinfo(self) -> None: + def clientinfo(self) -> str: + """ + This write-only attribute sets the CLIENT_INFO column in the V$SESSION + view. + """ raise AttributeError("clientinfo is not readable") @clientinfo.setter @@ -203,11 +237,12 @@ def clientinfo(self, value: str) -> None: @property def current_schema(self) -> str: """ - Specifies the current schema for the session. Setting this value is the - same as executing the SQL statement "ALTER SESSION SET CURRENT_SCHEMA". - The attribute is set (and verified) on the next call that does a round - trip to the server. The value is placed before unqualified database - objects in SQL statements you then execute. + This read-write attribute sets the current schema attribute for the + session. Setting this value is the same as executing the SQL statement + ``ALTER SESSION SET CURRENT_SCHEMA``. The attribute is set (and + verified) on the next call that does a round trip to the server. The + value is placed before unqualified database objects in SQL statements + you then execute. """ self._verify_connected() return self._impl.get_current_schema() @@ -218,86 +253,98 @@ def current_schema(self, value: str) -> None: self._impl.set_current_schema(value) @property - def dbop(self) -> None: - raise AttributeError("dbop is not readable") - - @dbop.setter - def dbop(self, value: str) -> None: + def db_domain(self) -> str: """ - Specifies the database operation that is to be monitored. This can be - viewed in the DBOP_NAME column of the V$SQL_MONITOR table. + This read-only attribute specifies the Oracle Database domain name + associated with the connection. It is the same value returned by the + SQL ``SELECT value FROM V$PARAMETER WHERE NAME = 'db_domain'``. """ self._verify_connected() - self._impl.set_dbop(value) + return self._impl.get_db_domain() @property - def dsn(self) -> str: + def db_name(self) -> str: """ - Specifies the connection string (TNS entry) of the database to which a - connection has been established. + This read-only attribute specifies the Oracle Database name associated + with the connection. It is the same value returned by the SQL ``SELECT + NAME FROM V$DATABASE``. 
""" self._verify_connected() - return self._impl.dsn + return self._impl.get_db_name() @property - def econtext_id(self) -> None: - raise AttributeError("econtext_id is not readable") - - @econtext_id.setter - def econtext_id(self, value: str) -> None: + def dbop(self) -> str: """ - Specifies the execution context id. This value can be found as ecid in - the v$session table and econtext_id in the auditing tables. The maximum - length is 64 bytes. + This write-only attribute sets the database operation that is to be + monitored. This can be viewed in the DBOP_NAME column of the + V$SQL_MONITOR view. """ + raise AttributeError("dbop is not readable") + + @dbop.setter + def dbop(self, value: str) -> None: self._verify_connected() - self._impl.set_econtext_id(value) + self._impl.set_dbop(value) - @property - def db_domain(self) -> str: + def decode_oson(self, data: bytes) -> Any: """ - Specifies the name of the database domain. + Decodes `OSON-encoded + `__ bytes and returns the + object encoded in those bytes. This is useful for fetching columns + which have the check constraint ``IS JSON FORMAT OSON`` enabled. """ self._verify_connected() - return self._impl.get_db_domain() + return self._impl.decode_oson(data) @property - def db_name(self) -> str: + def dsn(self) -> str: """ - Specifies the name of the database. + This read-only attribute returns the TNS entry of the database to which + a connection has been established. """ self._verify_connected() - return self._impl.get_db_name() + return self._impl.dsn @property - def session_id(self) -> int: + def econtext_id(self) -> str: """ - Specifies the session identifier. + This write-only attribute specifies the execution context id. This + value can be found as the ECID column in the V$SESSION view and + ECONTEXT_ID in the auditing tables. The maximum length is 64 bytes. """ + raise AttributeError("econtext_id is not readable") + + @econtext_id.setter + def econtext_id(self, value: str) -> None: self._verify_connected() - return self._impl.get_session_id() + self._impl.set_econtext_id(value) @property - def serial_num(self) -> int: + def edition(self) -> str: """ - Specifies the session serial number. + This read-only attribute gets the session edition and is only available + with Oracle Database 11.2, or later. """ self._verify_connected() - return self._impl.get_serial_num() + return self._impl.get_edition() - @property - def edition(self) -> str: + def encode_oson(self, value: Any) -> bytes: """ - Specifies the session edition. + Encodes a Python value into `OSON-encoded + `__ bytes and returns + them. This is useful for inserting into columns which have the check + constraint ``IS JSON FORMAT OSON`` enabled. """ self._verify_connected() - return self._impl.get_edition() + return self._impl.encode_oson(value) @property def external_name(self) -> str: """ - Specifies the external name that is used by the connection when logging - distributed transactions. + This read-write attribute specifies the external name that is used by + the connection when logging distributed transactions. """ self._verify_connected() return self._impl.get_external_name() @@ -310,12 +357,13 @@ def external_name(self, value: str) -> None: @property def inputtypehandler(self) -> Callable: """ - Specifies a method called for each value that is bound to a statement - executed on any cursor associated with this connection. 
The method
-        signature is handler(cursor, value, arraysize) and the return value is
-        expected to be a variable object or None in which case a default
-        variable object will be created. If this attribute is None, the default
-        behavior will take place for all values bound to statements.
+        This read-write attribute specifies a method called for each value that
+        is bound to a statement executed on any cursor associated with this
+        connection. The method signature is handler(cursor, value, arraysize)
+        and the return value is expected to be a variable object or *None* in
+        which case a default variable object will be created. If this attribute
+        is *None*, the default behavior will take place for all values bound to
+        statements.
         """
         self._verify_connected()
         return self._impl.inputtypehandler
@@ -328,10 +376,9 @@ def inputtypehandler(self, value: Callable) -> None:
     @property
     def instance_name(self) -> str:
         """
-        Returns the instance name associated with the connection. This is the
-        equivalent of the SQL expression:
-
-            sys_context('userenv', 'instance_name')
+        This read-only attribute specifies the Oracle Database instance name
+        associated with the connection. It is the same value as the SQL
+        expression ``sys_context('userenv', 'instance_name')``.
         """
         self._verify_connected()
         return self._impl.get_instance_name()
@@ -339,8 +386,8 @@ def instance_name(self) -> str:
     @property
     def internal_name(self) -> str:
         """
-        Specifies the internal name that is used by the connection when logging
-        distributed transactions.
+        This read-write attribute specifies the internal name that is used by
+        the connection when logging distributed transactions.
         """
         self._verify_connected()
         return self._impl.get_internal_name()
@@ -352,32 +399,40 @@ def internal_name(self, value: str) -> None:

     def is_healthy(self) -> bool:
         """
-        Returns a boolean indicating the health status of a connection.
+        This function returns a boolean indicating the health status of a
+        connection.

-        Connections may become unusable in several cases, such as if the
+        Connections may become unusable in several cases, such as, if the
         network socket is broken, if an Oracle error indicates the connection
-        is unusable, or after receiving a planned down notification from the
+        is unusable, or, after receiving a planned down notification from the
         database.

         This function is best used before starting a new database request on an
-        existing standalone connection. Pooled connections internally perform
-        this check before returning a connection to the application.
+        existing :ref:`standalone connection <standaloneconnection>`. For
+        pooled connections, the :meth:`ConnectionPool.acquire()` method
+        internally performs this check before returning a connection to the
+        application, see :ref:`poolhealth`.

-        If this function returns False, the connection should be not be used by
-        the application and a new connection should be established instead.
+        If this function returns *False*, the connection should not be used
+        by the application and a new connection should be established instead.

         This function performs a local check. To fully check a connection's
-        health, use ping() which performs a round-trip to the database.
+        health, use :meth:`ping()` which performs a round-trip to
+        the database.
         """
         return self._impl is not None and self._impl.get_is_healthy()

     @property
     def ltxid(self) -> bytes:
         """
-        Returns the logical transaction id for the connection. It is used
-        within Oracle Transaction Guard as a means of ensuring that
-        transactions are not duplicated.
See the Oracle documentation and the - provided sample for more information. + This read-only attribute returns the logical transaction id for the + connection. It is used within Oracle Transaction Guard as a means of + ensuring that transactions are not duplicated. See :ref:`tg` for more + information. + + This is only available with Oracle Database 12.1 or later. In + python-oracledb Thick mode, it also requires Oracle Client libraries + 12.1 or later. """ self._verify_connected() return self._impl.get_ltxid() @@ -385,8 +440,15 @@ def ltxid(self) -> bytes: @property def max_identifier_length(self) -> int: """ - Returns the maximum length of identifiers supported by the database to - which this connection has been established. + This read-only attribute specifies the maximum database identifier + length in bytes supported by the database to which the connection has + been established. See `Database Object Naming Rules + `__. The value may be + *None*, *30*, or *128*. The value *None* indicates the size cannot be + reliably determined by python-oracledb, which occurs when using Thick + mode with Oracle Client libraries 12.1 (or older) to connect to Oracle + Database 12.2, or later. """ self._verify_connected() return self._impl.get_max_identifier_length() @@ -394,23 +456,26 @@ def max_identifier_length(self) -> int: @property def max_open_cursors(self) -> int: """ - Specifies the maximum number of cursors that the database can have open - concurrently. + This read-only attribute specifies the maximum number of cursors that + the database can have open concurrently. It is the same value returned + by the SQL ``SELECT VALUE FROM V$PARAMETER WHERE NAME = + 'open_cursors'``. When using python-oracledb Thick mode, Oracle Client + libraries 12.1 (or later) are required. """ self._verify_connected() return self._impl.get_max_open_cursors() @property - def module(self) -> None: + def module(self) -> str: + """ + This write-only attribute sets the MODULE column in the V$SESSION view. + The maximum length for this string is 48 and if you exceed this length + you will get ``ORA-24960``. + """ raise AttributeError("module is not readable") @module.setter def module(self, value: str) -> None: - """ - Specifies the module column in the v$session table. The maximum length - for this string is 48 and if you exceed this length you will get - ORA-24960. - """ self._verify_connected() self._impl.set_module(value) @@ -425,9 +490,11 @@ def msgproperties( recipients: Optional[list] = None, ) -> MessageProperties: """ - Create and return a message properties object. If the parameters are - not None, they act as a shortcut for setting each of the equivalently - named properties. + Returns an object specifying the properties of messages used in + advanced queuing. See :ref:`msgproperties` for more information. + + Each of the parameters are optional. If specified, they act as a + shortcut for setting each of the equivalently named properties. """ impl = self._impl.create_msg_props_impl() props = MessageProperties._from_impl(impl) @@ -455,16 +522,21 @@ def queue( payloadType: Optional[DbObjectType] = None, ) -> Queue: """ - Creates and returns a queue which is used to enqueue and dequeue - messages in Advanced Queueing (AQ). + Creates a :ref:`queue ` which is used to enqueue and dequeue + messages in Advanced Queuing. + + The ``name`` parameter is expected to be a string identifying the queue + in which messages are to be enqueued or dequeued. 
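# A minimal sketch of enqueuing and dequeuing a RAW payload with the queue()
# and msgproperties() methods described here; the queue name "DEMO_RAW_QUEUE"
# is an assumption and must already exist, and "connection" is an open
# connection.
queue = connection.queue("DEMO_RAW_QUEUE")
queue.enqone(connection.msgproperties(payload=b"hello"))
connection.commit()
message = queue.deqone()  # by default this waits until a message is available
connection.commit()
print(message.payload)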
- The name parameter is expected to be a string identifying the queue in - which messages are to be enqueued or dequeued. + The ``payload_type`` parameter, if specified, is expected to be an + :ref:`object type ` that identifies the type of payload + the queue expects. If the string "JSON" is specified, JSON data is + enqueued and dequeued. If not specified, RAW data is enqueued and + dequeued. - The payload_type parameter, if specified, is expected to be an - object type that identifies the type of payload the queue expects. - If the string "JSON" is specified, JSON data is enqueued and dequeued. - If not specified, RAW data is enqueued and dequeued. + For consistency and compliance with the PEP 8 naming style, the + parameter ``payloadType`` was renamed to ``payload_type``. The old name + will continue to work as a keyword parameter for a period of time. """ self._verify_connected() payload_type_impl = None @@ -491,13 +563,13 @@ def queue( @property def outputtypehandler(self) -> Callable: """ - Specifies a method called for each column that is going to be fetched - from any cursor associated with this connection. The method signature - is handler(cursor, name, defaultType, length, precision, scale) and the - return value is expected to be a variable object or None in which case - a default variable object will be created. If this attribute is None, - the default behavior will take place for all columns fetched from - cursors associated with this connection. + This read-write attribute specifies a method called for each column + that is going to be fetched from any cursor associated with this + connection. The method signature is ``handler(cursor, metadata)`` and + the return value is expected to be a :ref:`variable object` or + *None* in which case a default variable object will be created. If this + attribute is *None*, the default behavior will take place for all + columns fetched from cursors. """ self._verify_connected() return self._impl.outputtypehandler @@ -507,6 +579,15 @@ def outputtypehandler(self, value: Callable) -> None: self._verify_connected() self._impl.outputtypehandler = value + @property + def proxy_user(self) -> Union[str, None]: + """ + This read-only attribute returns the name of the user which was used as + a proxy when creating the connection to the database. + """ + self._verify_connected() + return self._impl.proxy_user + def resume_sessionless_transaction( self, transaction_id: Union[str, bytes], @@ -514,22 +595,38 @@ def resume_sessionless_transaction( defer_round_trip: bool = False, ) -> bytes: """ - Resumes an existing sessionless transaction using the given - transaction_id. - - Parameters: - transaction_id (str or bytes): A Transaction Identifier that - uniquely identifies the sessionless transaction to be - resumed. This parameter is mandatory. - timeout (int, optional): Timeout in seconds for the resumed - transaction. Must be a positive integer. Defaults to 60. - defer_round_trip (bool, optional): - If True, the request is not sent immediately but included - with the next database operation. - - Returns: - bytes: The normalized transaction_id used to resume the - sessionless transaction. + Resumes an existing sessionless transaction using the specified + transaction identifier. This method returns the transaction identifier + used to resume the sessionless transaction. + + The ``transaction_id`` parameter should be a string or bytes value that + uniquely identifies an existing sessionless transaction that is to be + resumed. 
+ + The ``timeout`` parameter is the number of seconds that the current + connection waits to resume a transaction if another connection is using + it. When ``defer_round_trip`` is set to *False*, the wait happens in + the ``resume_sessionless_transaction()`` call itself, and the function + blocks until the transaction becomes available or the timeout expires. + When ``defer_round_trip`` is set to *True*, the resume is deferred and + the wait occurs at the time of the next database operation instead. At + the start of the wait period, if the transaction is not in use by any + other connection, the resume happens immediately. If the transaction + remains in use by the other connection after the timeout period, the + error `ORA-25351 + `__ is raised. If + another connection completes the transaction, the error `ORA-24756 + `__ is raised. + These error messages are only thrown for non-RAC instances. For + information on using Oracle RAC, see :ref:`Sessionless Transactions + with Oracle RAC `. The default value is *60* + seconds. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to resume a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. """ self._verify_connected() if transaction_id is None: @@ -548,27 +645,67 @@ def resume_sessionless_transaction( @property def sdu(self) -> int: """ - Specifies the size of the Session Data Unit (SDU) that is being used by - the connection. + This read-only attribute specifies the size of the Session Data Unit + (SDU) that is being used by the connection. The value will be the + lesser of the requested python-oracledb size and the maximum size + allowed by the database network configuration. It is available only in + python-oracledb Thin mode. """ self._verify_connected() return self._impl.get_sdu() + @property + def serial_num(self) -> int: + """ + This read-only attribute specifies the session serial number associated + with the connection. It is the same value returned by the SQL ``SELECT + SERIAL# FROM V$SESSION WHERE SID=SYS_CONTEXT('USERENV', 'SID')``. It + is available only in python-oracledb Thin mode. + + For applications using :ref:`drcp`, the ``serial_num`` attribute may + not contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a + round-trip. + """ + self._verify_connected() + return self._impl.get_serial_num() + @property def service_name(self) -> str: """ - Specifies the name of the service that was used to connect to the - database. + This read-only attribute specifies the Oracle Database service name + associated with the connection. This is the same value returned by the + SQL ``SELECT SYS_CONTEXT('USERENV', 'SERVICE_NAME') FROM DUAL``. """ self._verify_connected() return self._impl.get_service_name() + @property + def session_id(self) -> int: + """ + This read-only attribute specifies the session identifier associated + with the connection. It is the same value returned by the SQL ``SELECT + SYS_CONTEXT('USERENV', 'SID') FROM DUAL``. It is available only in + python-oracledb Thin mode. 
+ + For applications using :ref:`drcp`, the ``session_id`` attribute may + not contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a + round-trip. + """ + self._verify_connected() + return self._impl.get_session_id() + @property def stmtcachesize(self) -> int: """ - Specifies the size of the statement cache. This value can make a - significant difference in performance (up to 100x) if you have a small - number of statements that you execute repeatedly. + This read-write attribute specifies the size of the statement cache. + This value can make a significant difference in performance if you have + a small number of statements that you execute repeatedly. + + The default value is *20*. """ self._verify_connected() return self._impl.get_stmt_cache_size() @@ -578,11 +715,35 @@ def stmtcachesize(self, value: int) -> None: self._verify_connected() self._impl.set_stmt_cache_size(value) + @property + def tag(self) -> str: + """ + This read-write attribute initially contains the actual tag of the + session that was acquired from a pool by + :meth:`ConnectionPool.acquire()`. If the connection was not acquired + from a pool or no tagging parameters were specified (``tag`` and + ``matchanytag``) when the connection was acquired from the pool, this + value will be None. If the value is changed, it must be a string + containing name=value pairs like "k1=v1;k2=v2". + + If this value is not *None* when the connection is released back to the + pool it will be used to retag the session. This value can be overridden + in the call to :meth:`ConnectionPool.release()`. + """ + self._verify_connected() + return self._impl.tag + + @tag.setter + def tag(self, value: str) -> None: + self._verify_connected() + self._impl.tag = value + @property def thin(self) -> bool: """ - Returns a boolean indicating if the connection was established in - python-oracledb's thin mode (True) or thick mode (False). + This read-only attribute returns a boolean indicating if the connection + was established with the python-oracledb Thin mode (*True*) or + python-oracledb Thick mode (*False*). """ self._verify_connected() return self._impl.thin @@ -590,8 +751,8 @@ def thin(self) -> bool: @property def transaction_in_progress(self) -> bool: """ - Specifies whether a transaction is currently in progress on the - database using this connection. + This read-only attribute specifies whether a transaction is currently + in progress on the database associated with the connection. """ self._verify_connected() return self._impl.get_transaction_in_progress() @@ -599,8 +760,8 @@ def transaction_in_progress(self) -> bool: @property def username(self) -> str: """ - Returns the name of the user which established the connection to the - database. + This read-only attribute returns the name of the user which established + the connection to the database. """ self._verify_connected() return self._impl.username @@ -608,8 +769,8 @@ def username(self) -> str: @property def version(self) -> str: """ - Returns the version of the database to which the connection has been - established. + This read-only attribute returns the version of the database to which a + connection has been established. 
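        As a minimal sketch of inspecting these connection attributes and
        enlarging the statement cache (the credentials and DSN below are
        placeholders, not part of this patch)::

            import getpass

            import oracledb

            pw = getpass.getpass("Password: ")
            conn = oracledb.connect(user="hr", password=pw, dsn="localhost/orclpdb")
            print(conn.version, conn.thin, conn.username, conn.service_name)
            conn.stmtcachesize = 40  # raise the statement cache from its default of 20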
""" if self._version is None: self._verify_connected() @@ -617,11 +778,32 @@ def version(self) -> str: return self._version @property - def warning(self) -> errors._Error: + def warning(self) -> Union[errors._Error, None]: """ - Returns any warning that was generated when the connection was created, - or the value None if no warning was generated. The value will be - cleared for pooled connections after they are returned to the pool. + This read-only attribute provides an + :ref:`oracledb._Error` object giving information about any + database warnings (such as the password being in the grace period, or + the pool being created with a smaller than requested size due to + database resource restrictions) that were generated during connection + establishment or by :meth:`oracledb.create_pool()`. The attribute will + be present if there was a warning, but creation otherwise completed + successfully. The connection will be usable despite the warning. + + For :ref:`standalone connections `, + ``Connection.warning`` will be present for the lifetime of the + connection. + + For :ref:`pooled connections `, ``Connection.warning`` + will be cleared when a connection is released to the pool such as with + :meth:`ConnectionPool.release()`. + + In python-oracledb Thick mode, warnings may be generated during pool + creation itself. These warnings will be placed on new connections + created by the pool, provided no warnings were generated by the + individual connection creations, in which case those connection + warnings will be returned. + + If no warning was generated the value *None* is returned. """ self._verify_connected() return self._impl.warning @@ -633,13 +815,25 @@ def xid( branch_qualifier: Union[bytes, str], ) -> Xid: """ - Returns a global transaction identifier that can be used with the TPC - (two-phase commit) functions. + Returns a global transaction identifier (xid) that can be used with the + Two-Phase Commit (TPC) functions. + + The ``xid`` contains a format identifier, a global transaction + identifier, and a branch identifier. There are no checks performed at + the Python level. The values are checked by ODPI-C when they are passed + to the relevant functions. .. When this functionality is also + supported in the thin driver the checks will be performed at the Python + level as well. + + The ``format_id`` parameter should be a positive 32-bit integer. This + value identifies the format of the ``global_transaction_id`` and + ``branch_qualifier`` parameters and the value is determined by the + Transaction Manager (TM), if one is in use. - The format_id parameter should be a non-negative 32-bit integer. The - global_transaction_id and branch_qualifier parameters should be bytes - (or a string which will be UTF-8 encoded to bytes) of no more than 64 - bytes. + The ``global_transaction_id`` and ``branch_qualifier`` parameters + should be of type bytes or string. If a value of type string is passed, + then this value will be UTF-8 encoded to bytes. The values cannot + exceed 64 bytes in length. """ return Xid(format_id, global_transaction_id, branch_qualifier) @@ -739,10 +933,18 @@ def __del__(self): self._close(in_del=True) def __enter__(self): + """ + The entry point for the connection as a context manager. It returns + itself. + """ self._verify_connected() return self def __exit__(self, exc_type, exc_value, exc_tb): + """ + The exit point for the connection as a context manager. This will close + the connection and roll back any uncommitted transaction. 
+ """ if self._impl is not None: self._close() @@ -796,7 +998,14 @@ def begin( branch_id: str = "", ) -> None: """ - Deprecated. Use tpc_begin() instead. + Explicitly begins a new transaction. Without parameters, this + explicitly begins a local transaction; otherwise, this explicitly + begins a distributed (global) transaction with the given parameters. + See the Oracle documentation for more details. + + Note that in order to make use of global (distributed) transactions, + the :attr:`~Connection.internal_name` and + :attr:`~Connection.external_name` attributes must be set. """ if format_id != -1: self.tpc_begin(self.xid(format_id, transaction_id, branch_id)) @@ -822,9 +1031,19 @@ def changepassword(self, old_password: str, new_password: str) -> None: def close(self) -> None: """ - Closes the connection and makes it unusable for further operations. An - Error exception will be raised if any operation is attempted with this - connection after this method completes successfully. + Closes the connection now and makes it unusable for further operations. + An Error exception will be raised if any operation is attempted with + this connection after this method is completed successfully. + + All open cursors and LOBs created by the connection will be closed and + will also no longer be usable. + + Internally, references to the connection are held by cursor objects, + LOB objects, subscription objects, etc. Once all of these references + are released, the connection itself will be closed automatically. + Either control references to these related objects carefully or + explicitly close connections in order to ensure sufficient resources + are available. """ self._verify_connected() self._close() @@ -840,7 +1059,13 @@ def createlob( self, lob_type: DbType, data: Optional[Union[str, bytes]] = None ) -> LOB: """ - Create and return a new temporary LOB of the specified type. + Creates and returns a new temporary :ref:`LOB object ` of the + specified type. The ``lob_type`` parameter should be one of + :data:`oracledb.DB_TYPE_CLOB`, :data:`oracledb.DB_TYPE_BLOB`, or + :data:`oracledb.DB_TYPE_NCLOB`. + + If data is supplied, it will be written to the temporary LOB before it + is returned. """ self._verify_connected() if lob_type not in (DB_TYPE_CLOB, DB_TYPE_NCLOB, DB_TYPE_BLOB): @@ -857,33 +1082,34 @@ def createlob( def cursor(self, scrollable: bool = False) -> Cursor: """ - Returns a cursor associated with the connection. + Returns a new :ref:`cursor object ` using the connection. """ self._verify_connected() return Cursor(self, scrollable) - def decode_oson(self, data): - """ - Decode OSON-encoded bytes and return the object encoded in those bytes. - """ - self._verify_connected() - return self._impl.decode_oson(data) - - def encode_oson(self, value): - """ - Return OSON-encoded bytes encoded from the supplied object. - """ - self._verify_connected() - return self._impl.encode_oson(value) - def fetch_df_all( self, statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, - ): + ) -> DataFrame: """ - Fetch all data as an instance of DataFrame. + Fetches all rows of the SQL query ``statement``, returning them in a + :ref:`DataFrame ` object. An empty DataFrame is + returned if there are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. 
It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``arraysize`` parameter can be specified to tune performance of + fetching data across the network. It defaults to + :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s + :attr:`Cursor.prefetchrows` size is always set to the value of the + explicit or default ``arraysize`` parameter value. + + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -898,9 +1124,25 @@ def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, - ): + ) -> Iterator[DataFrame]: """ - Fetch data in batches. Each batch is an instance of DataFrame + This returns an iterator yielding the next ``size`` rows of the SQL + query ``statement`` in each iteration as a :ref:`DataFrame + ` object. An empty DataFrame is returned if there + are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``size`` parameter controls the number of records fetched in each + batch. It defaults to :attr:`defaults.arraysize`. Internally, the + ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + :attr:`Cursor.prefetchrows` sizes are always set to the value of the + explicit or default ``size`` parameter value. + + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -915,8 +1157,13 @@ def fetch_df_batches( def getSodaDatabase(self) -> SodaDatabase: """ - Return a SODA database object for performing all operations on Simple - Oracle Document Access (SODA). + Returns a :ref:`SodaDatabase ` object for Simple Oracle + Document Access (SODA). All SODA operations are performed either on the + returned SodaDatabase object or from objects created by the returned + SodaDatabase object. See `here + `__ for + additional information on SODA. """ self._verify_connected() db_impl = self._impl.create_soda_database_impl(self) @@ -924,8 +1171,9 @@ def getSodaDatabase(self) -> SodaDatabase: def gettype(self, name: str) -> DbObjectType: """ - Return a type object given its name. This can then be used to create - objects which can be bound to cursors created by this connection. + Returns a :ref:`type object ` given its name. This can + then be used to create objects which can be bound to cursors created by + this connection. """ self._verify_connected() obj_type_impl = self._impl.get_type(self, name) @@ -934,9 +1182,10 @@ def gettype(self, name: str) -> DbObjectType: @property def handle(self) -> int: """ - Returns the OCI service context handle for the connection. It is - primarily provided to facilitate testing the creation of a connection - using the OCI service context handle. + This read-only attribute returns the Oracle Call Interface (OCI) + service context handle for the connection. It is primarily provided to + facilitate testing the creation of a connection using the OCI service + context handle. This property is only relevant to python-oracledb's thick mode. """ @@ -952,25 +1201,29 @@ def maxBytesPerCharacter(self) -> int: def ping(self) -> None: """ - Pings the database to verify the connection is valid. + Pings the database to verify if the connection is valid. 
An exception + is thrown if it is not, in which case the connection should not be used + by the application and a new connection should be established instead. + + This function performs a :ref:`round-trip ` to the + database, so it should not be used unnecessarily. + + Note connection pools will perform the same health check automatically, + based on configuration settings. See :ref:`poolhealth`. + + Also, see :meth:`is_healthy()` for a lightweight alternative. """ self._verify_connected() self._impl.ping() def prepare(self) -> bool: """ - Deprecated. Use tpc_prepare() instead. + Prepares the distributed (global) transaction for commit. Returns a + boolean indicating if a transaction was actually prepared in order to + avoid the error ``ORA-24756 (transaction does not exist)``. """ return self.tpc_prepare() - @property - def proxy_user(self) -> Union[str, None]: - """ - Returns the name of the proxy user, if applicable. - """ - self._verify_connected() - return self._impl.proxy_user - def rollback(self) -> None: """ Rolls back any pending transactions. @@ -980,9 +1233,10 @@ def rollback(self) -> None: def shutdown(self, mode: int = 0) -> None: """ - Shutdown the database. In order to do this the connection must be - connected as SYSDBA or SYSOPER. Two calls must be made unless the mode - specified is DBSHUTDOWN_ABORT. + Shuts down the database. In order to do this the connection must be + connected as :data:`~oracledb.SYSDBA` or :data:`~oracledb.SYSOPER`. Two + calls must be made unless the mode specified is + :data:`~oracledb.DBSHUTDOWN_ABORT`. """ self._verify_connected() self._impl.shutdown(mode) @@ -994,11 +1248,12 @@ def startup( pfile: Optional[str] = None, ) -> None: """ - Startup the database. This is equivalent to the SQL*Plus command - “startup nomount”. The connection must be connected as SYSDBA or - SYSOPER with the PRELIM_AUTH option specified for this to work. + Starts up the database. This is equivalent to the SQL*Plus command + ``startup nomount``. The connection must be connected as + :data:`~oracledb.SYSDBA` or :data:`~oracledb.SYSOPER` with the + :data:`~oracledb.PRELIM_AUTH` option specified for this to work. - The pfile parameter, if specified, is expected to be a string + The ``pfile`` parameter, if specified, is expected to be a string identifying the location of the parameter file (PFILE) which will be used instead of the stored parameter file (SPFILE). """ @@ -1028,66 +1283,77 @@ def subscribe( clientInitiated: bool = False, ) -> Subscription: """ - Return a new subscription object that receives notification for events - that take place in the database that match the given parameters. + Returns a new :ref:`subscription object ` that receives + notifications for events that take place in the database that match the + given parameters. - The namespace parameter specifies the namespace the subscription uses. - It can be one of SUBSCR_NAMESPACE_DBCHANGE or SUBSCR_NAMESPACE_AQ. + The ``namespace`` parameter specifies the namespace the subscription + uses. It can be one of :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` or + :data:`oracledb.SUBSCR_NAMESPACE_AQ`. - The protocol parameter specifies the protocol to use when notifications - are sent. Currently the only valid value is SUBSCR_PROTO_CALLBACK. + The ``protocol`` parameter specifies the protocol to use when + notifications are sent. Currently the only valid value is + :data:`oracledb.SUBSCR_PROTO_OCI`. - The callback is expected to be a callable that accepts a single - parameter. 
A message object is passed to this callback whenever a - notification is received. + The ``callback`` is expected to be a callable that accepts a single + parameter. A :ref:`message object ` is passed to this + callback whenever a notification is received. - The timeout value specifies that the subscription expires after the - given time in seconds. The default value of 0 indicates that the + The ``timeout`` value specifies that the subscription expires after the + given time in seconds. The default value of *0* indicates that the subscription never expires. - The operations parameter enables filtering of the messages that are + The ``operations`` parameter enables filtering of the messages that are sent (insert, update, delete). The default value will send notifications for all operations. This parameter is only used when the - namespace is set to SUBSCR_NAMESPACE_DBCHANGE. + namespace is set to :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE`. - The port parameter specifies the listening port for callback + The ``port`` parameter specifies the listening port for callback notifications from the database server. If not specified, an unused port will be selected by the Oracle Client libraries. - The qos parameter specifies quality of service options. It should be - one or more of the following flags, OR'ed together: - SUBSCR_QOS_RELIABLE, - SUBSCR_QOS_DEREG_NFY, - SUBSCR_QOS_ROWIDS, - SUBSCR_QOS_QUERY, - SUBSCR_QOS_BEST_EFFORT. - - The ip_address parameter specifies the IP address (IPv4 or IPv6) in - standard string notation to bind for callback notifications from the - database server. If not specified, the client IP address will be - determined by the Oracle Client libraries. - - The grouping_class parameter specifies what type of grouping of - notifications should take place. Currently, if set, this value can - only be set to the value SUBSCR_GROUPING_CLASS_TIME, which will group - notifications by the number of seconds specified in the grouping_value - parameter. The grouping_type parameter should be one of the values - SUBSCR_GROUPING_TYPE_SUMMARY (the default) or - SUBSCR_GROUPING_TYPE_LAST. - - The name parameter is used to identify the subscription and is specific - to the selected namespace. If the namespace parameter is - SUBSCR_NAMESPACE_DBCHANGE then the name is optional and can be any - value. If the namespace parameter is SUBSCR_NAMESPACE_AQ, however, the - name must be in the format '' for single consumer queues - and ':' for multiple consumer queues, and + The ``qos`` parameter specifies quality of service options. It should + be one or more of the following flags, OR'ed together: + :data:`oracledb.SUBSCR_QOS_RELIABLE`, + :data:`oracledb.SUBSCR_QOS_DEREG_NFY`, + :data:`oracledb.SUBSCR_QOS_ROWIDS`, :data:`oracledb.SUBSCR_QOS_QUERY`, + :data:`oracledb.SUBSCR_QOS_BEST_EFFORT`. + + The ``ip_address`` parameter specifies the IP address (*IPv4* or + *IPv6*) in standard string notation to bind for callback notifications + from the database server. If not specified, the client IP address will + be determined by the Oracle Client libraries. + + The ``grouping_class`` parameter specifies what type of grouping of + notifications should take place. Currently, if set, this value can only + be set to the value :data:`oracledb.SUBSCR_GROUPING_CLASS_TIME`, which + will group notifications by the number of seconds specified in the + ``grouping_value`` parameter. 
The ``grouping_type`` parameter should be + one of the values :data:`oracledb.SUBSCR_GROUPING_TYPE_SUMMARY` (the + default) or :data:`oracledb.SUBSCR_GROUPING_TYPE_LAST`. + + The ``name`` parameter is used to identify the subscription and is + specific to the selected namespace. If the namespace parameter is + :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` then the name is optional + and can be any value. If the namespace parameter is + :data:`oracledb.SUBSCR_NAMESPACE_AQ`, however, the name must be in the + format '' for single consumer queues and + ':' for multiple consumer queues, and identifies the queue that will be monitored for messages. The queue name may include the schema, if needed. - The client_initiated parameter is used to determine if client initiated - connections or server initiated connections (the default) will be - established. Client initiated connections are only available in Oracle - Client 19.4 and Oracle Database 19.4 and higher. + The ``client_initiated`` parameter is used to determine if client + initiated connections or server initiated connections (the default) + will be established. Client initiated connections are only available in + Oracle Client 19.4 and Oracle Database 19.4 and higher. + + For consistency and compliance with the PEP 8 naming style, the + parameter ``ipAddress`` was renamed to ``ip_address``, the parameter + ``groupingClass`` was renamed to ``grouping_class``, the parameter + ``groupingValue`` was renamed to ``grouping_value``, the parameter + ``groupingType`` was renamed to ``grouping_type`` and the parameter + ``clientInitiated`` was renamed to ``client_initiated``. The old names + will continue to work as keyword parameters for a period of time. """ self._verify_connected() if ipAddress is not None: @@ -1152,46 +1418,43 @@ def subscribe( def suspend_sessionless_transaction(self) -> None: """ - Suspends the currently active sessionless transaction. - - This temporarily detaches the transaction from the session, - allowing it to be resumed later using its transaction_id. + Suspends the currently active sessionless transaction immediately. - Returns: - None + This detaches the transaction from the connection, allowing it to be + resumed later with the transaction identifier that was specified during + creation of the sessionless transaction. The ``timeout`` previously + passed to :meth:`begin_sessionless_transaction()` determines how long + the transaction can stay suspended before it is automatically rolled + back. """ self._verify_connected() self._impl.suspend_sessionless_transaction() - @property - def tag(self) -> str: - """ - This property initially contains the actual tag of the session that was - acquired from a pool. If the connection was not acquired from a pool or - no tagging parameters were specified (tag and matchanytag) when the - connection was acquired from the pool, this value will be None. If the - value is changed, it must be a string containing name=value pairs like - “k1=v1;k2=v2”. - - If this value is not None when the connection is released back to the - pool it will be used to retag the session. This value can be overridden - in the call to SessionPool.release(). - """ - self._verify_connected() - return self._impl.tag - - @tag.setter - def tag(self, value: str) -> None: - self._verify_connected() - self._impl.tag = value - def tpc_begin( self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ - Begins a TPC (two-phase commit) transaction with the given transaction - id. 
This method should be called outside of a transaction (i.e. nothing - may have executed since the last commit() or rollback() was performed). + Begins a Two-Phase Commit (TPC) on a global transaction using the + specified transaction identifier (xid). + + The ``xid`` parameter should be an object returned by the + :meth:`xid()` method. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_BEGIN_JOIN`, :data:`oracledb.TPC_BEGIN_NEW`, + :data:`oracledb.TPC_BEGIN_PROMOTE`, or + :data:`oracledb.TPC_BEGIN_RESUME`. The default is + :data:`oracledb.TPC_BEGIN_NEW`. + + The ``timeout`` parameter is the number of seconds to wait for a + transaction to become available for resumption when + :data:`~oracledb.TPC_BEGIN_RESUME` is specified in the ``flags`` + parameter. When :data:`~oracledb.TPC_BEGIN_NEW` is specified in the + ``flags`` parameter, the ``timeout`` parameter indicates the number of + seconds the transaction can be inactive before it is automatically + terminated by the system. A transaction is inactive between the time it + is detached with :meth:`tpc_end()` and the time it is resumed with + :meth:`tpc_begin()`.The default is *0* seconds. """ self._verify_connected() self._verify_xid(xid) @@ -1208,18 +1471,24 @@ def tpc_commit( self, xid: Optional[Xid] = None, one_phase: bool = False ) -> None: """ - Prepare the global transaction for commit. Return a boolean indicating - if a transaction was actually prepared in order to avoid the error - ORA-24756 (transaction does not exist). + Commits a global transaction. When called with no arguments, this + method commits a transaction previously prepared with + :meth:`tpc_begin()` and optionally prepared with :meth:`tpc_prepare()`. + If :meth:`tpc_prepare()` is not called, a single phase commit is + performed. A transaction manager may choose to do this if only a single + resource is participating in the global transaction. - When called with no arguments, commits a transaction previously - prepared with tpc_prepare(). If tpc_prepare() is not called, a single - phase commit is performed. A transaction manager may choose to do this - if only a single resource is participating in the global transaction. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. This form should be called outside of a + transaction and is intended for use in recovery. - When called with a transaction id, the database commits the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + The ``one_phase`` parameter is a boolean identifying whether to perform + a one-phase or two-phase commit. If ``one_phase`` parameter is *True*, + a single-phase commit is performed. The default value is *False*. This + parameter is only examined if a value is provided for the ``xid`` + parameter. Otherwise, the driver already knows whether + :meth:`tpc_prepare()` was called for the transaction and whether a + one-phase or two-phase commit is required. """ self._verify_connected() if xid is not None: @@ -1230,7 +1499,21 @@ def tpc_end( self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ - Ends (detaches from) a TPC (two-phase commit) transaction. + Ends or suspends work on a global transaction. This function is only + intended for use by transaction managers. + + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. 
If no xid parameter is passed, then the + transaction identifier used by the previous :meth:`tpc_begin()` is + used. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_END_NORMAL` or :data:`oracledb.TPC_END_SUSPEND`. + The default is :data:`oracledb.TPC_END_NORMAL`. + + If the flag is :data:`oracledb.TPC_END_SUSPEND` then the transaction + may be resumed later by calling :meth:`tpc_begin()` with the flag + :data:`oracledb.TPC_BEGIN_RESUME`. """ self._verify_connected() if xid is not None: @@ -1241,7 +1524,12 @@ def tpc_end( def tpc_forget(self, xid: Xid) -> None: """ - Forgets a TPC (two-phase commit) transaction. + Causes the database to forget a heuristically completed TPC + transaction. This function is only intended to be called by + transaction managers. + + The ``xid`` parameter is mandatory and should be an object should be + returned by the :meth:`xid()` function. """ self._verify_connected() self._verify_xid(xid) @@ -1249,13 +1537,18 @@ def tpc_forget(self, xid: Xid) -> None: def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: """ - Prepares a global transaction for commit. After calling this function, - no further activity should take place on this connection until either - tpc_commit() or tpc_rollback() have been called. + Prepares a two-phase transaction for commit. After this function is + called, no further activity should take place on this connection until + either :meth:`tpc_commit()` or :meth:`tpc_rollback()` have been called. + + Returns a boolean indicating whether a commit is needed or not. If you + attempt to commit when not needed, then it results in the error + ``ORA-24756: transaction does not exist``. - A boolean is returned indicating whether a commit is needed or not. If - a commit is performed when one is not needed the error ORA-24756: - transaction does not exist is raised. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. If an ``xid`` parameter is not passed, then + the transaction identifier used by the previous :meth:`tpc_begin()` is + used. """ self._verify_connected() if xid is not None: @@ -1264,11 +1557,13 @@ def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: def tpc_recover(self) -> list: """ - Returns a list of pending transaction ids suitable for use with - tpc_commit() or tpc_rollback(). + Returns a list of pending transaction identifiers that require + recovery. Objects of type ``Xid`` (as returned by the + :meth:`xid()` function) are returned and these can be passed to + :meth:`tpc_commit()` or :meth:`tpc_rollback()` as needed. - This function requires select privilege on the view - DBA_PENDING_TRANSACTIONS. + This function queries the DBA_PENDING_TRANSACTIONS view and requires + "SELECT" privilege on that view. """ with self.cursor() as cursor: cursor.execute( @@ -1284,12 +1579,14 @@ def tpc_recover(self) -> list: def tpc_rollback(self, xid: Optional[Xid] = None) -> None: """ - When called with no arguments, rolls back the transaction previously - started with tpc_begin(). + If an ``xid`` parameter is not passed, then it rolls back the + transaction that was previously started with + :meth:`tpc_begin()`. - When called with a transaction id, the database rolls back the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + If an ``xid`` parameter is passed, then an object should be returned by + :meth:`xid()` and the specified transaction is rolled back. 
This form + should be called outside of a transaction and is intended for use in + recovery. """ self._verify_connected() if xid is not None: @@ -1299,9 +1596,9 @@ def tpc_rollback(self, xid: Optional[Xid] = None) -> None: def unsubscribe(self, subscr: Subscription) -> None: """ Unsubscribe from events in the database that were originally subscribed - to using subscribe(). The connection used to unsubscribe should be the - same one used to create the subscription, or should access the same - database and be connected as the same user name. + to using :meth:`subscribe()`. The connection used to unsubscribe should + be the same one used to create the subscription, or should access the + same database and be connected as the same user name. """ self._verify_connected() if not isinstance(subscr, Subscription): @@ -1683,6 +1980,10 @@ def __await__(self): return coroutine.__await__() async def __aenter__(self): + """ + The entry point for the asynchronous connection as a context manager. + It returns itself. + """ if self._connect_coroutine is not None: await self._connect_coroutine else: @@ -1690,6 +1991,11 @@ async def __aenter__(self): return self async def __aexit__(self, *exc_info): + """ + The exit point for the asynchronous connection as a context manager. + This will close the connection and roll back any uncommitted + transaction. + """ if self._impl is not None: await self._close() @@ -1792,19 +2098,33 @@ async def begin_sessionless_transaction( defer_round_trip: bool = False, ) -> bytes: """ - Begins a new sessionless transaction. - - Parameters: - transaction_id (str or bytes, optional): A Transaction Identifier. - If None, a random transaction_id will be generated. - timeout (int, optional): Timeout value in seconds. - Must be a positive integer. Defaults to 60 if not provided. - defer_round_trip (bool, optional): - If True, the request is not sent immediately but included - with the next database operation. - - Returns: - bytes: The normalized transaction_id used for the transaction. + Begins a new sessionless transaction. This method returns the + transaction identifier specified by the user or generated by + python-oracledb. + + The ``transaction_id`` parameter should be of type string or bytes. If + specified, it represents a unique identifier for the transaction. If a + string is passed, then it will be UTF-8 encoded to bytes. If this value + is not specified, then python-oracledb generates a a random + `universally-unique identifier (UUID) `__ value when this function is called. An example is + "36b8f84d-df4e-4d49-b662-bcde71a8764f". The user-chosen value cannot + exceed 64 bytes in length. + + The ``timeout`` parameter is the number of seconds that this + transaction can stay suspended when + :meth:`suspend_sessionless_transaction()` is later called, or if the + transaction is automatically suspended when the ``suspend_on_success`` + parameter is set to to *True* in :meth:`AsyncCursor.execute()` or + :meth:`AsyncCursor.executemany()`. The default value is *60* seconds. + If a transaction is not resumed within this specified duration, the + transaction will be rolled back. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to start a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. 
""" self._verify_connected() normalized_txnid = normalize_sessionless_transaction_id(transaction_id) @@ -1825,10 +2145,10 @@ async def callfunc( keyword_parameters: Optional[dict] = None, ) -> Any: """ - Call a PL/SQL function with the given name. + Calls a PL/SQL function with the given name. - This is a shortcut for creating a cursor, calling the stored function - with the cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.callfunc()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: return await cursor.callfunc( @@ -1842,10 +2162,10 @@ async def callproc( keyword_parameters: Optional[dict] = None, ) -> list: """ - Call a PL/SQL procedure with the given name. + Calls a PL/SQL procedure with the given name. - This is a shortcut for creating a cursor, calling the stored procedure - with the cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.callproc()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: return await cursor.callproc(name, parameters, keyword_parameters) @@ -1877,7 +2197,7 @@ async def createlob( self, lob_type: DbType, data: Optional[Union[str, bytes]] = None ) -> AsyncLOB: """ - Create and return a new temporary LOB of the specified type. + Creates and returns a new temporary LOB of the specified type. """ self._verify_connected() if lob_type not in (DB_TYPE_CLOB, DB_TYPE_NCLOB, DB_TYPE_BLOB): @@ -1894,7 +2214,8 @@ async def createlob( def cursor(self, scrollable: bool = False) -> AsyncCursor: """ - Returns a cursor associated with the connection. + Returns an :ref:`AsyncCursor object ` associated with + the connection. """ self._verify_connected() return AsyncCursor(self, scrollable) @@ -1905,10 +2226,10 @@ async def execute( parameters: Optional[Union[list, tuple, dict]] = None, ) -> None: """ - Execute a statement against the database. + Executes a statement against the database. - This is a shortcut for creating a cursor, executing a statement with - the cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.execute()`, and then :meth:`AsyncCursor.close()` """ with self.cursor() as cursor: await cursor.execute(statement, parameters) @@ -1917,12 +2238,22 @@ async def executemany( self, statement: Union[str, None], parameters: Any ) -> None: """ - Prepare a statement for execution against a database and then execute - it against all parameter mappings or sequences found in the sequence - parameters. + Executes a SQL statement once using all bind value mappings or + sequences found in the sequence parameters. This can be used to insert, + update, or delete multiple rows in a table with a single + python-oracledb call. It can also invoke a PL/SQL procedure multiple + times. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one bind variable placeholder in ``statement``. It can + also be a list of dictionaries, where the keys match the bind variable + placeholder names in ``statement``. If there are no bind values, or + values have previously been bound, the ``parameters`` value can be an + integer specifying the number of iterations. - This is a shortcut for creating a cursor, calling executemany() on the - cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.executemany()`, and then + :meth:`AsyncCursor.close()`. 
""" with self.cursor() as cursor: await cursor.executemany(statement, parameters) @@ -1935,8 +2266,15 @@ async def fetchall( rowfactory: Optional[Callable] = None, ) -> list: """ - Executes a query and returns all of the rows. After the rows are - fetched, the cursor is closed. + Executes a query and returns all of the rows. + + The default value for ``arraysize`` is :attr:`defaults.arraysize`. + + Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set + to the value of the explicit or default ``arraysize`` parameter value. + + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.fetchall()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: if arraysize is not None: @@ -1951,9 +2289,22 @@ async def fetch_df_all( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, - ): + ) -> DataFrame: """ - Fetch all data as an instance of DataFrame. + Fetches all rows of the SQL query ``statement``, returning them in a + :ref:`DataFrame ` object. An empty DataFrame is + returned if there are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``arraysize`` parameter can be specified to tune performance of + fetching data across the network. It defaults to + :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s + :attr:`Cursor.prefetchrows` size is always set to the value of the + explicit or default ``arraysize`` parameter value. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1968,9 +2319,23 @@ async def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, - ): + ) -> Iterator[DataFrame]: """ - Fetch data in batches. Each batch is an instance of DataFrame + This returns an iterator yielding the next ``size`` rows of the SQL + query ``statement`` in each iteration as a :ref:`DataFrame + ` object. An empty DataFrame is returned if there + are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``size`` parameter controls the number of records fetched in each + batch. It defaults to :attr:`defaults.arraysize`. Internally, the + ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + :attr:`Cursor.prefetchrows` sizes are always set to the value of the + explicit or default ``size`` parameter value. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1992,8 +2357,21 @@ async def fetchmany( rowfactory: Optional[Callable] = None, ) -> list: """ - Executes a query and returns up to the specified number of rows. After - the rows are fetched, the cursor is closed. + Executes a query and returns up to the specified number of rows. + + The default value for ``num_rows`` is the value of + :attr:`defaults.arraysize`. 
+ + Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set + to the value of the explicit or default ``num_rows`` parameter, + allowing all rows to be fetched in one :ref:`round-trip ` + + Since only one fetch is performed for a query, consider adding a + ``FETCH NEXT`` clause to the statement to prevent the database + processing rows that will never be fetched, see :ref:`rowlimit`. + + This a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.fetchmany()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: if num_rows is None: @@ -2013,8 +2391,18 @@ async def fetchone( ) -> Any: """ Executes a query and returns the first row of the result set if one - exists (or None if no rows exist). After the row is fetched the cursor - is closed. + exists (or *None* if no rows exist). + + Internally, this method's :attr:`Cursor.prefetchrows` and + :attr:`Cursor.arraysize` sizes will be set to *1*. + + Since only one fetch is performed for a query, consider adding a + ``WHERE`` condition or using a ``FETCH NEXT`` clause in the statement + to prevent the database processing rows that will never be fetched, see + :ref:`rowlimit`. + + This a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.fetchone()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: cursor.prefetchrows = cursor.arraysize = 1 @@ -2024,8 +2412,9 @@ async def fetchone( async def gettype(self, name: str) -> DbObjectType: """ - Return a type object given its name. This can then be used to create - objects which can be bound to cursors created by this connection. + Returns a :ref:`type object ` given its name. This can + then be used to create objects which can be bound to cursors created by + this connection. """ self._verify_connected() obj_type_impl = await self._impl.get_type(self, name) @@ -2033,7 +2422,7 @@ async def gettype(self, name: str) -> DbObjectType: async def ping(self) -> None: """ - Pings the database to verify the connection is valid. + Pings the database to verify if the connection is valid. """ self._verify_connected() await self._impl.ping() @@ -2045,22 +2434,38 @@ async def resume_sessionless_transaction( defer_round_trip: bool = False, ) -> bytes: """ - Resumes an existing sessionless transaction using the given - transaction_id. - - Parameters: - transaction_id (str or bytes): A Transaction Identifier that - uniquely identifies the sessionless transaction to be - resumed. This parameter is mandatory. - timeout (int, optional): Timeout in seconds for the resumed - transaction. Must be a positive integer. Defaults to 60. - defer_round_trip (bool, optional): - If True, the request is not sent immediately but included - with the next database operation. - - Returns: - bytes: The normalized transaction_id used to resume the - sessionless transaction. + Resumes an existing sessionless transaction using the specified + transaction identifier. This method returns the transaction identifier + used to resume the sessionless transaction. + + The ``transaction_id`` parameter should be a string or bytes value that + uniquely identifies an existing sessionless transaction that is to be + resumed. + + The ``timeout`` parameter is the number of seconds that the current + connection waits to resume a transaction if another connection is using + it. When ``defer_round_trip`` is set to *False*, the wait happens in + the ``resume_sessionless_transaction()`` call itself, and the function + blocks until the transaction becomes available or the timeout expires. 
+ When ``defer_round_trip`` is set to *True*, the resume is deferred and + the wait occurs at the time of the next database operation instead. At + the start of the wait period, if the transaction is not in use by any + other connection, the resume happens immediately. If the transaction + remains in use by the other connection after the timeout period, the + error `ORA-25351 + `__ is raised. If + another connection completes the transaction, the error `ORA-24756 + `__ is raised. + These error messages are only thrown for non-RAC instances. For + information on using Oracle RAC, see :ref:`Sessionless Transactions + with Oracle RAC `. The default value is *60* + seconds. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to resume a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. """ self._verify_connected() if transaction_id is None: @@ -2089,16 +2494,19 @@ async def run_pipeline( continue_on_error: bool = False, ) -> list: """ - Runs all of the operations in the pipeline on the connection. If the - database is Oracle Database 23ai or higher, the operations will be - performed in a single round trip, subject to the following caveats: - - queries that contain LOBs require an additional round trip - - queries that contain DbObject values may require multiple round - trips - - queries that fetch all of the rows may require multiple round - trips - For all other databases, the operations will be performed in the same - way as they would be performed independently of the pipeline. + Runs all of the operations in the :ref:`pipeline ` and + returns a list of :ref:`PipelineOpResult Objects + `, each entry corresponding to an operation + executed in the pipeline. + + The ``continue_on_error`` parameter determines whether operations + should continue to run after an error has occurred. If this parameter + is set to *True*, then the :attr:`PipelineOpResult.error` attribute + will be populated with an :ref:`_Error ` instance which + identifies the error that occurred. If this parameter is set to + *False*, then an exception will be raised as soon as an error is + detected and all subsequent operations will be terminated. The default + value is *False*. """ self._verify_connected() results = [op._create_result() for op in pipeline.operations] @@ -2114,13 +2522,14 @@ async def run_pipeline( async def suspend_sessionless_transaction(self) -> None: """ - Suspends the currently active sessionless transaction. - - This temporarily detaches the transaction from the session, - allowing it to be resumed later using its transaction_id. + Suspends the currently active sessionless transaction immediately. - Returns: - None + This detaches the transaction from the connection, allowing it to be + resumed later with the transaction identifier that was specified during + creation of the sessionless transaction. The ``timeout`` previously + passed to :meth:`AsyncConnection.begin_sessionless_transaction()` + determines how long the transaction can stay suspended before it is + automatically rolled back. 
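        As an aside on the :meth:`run_pipeline()` method documented just
        above, a minimal sketch (``conn`` is assumed to be an open
        asynchronous connection and ``mytab`` a placeholder table)::

            pipeline = oracledb.create_pipeline()
            pipeline.add_execute("insert into mytab (id) values (:1)", [1])
            pipeline.add_fetchall("select id from mytab order by id")
            results = await conn.run_pipeline(pipeline)
            for result in results:
                print(result.rows)  # None for the execute operation, fetched rows for the query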
""" self._verify_connected() await self._impl.suspend_sessionless_transaction() @@ -2129,9 +2538,28 @@ async def tpc_begin( self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ - Begins a TPC (two-phase commit) transaction with the given transaction - id. This method should be called outside of a transaction (i.e. nothing - may have executed since the last commit() or rollback() was performed). + Begins a Two-Phase Commit (TPC) on a global transaction using the + specified transaction identifier (xid). + + The ``xid`` parameter should be an object returned by the + :meth:`xid()` method. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_BEGIN_JOIN`, :data:`oracledb.TPC_BEGIN_NEW`, + :data:`oracledb.TPC_BEGIN_PROMOTE`, or + :data:`oracledb.TPC_BEGIN_RESUME`. The default is + :data:`oracledb.TPC_BEGIN_NEW`. + + The ``timeout`` parameter is the number of seconds to wait for a + transaction to become available for resumption when + :data:`~oracledb.TPC_BEGIN_RESUME` is specified in the ``flags`` + parameter. When :data:`~oracledb.TPC_BEGIN_NEW` is specified in the + ``flags`` parameter, the ``timeout`` parameter indicates the number of + seconds the transaction can be inactive before it is automatically + terminated by the system. A transaction is inactive between the time it + is detached with :meth:`AsyncConnection.tpc_end()` and the time it is + resumed with :meth:`AsyncConnection.tpc_begin()`.The default is *0* + seconds. """ self._verify_connected() self._verify_xid(xid) @@ -2148,18 +2576,25 @@ async def tpc_commit( self, xid: Optional[Xid] = None, one_phase: bool = False ) -> None: """ - Prepare the global transaction for commit. Return a boolean indicating - if a transaction was actually prepared in order to avoid the error - ORA-24756 (transaction does not exist). + Commits a global transaction. When called with no arguments, this + method commits a transaction previously prepared with + :meth:`~AsyncConnection.tpc_begin()` and optionally prepared with + :meth:`~AsyncConnection.tpc_prepare()`. If + :meth:`~AsyncConnection.tpc_prepare()` is not called, a single phase + commit is performed. A transaction manager may choose to do this if + only a single resource is participating in the global transaction. - When called with no arguments, commits a transaction previously - prepared with tpc_prepare(). If tpc_prepare() is not called, a single - phase commit is performed. A transaction manager may choose to do this - if only a single resource is participating in the global transaction. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`~Connection.xid()` function. This form should be called + outside of a transaction and is intended for use in recovery. - When called with a transaction id, the database commits the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + The ``one_phase`` parameter is a boolean identifying whether to perform + a one-phase or two-phase commit. If ``one_phase`` parameter is *True*, + a single-phase commit is performed. The default value is *False*. This + parameter is only examined if a value is provided for the ``xid`` + parameter. Otherwise, the driver already knows whether + :meth:`tpc_prepare()` was called for the transaction and whether a + one-phase or two-phase commit is required. 
""" self._verify_connected() if xid is not None: @@ -2170,7 +2605,21 @@ async def tpc_end( self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ - Ends (detaches from) a TPC (two-phase commit) transaction. + Ends or suspends work on a global transaction. This function is only + intended for use by transaction managers. + + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`~Connection.xid()` function. If no xid parameter is passed, + then the transaction identifier used by the previous + :meth:`~Connection.tpc_begin()` is used. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_END_NORMAL` or :data:`oracledb.TPC_END_SUSPEND`. + The default is :data:`oracledb.TPC_END_NORMAL`. + + If the flag is :data:`oracledb.TPC_END_SUSPEND` then the transaction + may be resumed later by calling :meth:`AsyncConnection.tpc_begin()` + with the flag :data:`oracledb.TPC_BEGIN_RESUME`. """ self._verify_connected() if xid is not None: @@ -2181,7 +2630,12 @@ async def tpc_end( async def tpc_forget(self, xid: Xid) -> None: """ - Forgets a TPC (two-phase commit) transaction. + Causes the database to forget a heuristically completed TPC + transaction. This function is only intended to be called by + transaction managers. + + The ``xid`` parameter is mandatory and should be an object should be + returned by the :meth:`xid()` function. """ self._verify_connected() self._verify_xid(xid) @@ -2189,13 +2643,18 @@ async def tpc_forget(self, xid: Xid) -> None: async def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: """ - Prepares a global transaction for commit. After calling this function, - no further activity should take place on this connection until either - tpc_commit() or tpc_rollback() have been called. + Prepares a two-phase transaction for commit. After this function is + called, no further activity should take place on this connection until + either :meth:`tpc_commit()` or :meth:`tpc_rollback()` have been called. + + Returns a boolean indicating whether a commit is needed or not. If you + attempt to commit when not needed, then it results in the error + ``ORA-24756: transaction does not exist``. - A boolean is returned indicating whether a commit is needed or not. If - a commit is performed when one is not needed the error ORA-24756: - transaction does not exist is raised. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. If an ``xid`` parameter is not passed, then + the transaction identifier used by the previous :meth:`tpc_begin()` is + used. """ self._verify_connected() if xid is not None: @@ -2204,11 +2663,13 @@ async def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: async def tpc_recover(self) -> list: """ - Returns a list of pending transaction ids suitable for use with - tpc_commit() or tpc_rollback(). + Returns a list of pending transaction identifiers that require + recovery. Objects of type ``Xid`` (as returned by the + :meth:`~Connection.xid()` function) are returned and these can be + passed to :meth:`tpc_commit()` or :meth:`tpc_rollback()` as needed. - This function requires select privilege on the view - DBA_PENDING_TRANSACTIONS. + This function queries the view ``DBA_PENDING_TRANSACTIONS`` and + requires ``SELECT`` privilege on that view. 
""" with self.cursor() as cursor: await cursor.execute( @@ -2224,12 +2685,16 @@ async def tpc_recover(self) -> list: async def tpc_rollback(self, xid: Optional[Xid] = None) -> None: """ - When called with no arguments, rolls back the transaction previously - started with tpc_begin(). + Rolls back a global transaction. + + If an ``xid`` parameter is not passed, then it rolls back the + transaction that was previously started with + :meth:`~AsyncConnection.tpc_begin()`. - When called with a transaction id, the database rolls back the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + If an ``xid`` parameter is passed, then an object should be returned by + :meth:`~Connection.xid()` and the specified transaction is rolled back. + This form should be called outside of a transaction and is intended for + use in recovery. """ self._verify_connected() if xid is not None: diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index eb11d227..e0eea0f0 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -49,7 +49,7 @@ def __init__( connection: "connection_module.Connection", scrollable: bool = False, ) -> None: - self.connection = connection + self._connection = connection self._impl = connection._impl.create_cursor_impl(scrollable) def __del__(self): @@ -57,10 +57,17 @@ def __del__(self): self._impl.close(in_del=True) def __enter__(self): + """ + The entry point for the cursor as a context manager. It returns itself. + """ self._verify_open() return self def __exit__(self, exc_type, exc_value, exc_tb): + """ + The exit point for the cursor as a context manager. It closes the + cursor. + """ self._verify_open() self._impl.close(in_del=True) self._impl = None @@ -158,17 +165,22 @@ def _verify_open(self) -> None: @property def arraysize(self) -> int: """ - Tunes the number of rows fetched and buffered by internal calls to the - database when fetching rows from SELECT statements and REF CURSORS. The - value can drastically affect the performance of a query since it - directly affects the number of network round trips between Python and - the database. For methods like fetchone() and fetchall() it does not - change how many rows are returned to the application. For fetchmany() + This read-write attribute can be used to tune the number of rows + internally fetched and buffered by internal calls to the database when + fetching rows from SELECT statements and REF CURSORS. + + The value of ``arraysize`` can drastically affect the performance of a + query since it directly affects the number of network round trips + between Python and the database. For methods like :meth:`fetchone()` + and :meth:`fetchall()` it affects internal behavior but does not change + how many rows are returned to the application. For :meth:`fetchmany()` it is the default number of rows to fetch. - Due to the performance benefits, the default value is 100 instead of - the 1 that the DB API recommends. This value means that 100 rows are - fetched by each internal call to the database. + The attribute is only used for tuning row and SODA document fetches + from the database. It does not affect data inserts. + + Due to the performance benefits, the default ``arraysize`` is *100* + instead of the *1* that the Python DB API recommends. 
""" self._verify_open() return self._impl.arraysize @@ -187,20 +199,15 @@ def arrayvar( size: int = 0, ) -> Var: """ - Create an array variable associated with the cursor of the given type - and size and return a variable object. The value is either an integer - specifying the number of elements to allocate or it is a list and the - number of elements allocated is drawn from the size of the list. If the - value is a list, the variable is also set with the contents of the - list. If the size is not specified and the type is a string or binary, - 4000 bytes is allocated. This is needed for passing arrays to PL/SQL - (in cases where the list might be empty and the type cannot be - determined automatically) or returning arrays from PL/SQL. - - Array variables can only be used for PL/SQL associative arrays with - contiguous keys. For PL/SQL associative arrays with sparsely populated - keys or for varrays and nested tables, the DbObject approach needs to - be used instead. + Creates an array variable associated with the cursor of the given type + and size and returns a :ref:`variable object `. The value is + either an integer specifying the number of elements to allocate or it + is a list and the number of elements allocated is drawn from the size + of the list. If the value is a list, the variable is also set with the + contents of the list. If the size is not specified and the type is a + string or binary, 4000 bytes is allocated. This is needed for passing + arrays to PL/SQL (in cases where the list might be empty and the type + cannot be determined automatically) or returning arrays from PL/SQL. """ self._verify_open() if isinstance(value, list): @@ -222,7 +229,7 @@ def arrayvar( def bindnames(self) -> list: """ - Return the list of bind variable names bound to the statement. Note + Returns the list of bind variable names bound to the statement. Note that a statement must have been prepared first. """ self._verify_open() @@ -233,17 +240,18 @@ def bindnames(self) -> list: @property def bindvars(self) -> list: """ - Returns the bind variables used for the last execute. The value will be - either a list or a dictionary depending on whether binding was done by - position or name. Care should be taken when referencing this attribute. - In particular, elements should not be removed or replaced. + This read-only attribute provides the bind variables used for the last + statement that was executed on the cursor. The value will be either a + list or a dictionary, depending on whether binding was done by position + or name. Care should be taken when referencing this attribute. In + particular, elements should not be removed or replaced. """ self._verify_open() return self._impl.get_bind_vars() def close(self) -> None: """ - Close the cursor now, rather than whenever __del__ is called. The + Closes the cursor now, rather than whenever ``__del__`` is called. The cursor will be unusable from this point forward; an Error exception will be raised if any operation is attempted with the cursor. """ @@ -252,13 +260,21 @@ def close(self) -> None: self._impl = None @property - def description(self) -> tuple: + def connection(self) -> "connection_module.Connection": + """ + This read-only attribute returns a reference to the connection object + on which the cursor was created. """ - Returns a sequence of 7-item sequences. Each of these sequences - contains information describing one result column: (name, type, - display_size, internal_size, precision, scale, null_ok). 
This - attribute will be None for operations that do not return rows or if the - cursor has not had an operation invoked via the execute() method yet. + return self._connection + + @property + def description(self) -> FetchInfo: + """ + This read-only attribute contains information about the columns used in + a query. It is a sequence of :ref:`FetchInfo ` objects, + one per column. This attribute will be *None* for statements that are + not SELECT or WITH statements, or if the cursor has not had + :meth:`execute()` invoked yet. """ self._verify_open() if self._impl.is_query(self): @@ -267,43 +283,50 @@ def description(self) -> tuple: @property def fetchvars(self) -> list: """ - Specifies the list of variables created for the last query that was - executed on the cursor. Care should be taken when referencing this - attribute. In particular, elements should not be removed or replaced. + This read-only attribute specifies the list of variables created for + the last SELECT query that was executed on the cursor. Care should be + taken when referencing this attribute. In particular, elements should + not be removed or replaced. """ self._verify_open() return self._impl.get_fetch_vars() def getarraydmlrowcounts(self) -> list: """ - Return the DML row counts after a call to executemany() with - arraydmlrowcounts enabled. This will return a list of integers + Retrieves the DML row counts after a call to :meth:`executemany()` with + ``arraydmlrowcounts`` enabled. This will return a list of integers corresponding to the number of rows affected by the DML statement for - each element of the array passed to executemany(). + each element of the array passed to :meth:`executemany()`. + + This method is only available for Oracle Database 12.1 and later. """ self._verify_open() return self._impl.get_array_dml_row_counts() def getbatcherrors(self) -> list: """ - Return the exceptions that took place after a call to executemany() - with batcherrors enabled. This will return a list of Error objects, one - error for each iteration that failed. The offset can be determined by - looking at the offset attribute of the error object. + Retrieves the exceptions that took place after a call to + :meth:`executemany()` with ``batcherrors`` enabled. This will + return a list of Error objects, one error for each iteration that + failed. The offset can be determined by looking at the offset attribute + of the error object. """ self._verify_open() return self._impl.get_batch_errors() def getimplicitresults(self) -> list: """ - Return a list of cursors which correspond to implicit results made + Returns a list of cursors which correspond to implicit results made available from a PL/SQL block or procedure without the use of OUT ref cursor parameters. The PL/SQL block or procedure opens the cursors and marks them for return to the client using the procedure - dbms_sql.return_result. Cursors returned in this fashion should not be - closed. They will be closed automatically by the parent cursor when it - is closed. Closing the parent cursor will invalidate the cursors - returned by this method. + dbms_sql.return_result. In python-oracledb Thick mode, closing the + parent cursor will result in the automatic closure of the implicit + result set cursors. See :ref:`implicitresults`. + + This method is only available for Oracle Database 12.1 (or later). For + python-oracledb :ref:`Thick ` mode, Oracle Client 12.1 + (or later) is additionally required. 
""" self._verify_open() return self._impl.get_implicit_results(self.connection) @@ -311,12 +334,14 @@ def getimplicitresults(self) -> list: @property def inputtypehandler(self) -> Callable: """ - Specifies a method called for each value that is bound to a statement - executed on this cursor. The method signature is handler(cursor, value, - arraysize) and the return value is expected to be a variable object or - None in which case a default variable object will be created. If this - attribute is None, the default behavior will take place for all values - bound to statements. + This read-write attribute specifies a method called for each value that + is bound to a statement executed on the cursor and overrides the + attribute with the same name on the connection if specified. The method + signature is handler(cursor, value, arraysize) and the return value is + expected to be a variable object or *None* in which case a default + variable object will be created. If this attribute is *None*, the + default behavior will take place for all values bound to the + statements. """ self._verify_open() return self._impl.inputtypehandler @@ -329,9 +354,9 @@ def inputtypehandler(self, value: Callable) -> None: @property def lastrowid(self) -> str: """ - Returns the rowid of the last row modified by the cursor. If no row was - modified by the last operation performed on the cursor, the value None - is returned. + This read-only attribute returns the rowid of the last row modified by + the cursor. If no row was modified by the last operation performed on + the cursor, the value *None* is returned. """ self._verify_open() return self._impl.get_lastrowid() @@ -339,12 +364,13 @@ def lastrowid(self) -> str: @property def outputtypehandler(self) -> Callable: """ - Specifies a method called for each column that is going to be fetched - from this cursor. The method signature is handler(cursor, name, - defaultType, length, precision, scale) and the return value is expected - to be a variable object or None in which case a default variable object - will be created. If this attribute is None, the default behavior will - take place for all columns fetched from this cursor. + This read-write attribute specifies a method called for each column + that is to be fetched from this cursor. The method signature is + handler(cursor, metadata) and the return value is expected to be a + :ref:`variable object ` or *None* in which case a default + variable object will be created. If this attribute is *None*, then the + default behavior will take place for all columns fetched from this + cursor. """ self._verify_open() return self._impl.outputtypehandler @@ -357,11 +383,22 @@ def outputtypehandler(self, value: Callable) -> None: @property def prefetchrows(self) -> int: """ - Used to tune the number of rows fetched when a SELECT statement is - executed. This value can reduce the number of round-trips to the - database that are required to fetch rows but at the cost of additional - memory. Setting this value to 0 can be useful when the timing of - fetches must be explicitly controlled. + This read-write attribute can be used to tune the number of rows that + python-oracledb initially fetches from Oracle Database when a SELECT + query is executed. The value can improve performance by reducing the + number of round-trips to the database. The attribute does not affect + data insertion. + + In python-oracledb Thin mode, prefetching can reuse the + :attr:`arraysize` buffer. However in Thick mode, extra memory is + required. 
+ + Setting this value to *0* can be useful when the timing of fetches must + be explicitly controlled. + + Queries that return :ref:`LOB ` objects and similar types do + not support prefetching. The ``prefetchrows`` attribute is ignored in + queries that involve these types. """ self._verify_open() return self._impl.prefetchrows @@ -375,15 +412,19 @@ def prepare( self, statement: str, tag: str = None, cache_statement: bool = True ) -> None: """ - This can be used before a call to execute() to define the statement - that will be executed. When this is done, the prepare phase will not be - performed when the call to execute() is made with None or the same - string object as the statement. If the tag parameter is specified and - the cache_statement parameter is True, the statement will be returned - to the statement cache with the given tag. If the cache_statement - parameter is False, the statement will be removed from the statement - cache (if it was found there) or will simply not be cached. See the - Oracle documentation for more information about the statement cache. + This can be used before a call to :meth:`execute()` or + :meth:`executemany()` to define the statement that will be + executed. When this is done, the prepare phase will not be performed + when the call to :meth:`execute()` or :meth:`executemany()` is made + with *None* or the same string object as the statement. + + If the ``tag`` parameter is specified and the ``cache_statement`` + parameter is *True*, the statement will be returned to the statement + cache with the given tag. + + If the ``cache_statement`` parameter is *False*, the statement will be + removed from the statement cache (if it was found there) or will simply + not be cached. """ self._verify_open() self._prepare(statement, tag, cache_statement) @@ -392,10 +433,10 @@ def prepare( def rowcount(self) -> int: """ This read-only attribute specifies the number of rows that have - currently been fetched from the cursor (for select statements), that - have been affected by the operation (for insert, update, delete and - merge statements), or the number of successful executions of the - statement (for PL/SQL statements). + currently been fetched from the cursor (for select statements) or that + have been affected by the operation (for insert, update, delete, and + merge statements). For all other statements the value is always *0*. If + the cursor or connection is closed, the value returned is *-1*. """ if self._impl is not None and self.connection._impl is not None: return self._impl.rowcount @@ -404,10 +445,11 @@ def rowcount(self) -> int: @property def rowfactory(self) -> Callable: """ - Specifies a method to call for each row that is retrieved from the - database. Ordinarily a tuple is returned for each row but if this - attribute is set, the method is called with the tuple that would - normally be returned, and the result of the method is returned instead. + This read-write attribute specifies a method to call for each row that + is retrieved from the database. Ordinarily, a tuple is returned for + each row but if this attribute is set, the method is called with the + tuple that would normally be returned, and the result of the method is + returned instead. """ self._verify_open() return self._impl.rowfactory @@ -420,11 +462,11 @@ def rowfactory(self, value: Callable) -> None: @property def scrollable(self) -> bool: """ - Specifies whether the cursor can be scrolled or not. 
By default, - cursors are not scrollable, as the server resources and response times - are greater than for nonscrollable cursors. This attribute is checked - and the corresponding mode set in Oracle when calling the method - execute(). + This read-write boolean attribute specifies whether the cursor can be + scrolled or not. By default, cursors are not scrollable, as the server + resources and response times are greater than nonscrollable cursors. + This attribute is checked and the corresponding mode set in Oracle when + calling the method :meth:`execute()`. """ self._verify_open() return self._impl.scrollable @@ -436,14 +478,28 @@ def scrollable(self, value: bool) -> None: def setinputsizes(self, *args: Any, **kwargs: Any) -> Union[list, dict]: """ - This can be used before a call to execute(), callfunc() or callproc() - to predefine memory areas for the operation’s parameters. Each - parameter should be a type object corresponding to the input that will - be used or it should be an integer specifying the maximum length of a - string parameter. Use keyword parameters when binding by name and - positional parameters when binding by position. The singleton None can - be used as a parameter when using positional parameters to indicate - that no space should be reserved for that position. + This can be used before calls to :meth:`execute()` or + :meth:`executemany()` to predefine memory areas used for + :ref:`bind variables `. Each parameter should be a type object + corresponding to the data that will be used for a bind variable + placeholder in the SQL or PL/SQL statement. Alternatively, it can be an + integer specifying the maximum length of a string bind variable value. + + Use keyword parameters when :ref:`binding by name `. Use + positional parameters when :ref:`binding by position `. + The parameter value can be *None* to indicate that python-oracledb + should determine the required space from the data value provided. + + The parameters or keyword names correspond to the bind variable + placeholders used in the SQL or PL/SQL statement. Note this means that + for use with :meth:`executemany()` it does not correspond to the number + of bind value mappings or sequences being passed. + + When repeated calls to :meth:`execute()` or :meth:`executemany()` are + made binding different string data lengths, using + :meth:`setinputsizes()` can help reduce the database's SQL "version + count" for the statement. See + :ref:`Reducing the SQL Version Count `. """ if args and kwargs: errors._raise_err(errors.ERR_ARGS_AND_KEYWORD_ARGS) @@ -454,15 +510,18 @@ def setinputsizes(self, *args: Any, **kwargs: Any) -> Union[list, dict]: def setoutputsize(self, size: int, column: int = 0) -> None: """ - Sets a column buffer size for fetches of long columns. However - python-oracledb does not require it so this method does nothing. + This method does nothing and is retained solely for compatibility with + the DB API. Python-oracledb automatically allocates as much space as + needed to fetch LONG and LONG RAW columns, and also to fetch CLOB as + string and BLOB as bytes. """ pass @property def statement(self) -> Union[str, None]: """ - Returns the statement associated with the cursor, if one is present. + This read-only attribute provides the string object that was previously + prepared with :meth:`prepare()` or executed with :meth:`execute()`. 
""" if self._impl is not None: return self._impl.statement @@ -482,57 +541,70 @@ def var( encodingErrors: str = None, ) -> "Var": """ - Create a variable with the specified characteristics. This method was - designed for use with PL/SQL in/out variables where the length or type - cannot be determined automatically from the Python object passed in or - for use in input and output type handlers defined on cursors or - connections. - - The typ parameter specifies the type of data that should be stored - in the variable. This should be one of the database type constants, DB - API constants, an object type returned from the method - Connection.gettype() or one of the following Python types: - - Python Type Database Type - bool DB_TYPE_BOOLEAN - bytes DB_TYPE_RAW - datetime.date DB_TYPE_DATE - datetime.datetime DB_TYPE_DATE - datetime.timedelta DB_TYPE_INTERVAL_DS - decimal.Decimal DB_TYPE_NUMBER - float DB_TYPE_NUMBER - int DB_TYPE_NUMBER - str DB_TYPE_VARCHAR - - The size parameter specifies the length of string and raw variables and - is ignored in all other cases. If not specified for string and raw - variables, the value 4000 is used. - - The arraysize parameter specifies the number of elements the variable - will have. If not specified the bind array size (usually 1) is used. - When a variable is created in an output type handler this parameter - should be set to the cursor’s array size. - - The inconverter and outconverter parameters specify methods used for - converting values to/from the database. More information can be found - in the section on variable objects. - - The typename parameter specifies the name of a SQL object type and must - be specified when using type DB_TYPE_OBJECT unless the type object - was passed directly as the first parameter. - - The encoding_errors parameter specifies what should happen when + Creates a :ref:`variable object ` with the specified + characteristics. This method can be used for binding to PL/SQL IN and + OUT parameters where the length or type cannot be determined + automatically from the Python variable being bound. It can also be used + in :ref:`input ` and :ref:`output + ` type handlers. + + The ``typ`` parameter specifies the type of data that should be stored + in the variable. This should be one of the :ref:`database type + constants `, :ref:`DB API constants `, an object type + returned from the method :meth:`Connection.gettype()` or one of the + following Python types: + + - bool (uses :attr:`oracledb.DB_TYPE_BOOLEAN`) + - bytes (uses :attr:`oracledb.DB_TYPE_RAW`) + - datetime.date (uses :attr:`oracledb.DB_TYPE_DATE`) + - datetime.datetime (uses :attr:`oracledb.DB_TYPE_DATE`) + - datetime.timedelta (uses :attr:`oracledb.DB_TYPE_INTERVAL_DS`) + - decimal.Decimal (uses :attr:`oracledb.DB_TYPE_NUMBER`) + - float (uses :attr:`oracledb.DB_TYPE_NUMBER`) + - int (uses :attr:`oracledb.DB_TYPE_NUMBER`) + - str (uses :attr:`oracledb.DB_TYPE_VARCHAR`) + + The ``size`` parameter specifies the length of string and raw variables + and is ignored in all other cases. If not specified for string and raw + variables, the value *4000* is used. + + The ``arraysize`` parameter specifies the number of elements the + variable will have. If not specified the bind array size (usually *1*) + is used. When a variable is created in an output type handler this + parameter should be set to the cursor's array size. + + The ``inconverter`` and ``outconverter`` parameters specify methods + used for converting values to/from the database. 
More information can + be found in the section on :ref:`variable objects`. + + The ``typename`` parameter specifies the name of a SQL object type and + must be specified when using type :data:`oracledb.OBJECT` unless the + type object was passed directly as the first parameter. + + The ``encoding_errors`` parameter specifies what should happen when decoding byte strings fetched from the database into strings. It should - be one of the values noted in the builtin decode function. - - The bypass_decode parameter, if specified, should be passed as a - boolean value. Passing a True value causes values of database types - DB_TYPE_VARCHAR, DB_TYPE_CHAR, DB_TYPE_NVARCHAR, DB_TYPE_NCHAR and - DB_TYPE_LONG to be returned as bytes instead of str, meaning that - oracledb doesn't do any decoding. - - The convert_nulls parameter specifies whether the outconverter should - be called when null values are fetched from the database. + be one of the values noted in the builtin `decode + `__ + function. + + The ``bypass_decode`` parameter, if specified, should be passed as a + boolean value. Passing a *True* value causes values of database types + :data:`~oracledb.DB_TYPE_VARCHAR`, :data:`~oracledb.DB_TYPE_CHAR`, + :data:`~oracledb.DB_TYPE_NVARCHAR`, :data:`~oracledb.DB_TYPE_NCHAR` and + :data:`~oracledb.DB_TYPE_LONG` to be returned as bytes instead of str, + meaning that python-oracledb does not do any decoding. See + :ref:`Fetching raw data ` for more information. + + The ``convert_nulls`` parameter, if specified, should be passed as a + boolean value. Passing the value *True* causes the ``outconverter`` to + be called when a null value is fetched from the database; otherwise, + the ``outconverter`` is only called when non-null values are fetched + from the database. + + For consistency and compliance with the PEP 8 naming style, the + parameter ``encodingErrors`` was renamed to ``encoding_errors``. The + old name will continue to work as a keyword parameter for a period of + time. """ self._verify_open() if typename is not None: @@ -562,10 +634,13 @@ def var( @property def warning(self) -> Union[errors._Error, None]: """ - Returns any warning that was generated during the last call to - execute() or executemany(), or the value None if no warning was - generated. This value will be cleared on the next call to execute() or - executemany(). + This read-only attribute provides an + :ref:`oracledb._Error` object giving information about any + database warnings (such as PL/SQL compilation warnings) that were + generated during the last call to :meth:`execute()` or + :meth:`executemany()`. This value is automatically cleared on + the next call to :meth:`execute()` or :meth:`executemany()`. If no + warning was generated the value *None* is returned. """ self._verify_open() return self._impl.warning @@ -575,6 +650,9 @@ class Cursor(BaseCursor): __module__ = MODULE_NAME def __iter__(self): + """ + Returns the cursor itself to be used as an iterator. + """ return self def __next__(self): @@ -612,11 +690,21 @@ def callfunc( keywordParameters: Optional[dict] = None, ) -> Any: """ - Call a function with the given name. The return type is specified in - the same notation as is required by setinputsizes(). The sequence of - parameters must contain one entry for each parameter that the function - expects. Any keyword parameters will be included after the positional - parameters. The result of the call is the return value of the function. + Calls a PL/SQL function with the given name and returns its value. 
+ + The ``return_type`` parameter is expected to be a Python type, one of + the :ref:`oracledb types ` or an + :ref:`Object Type `. + + The sequence of parameters must contain one entry for each parameter + that the PL/SQL function expects. Any keyword parameters will be + included after the positional parameters. + + Use :meth:`var()` to define any OUT or IN OUT parameters, if necessary. + + For consistency and compliance with the PEP 8 naming style, the + parameter ``keywordParameters`` was renamed to ``keyword_parameters``. + The old name will continue to work for a period of time. """ var = self.var(return_type) if keywordParameters is not None: @@ -639,13 +727,24 @@ def callproc( keywordParameters: Optional[dict] = None, ) -> list: """ - Call a procedure with the given name. The sequence of parameters must - contain one entry for each parameter that the procedure expects. The - result of the call is a modified copy of the input sequence. Input - parameters are left untouched; output and input/output parameters are - replaced with possibly new values. Keyword parameters will be included - after the positional parameters and are not returned as part of the - output sequence. + Calls a PL/SQL procedure with the given name. + + The sequence of parameters must contain one entry for each parameter + that the procedure expects. The result of the call is a modified copy + of the input sequence. Input parameters are left untouched; output and + input/output parameters are replaced with possibly new values. Keyword + parameters will be included after the positional parameters and are not + returned as part of the output sequence. + + Use :meth:`var()` to define any OUT or IN OUT parameters if necessary. + + No query result set is returned by this method. Instead, use + :ref:`REF CURSOR ` parameters or + :ref:`Implicit Results `. + + For consistency and compliance with the PEP 8 naming style, the + parameter ``keywordParameters`` was renamed to ``keyword_parameters``. + The old name will continue to work for a period of time. """ if keywordParameters is not None: if keyword_parameters is not None: @@ -670,7 +769,7 @@ def execute( **keyword_parameters: Any, ) -> Any: """ - Execute a statement against the database. + Executes a statement against the database. See :ref:`sqlexecution`. Parameters may be passed as a dictionary or sequence or as keyword parameters. If the parameters are a dictionary, the values will be @@ -685,28 +784,29 @@ def execute( maps to the bind variable name used by the statement and the value maps to the Python value you wish bound to that bind variable. - A reference to the statement will be retained by the cursor. If None or - the same string object is passed in again, the cursor will execute that - statement again without performing a prepare or rebinding and - redefining. This is most effective for algorithms where the same + A reference to the statement will be retained by the cursor. If *None* + or the same string object is passed in again, the cursor will execute + that statement again without performing a prepare or rebinding and + redefining. This is most effective for algorithms where the same statement is used, but different parameters are bound to it (many times). Note that parameters that are not passed in during subsequent executions will retain the value passed in during the last execution that contained them. 
- For maximum efficiency when reusing an statement, it is best to use the - setinputsizes() method to specify the parameter types and sizes ahead - of time; in particular, None is assumed to be a string of length 1 so - any values that are later bound as numbers or dates will raise a - TypeError exception. + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active + sessionless transaction will be suspended when ``execute()`` completes + successfully. See :ref:`suspendtxns`. - If the statement is a query, the cursor is returned as a convenience to - the caller (so it can be used directly as an iterator over the rows in - the cursor); otherwise, None is returned. + For maximum efficiency when reusing a statement, it is best to use the + :meth:`Cursor.setinputsizes()` method to specify the parameter types + and sizes ahead of time; in particular, *None* is assumed to be a + string of length 1 so any values that are later bound as numbers or + dates will raise a TypeError exception. - suspend_on_success parameter is specific to sessionless transactions. - When set to True, the active sessionless transaction will be suspended - after the successful execution of the current statement. + If the statement is a SELECT query, the cursor is returned as a + convenience to the caller (so it can be used directly as an iterator + over the rows in the cursor); otherwise, *None* is returned. """ self._prepare_for_execute(statement, parameters, keyword_parameters) self._impl.suspend_on_success = suspend_on_success @@ -724,46 +824,55 @@ def executemany( suspend_on_success: bool = False, ) -> None: """ - Prepare a statement for execution against a database and then execute - it against all parameter mappings or sequences found in the sequence - parameters. - - The statement is managed in the same way as the execute() method - manages it. If the size of the buffers allocated for any of the - parameters exceeds 2 GB and you are using the thick implementation, you - will receive the error “DPI-1015: array size of is too large”, - where varies with the size of each element being allocated in the - buffer. If you receive this error, decrease the number of elements in - the sequence parameters. - - If there are no parameters, or parameters have previously been bound, - the number of iterations can be specified as an integer instead of - needing to provide a list of empty mappings or sequences. - - A data frame can also be supplied as the parameters, in which case the - Arrow arrays found within it are extracted and used as the parameters. - - When true, the batcherrors parameter enables batch error support within - Oracle and ensures that the call succeeds even if an exception takes - place in one or more of the sequence of parameters. The errors can then - be retrieved using getbatcherrors(). - - When true, the arraydmlrowcounts parameter enables DML row counts to be - retrieved from Oracle after the method has completed. The row counts - can then be retrieved using getarraydmlrowcounts(). - - Both the batcherrors parameter and the arraydmlrowcounts parameter can - only be true when executing an insert, update, delete or merge - statement; in all other cases an error will be raised. 
- - For maximum efficiency, it is best to use the setinputsizes() method to - specify the parameter types and sizes ahead of time; in particular, - None is assumed to be a string of length 1 so any values that are later - bound as numbers or dates will raise a TypeError exception. - - suspend_on_success parameter is specific to sessionless transactions. - When set to True, the active sessionless transaction will be suspended - after the successful execution of the current statement. + Executes a SQL statement once using all bind value mappings or + sequences found in the sequence parameters. This can be used to insert, + update, or delete multiple rows in a table with a single + python-oracledb call. It can also invoke a PL/SQL procedure multiple + times. See :ref:`batchstmnt`. + + The ``statement`` parameter is managed in the same way as the + :meth:`execute()` method manages it. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one bind variable placeholder in ``statement``. It can + also be a list of dictionaries, where the keys match the bind variable + placeholder names in ``statement``. If there are no bind values, or + values have previously been bound, the ``parameters`` value can be an + integer specifying the number of iterations. The ``parameters`` + parameter can also be a :ref:`DataFrame `, or a + third-party data frame that supports the `Apache Arrow PyCapsule + `__ Interface. + + In python-oracledb Thick mode, if the size of the buffers allocated for + any of the parameters exceeds 2 GB, you will receive the error + ``DPI-1015: array size of is too large``. If you receive this + error, decrease the number of rows being inserted. + + When *True*, the ``batcherrors`` parameter enables batch error support + within Oracle Database and ensures that the call succeeds even if an + exception takes place in one or more of the sequence of bind values. + The errors can then be retrieved using :meth:`getbatcherrors()`. + + When *True*, the ``arraydmlrowcounts`` parameter enables DML row counts + to be retrieved from Oracle after the method has completed. The row + counts can then be retrieved using + :meth:`getarraydmlrowcounts()`. + + Both the ``batcherrors`` parameter and the ``arraydmlrowcounts`` + parameter can only be *True* when executing an insert, update, delete, + or merge statement; in all other cases an error will be raised. + + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active + sessionless transaction will be suspended when ``executemany()`` + completes successfully. See :ref:`suspendtxns`. + + For maximum efficiency, it is best to use the :meth:`setinputsizes()` + method to specify the bind value types and sizes. In particular, if the + type is not explicitly specified, the value *None* is assumed to be a + string of length 1 so any values that are later bound as numbers or + dates will raise a TypeError exception. """ self._verify_open() num_execs = self._impl._prepare_for_executemany( @@ -777,14 +886,19 @@ def executemany( def fetchall(self) -> list: """ - Fetch all (remaining) rows of a query result, returning them as a list - of tuples. An empty list is returned if no more rows are available. - Note that the cursor’s arraysize attribute can affect the performance - of this operation, as internally reads from the database are done in - batches corresponding to the arraysize. 
+ Fetches all (remaining) rows of a SELECT query result, returning them + as a list of tuples. An empty list is returned if no more rows are + available. An exception is raised if the previous call to + :meth:`execute()` did not produce any result set or no call was issued + yet. - An exception is raised if the previous call to execute() did not - produce any result set or no call was issued yet. + Note that the cursor's :attr:`~Cursor.arraysize` attribute can affect + the performance of this operation, as internally data is fetched in + batches of that size from the database. See :ref:`Tuning Fetch + Performance `. + + An exception is raised if the previous call to :meth:`execute()` did + not produce any result set or no call was issued yet. """ self._verify_fetch() result = [] @@ -800,20 +914,19 @@ def fetchmany( self, size: Optional[int] = None, numRows: Optional[int] = None ) -> list: """ - Fetch the next set of rows of a query result, returning a list of - tuples. An empty list is returned if no more rows are available. Note - that the cursor’s arraysize attribute can affect the performance of - this operation. + Fetches the next set of rows of a SELECT query result, returning a list + of tuples. An empty list is returned if no more rows are available. + Note that the cursor's :attr:`arraysize` attribute can affect the + performance of this operation. - The number of rows to fetch is specified by the parameter (the second - parameter is retained for backwards compatibility and should not be - used). If it is not given, the cursor’s arraysize attribute determines + The number of rows to fetch is specified by the ``size`` parameter. If + it is not given, the cursor's :attr:`arraysize` attribute determines the number of rows to be fetched. If the number of rows available to be fetched is fewer than the amount requested, fewer rows will be returned. - An exception is raised if the previous call to execute() did not - produce any result set or no call was issued yet. + An exception is raised if the previous call to :meth:`execute()` did + not produce any result set or no call was issued yet. """ self._verify_fetch() if size is None: @@ -838,11 +951,14 @@ def fetchmany( def fetchone(self) -> Any: """ - Fetch the next row of a query result set, returning a single tuple or - None when no more data is available. + Fetches the next row of a SELECT query result set, returning a single + tuple or *None* when no more data is available. An exception is raised + if the previous call to :meth:`execute()` did not produce any result + set or no call was issued yet. - An exception is raised if the previous call to execute() did not - produce any result set or no call was issued yet. + When ``fetchone()`` is used to iterate over a result set, the cursor’s + :attr:`arraysize` attribute can affect performance, as internally data + is fetched in batches of that size from Oracle Database. """ self._verify_fetch() return self._impl.fetch_next_row(self) @@ -850,8 +966,8 @@ def fetchone(self) -> Any: def parse(self, statement: str) -> None: """ This can be used to parse a statement without actually executing it - (this step is done automatically by Oracle when a statement is - executed). + (parsing step is done automatically by Oracle when a statement is + :meth:`executed `). 
""" self._verify_open() self._prepare(statement) @@ -859,16 +975,16 @@ def parse(self, statement: str) -> None: def scroll(self, value: int = 0, mode: str = "relative") -> None: """ - Scroll the cursor in the result set to a new position according to the + Scrolls the cursor in the result set to a new position according to the mode. - If mode is "relative" (the default value), the value is taken as an - offset to the current position in the result set. If set to "absolute", - value states an absolute target position. If set to "first", the cursor - is positioned at the first row and if set to "last", the cursor is set + If mode is *relative* (the default value), the value is taken as an + offset to the current position in the result set. If set to *absolute*, + value states an absolute target position. If set to *first*, the cursor + is positioned at the first row and if set to *last*, the cursor is set to the last row in the result set. - An error is raised if the mode is "relative" or "absolute" and the + An error is raised if the mode is *relative* or *absolute* and the scroll operation would position the cursor outside of the result set. """ self._verify_open() @@ -879,15 +995,25 @@ class AsyncCursor(BaseCursor): __module__ = MODULE_NAME async def __aenter__(self): + """ + The entry point for the cursor as a context manager. It returns itself. + """ self._verify_open() return self async def __aexit__(self, *exc_info): + """ + The exit point for the cursor as a context manager. It closes the + cursor. + """ self._verify_open() self._impl.close(in_del=True) self._impl = None def __aiter__(self): + """ + Returns the cursor itself to be used as an asynchronous iterator. + """ return self async def __anext__(self): @@ -905,11 +1031,17 @@ async def callfunc( keyword_parameters: Optional[dict] = None, ) -> Any: """ - Call a function with the given name. The return type is specified in - the same notation as is required by setinputsizes(). The sequence of - parameters must contain one entry for each parameter that the function - expects. Any keyword parameters will be included after the positional - parameters. The result of the call is the return value of the function. + Calls a PL/SQL function with the given name and returns its value. + + The ``return_type`` parameter is expected to be a Python type, one of + the :ref:`oracledb types ` or an :ref:`Object Type + `. + + The sequence of parameters must contain one entry for each parameter + that the PL/SQL function expects. Any keyword parameters will be + included after the positional parameters. + + Use :meth:`var()` to define any OUT or IN OUT parameters, if necessary. """ var = self.var(return_type) await self._call(name, parameters, keyword_parameters, var) @@ -922,13 +1054,20 @@ async def callproc( keyword_parameters: Optional[dict] = None, ) -> list: """ - Call a procedure with the given name. The sequence of parameters must - contain one entry for each parameter that the procedure expects. The - result of the call is a modified copy of the input sequence. Input - parameters are left untouched; output and input/output parameters are - replaced with possibly new values. Keyword parameters will be included - after the positional parameters and are not returned as part of the - output sequence. + Calls a PL/SQL procedure with the given name. + + The sequence of parameters must contain one entry for each parameter + that the procedure expects. The result of the call is a modified copy + of the input sequence. 
Input parameters are left untouched; output and + input/output parameters are replaced with possibly new values. Keyword + parameters will be included after the positional parameters and are not + returned as part of the output sequence. + + Use :meth:`var()` to define any OUT or IN OUT parameters if necessary. + + No query result set is returned by :meth:`callproc()`. Instead, use + :ref:`REF CURSOR ` parameters or :ref:`Implicit Results + `. """ await self._call(name, parameters, keyword_parameters) if parameters is None: @@ -945,7 +1084,7 @@ async def execute( **keyword_parameters: Any, ) -> None: """ - Execute a statement against the database. + Executes a statement against the database. See :ref:`sqlexecution`. Parameters may be passed as a dictionary or sequence or as keyword parameters. If the parameters are a dictionary, the values will be @@ -960,24 +1099,29 @@ async def execute( maps to the bind variable name used by the statement and the value maps to the Python value you wish bound to that bind variable. - A reference to the statement will be retained by the cursor. If None or - the same string object is passed in again, the cursor will execute that - statement again without performing a prepare or rebinding and - redefining. This is most effective for algorithms where the same + A reference to the statement will be retained by the cursor. If *None* + or the same string object is passed in again, the cursor will execute + that statement again without performing a prepare or rebinding and + redefining. This is most effective for algorithms where the same statement is used, but different parameters are bound to it (many times). Note that parameters that are not passed in during subsequent executions will retain the value passed in during the last execution that contained them. - For maximum efficiency when reusing an statement, it is best to use the - setinputsizes() method to specify the parameter types and sizes ahead - of time; in particular, None is assumed to be a string of length 1 so - any values that are later bound as numbers or dates will raise a - TypeError exception. + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active + sessionless transaction will be suspended when ``execute()`` completes + successfully. See :ref:`suspendtxns`. + + For maximum efficiency when reusing a statement, it is best to use the + :meth:`setinputsizes()` method to specify the parameter types and sizes + ahead of time; in particular, *None* is assumed to be a string of + length 1 so any values that are later bound as numbers or dates will + raise a TypeError exception. - suspend_on_success parameter is specific to sessionless transactions. - When set to True, the active sessionless transaction will be suspended - after the successful execution of the current statement. + If the statement is a SELECT query, the cursor is returned as a + convenience to the caller (so it can be used directly as an iterator + over the rows in the cursor); otherwise, *None* is returned. """ self._prepare_for_execute(statement, parameters, keyword_parameters) self._impl.suspend_on_success = suspend_on_success @@ -992,43 +1136,54 @@ async def executemany( suspend_on_success: bool = False, ) -> None: """ - Prepare a statement for execution against a database and then execute - it against all parameter mappings or sequences found in the sequence - parameters. - - The statement is managed in the same way as the execute() method - manages it. 
If the size of the buffers allocated for any of the - parameters exceeds 2 GB and you are using the thick implementation, you - will receive the error “DPI-1015: array size of is too large”, - where varies with the size of each element being allocated in the - buffer. If you receive this error, decrease the number of elements in - the sequence parameters. - - If there are no parameters, or parameters have previously been bound, - the number of iterations can be specified as an integer instead of - needing to provide a list of empty mappings or sequences. - - When true, the batcherrors parameter enables batch error support within - Oracle and ensures that the call succeeds even if an exception takes - place in one or more of the sequence of parameters. The errors can then - be retrieved using getbatcherrors(). - - When true, the arraydmlrowcounts parameter enables DML row counts to be - retrieved from Oracle after the method has completed. The row counts - can then be retrieved using getarraydmlrowcounts(). - - Both the batcherrors parameter and the arraydmlrowcounts parameter can - only be true when executing an insert, update, delete or merge - statement; in all other cases an error will be raised. - - For maximum efficiency, it is best to use the setinputsizes() method to - specify the parameter types and sizes ahead of time; in particular, - None is assumed to be a string of length 1 so any values that are later - bound as numbers or dates will raise a TypeError exception. - - suspend_on_success parameter is specific to sessionless transactions. - When set to True, the active sessionless transaction will be suspended - after the successful execution of the current statement. + Executes a SQL statement once using all bind value mappings or + sequences found in the sequence parameters. This can be used to insert, + update, or delete multiple rows in a table with a single + python-oracledb call. It can also invoke a PL/SQL procedure multiple + times. See :ref:`batchstmnt`. + + The ``statement`` parameter is managed in the same way as the + :meth:`execute()` method manages it. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one bind variable placeholder in ``statement``. It can + also be a list of dictionaries, where the keys match the bind variable + placeholder names in ``statement``. If there are no bind values, or + values have previously been bound, the ``parameters`` value can be an + integer specifying the number of iterations. The ``parameters`` + parameter can also be a :ref:`DataFrame `, or a + third-party data frame that supports the `Apache Arrow PyCapsule + `__ Interface. + + In python-oracledb Thick mode, if the size of the buffers allocated for + any of the parameters exceeds 2 GB, you will receive the error + ``DPI-1015: array size of is too large``. If you receive this + error, decrease the number of rows being inserted. + + When True, the ``batcherrors`` parameter enables batch error support + within Oracle and ensures that the call succeeds even if an exception + takes place in one or more of the sequence of parameters. The errors + can then be retrieved using :meth:`getbatcherrors()`. + + When True, the ``arraydmlrowcounts`` parameter enables DML row counts + to be retrieved from Oracle after the method has completed. The row + counts can then be retrieved using :meth:`getarraydmlrowcounts()`. 
+ + Both the ``batcherrors`` parameter and the ``arraydmlrowcounts`` + parameter can only be True when executing an insert, update, delete, or + merge statement. In all other cases, an error will be raised. + + The ``suspend_on_success`` parameter is specific to :ref:`sessionless + transactions `. When set to *True*, the active + sessionless transaction will be suspended when ``executemany()`` + completes successfully. See :ref:`suspendtxns`. + + For maximum efficiency, it is best to use the :meth:`setinputsizes()` + method to specify the parameter types and sizes ahead of time. In + particular, the value *None* is assumed to be a string of length 1 so + any values that are later bound as numbers or dates will raise a + TypeError exception. """ self._verify_open() num_execs = self._impl._prepare_for_executemany( @@ -1042,14 +1197,15 @@ async def executemany( async def fetchall(self) -> list: """ - Fetch all (remaining) rows of a query result, returning them as a list - of tuples. An empty list is returned if no more rows are available. - Note that the cursor’s arraysize attribute can affect the performance - of this operation, as internally reads from the database are done in - batches corresponding to the arraysize. + Fetches all (remaining) rows of a SELECT query result, returning them + as a list of tuples. An empty list is returned if no more rows are + available. An exception is raised if the previous call to + :meth:`execute()` did not produce any result set or no call was issued + yet. - An exception is raised if the previous call to execute() did not - produce any result set or no call was issued yet. + Note that the cursor's :attr:`~AsyncCursor.arraysize` attribute can + affect the performance of this operation, as internally data is fetched + in batches of that size from the database. """ self._verify_fetch() result = [] @@ -1063,20 +1219,18 @@ async def fetchall(self) -> list: async def fetchmany(self, size: Optional[int] = None) -> list: """ - Fetch the next set of rows of a query result, returning a list of - tuples. An empty list is returned if no more rows are available. Note - that the cursor’s arraysize attribute can affect the performance of - this operation. + Fetches the next set of rows of a SELECT query result, returning a list + of tuples. An empty list is returned if no more rows are available. + Note that the cursor's :attr:`arraysize` attribute can affect the + performance of this operation. - The number of rows to fetch is specified by the parameter (the second - parameter is retained for backwards compatibility and should not be - used). If it is not given, the cursor’s arraysize attribute determines - the number of rows to be fetched. If the number of rows available to be - fetched is fewer than the amount requested, fewer rows will be - returned. + The number of rows to fetch is specified by the parameter. If it is not + given, the cursor's :attr:`arraysize` attribute determines the number + of rows to be fetched. If the number of rows available to be fetched is + fewer than the amount requested, fewer rows will be returned. - An exception is raised if the previous call to execute() did not - produce any result set or no call was issued yet. + An exception is raised if the previous call to :meth:`execute()` did + not produce any result set or no call was issued yet. 
""" self._verify_fetch() if size is None: @@ -1092,11 +1246,14 @@ async def fetchmany(self, size: Optional[int] = None) -> list: async def fetchone(self) -> Any: """ - Fetch the next row of a query result set, returning a single tuple or - None when no more data is available. + Fetches the next row of a SELECT query result set, returning a single + tuple or *None* when no more data is available. An exception is raised + if the previous call to :meth:`execute()` did not produce any result + set or no call was issued yet. - An exception is raised if the previous call to execute() did not - produce any result set or no call was issued yet. + When ``fetchone()`` is used to iterate over a result set, the cursor’s + :attr:`arraysize` attribute can affect performance, as internally data + is fetched in batches of that size from Oracle Database. """ self._verify_fetch() return await self._impl.fetch_next_row(self) @@ -1104,8 +1261,8 @@ async def fetchone(self) -> Any: async def parse(self, statement: str) -> None: """ This can be used to parse a statement without actually executing it - (this step is done automatically by Oracle when a statement is - executed). + (parsing step is done automatically by Oracle when a statement is + :meth:`executed `). """ self._verify_open() self._prepare(statement) @@ -1113,16 +1270,16 @@ async def parse(self, statement: str) -> None: async def scroll(self, value: int = 0, mode: str = "relative") -> None: """ - Scroll the cursor in the result set to a new position according to the + Scrolls the cursor in the result set to a new position according to the mode. - If mode is "relative" (the default value), the value is taken as an - offset to the current position in the result set. If set to "absolute", - value states an absolute target position. If set to "first", the cursor - is positioned at the first row and if set to "last", the cursor is set + If mode is *relative* (the default value), the value is taken as an + offset to the current position in the result set. If set to *absolute*, + value states an absolute target position. If set to *first*, the cursor + is positioned at the first row and if set to *last*, the cursor is set to the last row in the result set. - An error is raised if the mode is "relative" or "absolute" and the + An error is raised if the mode is *relative* or *absolute* and the scroll operation would position the cursor outside of the result set. """ self._verify_open() diff --git a/utils/templates/connection.py b/utils/templates/connection.py index fb4e4317..7cddf7df 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -34,7 +34,7 @@ import collections import functools import ssl -from typing import Any, Callable, Type, Optional, Union +from typing import Any, Callable, Iterator, Type, Optional, Union import oracledb @@ -46,6 +46,7 @@ from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType from .connect_params import ConnectParams from .cursor import AsyncCursor, Cursor +from .dataframe import DataFrame from .dbobject import DbObjectType, DbObject from .lob import AsyncLOB, LOB from .pipeline import Pipeline @@ -92,25 +93,25 @@ def _verify_xid(self, xid: Xid) -> None: raise TypeError(message) @property - def action(self) -> None: + def action(self) -> str: + """ + This write-only attribute sets the ACTION column in the V$SESSION view. + It is a string attribute but the value *None* is accepted and treated + as an empty string. 
+ """ raise AttributeError("action is not readable") @action.setter def action(self, value: str) -> None: - """ - Specifies the action column in the v$session table. It is a string - attribute but the value None is also accepted and treated as an empty - string. - """ self._verify_connected() self._impl.set_action(value) @property def autocommit(self) -> bool: """ - Specifies whether autocommit mode is on or off. When autocommit mode is - on, all statements are committed as soon as they have completed - executing successfully. + This read-write attribute determines whether autocommit mode is on or + off. When autocommit mode is on, all statements are committed as soon + as they have completed executing. """ self._verify_connected() return self._impl.autocommit @@ -127,19 +128,33 @@ def begin_sessionless_transaction( defer_round_trip: bool = False, ) -> bytes: """ - Begins a new sessionless transaction. - - Parameters: - transaction_id (str or bytes, optional): A Transaction Identifier. - If None, a random transaction_id will be generated. - timeout (int, optional): Timeout value in seconds. - Must be a positive integer. Defaults to 60 if not provided. - defer_round_trip (bool, optional): - If True, the request is not sent immediately but included - with the next database operation. - - Returns: - bytes: The normalized transaction_id used for the transaction. + Begins a new sessionless transaction. This method returns the + transaction identifier specified by the user or generated by + python-oracledb. + + The ``transaction_id`` parameter should be of type string or bytes. If + specified, it represents a unique identifier for the transaction. If a + string is passed, then it will be UTF-8 encoded to bytes. If this value + is not specified, then python-oracledb generates a a random + `universally-unique identifier (UUID) + `__ value. An example is + "36b8f84d-df4e-4d49-b662-bcde71a8764f". Any user-chosen value cannot + exceed 64 bytes in length. + + The ``timeout`` parameter is the number of seconds that this + transaction can stay suspended when + :meth:`suspend_sessionless_transaction()` is later called, + or if the transaction is automatically suspended when the + ``suspend_on_success`` parameter is set to to *True* in + :meth:`Cursor.execute()` or :meth:`Cursor.executemany()`. The default + value is *60* seconds. If a transaction is not resumed within this + specified duration, the transaction will be rolled back. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to start a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. """ self._verify_connected() normalized_txnid = normalize_sessionless_transaction_id(transaction_id) @@ -155,9 +170,23 @@ def begin_sessionless_transaction( @property def call_timeout(self) -> int: """ - Specifies the amount of time (in milliseconds) that a single round-trip - to the database may take before a timeout will occur. A value of 0 - means that no timeout will take place. + This read-write attribute specifies the amount of time (in + milliseconds) that a single round-trip to the database may take before + a timeout will occur. A value of *0* means that no timeout will take + place. + + In python-oracledb Thick mode, this attribute is only available in + Oracle Client 18c or later. 
+ + If a timeout occurs, the error ``DPI-1067`` will be returned if the + connection is still usable. Alternatively the error ``DPI-1080`` will + be returned if the connection has become invalid and can no longer be + used. + + For consistency and compliance with the PEP 8 naming style, the + attribute ``callTimeout`` was renamed to ``call_timeout``. The old name + will continue to work for a period of time. The error ``DPI-1080`` was + also introduced in this release. """ self._verify_connected() return self._impl.get_call_timeout() @@ -169,25 +198,30 @@ def call_timeout(self, value: int) -> None: def cancel(self) -> None: """ - Break a long-running transaction. + Breaks a long-running statement. """ self._verify_connected() self._impl.cancel() @property - def client_identifier(self) -> None: + def client_identifier(self) -> str: + """ + This write-only attribute sets the CLIENT_IDENTIFIER column in the + V$SESSION view. + """ raise AttributeError("client_identifier is not readable") @client_identifier.setter def client_identifier(self, value: str) -> None: - """ - Specifies the client_identifier column in the v$session table. - """ self._verify_connected() self._impl.set_client_identifier(value) @property - def clientinfo(self) -> None: + def clientinfo(self) -> str: + """ + This write-only attribute sets the CLIENT_INFO column in the V$SESSION + view. + """ raise AttributeError("clientinfo is not readable") @clientinfo.setter @@ -201,11 +235,12 @@ def clientinfo(self, value: str) -> None: @property def current_schema(self) -> str: """ - Specifies the current schema for the session. Setting this value is the - same as executing the SQL statement "ALTER SESSION SET CURRENT_SCHEMA". - The attribute is set (and verified) on the next call that does a round - trip to the server. The value is placed before unqualified database - objects in SQL statements you then execute. + This read-write attribute sets the current schema attribute for the + session. Setting this value is the same as executing the SQL statement + ``ALTER SESSION SET CURRENT_SCHEMA``. The attribute is set (and + verified) on the next call that does a round trip to the server. The + value is placed before unqualified database objects in SQL statements + you then execute. """ self._verify_connected() return self._impl.get_current_schema() @@ -216,86 +251,98 @@ def current_schema(self, value: str) -> None: self._impl.set_current_schema(value) @property - def dbop(self) -> None: - raise AttributeError("dbop is not readable") - - @dbop.setter - def dbop(self, value: str) -> None: + def db_domain(self) -> str: """ - Specifies the database operation that is to be monitored. This can be - viewed in the DBOP_NAME column of the V$SQL_MONITOR table. + This read-only attribute specifies the Oracle Database domain name + associated with the connection. It is the same value returned by the + SQL ``SELECT value FROM V$PARAMETER WHERE NAME = 'db_domain'``. """ self._verify_connected() - self._impl.set_dbop(value) + return self._impl.get_db_domain() @property - def dsn(self) -> str: + def db_name(self) -> str: """ - Specifies the connection string (TNS entry) of the database to which a - connection has been established. + This read-only attribute specifies the Oracle Database name associated + with the connection. It is the same value returned by the SQL ``SELECT + NAME FROM V$DATABASE``. 
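+
+        For example, assuming ``connection`` is an already opened
+        connection::
+
+            print(connection.db_name)   # prints a name such as "ORCLCDB"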
""" self._verify_connected() - return self._impl.dsn + return self._impl.get_db_name() @property - def econtext_id(self) -> None: - raise AttributeError("econtext_id is not readable") - - @econtext_id.setter - def econtext_id(self, value: str) -> None: + def dbop(self) -> str: """ - Specifies the execution context id. This value can be found as ecid in - the v$session table and econtext_id in the auditing tables. The maximum - length is 64 bytes. + This write-only attribute sets the database operation that is to be + monitored. This can be viewed in the DBOP_NAME column of the + V$SQL_MONITOR view. """ + raise AttributeError("dbop is not readable") + + @dbop.setter + def dbop(self, value: str) -> None: self._verify_connected() - self._impl.set_econtext_id(value) + self._impl.set_dbop(value) - @property - def db_domain(self) -> str: + def decode_oson(self, data: bytes) -> Any: """ - Specifies the name of the database domain. + Decodes `OSON-encoded + `__ bytes and returns the + object encoded in those bytes. This is useful for fetching columns + which have the check constraint ``IS JSON FORMAT OSON`` enabled. """ self._verify_connected() - return self._impl.get_db_domain() + return self._impl.decode_oson(data) @property - def db_name(self) -> str: + def dsn(self) -> str: """ - Specifies the name of the database. + This read-only attribute returns the TNS entry of the database to which + a connection has been established. """ self._verify_connected() - return self._impl.get_db_name() + return self._impl.dsn @property - def session_id(self) -> int: + def econtext_id(self) -> str: """ - Specifies the session identifier. + This write-only attribute specifies the execution context id. This + value can be found as the ECID column in the V$SESSION view and + ECONTEXT_ID in the auditing tables. The maximum length is 64 bytes. """ + raise AttributeError("econtext_id is not readable") + + @econtext_id.setter + def econtext_id(self, value: str) -> None: self._verify_connected() - return self._impl.get_session_id() + self._impl.set_econtext_id(value) @property - def serial_num(self) -> int: + def edition(self) -> str: """ - Specifies the session serial number. + This read-only attribute gets the session edition and is only available + with Oracle Database 11.2, or later. """ self._verify_connected() - return self._impl.get_serial_num() + return self._impl.get_edition() - @property - def edition(self) -> str: + def encode_oson(self, value: Any) -> bytes: """ - Specifies the session edition. + Encodes a Python value into `OSON-encoded + `__ bytes and returns + them. This is useful for inserting into columns which have the check + constraint ``IS JSON FORMAT OSON`` enabled. """ self._verify_connected() - return self._impl.get_edition() + return self._impl.encode_oson(value) @property def external_name(self) -> str: """ - Specifies the external name that is used by the connection when logging - distributed transactions. + This read-write attribute specifies the external name that is used by + the connection when logging distributed transactions. """ self._verify_connected() return self._impl.get_external_name() @@ -308,12 +355,13 @@ def external_name(self, value: str) -> None: @property def inputtypehandler(self) -> Callable: """ - Specifies a method called for each value that is bound to a statement - executed on any cursor associated with this connection. 
The method - signature is handler(cursor, value, arraysize) and the return value is - expected to be a variable object or None in which case a default - variable object will be created. If this attribute is None, the default - behavior will take place for all values bound to statements. + This read-write attribute specifies a method called for each value that + is bound to a statement executed on any cursor associated with this + connection. The method signature is handler(cursor, value, arraysize) + and the return value is expected to be a variable object or *None* in + which case a default variable object will be created. If this attribute + is *None*, the default behavior will take place for all values bound to + statements. """ self._verify_connected() return self._impl.inputtypehandler @@ -326,10 +374,9 @@ def inputtypehandler(self, value: Callable) -> None: @property def instance_name(self) -> str: """ - Returns the instance name associated with the connection. This is the - equivalent of the SQL expression: - - sys_context('userenv', 'instance_name') + This read-only attribute specifies the Oracle Database instance name + associated with the connection. It is the same value as the SQL + expression ``sys_context('userenv', 'instance_name')``. """ self._verify_connected() return self._impl.get_instance_name() @@ -337,8 +384,8 @@ def instance_name(self) -> str: @property def internal_name(self) -> str: """ - Specifies the internal name that is used by the connection when logging - distributed transactions. + This read-write attribute specifies the internal name that is used by + the connection when logging distributed transactions. """ self._verify_connected() return self._impl.get_internal_name() @@ -350,32 +397,40 @@ def internal_name(self, value: str) -> None: def is_healthy(self) -> bool: """ - Returns a boolean indicating the health status of a connection. + This function returns a boolean indicating the health status of a + connection. - Connections may become unusable in several cases, such as if the + Connections may become unusable in several cases, such as, if the network socket is broken, if an Oracle error indicates the connection - is unusable, or after receiving a planned down notification from the + is unusable, or, after receiving a planned down notification from the database. This function is best used before starting a new database request on an - existing standalone connection. Pooled connections internally perform - this check before returning a connection to the application. + existing :ref:`standalone connections `. For + pooled connections, the :meth:`ConnectionPool.acquire()` method + internally performs this check before returning a connection to the + application, see :ref:`poolhealth`. - If this function returns False, the connection should be not be used by - the application and a new connection should be established instead. + If this function returns *False*, the connection should be not be used + by the application and a new connection should be established instead. This function performs a local check. To fully check a connection's - health, use ping() which performs a round-trip to the database. + health, use :meth:`ping()` which performs a round-trip to + the database. """ return self._impl is not None and self._impl.get_is_healthy() @property def ltxid(self) -> bytes: """ - Returns the logical transaction id for the connection. It is used - within Oracle Transaction Guard as a means of ensuring that - transactions are not duplicated. 
See the Oracle documentation and the - provided sample for more information. + This read-only attribute returns the logical transaction id for the + connection. It is used within Oracle Transaction Guard as a means of + ensuring that transactions are not duplicated. See :ref:`tg` for more + information. + + This is only available with Oracle Database 12.1 or later. In + python-oracledb Thick mode, it also requires Oracle Client libraries + 12.1 or later. """ self._verify_connected() return self._impl.get_ltxid() @@ -383,8 +438,15 @@ def ltxid(self) -> bytes: @property def max_identifier_length(self) -> int: """ - Returns the maximum length of identifiers supported by the database to - which this connection has been established. + This read-only attribute specifies the maximum database identifier + length in bytes supported by the database to which the connection has + been established. See `Database Object Naming Rules + `__. The value may be + *None*, *30*, or *128*. The value *None* indicates the size cannot be + reliably determined by python-oracledb, which occurs when using Thick + mode with Oracle Client libraries 12.1 (or older) to connect to Oracle + Database 12.2, or later. """ self._verify_connected() return self._impl.get_max_identifier_length() @@ -392,23 +454,26 @@ def max_identifier_length(self) -> int: @property def max_open_cursors(self) -> int: """ - Specifies the maximum number of cursors that the database can have open - concurrently. + This read-only attribute specifies the maximum number of cursors that + the database can have open concurrently. It is the same value returned + by the SQL ``SELECT VALUE FROM V$PARAMETER WHERE NAME = + 'open_cursors'``. When using python-oracledb Thick mode, Oracle Client + libraries 12.1 (or later) are required. """ self._verify_connected() return self._impl.get_max_open_cursors() @property - def module(self) -> None: + def module(self) -> str: + """ + This write-only attribute sets the MODULE column in the V$SESSION view. + The maximum length for this string is 48 and if you exceed this length + you will get ``ORA-24960``. + """ raise AttributeError("module is not readable") @module.setter def module(self, value: str) -> None: - """ - Specifies the module column in the v$session table. The maximum length - for this string is 48 and if you exceed this length you will get - ORA-24960. - """ self._verify_connected() self._impl.set_module(value) @@ -423,9 +488,11 @@ def msgproperties( recipients: Optional[list] = None, ) -> MessageProperties: """ - Create and return a message properties object. If the parameters are - not None, they act as a shortcut for setting each of the equivalently - named properties. + Returns an object specifying the properties of messages used in + advanced queuing. See :ref:`msgproperties` for more information. + + Each of the parameters are optional. If specified, they act as a + shortcut for setting each of the equivalently named properties. """ impl = self._impl.create_msg_props_impl() props = MessageProperties._from_impl(impl) @@ -453,16 +520,21 @@ def queue( payloadType: Optional[DbObjectType] = None, ) -> Queue: """ - Creates and returns a queue which is used to enqueue and dequeue - messages in Advanced Queueing (AQ). + Creates a :ref:`queue ` which is used to enqueue and dequeue + messages in Advanced Queuing. + + The ``name`` parameter is expected to be a string identifying the queue + in which messages are to be enqueued or dequeued. 
- The name parameter is expected to be a string identifying the queue in - which messages are to be enqueued or dequeued. + The ``payload_type`` parameter, if specified, is expected to be an + :ref:`object type ` that identifies the type of payload + the queue expects. If the string "JSON" is specified, JSON data is + enqueued and dequeued. If not specified, RAW data is enqueued and + dequeued. - The payload_type parameter, if specified, is expected to be an - object type that identifies the type of payload the queue expects. - If the string "JSON" is specified, JSON data is enqueued and dequeued. - If not specified, RAW data is enqueued and dequeued. + For consistency and compliance with the PEP 8 naming style, the + parameter ``payloadType`` was renamed to ``payload_type``. The old name + will continue to work as a keyword parameter for a period of time. """ self._verify_connected() payload_type_impl = None @@ -489,13 +561,13 @@ def queue( @property def outputtypehandler(self) -> Callable: """ - Specifies a method called for each column that is going to be fetched - from any cursor associated with this connection. The method signature - is handler(cursor, name, defaultType, length, precision, scale) and the - return value is expected to be a variable object or None in which case - a default variable object will be created. If this attribute is None, - the default behavior will take place for all columns fetched from - cursors associated with this connection. + This read-write attribute specifies a method called for each column + that is going to be fetched from any cursor associated with this + connection. The method signature is ``handler(cursor, metadata)`` and + the return value is expected to be a :ref:`variable object` or + *None* in which case a default variable object will be created. If this + attribute is *None*, the default behavior will take place for all + columns fetched from cursors. """ self._verify_connected() return self._impl.outputtypehandler @@ -505,6 +577,15 @@ def outputtypehandler(self, value: Callable) -> None: self._verify_connected() self._impl.outputtypehandler = value + @property + def proxy_user(self) -> Union[str, None]: + """ + This read-only attribute returns the name of the user which was used as + a proxy when creating the connection to the database. + """ + self._verify_connected() + return self._impl.proxy_user + def resume_sessionless_transaction( self, transaction_id: Union[str, bytes], @@ -512,22 +593,38 @@ def resume_sessionless_transaction( defer_round_trip: bool = False, ) -> bytes: """ - Resumes an existing sessionless transaction using the given - transaction_id. - - Parameters: - transaction_id (str or bytes): A Transaction Identifier that - uniquely identifies the sessionless transaction to be - resumed. This parameter is mandatory. - timeout (int, optional): Timeout in seconds for the resumed - transaction. Must be a positive integer. Defaults to 60. - defer_round_trip (bool, optional): - If True, the request is not sent immediately but included - with the next database operation. - - Returns: - bytes: The normalized transaction_id used to resume the - sessionless transaction. + Resumes an existing sessionless transaction using the specified + transaction identifier. This method returns the transaction identifier + used to resume the sessionless transaction. + + The ``transaction_id`` parameter should be a string or bytes value that + uniquely identifies an existing sessionless transaction that is to be + resumed. 
+ + The ``timeout`` parameter is the number of seconds that the current + connection waits to resume a transaction if another connection is using + it. When ``defer_round_trip`` is set to *False*, the wait happens in + the ``resume_sessionless_transaction()`` call itself, and the function + blocks until the transaction becomes available or the timeout expires. + When ``defer_round_trip`` is set to *True*, the resume is deferred and + the wait occurs at the time of the next database operation instead. At + the start of the wait period, if the transaction is not in use by any + other connection, the resume happens immediately. If the transaction + remains in use by the other connection after the timeout period, the + error `ORA-25351 + `__ is raised. If + another connection completes the transaction, the error `ORA-24756 + `__ is raised. + These error messages are only thrown for non-RAC instances. For + information on using Oracle RAC, see :ref:`Sessionless Transactions + with Oracle RAC `. The default value is *60* + seconds. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to resume a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. """ self._verify_connected() if transaction_id is None: @@ -546,27 +643,67 @@ def resume_sessionless_transaction( @property def sdu(self) -> int: """ - Specifies the size of the Session Data Unit (SDU) that is being used by - the connection. + This read-only attribute specifies the size of the Session Data Unit + (SDU) that is being used by the connection. The value will be the + lesser of the requested python-oracledb size and the maximum size + allowed by the database network configuration. It is available only in + python-oracledb Thin mode. """ self._verify_connected() return self._impl.get_sdu() + @property + def serial_num(self) -> int: + """ + This read-only attribute specifies the session serial number associated + with the connection. It is the same value returned by the SQL ``SELECT + SERIAL# FROM V$SESSION WHERE SID=SYS_CONTEXT('USERENV', 'SID')``. It + is available only in python-oracledb Thin mode. + + For applications using :ref:`drcp`, the ``serial_num`` attribute may + not contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a + round-trip. + """ + self._verify_connected() + return self._impl.get_serial_num() + @property def service_name(self) -> str: """ - Specifies the name of the service that was used to connect to the - database. + This read-only attribute specifies the Oracle Database service name + associated with the connection. This is the same value returned by the + SQL ``SELECT SYS_CONTEXT('USERENV', 'SERVICE_NAME') FROM DUAL``. """ self._verify_connected() return self._impl.get_service_name() + @property + def session_id(self) -> int: + """ + This read-only attribute specifies the session identifier associated + with the connection. It is the same value returned by the SQL ``SELECT + SYS_CONTEXT('USERENV', 'SID') FROM DUAL``. It is available only in + python-oracledb Thin mode. 
+ + For applications using :ref:`drcp`, the ``session_id`` attribute may + not contain the current session state until a round-trip is made to the + database after acquiring a session. It is recommended to not use this + attribute if your application uses DRCP but may not perform a + round-trip. + """ + self._verify_connected() + return self._impl.get_session_id() + @property def stmtcachesize(self) -> int: """ - Specifies the size of the statement cache. This value can make a - significant difference in performance (up to 100x) if you have a small - number of statements that you execute repeatedly. + This read-write attribute specifies the size of the statement cache. + This value can make a significant difference in performance if you have + a small number of statements that you execute repeatedly. + + The default value is *20*. """ self._verify_connected() return self._impl.get_stmt_cache_size() @@ -576,11 +713,35 @@ def stmtcachesize(self, value: int) -> None: self._verify_connected() self._impl.set_stmt_cache_size(value) + @property + def tag(self) -> str: + """ + This read-write attribute initially contains the actual tag of the + session that was acquired from a pool by + :meth:`ConnectionPool.acquire()`. If the connection was not acquired + from a pool or no tagging parameters were specified (``tag`` and + ``matchanytag``) when the connection was acquired from the pool, this + value will be None. If the value is changed, it must be a string + containing name=value pairs like "k1=v1;k2=v2". + + If this value is not *None* when the connection is released back to the + pool it will be used to retag the session. This value can be overridden + in the call to :meth:`ConnectionPool.release()`. + """ + self._verify_connected() + return self._impl.tag + + @tag.setter + def tag(self, value: str) -> None: + self._verify_connected() + self._impl.tag = value + @property def thin(self) -> bool: """ - Returns a boolean indicating if the connection was established in - python-oracledb's thin mode (True) or thick mode (False). + This read-only attribute returns a boolean indicating if the connection + was established with the python-oracledb Thin mode (*True*) or + python-oracledb Thick mode (*False*). """ self._verify_connected() return self._impl.thin @@ -588,8 +749,8 @@ def thin(self) -> bool: @property def transaction_in_progress(self) -> bool: """ - Specifies whether a transaction is currently in progress on the - database using this connection. + This read-only attribute specifies whether a transaction is currently + in progress on the database associated with the connection. """ self._verify_connected() return self._impl.get_transaction_in_progress() @@ -597,8 +758,8 @@ def transaction_in_progress(self) -> bool: @property def username(self) -> str: """ - Returns the name of the user which established the connection to the - database. + This read-only attribute returns the name of the user which established + the connection to the database. """ self._verify_connected() return self._impl.username @@ -606,8 +767,8 @@ def username(self) -> str: @property def version(self) -> str: """ - Returns the version of the database to which the connection has been - established. + This read-only attribute returns the version of the database to which a + connection has been established. 
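+
+        For example, assuming ``connection`` is an already opened connection,
+        the value can be split into its numeric components (a minimal
+        sketch)::
+
+            version = tuple(int(s) for s in connection.version.split("."))
+            if version >= (23,):
+                print("Oracle Database 23ai or later")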
""" if self._version is None: self._verify_connected() @@ -615,11 +776,32 @@ def version(self) -> str: return self._version @property - def warning(self) -> errors._Error: + def warning(self) -> Union[errors._Error, None]: """ - Returns any warning that was generated when the connection was created, - or the value None if no warning was generated. The value will be - cleared for pooled connections after they are returned to the pool. + This read-only attribute provides an + :ref:`oracledb._Error` object giving information about any + database warnings (such as the password being in the grace period, or + the pool being created with a smaller than requested size due to + database resource restrictions) that were generated during connection + establishment or by :meth:`oracledb.create_pool()`. The attribute will + be present if there was a warning, but creation otherwise completed + successfully. The connection will be usable despite the warning. + + For :ref:`standalone connections `, + ``Connection.warning`` will be present for the lifetime of the + connection. + + For :ref:`pooled connections `, ``Connection.warning`` + will be cleared when a connection is released to the pool such as with + :meth:`ConnectionPool.release()`. + + In python-oracledb Thick mode, warnings may be generated during pool + creation itself. These warnings will be placed on new connections + created by the pool, provided no warnings were generated by the + individual connection creations, in which case those connection + warnings will be returned. + + If no warning was generated the value *None* is returned. """ self._verify_connected() return self._impl.warning @@ -631,13 +813,25 @@ def xid( branch_qualifier: Union[bytes, str], ) -> Xid: """ - Returns a global transaction identifier that can be used with the TPC - (two-phase commit) functions. + Returns a global transaction identifier (xid) that can be used with the + Two-Phase Commit (TPC) functions. + + The ``xid`` contains a format identifier, a global transaction + identifier, and a branch identifier. There are no checks performed at + the Python level. The values are checked by ODPI-C when they are passed + to the relevant functions. .. When this functionality is also + supported in the thin driver the checks will be performed at the Python + level as well. + + The ``format_id`` parameter should be a positive 32-bit integer. This + value identifies the format of the ``global_transaction_id`` and + ``branch_qualifier`` parameters and the value is determined by the + Transaction Manager (TM), if one is in use. - The format_id parameter should be a non-negative 32-bit integer. The - global_transaction_id and branch_qualifier parameters should be bytes - (or a string which will be UTF-8 encoded to bytes) of no more than 64 - bytes. + The ``global_transaction_id`` and ``branch_qualifier`` parameters + should be of type bytes or string. If a value of type string is passed, + then this value will be UTF-8 encoded to bytes. The values cannot + exceed 64 bytes in length. """ return Xid(format_id, global_transaction_id, branch_qualifier) @@ -737,10 +931,18 @@ def __del__(self): self._close(in_del=True) def __enter__(self): + """ + The entry point for the connection as a context manager. It returns + itself. + """ self._verify_connected() return self def __exit__(self, exc_type, exc_value, exc_tb): + """ + The exit point for the connection as a context manager. This will close + the connection and roll back any uncommitted transaction. 
+ """ if self._impl is not None: self._close() @@ -794,7 +996,14 @@ def begin( branch_id: str = "", ) -> None: """ - Deprecated. Use tpc_begin() instead. + Explicitly begins a new transaction. Without parameters, this + explicitly begins a local transaction; otherwise, this explicitly + begins a distributed (global) transaction with the given parameters. + See the Oracle documentation for more details. + + Note that in order to make use of global (distributed) transactions, + the :attr:`~Connection.internal_name` and + :attr:`~Connection.external_name` attributes must be set. """ if format_id != -1: self.tpc_begin(self.xid(format_id, transaction_id, branch_id)) @@ -820,9 +1029,19 @@ def changepassword(self, old_password: str, new_password: str) -> None: def close(self) -> None: """ - Closes the connection and makes it unusable for further operations. An - Error exception will be raised if any operation is attempted with this - connection after this method completes successfully. + Closes the connection now and makes it unusable for further operations. + An Error exception will be raised if any operation is attempted with + this connection after this method is completed successfully. + + All open cursors and LOBs created by the connection will be closed and + will also no longer be usable. + + Internally, references to the connection are held by cursor objects, + LOB objects, subscription objects, etc. Once all of these references + are released, the connection itself will be closed automatically. + Either control references to these related objects carefully or + explicitly close connections in order to ensure sufficient resources + are available. """ self._verify_connected() self._close() @@ -838,7 +1057,13 @@ def createlob( self, lob_type: DbType, data: Optional[Union[str, bytes]] = None ) -> LOB: """ - Create and return a new temporary LOB of the specified type. + Creates and returns a new temporary :ref:`LOB object ` of the + specified type. The ``lob_type`` parameter should be one of + :data:`oracledb.DB_TYPE_CLOB`, :data:`oracledb.DB_TYPE_BLOB`, or + :data:`oracledb.DB_TYPE_NCLOB`. + + If data is supplied, it will be written to the temporary LOB before it + is returned. """ self._verify_connected() if lob_type not in (DB_TYPE_CLOB, DB_TYPE_NCLOB, DB_TYPE_BLOB): @@ -855,33 +1080,34 @@ def createlob( def cursor(self, scrollable: bool = False) -> Cursor: """ - Returns a cursor associated with the connection. + Returns a new :ref:`cursor object ` using the connection. """ self._verify_connected() return Cursor(self, scrollable) - def decode_oson(self, data): - """ - Decode OSON-encoded bytes and return the object encoded in those bytes. - """ - self._verify_connected() - return self._impl.decode_oson(data) - - def encode_oson(self, value): - """ - Return OSON-encoded bytes encoded from the supplied object. - """ - self._verify_connected() - return self._impl.encode_oson(value) - def fetch_df_all( self, statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, - ): + ) -> DataFrame: """ - Fetch all data as an instance of DataFrame. + Fetches all rows of the SQL query ``statement``, returning them in a + :ref:`DataFrame ` object. An empty DataFrame is + returned if there are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. 
It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``arraysize`` parameter can be specified to tune performance of + fetching data across the network. It defaults to + :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s + :attr:`Cursor.prefetchrows` size is always set to the value of the + explicit or default ``arraysize`` parameter value. + + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -896,9 +1122,25 @@ def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, - ): + ) -> Iterator[DataFrame]: """ - Fetch data in batches. Each batch is an instance of DataFrame + This returns an iterator yielding the next ``size`` rows of the SQL + query ``statement`` in each iteration as a :ref:`DataFrame + ` object. An empty DataFrame is returned if there + are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``size`` parameter controls the number of records fetched in each + batch. It defaults to :attr:`defaults.arraysize`. Internally, the + ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + :attr:`Cursor.prefetchrows` sizes are always set to the value of the + explicit or default ``size`` parameter value. + + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -913,8 +1155,13 @@ def fetch_df_batches( def getSodaDatabase(self) -> SodaDatabase: """ - Return a SODA database object for performing all operations on Simple - Oracle Document Access (SODA). + Returns a :ref:`SodaDatabase ` object for Simple Oracle + Document Access (SODA). All SODA operations are performed either on the + returned SodaDatabase object or from objects created by the returned + SodaDatabase object. See `here + `__ for + additional information on SODA. """ self._verify_connected() db_impl = self._impl.create_soda_database_impl(self) @@ -922,8 +1169,9 @@ def getSodaDatabase(self) -> SodaDatabase: def gettype(self, name: str) -> DbObjectType: """ - Return a type object given its name. This can then be used to create - objects which can be bound to cursors created by this connection. + Returns a :ref:`type object ` given its name. This can + then be used to create objects which can be bound to cursors created by + this connection. """ self._verify_connected() obj_type_impl = self._impl.get_type(self, name) @@ -932,9 +1180,10 @@ def gettype(self, name: str) -> DbObjectType: @property def handle(self) -> int: """ - Returns the OCI service context handle for the connection. It is - primarily provided to facilitate testing the creation of a connection - using the OCI service context handle. + This read-only attribute returns the Oracle Call Interface (OCI) + service context handle for the connection. It is primarily provided to + facilitate testing the creation of a connection using the OCI service + context handle. This property is only relevant to python-oracledb's thick mode. """ @@ -950,25 +1199,29 @@ def maxBytesPerCharacter(self) -> int: def ping(self) -> None: """ - Pings the database to verify the connection is valid. + Pings the database to verify if the connection is valid. 
An exception + is thrown if it is not, in which case the connection should not be used + by the application and a new connection should be established instead. + + This function performs a :ref:`round-trip ` to the + database, so it should not be used unnecessarily. + + Note connection pools will perform the same health check automatically, + based on configuration settings. See :ref:`poolhealth`. + + Also, see :meth:`is_healthy()` for a lightweight alternative. """ self._verify_connected() self._impl.ping() def prepare(self) -> bool: """ - Deprecated. Use tpc_prepare() instead. + Prepares the distributed (global) transaction for commit. Returns a + boolean indicating if a transaction was actually prepared in order to + avoid the error ``ORA-24756 (transaction does not exist)``. """ return self.tpc_prepare() - @property - def proxy_user(self) -> Union[str, None]: - """ - Returns the name of the proxy user, if applicable. - """ - self._verify_connected() - return self._impl.proxy_user - def rollback(self) -> None: """ Rolls back any pending transactions. @@ -978,9 +1231,10 @@ def rollback(self) -> None: def shutdown(self, mode: int = 0) -> None: """ - Shutdown the database. In order to do this the connection must be - connected as SYSDBA or SYSOPER. Two calls must be made unless the mode - specified is DBSHUTDOWN_ABORT. + Shuts down the database. In order to do this the connection must be + connected as :data:`~oracledb.SYSDBA` or :data:`~oracledb.SYSOPER`. Two + calls must be made unless the mode specified is + :data:`~oracledb.DBSHUTDOWN_ABORT`. """ self._verify_connected() self._impl.shutdown(mode) @@ -992,11 +1246,12 @@ def startup( pfile: Optional[str] = None, ) -> None: """ - Startup the database. This is equivalent to the SQL*Plus command - “startup nomount”. The connection must be connected as SYSDBA or - SYSOPER with the PRELIM_AUTH option specified for this to work. + Starts up the database. This is equivalent to the SQL*Plus command + ``startup nomount``. The connection must be connected as + :data:`~oracledb.SYSDBA` or :data:`~oracledb.SYSOPER` with the + :data:`~oracledb.PRELIM_AUTH` option specified for this to work. - The pfile parameter, if specified, is expected to be a string + The ``pfile`` parameter, if specified, is expected to be a string identifying the location of the parameter file (PFILE) which will be used instead of the stored parameter file (SPFILE). """ @@ -1026,66 +1281,77 @@ def subscribe( clientInitiated: bool = False, ) -> Subscription: """ - Return a new subscription object that receives notification for events - that take place in the database that match the given parameters. + Returns a new :ref:`subscription object ` that receives + notifications for events that take place in the database that match the + given parameters. - The namespace parameter specifies the namespace the subscription uses. - It can be one of SUBSCR_NAMESPACE_DBCHANGE or SUBSCR_NAMESPACE_AQ. + The ``namespace`` parameter specifies the namespace the subscription + uses. It can be one of :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` or + :data:`oracledb.SUBSCR_NAMESPACE_AQ`. - The protocol parameter specifies the protocol to use when notifications - are sent. Currently the only valid value is SUBSCR_PROTO_CALLBACK. + The ``protocol`` parameter specifies the protocol to use when + notifications are sent. Currently the only valid value is + :data:`oracledb.SUBSCR_PROTO_OCI`. - The callback is expected to be a callable that accepts a single - parameter. 
A message object is passed to this callback whenever a - notification is received. + The ``callback`` is expected to be a callable that accepts a single + parameter. A :ref:`message object ` is passed to this + callback whenever a notification is received. - The timeout value specifies that the subscription expires after the - given time in seconds. The default value of 0 indicates that the + The ``timeout`` value specifies that the subscription expires after the + given time in seconds. The default value of *0* indicates that the subscription never expires. - The operations parameter enables filtering of the messages that are + The ``operations`` parameter enables filtering of the messages that are sent (insert, update, delete). The default value will send notifications for all operations. This parameter is only used when the - namespace is set to SUBSCR_NAMESPACE_DBCHANGE. + namespace is set to :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE`. - The port parameter specifies the listening port for callback + The ``port`` parameter specifies the listening port for callback notifications from the database server. If not specified, an unused port will be selected by the Oracle Client libraries. - The qos parameter specifies quality of service options. It should be - one or more of the following flags, OR'ed together: - SUBSCR_QOS_RELIABLE, - SUBSCR_QOS_DEREG_NFY, - SUBSCR_QOS_ROWIDS, - SUBSCR_QOS_QUERY, - SUBSCR_QOS_BEST_EFFORT. - - The ip_address parameter specifies the IP address (IPv4 or IPv6) in - standard string notation to bind for callback notifications from the - database server. If not specified, the client IP address will be - determined by the Oracle Client libraries. - - The grouping_class parameter specifies what type of grouping of - notifications should take place. Currently, if set, this value can - only be set to the value SUBSCR_GROUPING_CLASS_TIME, which will group - notifications by the number of seconds specified in the grouping_value - parameter. The grouping_type parameter should be one of the values - SUBSCR_GROUPING_TYPE_SUMMARY (the default) or - SUBSCR_GROUPING_TYPE_LAST. - - The name parameter is used to identify the subscription and is specific - to the selected namespace. If the namespace parameter is - SUBSCR_NAMESPACE_DBCHANGE then the name is optional and can be any - value. If the namespace parameter is SUBSCR_NAMESPACE_AQ, however, the - name must be in the format '' for single consumer queues - and ':' for multiple consumer queues, and + The ``qos`` parameter specifies quality of service options. It should + be one or more of the following flags, OR'ed together: + :data:`oracledb.SUBSCR_QOS_RELIABLE`, + :data:`oracledb.SUBSCR_QOS_DEREG_NFY`, + :data:`oracledb.SUBSCR_QOS_ROWIDS`, :data:`oracledb.SUBSCR_QOS_QUERY`, + :data:`oracledb.SUBSCR_QOS_BEST_EFFORT`. + + The ``ip_address`` parameter specifies the IP address (*IPv4* or + *IPv6*) in standard string notation to bind for callback notifications + from the database server. If not specified, the client IP address will + be determined by the Oracle Client libraries. + + The ``grouping_class`` parameter specifies what type of grouping of + notifications should take place. Currently, if set, this value can only + be set to the value :data:`oracledb.SUBSCR_GROUPING_CLASS_TIME`, which + will group notifications by the number of seconds specified in the + ``grouping_value`` parameter. 
The ``grouping_type`` parameter should be + one of the values :data:`oracledb.SUBSCR_GROUPING_TYPE_SUMMARY` (the + default) or :data:`oracledb.SUBSCR_GROUPING_TYPE_LAST`. + + The ``name`` parameter is used to identify the subscription and is + specific to the selected namespace. If the namespace parameter is + :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` then the name is optional + and can be any value. If the namespace parameter is + :data:`oracledb.SUBSCR_NAMESPACE_AQ`, however, the name must be in the + format '' for single consumer queues and + ':' for multiple consumer queues, and identifies the queue that will be monitored for messages. The queue name may include the schema, if needed. - The client_initiated parameter is used to determine if client initiated - connections or server initiated connections (the default) will be - established. Client initiated connections are only available in Oracle - Client 19.4 and Oracle Database 19.4 and higher. + The ``client_initiated`` parameter is used to determine if client + initiated connections or server initiated connections (the default) + will be established. Client initiated connections are only available in + Oracle Client 19.4 and Oracle Database 19.4 and higher. + + For consistency and compliance with the PEP 8 naming style, the + parameter ``ipAddress`` was renamed to ``ip_address``, the parameter + ``groupingClass`` was renamed to ``grouping_class``, the parameter + ``groupingValue`` was renamed to ``grouping_value``, the parameter + ``groupingType`` was renamed to ``grouping_type`` and the parameter + ``clientInitiated`` was renamed to ``client_initiated``. The old names + will continue to work as keyword parameters for a period of time. """ self._verify_connected() if ipAddress is not None: @@ -1150,46 +1416,43 @@ def subscribe( def suspend_sessionless_transaction(self) -> None: """ - Suspends the currently active sessionless transaction. - - This temporarily detaches the transaction from the session, - allowing it to be resumed later using its transaction_id. + Suspends the currently active sessionless transaction immediately. - Returns: - None + This detaches the transaction from the connection, allowing it to be + resumed later with the transaction identifier that was specified during + creation of the sessionless transaction. The ``timeout`` previously + passed to :meth:`begin_sessionless_transaction()` determines how long + the transaction can stay suspended before it is automatically rolled + back. """ self._verify_connected() self._impl.suspend_sessionless_transaction() - @property - def tag(self) -> str: - """ - This property initially contains the actual tag of the session that was - acquired from a pool. If the connection was not acquired from a pool or - no tagging parameters were specified (tag and matchanytag) when the - connection was acquired from the pool, this value will be None. If the - value is changed, it must be a string containing name=value pairs like - “k1=v1;k2=v2”. - - If this value is not None when the connection is released back to the - pool it will be used to retag the session. This value can be overridden - in the call to SessionPool.release(). - """ - self._verify_connected() - return self._impl.tag - - @tag.setter - def tag(self, value: str) -> None: - self._verify_connected() - self._impl.tag = value - def tpc_begin( self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ - Begins a TPC (two-phase commit) transaction with the given transaction - id. 
This method should be called outside of a transaction (i.e. nothing - may have executed since the last commit() or rollback() was performed). + Begins a Two-Phase Commit (TPC) on a global transaction using the + specified transaction identifier (xid). + + The ``xid`` parameter should be an object returned by the + :meth:`xid()` method. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_BEGIN_JOIN`, :data:`oracledb.TPC_BEGIN_NEW`, + :data:`oracledb.TPC_BEGIN_PROMOTE`, or + :data:`oracledb.TPC_BEGIN_RESUME`. The default is + :data:`oracledb.TPC_BEGIN_NEW`. + + The ``timeout`` parameter is the number of seconds to wait for a + transaction to become available for resumption when + :data:`~oracledb.TPC_BEGIN_RESUME` is specified in the ``flags`` + parameter. When :data:`~oracledb.TPC_BEGIN_NEW` is specified in the + ``flags`` parameter, the ``timeout`` parameter indicates the number of + seconds the transaction can be inactive before it is automatically + terminated by the system. A transaction is inactive between the time it + is detached with :meth:`tpc_end()` and the time it is resumed with + :meth:`tpc_begin()`.The default is *0* seconds. """ self._verify_connected() self._verify_xid(xid) @@ -1206,18 +1469,24 @@ def tpc_commit( self, xid: Optional[Xid] = None, one_phase: bool = False ) -> None: """ - Prepare the global transaction for commit. Return a boolean indicating - if a transaction was actually prepared in order to avoid the error - ORA-24756 (transaction does not exist). + Commits a global transaction. When called with no arguments, this + method commits a transaction previously prepared with + :meth:`tpc_begin()` and optionally prepared with :meth:`tpc_prepare()`. + If :meth:`tpc_prepare()` is not called, a single phase commit is + performed. A transaction manager may choose to do this if only a single + resource is participating in the global transaction. - When called with no arguments, commits a transaction previously - prepared with tpc_prepare(). If tpc_prepare() is not called, a single - phase commit is performed. A transaction manager may choose to do this - if only a single resource is participating in the global transaction. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. This form should be called outside of a + transaction and is intended for use in recovery. - When called with a transaction id, the database commits the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + The ``one_phase`` parameter is a boolean identifying whether to perform + a one-phase or two-phase commit. If ``one_phase`` parameter is *True*, + a single-phase commit is performed. The default value is *False*. This + parameter is only examined if a value is provided for the ``xid`` + parameter. Otherwise, the driver already knows whether + :meth:`tpc_prepare()` was called for the transaction and whether a + one-phase or two-phase commit is required. """ self._verify_connected() if xid is not None: @@ -1228,7 +1497,21 @@ def tpc_end( self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ - Ends (detaches from) a TPC (two-phase commit) transaction. + Ends or suspends work on a global transaction. This function is only + intended for use by transaction managers. + + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. 
If no xid parameter is passed, then the + transaction identifier used by the previous :meth:`tpc_begin()` is + used. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_END_NORMAL` or :data:`oracledb.TPC_END_SUSPEND`. + The default is :data:`oracledb.TPC_END_NORMAL`. + + If the flag is :data:`oracledb.TPC_END_SUSPEND` then the transaction + may be resumed later by calling :meth:`tpc_begin()` with the flag + :data:`oracledb.TPC_BEGIN_RESUME`. """ self._verify_connected() if xid is not None: @@ -1239,7 +1522,12 @@ def tpc_end( def tpc_forget(self, xid: Xid) -> None: """ - Forgets a TPC (two-phase commit) transaction. + Causes the database to forget a heuristically completed TPC + transaction. This function is only intended to be called by + transaction managers. + + The ``xid`` parameter is mandatory and should be an object should be + returned by the :meth:`xid()` function. """ self._verify_connected() self._verify_xid(xid) @@ -1247,13 +1535,18 @@ def tpc_forget(self, xid: Xid) -> None: def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: """ - Prepares a global transaction for commit. After calling this function, - no further activity should take place on this connection until either - tpc_commit() or tpc_rollback() have been called. + Prepares a two-phase transaction for commit. After this function is + called, no further activity should take place on this connection until + either :meth:`tpc_commit()` or :meth:`tpc_rollback()` have been called. + + Returns a boolean indicating whether a commit is needed or not. If you + attempt to commit when not needed, then it results in the error + ``ORA-24756: transaction does not exist``. - A boolean is returned indicating whether a commit is needed or not. If - a commit is performed when one is not needed the error ORA-24756: - transaction does not exist is raised. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. If an ``xid`` parameter is not passed, then + the transaction identifier used by the previous :meth:`tpc_begin()` is + used. """ self._verify_connected() if xid is not None: @@ -1262,11 +1555,13 @@ def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: def tpc_recover(self) -> list: """ - Returns a list of pending transaction ids suitable for use with - tpc_commit() or tpc_rollback(). + Returns a list of pending transaction identifiers that require + recovery. Objects of type ``Xid`` (as returned by the + :meth:`xid()` function) are returned and these can be passed to + :meth:`tpc_commit()` or :meth:`tpc_rollback()` as needed. - This function requires select privilege on the view - DBA_PENDING_TRANSACTIONS. + This function queries the DBA_PENDING_TRANSACTIONS view and requires + "SELECT" privilege on that view. """ with self.cursor() as cursor: cursor.execute( @@ -1282,12 +1577,14 @@ def tpc_recover(self) -> list: def tpc_rollback(self, xid: Optional[Xid] = None) -> None: """ - When called with no arguments, rolls back the transaction previously - started with tpc_begin(). + If an ``xid`` parameter is not passed, then it rolls back the + transaction that was previously started with + :meth:`tpc_begin()`. - When called with a transaction id, the database rolls back the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + If an ``xid`` parameter is passed, then an object should be returned by + :meth:`xid()` and the specified transaction is rolled back. 
This form + should be called outside of a transaction and is intended for use in + recovery. """ self._verify_connected() if xid is not None: @@ -1297,9 +1594,9 @@ def tpc_rollback(self, xid: Optional[Xid] = None) -> None: def unsubscribe(self, subscr: Subscription) -> None: """ Unsubscribe from events in the database that were originally subscribed - to using subscribe(). The connection used to unsubscribe should be the - same one used to create the subscription, or should access the same - database and be connected as the same user name. + to using :meth:`subscribe()`. The connection used to unsubscribe should + be the same one used to create the subscription, or should access the + same database and be connected as the same user name. """ self._verify_connected() if not isinstance(subscr, Subscription): @@ -1429,6 +1726,10 @@ def __await__(self): return coroutine.__await__() async def __aenter__(self): + """ + The entry point for the asynchronous connection as a context manager. + It returns itself. + """ if self._connect_coroutine is not None: await self._connect_coroutine else: @@ -1436,6 +1737,11 @@ async def __aenter__(self): return self async def __aexit__(self, *exc_info): + """ + The exit point for the asynchronous connection as a context manager. + This will close the connection and roll back any uncommitted + transaction. + """ if self._impl is not None: await self._close() @@ -1538,19 +1844,33 @@ async def begin_sessionless_transaction( defer_round_trip: bool = False, ) -> bytes: """ - Begins a new sessionless transaction. - - Parameters: - transaction_id (str or bytes, optional): A Transaction Identifier. - If None, a random transaction_id will be generated. - timeout (int, optional): Timeout value in seconds. - Must be a positive integer. Defaults to 60 if not provided. - defer_round_trip (bool, optional): - If True, the request is not sent immediately but included - with the next database operation. - - Returns: - bytes: The normalized transaction_id used for the transaction. + Begins a new sessionless transaction. This method returns the + transaction identifier specified by the user or generated by + python-oracledb. + + The ``transaction_id`` parameter should be of type string or bytes. If + specified, it represents a unique identifier for the transaction. If a + string is passed, then it will be UTF-8 encoded to bytes. If this value + is not specified, then python-oracledb generates a a random + `universally-unique identifier (UUID) `__ value when this function is called. An example is + "36b8f84d-df4e-4d49-b662-bcde71a8764f". The user-chosen value cannot + exceed 64 bytes in length. + + The ``timeout`` parameter is the number of seconds that this + transaction can stay suspended when + :meth:`suspend_sessionless_transaction()` is later called, or if the + transaction is automatically suspended when the ``suspend_on_success`` + parameter is set to to *True* in :meth:`AsyncCursor.execute()` or + :meth:`AsyncCursor.executemany()`. The default value is *60* seconds. + If a transaction is not resumed within this specified duration, the + transaction will be rolled back. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to start a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. 
""" self._verify_connected() normalized_txnid = normalize_sessionless_transaction_id(transaction_id) @@ -1571,10 +1891,10 @@ async def callfunc( keyword_parameters: Optional[dict] = None, ) -> Any: """ - Call a PL/SQL function with the given name. + Calls a PL/SQL function with the given name. - This is a shortcut for creating a cursor, calling the stored function - with the cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.callfunc()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: return await cursor.callfunc( @@ -1588,10 +1908,10 @@ async def callproc( keyword_parameters: Optional[dict] = None, ) -> list: """ - Call a PL/SQL procedure with the given name. + Calls a PL/SQL procedure with the given name. - This is a shortcut for creating a cursor, calling the stored procedure - with the cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.callproc()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: return await cursor.callproc(name, parameters, keyword_parameters) @@ -1623,7 +1943,7 @@ async def createlob( self, lob_type: DbType, data: Optional[Union[str, bytes]] = None ) -> AsyncLOB: """ - Create and return a new temporary LOB of the specified type. + Creates and returns a new temporary LOB of the specified type. """ self._verify_connected() if lob_type not in (DB_TYPE_CLOB, DB_TYPE_NCLOB, DB_TYPE_BLOB): @@ -1640,7 +1960,8 @@ async def createlob( def cursor(self, scrollable: bool = False) -> AsyncCursor: """ - Returns a cursor associated with the connection. + Returns an :ref:`AsyncCursor object ` associated with + the connection. """ self._verify_connected() return AsyncCursor(self, scrollable) @@ -1651,10 +1972,10 @@ async def execute( parameters: Optional[Union[list, tuple, dict]] = None, ) -> None: """ - Execute a statement against the database. + Executes a statement against the database. - This is a shortcut for creating a cursor, executing a statement with - the cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.execute()`, and then :meth:`AsyncCursor.close()` """ with self.cursor() as cursor: await cursor.execute(statement, parameters) @@ -1663,12 +1984,22 @@ async def executemany( self, statement: Union[str, None], parameters: Any ) -> None: """ - Prepare a statement for execution against a database and then execute - it against all parameter mappings or sequences found in the sequence - parameters. + Executes a SQL statement once using all bind value mappings or + sequences found in the sequence parameters. This can be used to insert, + update, or delete multiple rows in a table with a single + python-oracledb call. It can also invoke a PL/SQL procedure multiple + times. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one bind variable placeholder in ``statement``. It can + also be a list of dictionaries, where the keys match the bind variable + placeholder names in ``statement``. If there are no bind values, or + values have previously been bound, the ``parameters`` value can be an + integer specifying the number of iterations. - This is a shortcut for creating a cursor, calling executemany() on the - cursor and then closing the cursor. + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.executemany()`, and then + :meth:`AsyncCursor.close()`. 
""" with self.cursor() as cursor: await cursor.executemany(statement, parameters) @@ -1681,8 +2012,15 @@ async def fetchall( rowfactory: Optional[Callable] = None, ) -> list: """ - Executes a query and returns all of the rows. After the rows are - fetched, the cursor is closed. + Executes a query and returns all of the rows. + + The default value for ``arraysize`` is :attr:`defaults.arraysize`. + + Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set + to the value of the explicit or default ``arraysize`` parameter value. + + This is a shortcut for calling :meth:`cursor()`, + :meth:`AsyncCursor.fetchall()`, and then :meth:`AsyncCursor.close()`. """ with self.cursor() as cursor: if arraysize is not None: @@ -1697,9 +2035,22 @@ async def fetch_df_all( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, - ): + ) -> DataFrame: """ - Fetch all data as an instance of DataFrame. + Fetches all rows of the SQL query ``statement``, returning them in a + :ref:`DataFrame ` object. An empty DataFrame is + returned if there are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``arraysize`` parameter can be specified to tune performance of + fetching data across the network. It defaults to + :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s + :attr:`Cursor.prefetchrows` size is always set to the value of the + explicit or default ``arraysize`` parameter value. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1714,9 +2065,23 @@ async def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, - ): + ) -> Iterator[DataFrame]: """ - Fetch data in batches. Each batch is an instance of DataFrame + This returns an iterator yielding the next ``size`` rows of the SQL + query ``statement`` in each iteration as a :ref:`DataFrame + ` object. An empty DataFrame is returned if there + are no rows available. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one :ref:`bind variable placeholder ` in + ``statement``. It can also be a list of dictionaries, where the keys + match the bind variable placeholder names in ``statement``. + + The ``size`` parameter controls the number of records fetched in each + batch. It defaults to :attr:`defaults.arraysize`. Internally, the + ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + :attr:`Cursor.prefetchrows` sizes are always set to the value of the + explicit or default ``size`` parameter value. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -1738,8 +2103,21 @@ async def fetchmany( rowfactory: Optional[Callable] = None, ) -> list: """ - Executes a query and returns up to the specified number of rows. After - the rows are fetched, the cursor is closed. + Executes a query and returns up to the specified number of rows. + + The default value for ``num_rows`` is the value of + :attr:`defaults.arraysize`. 
+
+ Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set
+ to the value of the explicit or default ``num_rows`` parameter,
+ allowing all rows to be fetched in one :ref:`round-trip `.
+
+ Since only one fetch is performed for a query, consider adding a
+ ``FETCH NEXT`` clause to the statement to prevent the database
+ processing rows that will never be fetched, see :ref:`rowlimit`.
+
+ This is a shortcut for calling :meth:`cursor()`,
+ :meth:`AsyncCursor.fetchmany()`, and then :meth:`AsyncCursor.close()`.
"""
with self.cursor() as cursor:
if num_rows is None:
@@ -1759,8 +2137,18 @@ async def fetchone(
) -> Any:
"""
Executes a query and returns the first row of the result set if one
- exists (or None if no rows exist). After the row is fetched the cursor
- is closed.
+ exists (or *None* if no rows exist).
+
+ Internally, this method's :attr:`Cursor.prefetchrows` and
+ :attr:`Cursor.arraysize` sizes will be set to *1*.
+
+ Since only one fetch is performed for a query, consider adding a
+ ``WHERE`` condition or using a ``FETCH NEXT`` clause in the statement
+ to prevent the database processing rows that will never be fetched, see
+ :ref:`rowlimit`.
+
+ This is a shortcut for calling :meth:`cursor()`,
+ :meth:`AsyncCursor.fetchone()`, and then :meth:`AsyncCursor.close()`.
"""
with self.cursor() as cursor:
cursor.prefetchrows = cursor.arraysize = 1
@@ -1770,8 +2158,9 @@ async def fetchone(
async def gettype(self, name: str) -> DbObjectType:
"""
- Return a type object given its name. This can then be used to create
- objects which can be bound to cursors created by this connection.
+ Returns a :ref:`type object ` given its name. This can
+ then be used to create objects which can be bound to cursors created by
+ this connection.
"""
self._verify_connected()
obj_type_impl = await self._impl.get_type(self, name)
@@ -1779,7 +2168,7 @@ async def gettype(self, name: str) -> DbObjectType:
async def ping(self) -> None:
"""
- Pings the database to verify the connection is valid.
+ Pings the database to verify if the connection is valid.
"""
self._verify_connected()
await self._impl.ping()
@@ -1791,22 +2180,38 @@ async def resume_sessionless_transaction(
defer_round_trip: bool = False,
) -> bytes:
"""
- Resumes an existing sessionless transaction using the given
- transaction_id.
-
- Parameters:
- transaction_id (str or bytes): A Transaction Identifier that
- uniquely identifies the sessionless transaction to be
- resumed. This parameter is mandatory.
- timeout (int, optional): Timeout in seconds for the resumed
- transaction. Must be a positive integer. Defaults to 60.
- defer_round_trip (bool, optional):
- If True, the request is not sent immediately but included
- with the next database operation.
-
- Returns:
- bytes: The normalized transaction_id used to resume the
- sessionless transaction.
+ Resumes an existing sessionless transaction using the specified
+ transaction identifier. This method returns the transaction identifier
+ used to resume the sessionless transaction.
+
+ The ``transaction_id`` parameter should be a string or bytes value that
+ uniquely identifies an existing sessionless transaction that is to be
+ resumed.
+
+ The ``timeout`` parameter is the number of seconds that the current
+ connection waits to resume a transaction if another connection is using
+ it. When ``defer_round_trip`` is set to *False*, the wait happens in
+ the ``resume_sessionless_transaction()`` call itself, and the function
+ blocks until the transaction becomes available or the timeout expires.
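For instance, a small sketch of the single-call fetch helpers described above, assuming an open ``AsyncConnection`` named ``conn`` and an illustrative table ``mytab``::

    # first matching row only
    row = await conn.fetchone("select name from mytab where id = :1", [1])

    # at most ten rows, fetched in a single call
    rows = await conn.fetchmany(
        "select id, name from mytab order by id", num_rows=10
    )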
+ When ``defer_round_trip`` is set to *True*, the resume is deferred and + the wait occurs at the time of the next database operation instead. At + the start of the wait period, if the transaction is not in use by any + other connection, the resume happens immediately. If the transaction + remains in use by the other connection after the timeout period, the + error `ORA-25351 + `__ is raised. If + another connection completes the transaction, the error `ORA-24756 + `__ is raised. + These error messages are only thrown for non-RAC instances. For + information on using Oracle RAC, see :ref:`Sessionless Transactions + with Oracle RAC `. The default value is *60* + seconds. + + The ``defer_round_trip`` parameter is a boolean that determines whether + the request to resume a transaction is to be sent immediately or with + the next database operation. If set to *False*, the request is sent + immediately. If set to *True*, the request is included with the next + database operation on the connection. The default value is *False*. """ self._verify_connected() if transaction_id is None: @@ -1835,16 +2240,19 @@ async def run_pipeline( continue_on_error: bool = False, ) -> list: """ - Runs all of the operations in the pipeline on the connection. If the - database is Oracle Database 23ai or higher, the operations will be - performed in a single round trip, subject to the following caveats: - - queries that contain LOBs require an additional round trip - - queries that contain DbObject values may require multiple round - trips - - queries that fetch all of the rows may require multiple round - trips - For all other databases, the operations will be performed in the same - way as they would be performed independently of the pipeline. + Runs all of the operations in the :ref:`pipeline ` and + returns a list of :ref:`PipelineOpResult Objects + `, each entry corresponding to an operation + executed in the pipeline. + + The ``continue_on_error`` parameter determines whether operations + should continue to run after an error has occurred. If this parameter + is set to *True*, then the :attr:`PipelineOpResult.error` attribute + will be populated with an :ref:`_Error ` instance which + identifies the error that occurred. If this parameter is set to + *False*, then an exception will be raised as soon as an error is + detected and all subsequent operations will be terminated. The default + value is *False*. """ self._verify_connected() results = [op._create_result() for op in pipeline.operations] @@ -1860,13 +2268,14 @@ async def run_pipeline( async def suspend_sessionless_transaction(self) -> None: """ - Suspends the currently active sessionless transaction. - - This temporarily detaches the transaction from the session, - allowing it to be resumed later using its transaction_id. + Suspends the currently active sessionless transaction immediately. - Returns: - None + This detaches the transaction from the connection, allowing it to be + resumed later with the transaction identifier that was specified during + creation of the sessionless transaction. The ``timeout`` previously + passed to :meth:`AsyncConnection.begin_sessionless_transaction()` + determines how long the transaction can stay suspended before it is + automatically rolled back. 
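A brief sketch of the pipeline usage described above, assuming ``import oracledb``, an open ``AsyncConnection`` named ``conn``, and an illustrative table ``mytab``::

    pipeline = oracledb.create_pipeline()
    pipeline.add_execute("insert into mytab (id) values (:1)", [10])
    pipeline.add_fetchall("select id from mytab order by id")

    # the operations are sent together when the database supports it
    results = await conn.run_pipeline(pipeline)
    for result in results:
        if result.rows is not None:
            print(result.rows)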
""" self._verify_connected() await self._impl.suspend_sessionless_transaction() @@ -1875,9 +2284,28 @@ async def tpc_begin( self, xid: Xid, flags: int = oracledb.TPC_BEGIN_NEW, timeout: int = 0 ) -> None: """ - Begins a TPC (two-phase commit) transaction with the given transaction - id. This method should be called outside of a transaction (i.e. nothing - may have executed since the last commit() or rollback() was performed). + Begins a Two-Phase Commit (TPC) on a global transaction using the + specified transaction identifier (xid). + + The ``xid`` parameter should be an object returned by the + :meth:`xid()` method. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_BEGIN_JOIN`, :data:`oracledb.TPC_BEGIN_NEW`, + :data:`oracledb.TPC_BEGIN_PROMOTE`, or + :data:`oracledb.TPC_BEGIN_RESUME`. The default is + :data:`oracledb.TPC_BEGIN_NEW`. + + The ``timeout`` parameter is the number of seconds to wait for a + transaction to become available for resumption when + :data:`~oracledb.TPC_BEGIN_RESUME` is specified in the ``flags`` + parameter. When :data:`~oracledb.TPC_BEGIN_NEW` is specified in the + ``flags`` parameter, the ``timeout`` parameter indicates the number of + seconds the transaction can be inactive before it is automatically + terminated by the system. A transaction is inactive between the time it + is detached with :meth:`AsyncConnection.tpc_end()` and the time it is + resumed with :meth:`AsyncConnection.tpc_begin()`.The default is *0* + seconds. """ self._verify_connected() self._verify_xid(xid) @@ -1894,18 +2322,25 @@ async def tpc_commit( self, xid: Optional[Xid] = None, one_phase: bool = False ) -> None: """ - Prepare the global transaction for commit. Return a boolean indicating - if a transaction was actually prepared in order to avoid the error - ORA-24756 (transaction does not exist). + Commits a global transaction. When called with no arguments, this + method commits a transaction previously prepared with + :meth:`~AsyncConnection.tpc_begin()` and optionally prepared with + :meth:`~AsyncConnection.tpc_prepare()`. If + :meth:`~AsyncConnection.tpc_prepare()` is not called, a single phase + commit is performed. A transaction manager may choose to do this if + only a single resource is participating in the global transaction. - When called with no arguments, commits a transaction previously - prepared with tpc_prepare(). If tpc_prepare() is not called, a single - phase commit is performed. A transaction manager may choose to do this - if only a single resource is participating in the global transaction. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`~Connection.xid()` function. This form should be called + outside of a transaction and is intended for use in recovery. - When called with a transaction id, the database commits the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + The ``one_phase`` parameter is a boolean identifying whether to perform + a one-phase or two-phase commit. If ``one_phase`` parameter is *True*, + a single-phase commit is performed. The default value is *False*. This + parameter is only examined if a value is provided for the ``xid`` + parameter. Otherwise, the driver already knows whether + :meth:`tpc_prepare()` was called for the transaction and whether a + one-phase or two-phase commit is required. 
""" self._verify_connected() if xid is not None: @@ -1916,7 +2351,21 @@ async def tpc_end( self, xid: Optional[Xid] = None, flags: int = oracledb.TPC_END_NORMAL ) -> None: """ - Ends (detaches from) a TPC (two-phase commit) transaction. + Ends or suspends work on a global transaction. This function is only + intended for use by transaction managers. + + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`~Connection.xid()` function. If no xid parameter is passed, + then the transaction identifier used by the previous + :meth:`~Connection.tpc_begin()` is used. + + The ``flags`` parameter is one of the constants + :data:`oracledb.TPC_END_NORMAL` or :data:`oracledb.TPC_END_SUSPEND`. + The default is :data:`oracledb.TPC_END_NORMAL`. + + If the flag is :data:`oracledb.TPC_END_SUSPEND` then the transaction + may be resumed later by calling :meth:`AsyncConnection.tpc_begin()` + with the flag :data:`oracledb.TPC_BEGIN_RESUME`. """ self._verify_connected() if xid is not None: @@ -1927,7 +2376,12 @@ async def tpc_end( async def tpc_forget(self, xid: Xid) -> None: """ - Forgets a TPC (two-phase commit) transaction. + Causes the database to forget a heuristically completed TPC + transaction. This function is only intended to be called by + transaction managers. + + The ``xid`` parameter is mandatory and should be an object should be + returned by the :meth:`xid()` function. """ self._verify_connected() self._verify_xid(xid) @@ -1935,13 +2389,18 @@ async def tpc_forget(self, xid: Xid) -> None: async def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: """ - Prepares a global transaction for commit. After calling this function, - no further activity should take place on this connection until either - tpc_commit() or tpc_rollback() have been called. + Prepares a two-phase transaction for commit. After this function is + called, no further activity should take place on this connection until + either :meth:`tpc_commit()` or :meth:`tpc_rollback()` have been called. + + Returns a boolean indicating whether a commit is needed or not. If you + attempt to commit when not needed, then it results in the error + ``ORA-24756: transaction does not exist``. - A boolean is returned indicating whether a commit is needed or not. If - a commit is performed when one is not needed the error ORA-24756: - transaction does not exist is raised. + If an ``xid`` parameter is passed, then an object should be returned by + the :meth:`xid()` function. If an ``xid`` parameter is not passed, then + the transaction identifier used by the previous :meth:`tpc_begin()` is + used. """ self._verify_connected() if xid is not None: @@ -1950,11 +2409,13 @@ async def tpc_prepare(self, xid: Optional[Xid] = None) -> bool: async def tpc_recover(self) -> list: """ - Returns a list of pending transaction ids suitable for use with - tpc_commit() or tpc_rollback(). + Returns a list of pending transaction identifiers that require + recovery. Objects of type ``Xid`` (as returned by the + :meth:`~Connection.xid()` function) are returned and these can be + passed to :meth:`tpc_commit()` or :meth:`tpc_rollback()` as needed. - This function requires select privilege on the view - DBA_PENDING_TRANSACTIONS. + This function queries the view ``DBA_PENDING_TRANSACTIONS`` and + requires ``SELECT`` privilege on that view. 
""" with self.cursor() as cursor: await cursor.execute( @@ -1970,12 +2431,16 @@ async def tpc_recover(self) -> list: async def tpc_rollback(self, xid: Optional[Xid] = None) -> None: """ - When called with no arguments, rolls back the transaction previously - started with tpc_begin(). + Rolls back a global transaction. + + If an ``xid`` parameter is not passed, then it rolls back the + transaction that was previously started with + :meth:`~AsyncConnection.tpc_begin()`. - When called with a transaction id, the database rolls back the given - transaction. This form should be called outside of a transaction and is - intended for use in recovery. + If an ``xid`` parameter is passed, then an object should be returned by + :meth:`~Connection.xid()` and the specified transaction is rolled back. + This form should be called outside of a transaction and is intended for + use in recovery. """ self._verify_connected() if xid is not None: From 141caea295767ee681ace6a42680aae04dc859bf Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 2 Aug 2025 11:13:43 -0600 Subject: [PATCH 169/239] See if additional requirements make autodoc happy! --- doc/requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/requirements.txt b/doc/requirements.txt index dcc8e5c8..3ce2ca6d 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,2 +1,4 @@ sphinx>=4.2.0 sphinx-rtd-theme>=0.5.2 +oracledb +cryptography From a79c0fffe0131d6708bca697dc0a0d889da7f7d7 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 2 Aug 2025 11:19:09 -0600 Subject: [PATCH 170/239] Use a build of the current code, not an installation of the last release. --- .readthedocs.yaml | 2 ++ doc/requirements.txt | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 8ecaf936..a4975a0b 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -14,3 +14,5 @@ sphinx: python: install: - requirements: doc/requirements.txt + - method: pip + path: . diff --git a/doc/requirements.txt b/doc/requirements.txt index 3ce2ca6d..dcc8e5c8 100644 --- a/doc/requirements.txt +++ b/doc/requirements.txt @@ -1,4 +1,2 @@ sphinx>=4.2.0 sphinx-rtd-theme>=0.5.2 -oracledb -cryptography From da16c31f3c8c4234ee28844e702c0920e896a9c2 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 2 Aug 2025 13:57:26 -0600 Subject: [PATCH 171/239] Tell ReadTheDocs to include submodules. --- .readthedocs.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index a4975a0b..081fa252 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -5,6 +5,9 @@ build: os: ubuntu-20.04 tools: python: "3.9" + submodules: + include: all + recursive: true # Build documentation in the doc/src directory with Sphinx sphinx: From de8f3ad96534c466681db654e2de549a519e782f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Sat, 2 Aug 2025 14:00:32 -0600 Subject: [PATCH 172/239] Fix configuration. 
--- .readthedocs.yaml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index 081fa252..1a6c05d2 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -5,9 +5,11 @@ build: os: ubuntu-20.04 tools: python: "3.9" - submodules: - include: all - recursive: true + +# ensure that the submodules are included +submodules: + include: all + recursive: true # Build documentation in the doc/src directory with Sphinx sphinx: From 869a887819cdac7fcd610f9d9d463ade49ea7de6 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 5 Aug 2025 12:25:26 -0600 Subject: [PATCH 173/239] Update remaining API documentation to use autodoc. --- doc/README.md | 49 +- doc/src/api_manual/aq.rst | 321 +- doc/src/api_manual/async_aq.rst | 103 +- doc/src/api_manual/async_connection.rst | 5 +- doc/src/api_manual/async_connection_pool.rst | 193 +- doc/src/api_manual/async_cursor.rst | 5 + doc/src/api_manual/async_lob.rst | 93 +- doc/src/api_manual/connect_params.rst | 486 +-- doc/src/api_manual/connection_pool.rst | 239 +- doc/src/api_manual/dataframe.rst | 83 +- doc/src/api_manual/dbobject_type.rst | 228 +- doc/src/api_manual/defaults.rst | 148 +- doc/src/api_manual/fetch_info.rst | 145 +- doc/src/api_manual/lob.rst | 88 +- doc/src/api_manual/module.rst | 2756 +----------------- doc/src/api_manual/pipeline.rst | 242 +- doc/src/api_manual/pool_params.rst | 221 +- doc/src/api_manual/soda.rst | 609 +--- doc/src/api_manual/sparse_vector.rst | 27 +- doc/src/api_manual/subscription.rst | 258 +- doc/src/api_manual/variable.rst | 94 +- doc/src/conf.py | 5 +- doc/src/release_notes.rst | 2 + doc/src/user_guide/appendix_a.rst | 2 + doc/src/user_guide/appendix_b.rst | 2 + doc/src/user_guide/appendix_c.rst | 2 + doc/src/user_guide/appendix_d.rst | 2 + doc/src/user_guide/aq.rst | 2 + doc/src/user_guide/asyncio.rst | 2 + doc/src/user_guide/batch_statement.rst | 2 + doc/src/user_guide/bind.rst | 2 + doc/src/user_guide/connection_handling.rst | 2 + doc/src/user_guide/cqn.rst | 2 + doc/src/user_guide/dataframes.rst | 2 + doc/src/user_guide/exception_handling.rst | 2 + doc/src/user_guide/extending.rst | 2 + doc/src/user_guide/globalization.rst | 2 + doc/src/user_guide/ha.rst | 2 + doc/src/user_guide/initialization.rst | 2 + doc/src/user_guide/installation.rst | 2 + doc/src/user_guide/introduction.rst | 2 + doc/src/user_guide/json_data_type.rst | 2 + doc/src/user_guide/lob_data.rst | 2 + doc/src/user_guide/plsql_execution.rst | 2 + doc/src/user_guide/soda.rst | 2 + doc/src/user_guide/sql_execution.rst | 2 + doc/src/user_guide/startup.rst | 2 + doc/src/user_guide/tracing.rst | 2 + doc/src/user_guide/troubleshooting.rst | 2 + doc/src/user_guide/tuning.rst | 210 +- doc/src/user_guide/two_phase_commit.rst | 2 + doc/src/user_guide/txn_management.rst | 2 + doc/src/user_guide/vector_data_type.rst | 2 + doc/src/user_guide/xml_data_type.rst | 2 + src/oracledb/__init__.py | 35 +- src/oracledb/aq.py | 242 +- src/oracledb/connect_params.py | 73 +- src/oracledb/connection.py | 47 +- src/oracledb/constructors.py | 42 +- src/oracledb/cursor.py | 33 +- src/oracledb/dataframe.py | 22 +- src/oracledb/dbobject.py | 61 +- src/oracledb/defaults.py | 132 +- src/oracledb/driver_mode.py | 23 +- src/oracledb/dsn.py | 5 +- src/oracledb/fetch_info.py | 101 +- src/oracledb/lob.py | 73 +- src/oracledb/pipeline.py | 193 +- src/oracledb/pool.py | 455 +-- src/oracledb/pool_params.py | 5 +- src/oracledb/soda.py | 275 +- src/oracledb/sparse_vector.py | 11 + src/oracledb/subscr.py | 179 +- src/oracledb/utils.py 
| 233 +- src/oracledb/var.py | 38 +- utils/fields.cfg | 2 +- utils/templates/connect_params.py | 66 +- utils/templates/connection.py | 43 +- utils/templates/pool.py | 451 +-- 79 files changed, 2890 insertions(+), 6620 deletions(-) diff --git a/doc/README.md b/doc/README.md index e65f3f0d..400cba0f 100644 --- a/doc/README.md +++ b/doc/README.md @@ -1,34 +1,41 @@ -Sphinx is used to generate the HTML for the python-oracledb documentation. +# python-oracledb Documentation Source -The generated python-oracledb documentation is at -https://python-oracledb.readthedocs.io/ +This directory contains the python-oracledb documentation source. It is written +using reST (re-Structured Text). The source files are processed using +[Sphinx](http://www.sphinx-doc.org) and can be turned into HTML, PDF or ePub +documentation. -This directory contains the documentation source. It is written using reST -(re-Structured Text). The source files are processed using Sphinx and can be -turned into HTML, PDF or ePub documentation. +**Visit https://python-oracledb.readthedocs.io/ for pre-built production and +development python-oracledb documentation** -If you wish to build documentation yourself, install Sphinx and the Read the -Docs theme. Sphinx is available on many Linux distributions as a pre-built -package. You can also install Sphinx and the Read the Docs theme using the -Python package manager "pip", for example:: +## Building Documentation Locally - python -m pip install -r requirements.txt +To build the documentation locally: -For more information on Sphinx, please visit this page: +1. Install Sphinx and the Read the Docs theme using the Python package manager + ``pip``, for example: -http://www.sphinx-doc.org + python -m pip install -r requirements.txt -Once Sphinx is installed, the supplied Makefile can be used to build the -different targets, for example to build the HTML documentation, run:: + You can alternatively install these from pre-built packages for your + operating system. - make +2. The "oracledb" module must be built and importable. This is needed for the + "autodoc" extension to create function signature documentation. -To make ePub documentation, run:: +3. Once Sphinx is installed, and "oracledb" can be imported by Python, use the + Makefile to build your desired documentation format. - make epub + To build the HTML documentation: -To make PDF documentation, run:: + make - make pdf + To make ePub documentation: -The program ``latexmk`` may be required by Sphinx to generate PDF output. + make epub + + To make PDF documentation: + + make pdf + + The program ``latexmk`` may be required by Sphinx to generate PDF output. diff --git a/doc/src/api_manual/aq.rst b/doc/src/api_manual/aq.rst index 6235f866..5d662480 100644 --- a/doc/src/api_manual/aq.rst +++ b/doc/src/api_manual/aq.rst @@ -1,71 +1,47 @@ .. _aq: -*************************** +************************** API: Advanced Queuing (AQ) -*************************** +************************** + +.. currentmodule:: oracledb See :ref:`aqusermanual` for more information about using AQ in python-oracledb. .. _queue: -Queue Objects -============= +Queue Class +=========== + +.. autoclass:: Queue -These objects are created using the :meth:`Connection.queue()` method and are -used to enqueue and dequeue messages. + A Queue object should be created using :meth:`Connection.queue()` and is + used to enqueue and dequeue messages. -.. dbapiobjectextension:: + .. dbapiobjectextension:: Queue Methods ------------- -.. 
method:: Queue.deqmany(max_num_messages) - - Dequeues up to the specified number of messages from the queue and returns - a list of these messages. Each element of the returned list is a - :ref:`message property ` object. +.. automethod:: Queue.deqmany For consistency and compliance with the PEP 8 naming style, the name of the method was changed from `deqMany()`. The old name will continue to work for a period of time. -.. method:: Queue.deqone() - - Dequeues at most one message from the queue. If a message is dequeued, it - will be a :ref:`message property ` object; otherwise, it will - be the value *None*. +.. automethod:: Queue.deqone For consistency and compliance with the PEP 8 naming style, the name of the method was changed from `deqOne()`. The old name will continue to work for a period of time. -.. method:: Queue.enqmany(messages) - - Enqueues multiple messages into the queue. The ``messages`` parameter must - be a sequence containing :ref:`message property ` objects - which have all had their payload attribute set to a value that the queue - supports. - - .. warning:: - - In python-oracledb Thick mode using Oracle Client libraries prior to - 21c, calling :meth:`Queue.enqmany()` in parallel on different - connections acquired from the same connection pool may fail due to - Oracle bug 29928074. To avoid this, do one of: upgrade the client - libraries, ensure that :meth:`Queue.enqmany()` is not run in parallel, - use standalone connections or connections from different pools, or make - multiple calls to :meth:`Queue.enqone()`. The function - :meth:`Queue.deqmany()` call is not affected. +.. automethod:: Queue.enqmany For consistency and compliance with the PEP 8 naming style, the name of the method was changed from `enqMany()`. The old name will continue to work for a period of time. -.. method:: Queue.enqone(message) - - Enqueues a single message into the queue. The message must be a - :ref:`message property` object which has had its payload - attribute set to a value that the queue supports. +.. automethod:: Queue.enqone For consistency and compliance with the PEP 8 naming style, the name of the method was changed from `enqOne()`. The old name will continue @@ -74,273 +50,124 @@ Queue Methods Queue Attributes ---------------- -.. attribute:: Queue.connection +.. autoproperty:: Queue.connection - This read-only attribute returns a reference to the connection object on - which the queue was created. - -.. attribute:: Queue.deqoptions - - This read-only attribute returns a reference to the :ref:`options - ` that will be used when dequeuing messages from the queue. +.. autoproperty:: Queue.deqoptions For consistency and compliance with the PEP 8 naming style, the name of the attribute was changed from ``deqOptions``. The old name will continue to work for a period of time. -.. attribute:: Queue.enqoptions - - This read-only attribute returns a reference to the :ref:`options - ` that will be used when enqueuing messages into the queue. +.. autoproperty:: Queue.enqoptions For consistency and compliance with the PEP 8 naming style, the name of the attribute was changed from ``enqOptions``. The old name will continue to work for a period of time. -.. attribute:: Queue.name - - This read-only attribute returns the name of the queue. +.. autoproperty:: Queue.name -.. attribute:: Queue.payload_type - - This read-only attribute returns the object type for payloads that can be - enqueued and dequeued. If using a JSON queue, this returns the value - ``"JSON"``. 
If using a raw queue, this returns the value *None*. +.. autoproperty:: Queue.payload_type For consistency and compliance with the PEP 8 naming style, the name of the attribute was changed from ``payloadType``. The old name will continue to work for a period of time. - .. _deqoptions: -Dequeue Options -=============== - -These objects are used to configure how messages are dequeued from queues. -An instance of this object is found in the attribute :attr:`Queue.deqOptions`. - -.. dbapiobjectextension:: - -.. attribute:: DeqOptions.condition - - This read-write attribute specifies a boolean expression similar to the where - clause of a SQL query. The boolean expression can include conditions on message - properties, user data properties and PL/SQL or SQL functions. The default - is to have no condition specified. - - -.. attribute:: DeqOptions.consumername - - This read-write attribute specifies the name of the consumer. Only messages - matching the consumer name will be accessed. If the queue is not set up for - multiple consumers this attribute should not be set. The default is to have - no consumer name specified. - - -.. attribute:: DeqOptions.correlation - - This read-write attribute specifies the correlation identifier of the message - to be dequeued. Special pattern-matching characters, such as the percent sign (%) - and the underscore (_), can be used. If multiple messages satisfy the - pattern, the order of dequeuing is indeterminate. The default is to have no - correlation specified. - - -.. attribute:: DeqOptions.deliverymode - - This write-only attribute specifies what types of messages should be - dequeued. It should be one of the values :data:`~oracledb.MSG_PERSISTENT` - (default), :data:`~oracledb.MSG_BUFFERED` or - :data:`~oracledb.MSG_PERSISTENT_OR_BUFFERED`. - - -.. attribute:: DeqOptions.mode +DeqOptions Class +================ - This read-write attribute specifies the locking behaviour associated - with the dequeue operation. It should be one of the values - :data:`~oracledb.DEQ_BROWSE`, - :data:`~oracledb.DEQ_LOCKED`, - :data:`~oracledb.DEQ_REMOVE` (default), or - :data:`~oracledb.DEQ_REMOVE_NODATA`. +.. autoclass:: DeqOptions + A DeqOptions object is used to configure how messages are dequeued + from queues. An instance of this object is found in the attribute + :attr:`Queue.deqoptions`. -.. attribute:: DeqOptions.msgid + .. dbapiobjectextension:: - This read-write attribute specifies the identifier of the message to be - dequeued. The default is to have no message identifier specified. +DeqOptions Attributes +--------------------- +.. autoproperty:: DeqOptions.condition -.. attribute:: DeqOptions.navigation +.. autoproperty:: DeqOptions.consumername - This read-write attribute specifies the position of the message that - is retrieved. It should be one of the values :data:`~oracledb.DEQ_FIRST_MSG`, - :data:`~oracledb.DEQ_NEXT_MSG` (default), or - :data:`~oracledb.DEQ_NEXT_TRANSACTION`. +.. autoproperty:: DeqOptions.correlation +.. autoproperty:: DeqOptions.deliverymode -.. attribute:: DeqOptions.transformation +.. autoproperty:: DeqOptions.mode - This read-write attribute specifies the name of the transformation that must - be applied after the message is dequeued from the database but before it is - returned to the calling application. The transformation must be created - using dbms_transform. The default is to have no transformation specified. +.. autoproperty:: DeqOptions.msgid +.. autoproperty:: DeqOptions.navigation -.. attribute:: DeqOptions.visibility +.. 
autoproperty:: DeqOptions.transformation - This read-write attribute specifies the transactional behavior of the dequeue - request. It should be one of the values :data:`~oracledb.DEQ_ON_COMMIT` (default) - or :data:`~oracledb.DEQ_IMMEDIATE`. This attribute is ignored when using - the :data:`~oracledb.DEQ_BROWSE` mode. Note the value of - :attr:`~Connection.autocommit` is always ignored. - - -.. attribute:: DeqOptions.wait - - This read-write attribute specifies the time to wait, in seconds, for a message - matching the search criteria to become available for dequeuing. One of the - values :data:`~oracledb.DEQ_NO_WAIT` or - :data:`~oracledb.DEQ_WAIT_FOREVER` can also be used. The default is - :data:`~oracledb.DEQ_WAIT_FOREVER`. +.. autoproperty:: DeqOptions.visibility +.. autoproperty:: DeqOptions.wait .. _enqoptions: -Enqueue Options -=============== - -These objects are used to configure how messages are enqueued into queues. An -instance of this object is found in the attribute :attr:`Queue.enqOptions`. - -.. dbapiobjectextension:: - -.. attribute:: EnqOptions.deliverymode - - This write-only attribute specifies what type of messages should be - enqueued. It should be one of the values :data:`~oracledb.MSG_PERSISTENT` - (default) or :data:`~oracledb.MSG_BUFFERED`. +EnqOptions Class +================ +.. autoclass:: EnqOptions -.. attribute:: EnqOptions.transformation + An EnqOptions object is used to configure how messages are enqueued into + queues. An instance of this object is found in the attribute + :attr:`Queue.enqoptions`. - This read-write attribute specifies the name of the transformation that - must be applied before the message is enqueued into the database. The - transformation must be created using dbms_transform. The default is to have - no transformation specified. + .. dbapiobjectextension:: +EnqOptions Attributes +--------------------- -.. attribute:: EnqOptions.visibility +.. autoproperty:: EnqOptions.deliverymode - This read-write attribute specifies the transactional behavior of the enqueue - request. It should be one of the values :data:`~oracledb.ENQ_ON_COMMIT` (default) - or :data:`~oracledb.ENQ_IMMEDIATE`. Note the value of - :attr:`~Connection.autocommit` is ignored. +.. autoproperty:: EnqOptions.transformation +.. autoproperty:: EnqOptions.visibility .. _msgproperties: -Message Properties -================== - -These objects are used to identify the properties of messages that are enqueued -and dequeued in queues. They are created by the method -:meth:`Connection.msgproperties()`. They are used by the methods -:meth:`Queue.enqone()` and :meth:`Queue.enqmany()` and returned by the methods -:meth:`Queue.deqone()` and :meth:`Queue.deqmany()`. - -.. dbapiobjectextension:: - -.. attribute:: MessageProperties.attempts - - This read-only attribute specifies the number of attempts that have been - made to dequeue the message. - - -.. attribute:: MessageProperties.correlation - - This read-write attribute specifies the correlation used when the message - was enqueued. - - -.. attribute:: MessageProperties.delay - - This read-write attribute specifies the number of seconds to delay an - enqueued message. Any integer is acceptable but the constant - :data:`~oracledb.MSG_NO_DELAY` can also be used indicating that the - message is available for immediate dequeuing. - - -.. attribute:: MessageProperties.deliverymode - - This read-only attribute specifies the type of message that was dequeued. 
- It will be one of the values :data:`~oracledb.MSG_PERSISTENT` or - :data:`~oracledb.MSG_BUFFERED`. - - -.. attribute:: MessageProperties.enqtime - - This read-only attribute specifies the time that the message was enqueued. - - -.. attribute:: MessageProperties.exceptionq - - This read-write attribute specifies the name of the queue to which the message - is moved if it cannot be processed successfully. Messages are moved if the - number of unsuccessful dequeue attempts has exceeded the maximum number of - retries or if the message has expired. All messages in the exception queue - are in the :data:`~oracledb.MSG_EXPIRED` state. The default value is the - name of the exception queue associated with the queue table. - - -.. attribute:: MessageProperties.expiration +MessageProperties Class +======================= - This read-write attribute specifies, in seconds, how long the message is - available for dequeuing. This attribute is an offset from the delay attribute. - Expiration processing requires the queue monitor to be running. Any integer is - accepted but the constant :data:`~oracledb.MSG_NO_EXPIRATION` can also be - used indicating that the message never expires. +.. autoclass:: MessageProperties + A MessageProperties object is used to identify the properties of messages + that are enqueued and dequeued in queues. They are created by the method + :meth:`Connection.msgproperties()`. They are used by the methods + :meth:`Queue.enqone()` and :meth:`Queue.enqmany()` and returned by the + methods :meth:`Queue.deqone()` and :meth:`Queue.deqmany()`. -.. attribute:: MessageProperties.msgid + .. dbapiobjectextension:: - This read-only attribute specifies the id of the message in the last queue - that enqueued or dequeued the message. If the message has never been - dequeued or enqueued, the value will be *None*. +MessageProperties Attributes +---------------------------- +.. autoproperty:: MessageProperties.attempts -.. attribute:: MessageProperties.payload +.. autoproperty:: MessageProperties.correlation - This read-write attribute identifies the payload that will be enqueued or the - payload that was dequeued when using a :ref:`queue `. When enqueuing, - the value is checked to ensure that it conforms to the type expected by that - queue. For RAW queues, the value can be a bytes object or a string. If the - value is a string it will first be converted to bytes in the encoding UTF-8. +.. autoproperty:: MessageProperties.delay +.. autoproperty:: MessageProperties.deliverymode -.. attribute:: MessageProperties.priority +.. autoproperty:: MessageProperties.enqtime - This read-write attribute specifies the priority of the message. A smaller - number indicates a higher priority. The priority can be any integer, including - negative numbers. The default value is *0*. +.. autoproperty:: MessageProperties.exceptionq +.. autoproperty:: MessageProperties.expiration -.. attribute:: MessageProperties.state +.. autoproperty:: MessageProperties.msgid - This read-only attribute specifies the state of the message at the time of - the dequeue. It will be one of the values :data:`~oracledb.MSG_WAITING`, - :data:`~oracledb.MSG_READY`, :data:`~oracledb.MSG_PROCESSED` or - :data:`~oracledb.MSG_EXPIRED`. +.. autoproperty:: MessageProperties.payload -.. attribute:: MessageProperties.recipients +.. autoproperty:: MessageProperties.priority - This read-write attribute specifies a list of recipient names that can be - associated with a message at the time of enqueuing the message. 
This allows a - limited set of recipients to dequeue each message. The recipient list associated - with the message overrides the queue subscriber list, if there is one. The - recipient names need not be in the subscriber list but can be, if desired. +.. autoproperty:: MessageProperties.recipients - To dequeue a message, the consumername attribute can be set to one of - the recipient names. The original message recipient list is not - available on dequeued messages. All recipients have to dequeue a - message before it gets removed from the queue. +.. autoproperty:: MessageProperties.state diff --git a/doc/src/api_manual/async_aq.rst b/doc/src/api_manual/async_aq.rst index a6df9d6c..058433dd 100644 --- a/doc/src/api_manual/async_aq.rst +++ b/doc/src/api_manual/async_aq.rst @@ -4,6 +4,8 @@ API: Async Advanced Queuing (AQ) ******************************** +.. currentmodule:: oracledb + See :ref:`aqusermanual` for more information about using AQ in python-oracledb. .. versionadded:: 3.1.0 @@ -14,105 +16,70 @@ See :ref:`aqusermanual` for more information about using AQ in python-oracledb. .. _asyncqueue: -AsyncQueue Objects -================== +AsyncQueue Class +================ + +.. autoclass:: AsyncQueue -These objects are created using the :meth:`AsyncConnection.queue()` method and -are used to enqueue and dequeue messages. + An AsyncQueue object should be created using + :meth:`AsyncConnection.queue()` and is used to enqueue and dequeue + messages. AsyncQueue Methods ------------------ -.. method:: AsyncQueue.deqmany(max_num_messages) - - Dequeues up to the specified number of messages from the queue and returns - a list of these messages. Each element of the returned list is a - :ref:`message property ` object. - -.. method:: AsyncQueue.deqone() - - Dequeues at most one message from the queue. If a message is dequeued, it - will be a :ref:`message property ` object; otherwise, - the value *None* will be returned. +.. automethod:: AsyncQueue.deqmany -.. method:: AsyncQueue.enqmany(messages) +.. automethod:: AsyncQueue.deqone - Enqueues multiple messages into the queue. The ``messages`` parameter must - be a sequence containing :ref:`message property ` objects - which have all had their payload attribute set to a value that the queue - supports. +.. automethod:: AsyncQueue.enqmany -.. method:: AsyncQueue.enqone(message) - - Enqueues a single message into the queue. The message must be a - :ref:`message property ` object which has had its - payload attribute set to a value that the queue supports. +.. automethod:: AsyncQueue.enqone AsyncQueue Attributes --------------------- -.. attribute:: AsyncQueue.connection - - This read-only attribute returns a reference to the connection object on - which the queue was created. - -.. attribute:: AsyncQueue.deqoptions - - This read-only attribute returns a reference to the :ref:`options - ` that will be used when dequeuing messages from the queue. +.. autoproperty:: AsyncQueue.connection -.. attribute:: AsyncQueue.enqoptions +.. autoproperty:: AsyncQueue.deqoptions - This read-only attribute returns a reference to the :ref:`options - ` that will be used when enqueuing messages into the queue. +.. autoproperty:: AsyncQueue.enqoptions -.. attribute:: AsyncQueue.name +.. autoproperty:: AsyncQueue.name - This read-only attribute returns the name of the queue. - -.. attribute:: AsyncQueue.payload_type - - This read-only attribute returns the object type for payloads that can be - enqueued and dequeued. 
If using a JSON queue, this returns the value - ``"JSON"``. If using a raw queue, this returns the value *None*. +.. autoproperty:: AsyncQueue.payload_type .. _asyncdeqoptions: -Dequeue Options -=============== - -.. note:: +DeqOptions Class +================ - These objects are used to configure how messages are dequeued from queues. - An instance of this object is found in the attribute - :attr:`AsyncQueue.deqoptions`. +A DeqOptions object is used to configure how messages are dequeued from +queues. An instance of this object is found in the attribute +:attr:`AsyncQueue.deqoptions`. See :ref:`deqoptions` for information on the supported attributes. .. _asyncenqoptions: -Enqueue Options -=============== +EnqOptions Class +================ -.. note:: - - These objects are used to configure how messages are enqueued into queues. - An instance of this object is found in the attribute - :attr:`AsyncQueue.enqoptions`. +An EnqOptions object is used to configure how messages are enqueued into +queues. An instance of this object is found in the attribute +:attr:`AsyncQueue.enqoptions`. See :ref:`enqoptions` for information on the supported attributes. .. _asyncmsgproperties: -Message Properties -================== - -.. note:: +MessageProperties Class +======================= - These objects are used to identify the properties of messages that are - enqueued and dequeued in queues. They are created by the method - :meth:`AsyncConnection.msgproperties()`. They are used by the method - :meth:`AsyncQueue.enqone()` and returned by the method - :meth:`AsyncQueue.deqone()`. +A MessageProperties object is used to identify the properties of messages +that are enqueued and dequeued in queues. They are created by the method +:meth:`AsyncConnection.msgproperties()`. They are used by the method +:meth:`AsyncQueue.enqone()` and returned by the method +:meth:`AsyncQueue.deqone()`. See :ref:`msgproperties` for information on the supported attributes. diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 58d1c4df..b4f1cb3e 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -6,6 +6,9 @@ API: AsyncConnection Objects .. currentmodule:: oracledb +AsyncConnection Class +===================== + .. autoclass:: AsyncConnection An AsyncConnection object should be created with @@ -66,7 +69,7 @@ AsyncConnection Methods .. automethod:: AsyncConnection.commit -.. automethod:: AsyncConnection.createlob(lob_type) +.. automethod:: AsyncConnection.createlob .. automethod:: AsyncConnection.cursor diff --git a/doc/src/api_manual/async_connection_pool.rst b/doc/src/api_manual/async_connection_pool.rst index c469136b..60022bf8 100644 --- a/doc/src/api_manual/async_connection_pool.rst +++ b/doc/src/api_manual/async_connection_pool.rst @@ -4,66 +4,37 @@ API: AsyncConnectionPool Objects ******************************** -An AsyncConnectionPool object can be created with -:meth:`oracledb.create_pool_async()`. +.. currentmodule:: oracledb -.. dbapiobjectextension:: +AsyncConnectionPool Class +========================= -.. versionadded:: 2.0.0 +.. autoclass:: AsyncConnectionPool -.. note:: + An AsyncConnectionPool object should be created with + :meth:`oracledb.create_pool_async()`. - AsyncConnectionPool objects are only supported in the python-oracledb Thin - mode. + .. dbapiobjectextension:: -.. _asynconnpoolmeth: - -AsyncConnectionPool Methods -=========================== - -.. 
method:: AsyncConnectionPool.acquire(user=None, password=None, cclass=None, \ - purity=oracledb.PURITY_DEFAULT, tag=None, matchanytag=False, \ - shardingkey=[], supershardingkey=[]) - - Acquires a connection from the pool and returns an - :ref:`asynchronous connection object `. - - If the pool is :ref:`homogeneous `, the ``user`` and - ``password`` parameters cannot be specified. If they are, an exception will - be raised. - - The ``cclass`` parameter, if specified, should be a string corresponding to - the connection class for :ref:`drcp`. + .. versionadded:: 2.0.0 - The ``purity`` parameter is expected to be one of - :data:`~oracledb.PURITY_NEW`, :data:`~oracledb.PURITY_SELF`, or - :data:`~oracledb.PURITY_DEFAULT`. - - The ``tag``, ``matchanytag``, ``shardingkey``, and ``supershardingkey`` - parameters are ignored in python-oracledb Thin mode. - -.. method:: AsyncConnectionPool.close(force=False) + .. note:: - Closes the pool now, rather than when the last reference to it is - released, which makes it unusable for further work. + AsyncConnectionPool objects are only supported in the python-oracledb + Thin mode. - If any connections have been acquired and not released back to the pool, - this method will fail unless the ``force`` parameter is set to *True*. +.. _asynconnpoolmeth: -.. method:: AsyncConnectionPool.drop(connection) +AsyncConnectionPool Methods +=========================== - Drops the connection from the pool which is useful if the connection is no - longer usable (such as when the session is killed). +.. automethod:: AsyncConnectionPool.acquire -.. method:: AsyncConnectionPool.release(connection, tag=None) +.. automethod:: AsyncConnectionPool.close - Releases the connection back to the pool now. The connection will be - unusable from this point forward. An Error exception will be raised if any - operation is attempted with the connection. Any cursors or LOBs created by - the connection will also be marked unusable and an Error exception will be - raised if any operation is attempted with them. +.. automethod:: AsyncConnectionPool.drop - The ``tag`` parameter is ignored in python-oracledb Thin mode. +.. automethod:: AsyncConnectionPool.release .. note:: @@ -79,132 +50,40 @@ AsyncConnectionPool Methods AsyncConnectionPool Attributes ============================== -.. attribute:: AsyncConnectionPool.busy - - This read-only attribute returns the number of connections currently - acquired. - -.. attribute:: AsyncConnectionPool.dsn +.. autoproperty:: AsyncConnectionPool.busy - This read-only attribute returns the TNS entry of the database to which a - connection has been established. +.. autoproperty:: AsyncConnectionPool.dsn -.. attribute:: AsyncConnectionPool.getmode +.. autoproperty:: AsyncConnectionPool.getmode - This read-write attribute determines how connections are returned from the - pool. If :data:`~oracledb.POOL_GETMODE_FORCEGET` is specified, a new - connection will be returned even if there are no free connections in the - pool. :data:`~oracledb.POOL_GETMODE_NOWAIT` will raise an exception if - there are no free connections are available in the pool. If - :data:`~oracledb.POOL_GETMODE_WAIT` is specified and there are no free - connections in the pool, the caller will wait until a free connection is - available. :data:`~oracledb.POOL_GETMODE_TIMEDWAIT` uses the value of - :data:`~ConnectionPool.wait_timeout` to determine how long the caller - should wait for a connection to become available before returning an error. +.. 
autoproperty:: AsyncConnectionPool.homogeneous -.. attribute:: AsyncConnectionPool.homogeneous +.. autoproperty:: AsyncConnectionPool.increment - This read-only boolean attribute indicates whether the pool is considered - :ref:`homogeneous ` or not. If the pool is not homogeneous, - different authentication can be used for each connection acquired from the - pool. +.. autoproperty:: AsyncConnectionPool.max -.. attribute:: AsyncConnectionPool.increment +.. autoproperty:: AsyncConnectionPool.max_lifetime_session - This read-only attribute returns the number of connections that will be - established when additional connections need to be created. +.. autoproperty:: AsyncConnectionPool.max_sessions_per_shard -.. attribute:: AsyncConnectionPool.max +.. autoproperty:: AsyncConnectionPool.min - This read-only attribute returns the maximum number of connections that the - pool can control. +.. autoproperty:: AsyncConnectionPool.name -.. attribute:: AsyncConnectionPool.max_lifetime_session +.. autoproperty:: AsyncConnectionPool.opened - This read-write attribute is the maximum length of time (in seconds) that a - pooled connection may exist since first being created. A value of *0* means - there is no limit. Connections become candidates for termination when they - are acquired or released back to the pool, and have existed for longer than - ``max_lifetime_session`` seconds. Connections that are in active use will - not be closed. In python-oracledb Thick mode, Oracle Client libraries 12.1 - or later must be used and, prior to Oracle Client 21, cleanup only occurs - when the pool is accessed. +.. autoproperty:: AsyncConnectionPool.ping_interval -.. attribute:: AsyncConnectionPool.max_sessions_per_shard +.. autoproperty:: AsyncConnectionPool.soda_metadata_cache - This read-write attribute returns the number of sessions that can be - created per shard in the pool. This attribute cannot be used in - python-oracledb Thin mode. - -.. attribute:: AsyncConnectionPool.min - - This read-only attribute returns the number of connections with which the - connection pool was created and the minimum number of connections that will - be controlled by the connection pool. - -.. attribute:: AsyncConnectionPool.name - - This read-only attribute returns the name assigned to the pool by Oracle. - -.. attribute:: AsyncConnectionPool.opened - - This read-only attribute returns the number of connections currently opened - by the pool. - -.. attribute:: AsyncConnectionPool.ping_interval - - This read-write integer attribute specifies the pool ping interval in - seconds. When a connection is acquired from the pool, a check is first made - to see how long it has been since the connection was put into the pool. If - this idle time exceeds ``ping_interval``, then a :ref:`round-trip - ` ping to the database is performed. If the connection is - unusable, it is discarded and a different connection is selected to be - returned by :meth:`AsyncConnectionPool.acquire()`. Setting - ``ping_interval`` to a negative value disables pinging. Setting it to *0* - forces a ping for every :meth:`AsyncConnectionPool.acquire()` and is not - recommended. - -.. attribute:: AsyncConnectionPool.soda_metadata_cache - - This read-write boolean attribute returns whether the SODA metadata cache - is enabled or not. This attribute cannot be used in python-oracledb Thin - mode. - -.. 
attribute:: AsyncConnectionPool.stmtcachesize - - This read-write attribute specifies the size of the statement cache that - will be used for connections obtained from the pool. Once a connection is - created, that connection’s statement cache size can only be changed by - setting the ``stmtcachesize`` attribute on the connection itself. +.. autoproperty:: AsyncConnectionPool.stmtcachesize See :ref:`Statement Caching ` for more information. -.. attribute:: AsyncConnectionPool.thin - - This attribute returns a boolean which indicates the python-oracledb mode - in which the pool was created. If the value of this attribute is *True*, it - indicates that the pool was created in the python-oracledb Thin mode. If - the value of this attribute is *False*, it indicates that the pool was - created in the python-oracledb Thick mode. - -.. attribute:: AsyncConnectionPool.timeout - - This read-only attribute is an integer that specifies the length of time - (in seconds) that a connection may remain idle in the pool before it is - terminated. This applies only when the pool has more than ``min`` - connections open, allowing it to shrink to the specified minimum size. The - default value is *0* seconds. A value of *0* means that there is no maximum - time. - -.. attribute:: AsyncConnectionPool.username +.. autoproperty:: AsyncConnectionPool.thin - This read-only attribute returns the name of the user which established the - connection to the database. +.. autoproperty:: AsyncConnectionPool.timeout -.. attribute:: AsyncConnectionPool.wait_timeout +.. autoproperty:: AsyncConnectionPool.username - This read-write attribute specifies the time (in milliseconds) that the - caller should wait for a connection to become available in the pool before - returning with an error. This value is only used if the ``getmode`` - parameter to :meth:`oracledb.create_pool_async()` was the value - :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. +.. autoproperty:: AsyncConnectionPool.wait_timeout diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index a00b656a..73a78131 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -6,6 +6,9 @@ API: AsyncCursor Objects .. currentmodule:: oracledb +AsyncCursor Class +================= + .. autoclass:: AsyncCursor An AsyncCursor object should be created with @@ -135,6 +138,8 @@ AsyncCursor Attributes .. autoproperty:: AsyncCursor.bindvars +.. autoproperty:: AsyncCursor.connection + .. autoproperty:: AsyncCursor.description .. autoproperty:: AsyncCursor.fetchvars diff --git a/doc/src/api_manual/async_lob.rst b/doc/src/api_manual/async_lob.rst index 1194f8ba..842dad86 100644 --- a/doc/src/api_manual/async_lob.rst +++ b/doc/src/api_manual/async_lob.rst @@ -4,96 +4,57 @@ API: AsyncLOB Objects ********************* -An AsyncLOB object can be created with :meth:`AsyncConnection.createlob()`. -Also, this object is returned whenever Oracle :data:`CLOB`, :data:`BLOB` and -:data:`BFILE` columns are fetched. +.. currentmodule:: oracledb -.. dbapiobjectextension:: +AsyncLOB Class +============== -See :ref:`lobdata` for more information about using LOBs. +.. autoclass:: AsyncLOB -.. note:: + An AsyncLOB object can be created with + :meth:`AsyncConnection.createlob()`. Also, this object is returned whenever + Oracle :data:`CLOB`, :data:`BLOB` and :data:`BFILE` columns are fetched. - AsyncLOB objects are only supported in the python-oracledb Thin mode. + .. dbapiobjectextension:: -.. 
_asynclobmeth: - -AsyncLOB Methods -================ - -.. method:: AsyncLOB.close() - - Closes the LOB. Call this when writing is completed so that the indexes - associated with the LOB can be updated -- but only if :meth:`~AsyncLOB.open()` - was called first. - -.. method:: AsyncLOB.fileexists() + See :ref:`lobdata` for more information about using LOBs. - Returns a boolean indicating if the file referenced by the BFILE type LOB - exists. + .. note:: -.. method:: AsyncLOB.getchunksize() + AsyncLOB objects are only supported in the python-oracledb Thin mode. - Returns the chunk size for the internal LOB. Reading and writing to the LOB - in chunks of multiples of this size will improve performance. - -.. method:: AsyncLOB.getfilename() - - Returns a two-tuple consisting of the directory alias and file name for a - BFILE type LOB. - -.. method:: AsyncLOB.isopen() - - Returns a boolean indicating if the LOB has been opened using the method - :meth:`~AsyncLOB.open()`. +.. _asynclobmeth: -.. method:: AsyncLOB.open() +AsyncLOB Methods +================ - Opens the LOB for writing. This will improve performance when writing to a - LOB in chunks and there are functional or extensible indexes associated - with the LOB. If this method is not called, each write will perform an open - internally followed by a close after the write has been completed. +.. automethod:: AsyncLOB.close -.. method:: AsyncLOB.read([offset=1, [amount]]) +.. automethod:: AsyncLOB.fileexists - Returns a portion (or all) of the data in the LOB object. Note that the - amount and offset are in bytes for BLOB and BFILE type LOBs and in UCS-2 - code points for CLOB and NCLOB type LOBs. UCS-2 code points are equivalent - to characters for all but supplemental characters. If supplemental - characters are in the LOB, the offset and amount will have to be chosen - carefully to avoid splitting a character. +.. automethod:: AsyncLOB.getchunksize -.. method:: AsyncLOB.setfilename(dir_alias, name) +.. automethod:: AsyncLOB.getfilename - Sets the directory alias and name of the BFILE type LOB. +.. automethod:: AsyncLOB.isopen -.. method:: AsyncLOB.size() +.. automethod:: AsyncLOB.open - Returns the size of the data in the LOB object. For BLOB and BFILE type - LOBs, this is the number of bytes. For CLOB and NCLOB type LOBs, this is the - number of UCS-2 code points. UCS-2 code points are equivalent to characters - for all but supplemental characters. +.. automethod:: AsyncLOB.read -.. method:: AsyncLOB.trim(new_size=0) +.. automethod:: AsyncLOB.setfilename - Trims the LOB to the new size. +.. automethod:: AsyncLOB.size -.. method:: AsyncLOB.write(data, offset=1) +.. automethod:: AsyncLOB.trim - Writes the data to the LOB object at the given offset. The offset is in - bytes for BLOB type LOBs and in UCS-2 code points for CLOB and NCLOB type - LOBs. UCS-2 code points are equivalent to characters for all but - supplemental characters. If supplemental characters are in the LOB, the - offset will have to be chosen carefully to avoid splitting a character. - Note that if you want to make the LOB value smaller, you must use the - :meth:`~AsyncLOB.trim()` function. +.. automethod:: AsyncLOB.write .. _asynclobattr: AsyncLOB Attributes =================== -.. attribute:: AsyncLOB.type +.. autoproperty:: AsyncLOB.type - This read-only attribute returns the type of the LOB as one of the - :ref:`database type constants `. + See :ref:`database type constants `. 
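A minimal usage sketch for the AsyncLOB methods documented above is shown below. The credentials, connect string, and sample data are placeholders, not values taken from this patch:

.. code-block:: python

    import asyncio

    import oracledb


    async def main():
        # Placeholder credentials and connect string.
        conn = await oracledb.connect_async(
            user="hr", password="hr_password", dsn="localhost/orclpdb1"
        )
        # Create a temporary CLOB and exercise a few AsyncLOB methods.
        lob = await conn.createlob(oracledb.DB_TYPE_CLOB)
        await lob.write("some character data")
        print(await lob.size())          # size in UCS-2 code points
        print(await lob.read(offset=1))  # read the full contents back
        await conn.close()


    asyncio.run(main())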
diff --git a/doc/src/api_manual/connect_params.rst b/doc/src/api_manual/connect_params.rst index ad09094b..5fbb7b96 100644 --- a/doc/src/api_manual/connect_params.rst +++ b/doc/src/api_manual/connect_params.rst @@ -4,68 +4,70 @@ API: ConnectParams Objects ************************** -The ConnectParams objects are created by :meth:`oracledb.ConnectParams()`. -See :ref:`usingconnparams` for more information. +.. currentmodule:: oracledb -.. dbapiobjectextension:: +ConnectParams Class +=================== -.. _connparamsmeth: +.. autoclass:: ConnectParams -ConnectParams Methods -===================== + See :ref:`usingconnparams` for more information. + + .. dbapiobjectextension:: + + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + + .. versionchanged:: 3.0.0 + + The ``instance_name``, ``use_sni``, ``thick_mode_dsn_passthrough`` and + ``extra_auth_params`` parameters were added. + + .. versionchanged:: 2.5.0 + + The ``program``, ``machine``, ``terminal``, ``osuser``, and + ``driver_name`` parameters were added. Support for ``edition`` and + ``appcontext`` was added to python-oracledb Thin mode. + + .. versionchanged:: 2.3.0 + + The default value of the ``retry_delay`` parameter was changed from 0 + seconds to 1 second. The default value of the ``tcp_connect_timeout`` + parameter was changed from 60.0 seconds to 20.0 seconds. The + ``ssl_version`` parameter was added. + + .. versionchanged:: 2.1.0 + + The ``pool_boundary`` and ``use_tcp_fast_open`` parameters were added. + + .. versionchanged:: 2.0.0 -.. method:: ConnectParams.copy() + The ``ssl_context`` and ``sdu`` parameters were added. - Creates a copy of the ConnectParams instance and returns it. + .. versionchanged:: 1.4.0 -.. method:: ConnectParams.get_connect_string() + The ``connection_id_prefix`` parameter was added. - Returns the connection string associated with the ConnectParams instance. -.. method:: ConnectParams.get_network_service_names() +.. _connparamsmeth: - Returns a list of the network service names found in the - :ref:`tnsnames.ora ` file which is inside the directory - that can be identified by the attribute :attr:`~ConnectParams.config_dir`. - If a tnsnames.ora file does not exist, then an exception is raised. +ConnectParams Methods +===================== -.. method:: ConnectParams.parse_connect_string(connect_string) +.. automethod:: ConnectParams.copy - Parses the connect string into its components and stores the parameters. +.. automethod:: ConnectParams.get_connect_string - The ``connect string`` parameter can be an Easy Connect string, name-value - pairs, or a simple alias which is looked up in ``tnsnames.ora``. Parameters - that are found in the connect string override any currently stored values. +.. automethod:: ConnectParams.get_network_service_names -.. method:: ConnectParams.parse_dsn_with_credentials(dsn) +.. automethod:: ConnectParams.parse_connect_string - Parses a DSN in the form /@ or in the form - / and returns a 3-tuple containing the parsed user, - password and connect string. Empty strings are returned as the value - *None*. +.. automethod:: ConnectParams.parse_dsn_with_credentials .. versionadded:: 1.3.0 -.. 
method:: ConnectParams.set(user=None, proxy_user=None, password=None, \ - newpassword=None, wallet_password=None, access_token=None, host=None, \ - port=None, protocol=None, https_proxy=None, https_proxy_port=None, \ - service_name=None, instance_name=None, sid=None, server_type=None, \ - cclass=None, purity=None, expire_time=None, retry_count=None, \ - retry_delay=None, tcp_connect_timeout=None, ssl_server_dn_match=None, \ - ssl_server_cert_dn=None, wallet_location=None, events=None, \ - externalauth=None, mode=None, disable_oob=None, stmtcachesize=None, \ - edition=None, tag=None, matchanytag=None, config_dir=None, \ - appcontext=[], shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=None, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=None, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_Name=None, handle=None) - - Sets the values for one or more of the parameters of a ConnectParams - object. +.. automethod:: ConnectParams.set .. versionchanged:: 3.2.0 @@ -98,29 +100,7 @@ ConnectParams Methods The ``connection_id_prefix`` parameter was added. -.. method:: ConnectParams.set_from_config(config) - - Sets the property values based on the specified configuration. This method - is intended for use with Centralized Configuration Providers. - - The ``config`` parameter is a dictionary which consists of the following - optional keys: "connect_descriptor", "user", "password", and "pyo". - - If the key "connect_descriptor" is specified, it is expected to be a - string, which will be parsed and the properties found within it are stored - in the ConnectParams instance. - - If the keys "user" or "password" are specified, and the parameters do not - already have a user or password set, these values will be stored; - otherwise, they will be ignored. The key "user" is expected to be a - string. The "key" password may be a string or it may be a dictionary which - will be examined by a :ref:`registered password type handler - ` to determine the actual password. - - If the key "pyo" is specified, it is expected to be a dictionary containing - keys corresponding to property names. Any property names accepted by the - ConnectParams class will be stored in the ConnectParams instance; all other - values will be ignored. +.. automethod:: ConnectParams.set_from_config .. versionadded:: 3.0.0 @@ -129,108 +109,55 @@ ConnectParams Methods ConnectParams Attributes ======================== -.. attribute:: ConnectParams.appcontext +All properties are read only. - This read-only attribute is a list that specifies the application context - used by the connection. It is a list of 3-tuples that includes the - namespace, name, and value. Each entry in the tuple is a string. +.. autoproperty:: ConnectParams.appcontext This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.cclass - - This read-only attribute is a string that specifies the connection class - to use for :ref:`drcp`. +.. autoproperty:: ConnectParams.cclass This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.config_dir +.. 
autoproperty:: ConnectParams.config_dir - This read-only attribute is a string that identifies the directory in which - the :ref:`optional configuration files ` are found. The - default is the value of :attr:`defaults.config_dir`. - -.. attribute:: ConnectParams.connection_id_prefix - - This read-only attribute is a string that is added to the beginning of the - generated ``connection_id`` that is sent to the database for - `tracing `__. +.. autoproperty:: ConnectParams.connection_id_prefix This attribute is only supported in python-oracledb Thin mode. .. versionadded:: 1.4.0 -.. attribute:: ConnectParams.debug_jdwp +.. autoproperty:: ConnectParams.debug_jdwp - This read-only attribute is a string with the format - "host=;port=" that specifies the host and port of the PL/SQL - debugger. This allows the Java Debug Wire Protocol (JDWP) to debug the - PL/SQL code invoked by the python-oracledb driver. The default value is the - value of the environment variable ``ORA_DEBUG_JDWP``. + See :ref:`applntracing` for more information. - This attribute is only supported in python-oracledb Thin mode. - - For python-oracledb Thick mode, set the ``ORA_DEBUG_JDWP`` environment - variable which has the same syntax. See :ref:`applntracing` for more - information. - -.. attribute:: ConnectParams.disable_oob - - This read-only attribute is a boolean that indicates whether out-of-band - breaks should be disabled. The default value is *False*. Note that this - value has no effect on Windows, which does not support this functionality. - - This attribute is only supported in python-oracledb Thin mode. +.. autoproperty:: ConnectParams.disable_oob For python-oracledb Thick mode, set the equivalent option in a ``sqlnet.ora`` file. -.. attribute:: ConnectParams.driver_name +.. autoproperty:: ConnectParams.driver_name - This read-only attribute is a string that specifies the driver used by the - client to connect to Oracle Database. This is an arbitrary value set by the - user in the :meth:`oracledb.ConnectParams()` method or the - :attr:`defaults.driver_name` attribute which is the default value. This is - the value shown in the CLIENT_DRIVER column of the - V$SESSION_CONNECT_INFO view. + This is an arbitrary value set by the user in the + :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.driver_name` + attribute which is the default value. This is the value shown in the + CLIENT_DRIVER column of the V$SESSION_CONNECT_INFO view. This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 2.5.0 -.. attribute:: ConnectParams.edition - - This read-only attribute is a string that specifies the edition to use - for the connection. This attribute cannot be used simultaneously with the - :attr:`ConnectParams.cclass` attribute. +.. autoproperty:: ConnectParams.edition This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.events - - This read-only attribute is a boolean that specifies whether the events - mode should be enabled. - - This attribute is needed for continuous query notification (CQN) and high - availability event notifications. The default value is *False*. - - This attribute is only supported in python-oracledb Thick mode. - -.. attribute:: ConnectParams.expire_time +.. autoproperty:: ConnectParams.events - This read-only attribute is an integer that returns the number of minutes - between the sending of keepalive probes. - - The default value is *0*. 
If this attribute is set to a value greater than - zero, it enables keepalive. +.. autoproperty:: ConnectParams.expire_time This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.externalauth - - This read-only attribute is a boolean that specifies whether external - authentication should be used. The default value is *False*. +.. autoproperty:: ConnectParams.externalauth For standalone connections, external authentication occurs when the ``user`` and ``password`` attributes are not used. If these attributes, @@ -239,173 +166,119 @@ ConnectParams Attributes This attribute is only supported in python-oracledb Thick mode. -.. attribute:: ConnectParams.extra_auth_params - - This read-only attribute is a dictionary containing the configuration - parameters necessary for Oracle Database authentication using - :ref:`Azure ` or - :ref:` ` cloud native authentication plugins. +.. autoproperty:: ConnectParams.extra_auth_params This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 3.0.0 -.. attribute:: ConnectParams.host - - This read-only attribute is a string that returns the name or IP address of - the machine hosting the database. +.. autoproperty:: ConnectParams.host This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.https_proxy - - This read-only attribute is a string that returns the name or IP address of - a proxy host that is to be used for tunneling secure connections. +.. autoproperty:: ConnectParams.https_proxy This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.https_proxy_port - - This read-only attribute is an integer that returns the port to be used to - communicate with the proxy host. The default value is *0*. +.. autoproperty:: ConnectParams.https_proxy_port This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.instance_name - - This read-only attribute is a string that returns the instance name of the - database. +.. autoproperty:: ConnectParams.instance_name This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 3.0.0 -.. attribute:: ConnectParams.machine +.. autoproperty:: ConnectParams.machine - This read-only attribute is a string that specifies the machine name of - the client connecting to Oracle Database. This is an arbitrary value set - by the user in the :meth:`oracledb.ConnectParams()` method or the - :attr:`defaults.machine` attribute which is the default value. This is the - value shown in the MACHINE column of the V$SESSION view. + This is an arbitrary value set by the user in the + :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.machine` + attribute which is the default value. This is the value shown in the + MACHINE column of the V$SESSION view. This attribute is only supported in python-oracledb Thin mode. .. versionadded:: 2.5.0 -.. attribute:: ConnectParams.matchanytag - - This read-only attribute is a boolean that specifies whether any tag can be - used when acquiring a connection from the pool. The default value is - *False*. - - This attribute is only supported in python-oracledb Thick mode. - -.. attribute:: ConnectParams.mode +.. autoproperty:: ConnectParams.matchanytag - This read-only attribute is an integer that specifies the authorization mode - to use. The default value is :data:`~oracledb.AUTH_MODE_DEFAULT`. +.. 
autoproperty:: ConnectParams.mode This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.osuser +.. autoproperty:: ConnectParams.osuser - This read-only attribute is a string that represents the operating system - user that initiates the database connection. This is an arbitrary value - set by the user in the :meth:`oracledb.ConnectParams()` method or the - :attr:`defaults.osuser` attribute which is the default value. This is the - value shown in the OSUSER column of the V$SESSION view. + This is an arbitrary value set by the user in the + :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.osuser` + attribute which is the default value. This is the value shown in the OSUSER + column of the V$SESSION view. This attribute is only supported in python-oracledb Thin mode. .. versionadded:: 2.5.0 -.. attribute:: ConnectParams.pool_boundary +.. autoproperty:: ConnectParams.pool_boundary - This read-only attribute is one of the strings *statement* or *transaction* - which indicates when pooled :ref:`DRCP ` or PRCP connections can be - returned to the pool. If the value is *statement*, then pooled DRCP or PRCP - connections are implicitly released back to the DRCP or PRCP pool when the - connection is stateless (that is, there are no active cursors, active - transactions, temporary tables, or temporary LOBs). If the value is - *transaction*, then pooled DRCP or PRCP connections are implicitly released - back to the DRCP or PRCP pool when either one of the methods - :meth:`Connection.commit()` or :meth:`Connection.rollback()` are called. - This attribute requires the use of DRCP or PRCP with Oracle Database 23ai - (or later). See :ref:`implicitconnpool` for more information. + If the value is *statement*, then pooled DRCP or PRCP connections are + implicitly released back to the DRCP or PRCP pool when the connection is + stateless (that is, there are no active cursors, active transactions, + temporary tables, or temporary LOBs). If the value is *transaction*, then + pooled DRCP or PRCP connections are implicitly released back to the DRCP or + PRCP pool when either one of the methods :meth:`Connection.commit()` or + :meth:`Connection.rollback()` are called. This attribute requires the use + of DRCP or PRCP with Oracle Database 23ai (or later). See + :ref:`implicitconnpool` for more information. This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 2.1.0 -.. attribute:: ConnectParams.pool_name +.. autoproperty:: ConnectParams.pool_name - This read-only attribute is a string that specifies the name of the pool - when using multiple DRCP pools with Oracle Database 23.4 or later. See - :ref:`DRCP Pool Names `. + See :ref:`DRCP Pool Names `. This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 3.2.0 -.. attribute:: ConnectParams.port - - This read-only attribute is an integer that returns the port number on - which the database listener is listening. The default value is *1521*. +.. autoproperty:: ConnectParams.port This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.program +.. autoproperty:: ConnectParams.program - This read-only attribute is a string that specifies the name of the - executable program or application connected to Oracle Database. This is an - arbitrary value set by the user in the :meth:`oracledb.ConnectParams()` - method or the :attr:`defaults.program` attribute which is the default - value. 
This is the value shown in the PROGRAM column of the - V$SESSION view. + This is an arbitrary value set by the user in the + :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.program` + attribute which is the default value. This is the value shown in the + PROGRAM column of the V$SESSION view. This attribute is supported in python-oracledb Thin mode. .. versionadded:: 2.5.0 -.. attribute:: ConnectParams.protocol - - This read-only attribute is a string that indicates whether unencrypted - network traffic or encrypted network traffic (TLS) is used and it can have - the value *tcp* or *tcps*. The default value is *tcp*. +.. autoproperty:: ConnectParams.protocol This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.proxy_user - - This read-only attribute is a string that specifies the name of the proxy - user to connect to. If this value is not specified, then it will be parsed - out of the user if the user attribute is in the form "user[proxy_user]". +.. autoproperty:: ConnectParams.proxy_user This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.purity +.. autoproperty:: ConnectParams.purity - This read-only attribute is an integer that returns the purity used for - :ref:`drcp`. When the value of this attribute is - :attr:`oracledb.PURITY_DEFAULT`, then any standalone connection will use - :attr:`oracledb.PURITY_NEW` and any pooled connection will use - :attr:`oracledb.PURITY_SELF`. The default value is - :data:`~oracledb.PURITY_DEFAULT`. + When the value of this attribute is :attr:`oracledb.PURITY_DEFAULT`, then + any standalone connection will use :attr:`oracledb.PURITY_NEW` and any + pooled connection will use :attr:`oracledb.PURITY_SELF`. The default value + is :data:`~oracledb.PURITY_DEFAULT`. This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.retry_count - - This read-only attribute is an integer that returns the number of times - that a connection attempt should be retried before the attempt is - terminated. The default value is *0*. +.. autoproperty:: ConnectParams.retry_count This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.retry_delay - - This read-only attribute is an integer that returns the number of seconds - to wait before making a new connection attempt. The default value is *1*. +.. autoproperty:: ConnectParams.retry_delay This attribute is supported in both python-oracledb Thin and Thick modes. @@ -414,14 +287,8 @@ ConnectParams Attributes The default value of this attribute was changed from *0* seconds to *1* second. -.. attribute:: ConnectParams.sdu +.. autoproperty:: ConnectParams.sdu - This read-only attribute is an integer that returns the requested size of - the Session Data Unit (SDU), in bytes. The value tunes internal buffers - used for communication to the database. Bigger values can increase - throughput for large queries or bulk data loads, but at the cost of higher - memory use. The SDU size that will actually be used is negotiated down to - the lower of this value and the database network SDU configuration value. See the `Database Net Services documentation `__ for more details. @@ -430,111 +297,51 @@ ConnectParams Attributes .. versionadded:: 2.0.0 -.. attribute:: ConnectParams.server_type - - This read-only attribute is a string that returns the type of server - connection that should be established. 
If specified, it should be one of - *dedicated*, *shared*, or *pooled*. +.. autoproperty:: ConnectParams.server_type This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.service_name - - This read-only attribute is a string that returns the service name of the - database. +.. autoproperty:: ConnectParams.service_name This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.shardingkey - - This read-only attribute is a list that specifies a sequence of strings, - numbers, bytes, or dates that identify the database shard to connect to. - See :ref:`connsharding`. - - This attribute is only supported in python-oracledb Thick mode. - -.. attribute:: ConnectParams.sid +.. autoproperty:: ConnectParams.shardingkey - This read-only attribute is a string that returns the SID of the database. - It is recommended to use the :attr:`ConnectParams.service_name` instead. +.. autoproperty:: ConnectParams.sid This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.ssl_context - - This read-only attribute is an `SSLContext object - `__ which is used - for connecting to the database using TLS. This SSL context will be modified - to include the private key or any certificates found in a separately - supplied wallet. This parameter should only be specified if the default - SSLContext object cannot be used. +.. autoproperty:: ConnectParams.ssl_context This attribute is only supported in python-oracledb Thin mode. .. versionadded:: 2.0.0 -.. attribute:: ConnectParams.ssl_server_cert_dn - - This read-only attribute is a string that returns the distinguished name - (DN), which should be matched with the server. If this value is specified, - then it is used for any verification. Otherwise, the hostname will be used. - - This value is ignored if the :attr:`~ConnectParams.ssl_server_dn_match` - attribute is not set to the value *True*. +.. autoproperty:: ConnectParams.ssl_server_cert_dn This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.ssl_server_dn_match - - This read-only attribute is a boolean that indicates whether the server - certificate distinguished name (DN) should be matched in addition to the - regular certificate verification that is performed. The default value is - *True*. - - Note that if the :attr:`~ConnectParams.ssl_server_cert_dn` attribute is not - specified, then host name matching is performed instead. +.. autoproperty:: ConnectParams.ssl_server_dn_match This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.ssl_version - - This read-only attribute is one of the constants *ssl.TLSVersion.TLSv1_2* - or *ssl.TLSVersion.TLSv1_3* which identifies the TLS protocol version - used. These constants are defined in the Python `ssl `__ module. +.. autoproperty:: ConnectParams.ssl_version This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 2.3.0 -.. attribute:: ConnectParams.stmtcachesize - - This read-only attribute is an integer that identifies the initial size of - the statement cache. The default is the value of - :attr:`defaults.stmtcachesize`. +.. autoproperty:: ConnectParams.stmtcachesize This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: ConnectParams.supershardingkey +.. 
autoproperty:: ConnectParams.supershardingkey - This read-only attribute is a list that specifies a sequence of strings, - numbers, bytes, or dates that identify the database shard to connect to. - See :ref:`connsharding`. +.. autoproperty:: ConnectParams.tag This attribute is only supported in python-oracledb Thick mode. -.. attribute:: ConnectParams.tag - - This read-only attribute is a string that identifies the type of connection - that should be returned from a pool. - - This attribute is only supported in python-oracledb Thick mode. - -.. attribute:: ConnectParams.tcp_connect_timeout - - This read-only attribute is a float that indicates the maximum number of - seconds to wait for a connection to be established to the database host. - The default value is *20.0*. +.. autoproperty:: ConnectParams.tcp_connect_timeout This attribute is supported in both python-oracledb Thin and Thick modes. @@ -543,68 +350,39 @@ ConnectParams Attributes The default value of this attribute was changed from *60.0* seconds to *20.0* seconds. -.. attribute:: ConnectParams.use_sni - - This read-only attribute is a boolean which indicates whether to use the - TLS Server Name Indicator (SNI) extension to bypass the second TLS - negotiation that would otherwise be required. +.. autoproperty:: ConnectParams.use_sni This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 3.0.0 -.. attribute:: ConnectParams.terminal +.. autoproperty:: ConnectParams.terminal - This read-only attribute is a string that specifies the terminal - identifier from which the connection originates. This is an arbitrary value - set by the user in the :meth:`oracledb.ConnectParams()` method or the - :attr:`defaults.terminal` attribute which is the default value. This is the - value shown in the TERMINAL column of the V$SESSION view. + This is an arbitrary value set by the user in the + :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.terminal` + attribute which is the default value. This is the value shown in the + TERMINAL column of the V$SESSION view. This attribute is only supported in python-oracledb Thin mode. .. versionadded:: 2.5.0 -.. attribute:: ConnectParams.thick_mode_dsn_passthrough - - This read-only attribute is a boolean which indicates whether the connect - string should be passed unchanged to Oracle Client libraries for parsing or - if python-oracledb should parse the connect string itself when using Thick - mode. The default value is the value of - :attr:`defaults.thick_mode_dsn_passthrough`. +.. autoproperty:: ConnectParams.thick_mode_dsn_passthrough This attribute is only supported in python-oracledb Thick mode. .. versionadded:: 3.0.0 -.. attribute:: ConnectParams.use_tcp_fast_open - - This read-only attribute is a boolean which indicates whether to use an - an `Oracle Autonomous Database Serverless (ADB-S) - `__ - specific feature that can reduce the latency in round-trips to the database - after a connection has been established. This feature is only available - with certain versions of ADB-S. The default value is *False*. +.. autoproperty:: ConnectParams.use_tcp_fast_open This attribute is supported in both python-oracledb Thin and Thick modes. .. versionadded:: 2.1.0 -.. attribute:: ConnectParams.user - - This read-only attribute is a string that specifies the name of the user to - connect to. +.. autoproperty:: ConnectParams.user This attribute is supported in both python-oracledb Thin and Thick modes. -.. 
attribute:: ConnectParams.wallet_location - - This read-only attribute is a string that specifies the directory where the - wallet can be found. - - In python-oracledb Thin mode, this attribute is the directory containing - the PEM-encoded wallet file, ewallet.pem. In python-oracledb Thick mode, - this attribute is the directory containing the file, cwallet.sso. +.. autoproperty:: ConnectParams.wallet_location This attribute is supported in both python-oracledb Thin and Thick modes. diff --git a/doc/src/api_manual/connection_pool.rst b/doc/src/api_manual/connection_pool.rst index 2de1ad9b..0cde3d79 100644 --- a/doc/src/api_manual/connection_pool.rst +++ b/doc/src/api_manual/connection_pool.rst @@ -4,18 +4,26 @@ API: ConnectionPool Objects *************************** -The new ConnectionPool class is synonymous with SessionPool. The SessionPool -class is deprecated in python-oracledb. The preferred function to create pools -is now :meth:`oracledb.create_pool()`. (The name SessionPool came from the -`Oracle Call Interface (OCI) session pool `__. This -implementation is only used in the python-oracledb Thick mode and is not -available in the Thin mode). +.. currentmodule:: oracledb -.. dbapiobjectextension:: +ConnectionPool Class +==================== -In python-oracledb, the type `pool` will show the class `oracledb.ConnectionPool`. -This only affects the name. +.. autoclass:: ConnectionPool + + The new ConnectionPool class is synonymous with SessionPool. The + SessionPool class is deprecated in python-oracledb. The preferred function + to create pools is now :meth:`oracledb.create_pool()`. (The name + SessionPool came from the `Oracle Call Interface (OCI) session pool + `__. This + implementation is only used in the python-oracledb Thick mode and is not + available in the Thin mode). + + .. dbapiobjectextension:: + +In python-oracledb, the type `pool` will show the class +`oracledb.ConnectionPool`. This only affects the name. The following code will continue to work providing backward compatibility with the obsolete cx_Oracle driver: @@ -43,59 +51,13 @@ deprecated in python-oracledb 1.0 and has been deprecated by the function ConnectionPool Methods ====================== -.. method:: ConnectionPool.acquire(user=None, password=None, cclass=None, \ - purity=oracledb.PURITY_DEFAULT, tag=None, matchanytag=False, \ - shardingkey=[], supershardingkey=[]) - - Acquires a connection from the session pool and returns a - :ref:`connection object `. - - If the pool is :ref:`homogeneous `, the ``user`` and - ``password`` parameters cannot be specified. If they are, an exception will - be raised. - - The ``cclass`` parameter, if specified, should be a string corresponding to - the connection class for :ref:`drcp`. - - The ``purity`` parameter is expected to be one of - :data:`~oracledb.PURITY_NEW`, :data:`~oracledb.PURITY_SELF`, or - :data:`~oracledb.PURITY_DEFAULT`. - - The ``tag`` parameter, if specified, is expected to be a string with - name=value pairs like "k1=v1;k2=v2" and will limit the connections that can - be returned from a connection pool unless the ``matchanytag`` parameter is - set to *True*. In that case, connections with the specified tag will be - preferred over others, but if no such connections are available, then a - connection with a different tag may be returned instead. In any case, - untagged connections will always be returned if no connections with the - specified tag are available. Connections are tagged when they are - :meth:`released ` back to the pool. 
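As a general illustration of acquiring and releasing pooled connections with :meth:`ConnectionPool.acquire()`, a short sketch follows. The credentials and connect string are placeholders:

.. code-block:: python

    import oracledb

    # Placeholder credentials and connect string.
    pool = oracledb.create_pool(
        user="hr", password="hr_password", dsn="localhost/orclpdb1",
        min=1, max=4, increment=1
    )

    # Using the connection as a context manager releases it back to the
    # pool when the block exits.
    with pool.acquire() as connection:
        with connection.cursor() as cursor:
            cursor.execute("select user from dual")
            print(cursor.fetchone())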
- - The ``shardingkey`` and ``supershardingkey`` parameters, if specified, are - expected to be a sequence of values which will be used to identify the - database shard to connect to. The key values can be strings, numbers, - bytes, or dates. See :ref:`connsharding`. +.. automethod:: ConnectionPool.acquire - When using the :ref:`connection pool cache `, calling - :meth:`oracledb.connect()` with a ``pool_alias`` parameter is the same as - calling ``pool.acquire()``. +.. automethod:: ConnectionPool.close -.. method:: ConnectionPool.close(force=False) +.. automethod:: ConnectionPool.drop - Closes the pool now, rather than when the last reference to it is - released, which makes it unusable for further work. - - If any connections have been acquired and not released back to the pool, - this method will fail unless the ``force`` parameter is set to *True*. - -.. method:: ConnectionPool.drop(connection) - - Drops the connection from the pool which is useful if the connection is no - longer usable (such as when the session is killed). - -.. method:: ConnectionPool.reconfigure([min, max, increment, getmode, \ - timeout, wait_timeout, max_lifetime_session, max_sessions_per_shard, \ - soda_metadata_cache, stmtcachesize, ping_interval]) +.. automethod:: ConnectionPool.reconfigure Reconfigures various parameters of a connection pool. The pool size can be altered with ``reconfigure()`` by passing values for @@ -145,172 +107,53 @@ ConnectionPool Methods See :ref:`Connection Pool Reconfiguration `. -.. method:: ConnectionPool.release(connection, tag=None) - - Releases the connection back to the pool now, rather than whenever __del__ - is called. The connection will be unusable from this point forward; an - Error exception will be raised if any operation is attempted with the - connection. Any cursors or LOBs created by the connection will also be - marked unusable and an Error exception will be raised if any operation is - attempted with them. - - Internally, references to the connection are held by cursor objects, - LOB objects, etc. Once all of these references are released, the connection - itself will be released back to the pool automatically. Either control - references to these related objects carefully or explicitly release - connections back to the pool in order to ensure sufficient resources are - available. - - If the tag is not *None*, it is expected to be a string with name=value - pairs like "k1=v1;k2=v2" and will override the value in the property - :attr:`Connection.tag`. If either :attr:`Connection.tag` or the tag - parameter are not *None*, the connection will be retagged when it is - released back to the pool. +.. automethod:: ConnectionPool.release .. _connpoolattr: ConnectionPool Attributes ========================= -.. attribute:: ConnectionPool.busy - - This read-only attribute returns the number of connections currently - acquired. - -.. attribute:: ConnectionPool.dsn +.. autoproperty:: ConnectionPool.busy - This read-only attribute returns the TNS entry of the database to which a - connection has been established. +.. autoproperty:: ConnectionPool.dsn -.. attribute:: ConnectionPool.getmode +.. autoproperty:: ConnectionPool.getmode - This read-write attribute determines how connections are returned from the - pool. If :data:`~oracledb.POOL_GETMODE_FORCEGET` is specified, a new - connection will be returned even if there are no free connections in the - pool. :data:`~oracledb.POOL_GETMODE_NOWAIT` will raise an exception if - there are no free connections are available in the pool. 
If - :data:`~oracledb.POOL_GETMODE_WAIT` is specified and there are no free - connections in the pool, the caller will wait until a free connection is - available. :data:`~oracledb.POOL_GETMODE_TIMEDWAIT` uses the value of - :data:`~ConnectionPool.wait_timeout` to determine how long the caller - should wait for a connection to become available before returning an error. +.. autoproperty:: ConnectionPool.homogeneous -.. attribute:: ConnectionPool.homogeneous +.. autoproperty:: ConnectionPool.increment - This read-only boolean attribute indicates whether the pool is considered - :ref:`homogeneous ` or not. If the pool is not homogeneous, - different authentication can be used for each connection acquired from the - pool. +.. autoproperty:: ConnectionPool.max -.. attribute:: ConnectionPool.increment - - This read-only attribute returns the number of connections that will be - established when additional connections need to be created. - -.. attribute:: ConnectionPool.max - - This read-only attribute returns the maximum number of connections that the - pool can control. - -.. attribute:: ConnectionPool.max_lifetime_session - - This read-write attribute is the maximum length of time (in seconds) that a - pooled connection may exist since first being created. A value of *0* means - there is no limit. Connections become candidates for termination when they - are acquired or released back to the pool, and have existed for longer than - ``max_lifetime_session`` seconds. Connections that are in active use will - not be closed. In python-oracledb Thick mode, Oracle Client libraries 12.1 - or later must be used and, prior to Oracle Client 21, cleanup only occurs - when the pool is accessed. +.. autoproperty:: ConnectionPool.max_lifetime_session .. versionchanged:: 3.0.0 This attribute was added to python-oracledb Thin mode. -.. attribute:: ConnectionPool.max_sessions_per_shard - - This read-write attribute returns the number of sessions that can be - created per shard in the pool. Setting this attribute greater than zero - specifies the maximum number of sessions in the pool that can be used for - any given shard in a sharded database. This lets connections in the pool be - balanced across the shards. A value of *0* will not set any maximum number - of sessions for each shard. This attribute is only available in Oracle - Client 18.3 and higher. - -.. attribute:: ConnectionPool.min - - This read-only attribute returns the number of connections with which the - connection pool was created and the minimum number of connections that will - be controlled by the connection pool. - -.. attribute:: ConnectionPool.name +.. autoproperty:: ConnectionPool.max_sessions_per_shard - This read-only attribute returns the name assigned to the pool by Oracle. +.. autoproperty:: ConnectionPool.min -.. attribute:: ConnectionPool.opened +.. autoproperty:: ConnectionPool.name - This read-only attribute returns the number of connections currently opened - by the pool. +.. autoproperty:: ConnectionPool.opened -.. attribute:: ConnectionPool.ping_interval - - This read-write integer attribute specifies the pool ping interval in - seconds. When a connection is acquired from the pool, a check is first made - to see how long it has been since the connection was put into the pool. If - this idle time exceeds ``ping_interval``, then a :ref:`round-trip - ` ping to the database is performed. If the connection is - unusable, it is discarded and a different connection is selected to be - returned by :meth:`ConnectionPool.acquire()`. 
Setting ``ping_interval`` to - a negative value disables pinging. Setting it to *0* forces a ping for - every :meth:`ConnectionPool.acquire()` and is not recommended. +.. autoproperty:: ConnectionPool.ping_interval Prior to cx_Oracle 8.2, the ping interval was fixed at *60* seconds. -.. attribute:: ConnectionPool.soda_metadata_cache - - This read-write boolean attribute returns whether the SODA metadata cache - is enabled or not. Enabling the cache significantly improves the - performance of methods :meth:`SodaDatabase.createCollection()` (when not - specifying a value for the ``metadata`` parameter) and - :meth:`SodaDatabase.openCollection()`. Note that the cache can become out - of date if changes to the metadata of cached collections are made - externally. +.. autoproperty:: ConnectionPool.soda_metadata_cache -.. attribute:: ConnectionPool.stmtcachesize - - This read-write attribute specifies the size of the statement cache that - will be used for connections obtained from the pool. Once a connection is - created, that connection’s statement cache size can only be changed by - setting the ``stmtcachesize`` attribute on the connection itself. +.. autoproperty:: ConnectionPool.stmtcachesize See :ref:`Statement Caching ` for more information. -.. attribute:: ConnectionPool.thin - - This attribute returns a boolean which indicates the python-oracledb mode - in which the pool was created. If the value of this attribute is *True*, it - indicates that the pool was created in the python-oracledb Thin mode. If - the value of this attribute is *False*, it indicates that the pool was - created in the python-oracledb Thick mode. - -.. attribute:: ConnectionPool.timeout - - This read-write attribute specifies the time (in seconds) after which idle - connections will be terminated in order to maintain an optimum number of - open connections. A value of *0* means that no idle connections are - terminated. Note that in python-oracledb Thick mode with older Oracle - Client Libraries, the termination only occurs when the pool is accessed. - - -.. attribute:: ConnectionPool.username +.. autoproperty:: ConnectionPool.thin - This read-only attribute returns the name of the user which established the - connection to the database. +.. autoproperty:: ConnectionPool.timeout -.. attribute:: ConnectionPool.wait_timeout +.. autoproperty:: ConnectionPool.username - This read-write attribute specifies the time (in milliseconds) that the - caller should wait for a connection to become available in the pool before - returning with an error. This value is only used if the ``getmode`` - parameter to :meth:`oracledb.create_pool()` was the value - :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. +.. autoproperty:: ConnectionPool.wait_timeout diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index bbb97465..044c64d2 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -4,6 +4,8 @@ API: Data Frames **************** +.. currentmodule:: oracledb + Python-oracledb can fetch directly to data frames that expose an Apache Arrow PyCapsule Interface. These can be used by many numerical and data analysis libraries. @@ -18,81 +20,60 @@ from Oracle Database types to Arrow data types. .. _oracledataframeobj: -DataFrame Objects -================= +DataFrame Class +=============== + +.. autoclass:: DataFrame -DataFrame objects are returned from the methods -:meth:`Connection.fetch_df_all()` and :meth:`Connection.fetch_df_batches()`. 
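A brief sketch of fetching query results as a data frame follows. The credentials, connect string, and queried table are illustrative placeholders:

.. code-block:: python

    import oracledb

    # Placeholder credentials and connect string; the table is illustrative.
    connection = oracledb.connect(
        user="hr", password="hr_password", dsn="localhost/orclpdb1"
    )

    df = connection.fetch_df_all(
        statement="select employee_id, last_name from employees"
    )
    print(df.num_rows(), df.num_columns())
    print(df.column_names())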
+ A DataFrame object is returned by the methods + :meth:`Connection.fetch_df_all()` and + :meth:`Connection.fetch_df_batches()`. -Each column in a DataFrame exposes an `Apache Arrow PyCapsule -`__ -interface, giving access to the underlying Arrow array. + Each column in a DataFrame exposes an `Apache Arrow PyCapsule + `__ interface, giving access to the underlying + Arrow array. -.. dbapiobjectextension:: + .. dbapiobjectextension:: -.. versionchanged:: 3.3.0 + .. versionchanged:: 3.3.0 - Removed the prefix "Oracle" from the class name. + Removed the prefix "Oracle" from the class name. -.. versionadded:: 3.0.0 + .. versionadded:: 3.0.0 .. _oracledataframemeth: DataFrame Methods ----------------- -.. method:: DataFrame.column_arrays() - - Returns a list of :ref:`ArrowArray ` objects, - each containing a select list column. - -.. method:: DataFrame.column_names() - - Returns a list of the column names in the data frame. - -.. method:: DataFrame.get_column(i) - - Returns an :ref:`ArrowArray ` object for the column - at the given index ``i``. +.. automethod:: DataFrame.column_arrays -.. method:: DataFrame.get_column_by_name(name) +.. automethod:: DataFrame.column_names - Returns an :ref:`ArrowArray ` object for the column - with the given name ``name``. +.. automethod:: DataFrame.get_column -.. method:: DataFrame.num_columns() +.. automethod:: DataFrame.get_column_by_name - Returns the number of columns in the data frame. +.. automethod:: DataFrame.num_columns -.. method:: DataFrame.num_rows() - - Returns the number of rows in the data frame. - -.. _oracledataframeattr: - -DataFrame Attributes --------------------- - -.. attribute:: DataFrame.metadata - - This read-only attribute returns the metadata for the data frame as a - dictionary with keys ``num_columns``, ``num_rows``, and ``num_chunks``, - showing the number of columns, rows, and chunks, respectively. The number - of chunks is always 1 in python-oracledb. +.. automethod:: DataFrame.num_rows .. _oraclearrowarrayobj: ArrowArray Objects ================== -ArrowArray objects are returned by :meth:`DataFrame.column_arrays()`. +.. autoclass:: ArrowArray + + ArrowArray objects are returned by :meth:`DataFrame.column_arrays()`. -These are used for conversion to `PyArrow Tables -`__, see -:ref:`dataframeformat`. + These are used for conversion to `PyArrow Tables + `__, see + :ref:`dataframeformat`. -.. versionchanged:: 3.3.0 + .. versionchanged:: 3.3.0 - Removed the prefix "Oracle" from the class name. + Removed the prefix "Oracle" from the class name. -.. versionadded:: 3.0.0 + .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/dbobject_type.rst b/doc/src/api_manual/dbobject_type.rst index 5743e25c..0420d4df 100644 --- a/doc/src/api_manual/dbobject_type.rst +++ b/doc/src/api_manual/dbobject_type.rst @@ -4,229 +4,123 @@ API: DbObjectType Objects ************************* -The DbObjectType object is returned by the :meth:`Connection.gettype()` call -and is available as the :data:`Variable.type` for variables containing Oracle -Database objects. +.. currentmodule:: oracledb -.. dbapiobjectextension:: - -DbObjectType Methods -==================== - -.. method:: DbObjectType([sequence]) - - The object type may be called directly and serves as an alternative way of - calling :meth:`~DbObjectType.newobject()`. - -.. method:: DbObjectType.newobject([sequence]) - - Returns a new Oracle object of the given type. This object can then be - modified by setting its attributes and then bound to a cursor for - interaction with Oracle. 
If the object type refers to a collection, a - sequence may be passed and the collection will be initialized with the - items in that sequence. - -DbObjectType Attributes -======================= - -.. attribute:: DbObjectType.attributes - - This read-only attribute returns a list of the :ref:`attributes - ` that make up the object type. +DbObjectType Class +================== +.. autoclass:: DbObjectType -.. attribute:: DbObjectType.element_type + A DbObjectType object is returned with :meth:`Connection.gettype()` call + and is available as the :data:`Var.type` for variables containing Oracle + Database objects. - This read-only attribute returns the type of elements found in collections - of this type, if :attr:`~DbObjectType.iscollection` is *True*; otherwise, - it returns *None*. If the collection contains objects, this will be - another object type; otherwise, it will be one of the - :ref:`database type constants `. + .. dbapiobjectextension:: +DbObjectType Methods +-------------------- -.. attribute:: DbObjectType.iscollection - - This read-only attribute returns a boolean indicating if the object type - refers to a collection or not. +.. automethod:: DbObjectType.__call__ +.. automethod:: DbObjectType.newobject -.. attribute:: DbObjectType.name +DbObjectType Attributes +----------------------- - This read-only attribute returns the name of the type. +.. autoproperty:: DbObjectType.attributes +.. autoproperty:: DbObjectType.element_type -.. attribute:: DbObjectType.package_name + See :ref:`database type constants `. - This read-only attribute returns the name of the package, if the type - refers to a PL/SQL type (otherwise, it returns the value *None*). +.. autoproperty:: DbObjectType.iscollection +.. autoproperty:: DbObjectType.name -.. attribute:: DbObjectType.schema +.. autoproperty:: DbObjectType.package_name - This read-only attribute returns the name of the schema that owns the type. +.. autoproperty:: DbObjectType.schema .. _dbobject: -DbObject Objects +DbObject Class ================ -The DbObject object is returned by the :meth:`DbObjectType.newobject()` call -and can be bound to variables of type :data:`~oracledb.OBJECT`. Attributes can -be retrieved and set directly. - -.. dbapiobjectextension:: - -DbObject Methods -++++++++++++++++ - -.. method:: DbObject.append(element) - - Appends an element to the collection object. If no elements exist in the - collection, this creates an element at index 0; otherwise, it creates an - element immediately following the highest index available in the - collection. - - -.. method:: DbObject.asdict() - - Returns a dictionary where the collection's indexes are the keys and the - elements are its values. - - -.. method:: DbObject.aslist() - - Returns a list of each of the collection's elements in index order. - - -.. method:: DbObject.copy() - - Creates a copy of the object and returns it. +.. autoclass:: DbObject + A DbObject object is returned by the :meth:`DbObjectType.newobject()` call + and can be bound to variables of type :data:`~oracledb.DB_TYPE_OBJECT`. + Attributes can be retrieved and set directly. -.. method:: DbObject.delete(index) + .. dbapiobjectextension:: - Deletes the element at the specified index of the collection. If the - element does not exist or is otherwise invalid, an error is raised. Note - that the indices of the remaining elements in the collection are not - changed. In other words, the delete operation creates holes in the - collection. - - -.. 
method:: DbObject.exists(index) - - Returns *True* or *False* indicating if an element exists in the collection - at the specified index. - - -.. method:: DbObject.extend(sequence) - - Appends all of the elements in the sequence to the collection. This is - the equivalent of performing :meth:`~DbObject.append()` for each element - found in the sequence. - - -.. method:: DbObject.first() - - Returns the index of the first element in the collection. If the collection - is empty, *None* is returned. - - -.. method:: DbObject.getelement(index) - - Returns the element at the specified index of the collection. If no element - exists at that index, an exception is raised. - - -.. method:: DbObject.last() - - Returns the index of the last element in the collection. If the collection - is empty, *None* is returned. +DbObject Methods +---------------- +.. automethod:: DbObject.append -.. method:: DbObject.next(index) +.. automethod:: DbObject.asdict - Returns the index of the next element in the collection following the - specified index. If there are no elements in the collection following the - specified index, *None* is returned. +.. automethod:: DbObject.aslist +.. automethod:: DbObject.copy -.. method:: DbObject.prev(index) +.. automethod:: DbObject.delete - Returns the index of the element in the collection preceding the specified - index. If there are no elements in the collection preceding the - specified index, *None* is returned. +.. automethod:: DbObject.exists +.. automethod:: DbObject.extend -.. method:: DbObject.setelement(index, value) +.. automethod:: DbObject.first - Sets the value in the collection at the specified index to the given value. +.. automethod:: DbObject.getelement +.. automethod:: DbObject.last -.. method:: DbObject.size() +.. automethod:: DbObject.next - Returns the number of elements in the collection. +.. automethod:: DbObject.prev +.. automethod:: DbObject.setelement -.. method:: DbObject.trim(num) +.. automethod:: DbObject.size - Removes the specified number of elements from the end of the collection. +.. automethod:: DbObject.trim DbObject Attributes -+++++++++++++++++++ - -.. attribute:: DbObject.Type - - This read-only attribute returns an ObjectType corresponding to the type - of object. +------------------- +.. autoproperty:: DbObject.type .. _dbobjectattr: -DbObjectAttribute Objects -========================= - -The elements of :attr:`DbObjectType.attributes` are instances of this type. - -.. dbapiobjectextension:: - -.. attribute:: DbObjectAttribute.max_size +DbObjectAttribute Class +======================= - This read-only attribute returns the maximum size (in bytes) of the - attribute when the attribute's type is one of - :data:`oracledb.DB_TYPE_CHAR`, :data:`oracledb.DB_TYPE_NCHAR`, - :data:`oracledb.DB_TYPE_NVARCHAR`, :data:`oracledb.DB_TYPE_RAW`, or - :data:`oracledb.DB_TYPE_VARCHAR`. For all other types the value returned is - *None*. +.. autoclass:: DbObjectAttr - .. versionadded:: 3.0.0 + The elements of :attr:`DbObjectType.attributes` are instances of this + type. + .. dbapiobjectextension:: -.. attribute:: DbObjectAttribute.name +DbObjectAttr Attributes +----------------------- - This read-only attribute returns the name of the attribute. +.. autoproperty:: DbObjectAttr.max_size + .. versionadded:: 3.0.0 -.. attribute:: DbObjectAttribute.precision +.. autoproperty:: DbObjectAttr.name - This read-only attribute returns the precision of the attribute when the - attribute's type is :data:`oracledb.DB_TYPE_NUMBER`. 
For all other types - the value returned is *None*. +.. autoproperty:: DbObjectAttr.precision .. versionadded:: 3.0.0 - -.. attribute:: DbObjectAttribute.scale - - This read-only attribute returns the scale of the attribute when the - attribute's type is :data:`oracledb.DB_TYPE_NUMBER`. For all other types - the value returned is *None*. +.. autoproperty:: DbObjectAttr.scale .. versionadded:: 3.0.0 +.. autoproperty:: DbObjectAttr.type -.. attribute:: DbObjectAttribute.type - - This read-only attribute returns the type of the attribute. This will be an - :ref:`Oracle Object Type ` if the variable binds - Oracle objects; otherwise, it will be one of the - :ref:`database type constants `. + See :ref:`database type constants `. diff --git a/doc/src/api_manual/defaults.rst b/doc/src/api_manual/defaults.rst index 4174e881..9029edf0 100644 --- a/doc/src/api_manual/defaults.rst +++ b/doc/src/api_manual/defaults.rst @@ -4,10 +4,15 @@ API: Defaults Object ******************** -This object contains attributes that can be used to adjust the behavior of the -python-oracledb driver. +.. currentmodule:: oracledb -All attributes are supported in Thin and Thick modes, subject to noted details. +Defaults Class +============== + +.. autoclass:: Defaults + + A Defaults object contains attributes that can be used to adjust the + behavior of the python-oracledb driver. An example of changing a default value is: @@ -20,17 +25,13 @@ An example of changing a default value is: Defaults Attributes =================== -.. attribute:: defaults.arraysize - - The default value for :attr:`Cursor.arraysize`. This is a query tuning - attribute, see :ref:`Tuning Fetch Performance `. - - This attribute has an initial value of *100*. +.. autoproperty:: Defaults.arraysize -.. attribute:: defaults.config_dir + This is an attribute for tuning the performance of fetching rows from + Oracle Database. It does not affect data insertion. See :ref:`Tuning Fetch + Performance `. - The directory in which the optional configuration file ``tnsnames.ora`` - will be read in python-oracledb Thin mode. +.. autoproperty:: Defaults.config_dir At time of ``import oracledb`` the value of ``oracledb.defaults.config_dir`` will be set to (first one wins): @@ -41,9 +42,7 @@ Defaults Attributes Otherwise, ``oracledb.defaults.config_dir`` will not be set. - This attribute is used in python-oracledb Thin mode. It is also used in - Thick mode if :attr:`defaults.thick_mode_dsn_passthrough` is *False*, see - :ref:`optnetfiles`. + See :ref:`optnetfiles`. .. versionchanged:: 3.0.0 @@ -54,34 +53,13 @@ Defaults Attributes Thick mode, the value of :attr:`defaults.config_dir` may get changed by python-oracledb. -.. attribute:: defaults.driver_name +.. autoproperty:: Defaults.driver_name - The default value that represents the driver used by the client to connect - to Oracle Database. This is the value used in the CLIENT_DRIVER column - of the V$SESSION_CONNECT_INFO view. - - This attribute has an initial value of *None*. It is used as required in - python-oracledb Thick and Thin mode. - - In python-oracledb Thick mode, this attribute is used if the - ``driver_name`` parameter is not specified in - :meth:`oracledb.init_oracle_client()`. In Thin mode, this attribute is - used if the ``driver_name`` parameter is not specified in - :meth:`oracledb.connect()`, :meth:`oracledb.connect_async()`, - :meth:`oracledb.create_pool()`, or :meth:`oracledb.create_pool_async()`. 
- If the value of this attribute is *None*, the value set when connecting in - python-oracledb Thick mode is like "python-oracledb thk : " and - in Thin mode is like "python-oracledb thn : ". See - :ref:`otherinit`. + See :ref:`otherinit`. .. versionadded:: 2.5.0 -.. attribute:: defaults.fetch_decimals - - Identifies whether numbers should be fetched as `decimal.Decimal - `__ values. - This can help avoid issues with converting numbers from Oracle Database's - decimal format to Python's binary format. +.. autoproperty:: Defaults.fetch_decimals An output type handler such as previously required in the obsolete cx_Oracle driver can alternatively be used to adjust the returned type. If @@ -91,15 +69,9 @@ Defaults Attributes ``oracledb.defaults.fetch_decimals`` is used to determine whether to return ``decimal.Decimal`` values. - This attribute has an initial value of *False*. +.. autoproperty:: Defaults.fetch_lobs -.. attribute:: defaults.fetch_lobs - - When the value of this attribute is *True*, then queries to LOB columns - return LOB locators. When the value of this attribute is *False*, then - CLOBs and NCLOBs are fetched as strings, and BLOBs are fetched as bytes. If - LOBs are larger than 1 GB, then this attribute should be set to *True* and - the LOBs should be streamed. See :ref:`lobdata`. + See :ref:`lobdata`. An output type handler such as the one previously required in the obsolete cx_Oracle driver can alternatively be used to adjust the returned type. If @@ -107,87 +79,33 @@ Defaults Attributes then that return variable is used. If the type handler returns *None*, then the value of ``oracledb.defaults.fetch_lobs`` is used. - The value of ``oracledb.defaults.fetch_lobs`` does not affect LOBs returned - as OUT binds. - - This attribute has an initial value of *True*. - -.. attribute:: defaults.machine - - The default value that represents the machine name of the client - connecting to Oracle Database. This is the value used in the - MACHINE column of the V$SESSION view. - - This attribute takes the host name where the application is running as its - initial value. - - This attribute is only used in python-oracledb Thin mode. +.. autoproperty:: Defaults.machine .. versionadded:: 2.5.0 -.. attribute:: defaults.osuser - - The default value that represents the operating system user that initiates - the database connection. This is the value used in the OSUSER - column of the V$SESSION view. - - This attribute takes the login name of the user as its initial value. - - This attribute is only used in python-oracledb Thin mode. +.. autoproperty:: Defaults.osuser .. versionadded:: 2.5.0 -.. attribute:: defaults.prefetchrows - - The default value for :attr:`Cursor.prefetchrows`. This is a query tuning - attribute, see :ref:`Tuning Fetch Performance `. - - This attribute is ignored when using :meth:`Connection.fetch_df_all()` or - :meth:`Connection.fetch_df_batches()` since these methods always set the - internal prefetch size to the relevant arraysize or size value. - - This attribute has an initial value of *2*. - -.. attribute:: defaults.program - - The default value that represents the program name connected to the - database. This is the value used in the PROGRAM column of the - V$SESSION view. +.. autoproperty:: Defaults.prefetchrows - This attribute has an initial value that is populated by `sys.executable - `__. + This is an attribute for tuning the performance of fetching rows from + Oracle Database. It does not affect data insertion. See :ref:`Tuning Fetch + Performance `. 
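For reference, a short sketch of adjusting these defaults before connecting
follows; the values shown are arbitrary examples, not recommendations:

.. code-block:: python

    import oracledb

    # query tuning: rows fetched per round-trip and rows prefetched on execute
    oracledb.defaults.arraysize = 500
    oracledb.defaults.prefetchrows = 501

    # fetch NUMBER columns as decimal.Decimal instead of float
    oracledb.defaults.fetch_decimals = True

    # fetch CLOB/NCLOB as str and BLOB as bytes instead of LOB locators
    oracledb.defaults.fetch_lobs = False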
- This attribute is only used in python-oracledb Thin mode. +.. autoproperty:: Defaults.program .. versionadded:: 2.5.0 -.. attribute:: defaults.stmtcachesize +.. autoproperty:: Defaults.stmtcachesize - The default value for :attr:`Connection.stmtcachesize` and - :attr:`ConnectionPool.stmtcachesize`. This is a tuning attribute, see - :ref:`stmtcache`. + This is a tuning attribute, see :ref:`stmtcache`. - This attribute has an initial value of *20*. - -.. attribute:: defaults.terminal - - The default value that represents the terminal identifier from which the - connection originates. This is the value used in the TERMINAL - column of the V$SESSION view. - - This attribute has an initial value of *unknown*. - - This attribute is only used in python-oracledb Thin mode. +.. autoproperty:: Defaults.terminal .. versionadded:: 2.5.0 -.. attribute:: defaults.thick_mode_dsn_passthrough - - The value that determines whether :ref:`connection strings ` - passed as the ``dsn`` parameter to :meth:`oracledb.connect()`, - :meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, and - :meth:`oracledb.create_pool_async()` in python-oracledb Thick mode will be - parsed by Oracle Client libraries or by python-oracledb itself. +.. autoproperty:: Defaults.thick_mode_dsn_passthrough When ``thick_mode_dsn_passthrough`` is the default value `True`, the behavior of python-oracledb 2.5 and earlier versions occurs: Thick mode @@ -222,10 +140,4 @@ Defaults Attributes is used for connection configuration, any :ref:`python-oracledb parameter values ` in the configuration will be used. - The value of ``thick_mode_dsn_passthrough`` is ignored in python-oracledb - Thin mode, which always parses all connect strings (including reading a - :ref:`tnsnames.ora ` file, if required). - - This attribute has an initial value of *True*. - .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/fetch_info.rst b/doc/src/api_manual/fetch_info.rst index 9522560a..adce4dda 100644 --- a/doc/src/api_manual/fetch_info.rst +++ b/doc/src/api_manual/fetch_info.rst @@ -4,150 +4,71 @@ API: FetchInfo Objects ********************** -FetchInfo objects are created internally when a query is executed. They are found -in the sequence :data:`Cursor.description`. There is one FetchInfo object for -each column. For compatibility with the Python Database API, this object -behaves as a 7-tuple containing the values for the attributes ``name``, -``type_code``, ``display_size``, ``internal_size``, ``precision``, ``scale``, -and ``null_ok`` in that order. For example, if ``fetch_info`` is of type -FetchInfo, then ``fetch_info[2]`` is the same as ``fetch_info.display_size``. +.. currentmodule:: oracledb -.. dbapiobjectextension:: +FetchInfo Class +=============== -.. versionadded:: 1.4.0 +.. autoclass:: FetchInfo -FetchInfo Attributes -==================== - -.. attribute:: FetchInfo.annotations - - This read-only attribute returns a dictionary containing the `annotations - `__ associated with the fetched column. If - there are no annotations, the value *None* is returned. Annotations - require Oracle Database 23ai. If using python-oracledb Thick mode, Oracle - Client 23ai is also required. - - .. versionadded:: 2.0.0 + A FetchInfo object is created internally when a query is executed. They + are found in the sequence :data:`Cursor.description`. There is one + FetchInfo object for each column. 
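As a brief illustration (the table and an existing ``connection`` object are
placeholders), each entry in ``Cursor.description`` can be inspected after a
query is executed:

.. code-block:: python

    with connection.cursor() as cursor:
        cursor.execute("select * from employees")
        for fetch_info in cursor.description:
            # attribute access and 7-tuple indexing refer to the same values
            print(fetch_info.name, fetch_info.type_code, fetch_info[2])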
For compatibility with the Python + Database API, this object behaves as a 7-tuple containing the values for + the attributes ``name``, ``type_code``, ``display_size``, + ``internal_size``, ``precision``, ``scale``, and ``null_ok`` in that order. + For example, if ``fetch_info`` is of type FetchInfo, then ``fetch_info[2]`` + is the same as ``fetch_info.display_size``. -.. attribute:: FetchInfo.display_size + .. dbapiobjectextension:: - This read-only attribute returns the display size of the column as mandated - by the Python Database API. + .. versionadded:: 1.4.0 -.. attribute:: FetchInfo.domain_name +FetchInfo Attributes +==================== - This read-only attribute returns the name of the `data use case domain - `__ associated with the fetched column. If - there is no data use case domain, the value *None* is returned. `Data - use case domains `__ require Oracle Database 23ai. - If using python-oracledb Thick mode, Oracle Client 23ai is also required. +.. autoproperty:: FetchInfo.annotations .. versionadded:: 2.0.0 -.. attribute:: FetchInfo.domain_schema +.. autoproperty:: FetchInfo.display_size - This read-only attribute returns the schema of the `data use case domain - `__ associated with the fetched column. If - there is no data use case domain, the value *None* is returned. `Data - use case domains `__ require Oracle Database 23ai. - If using python-oracledb Thick mode, Oracle Client 23ai is also required. +.. autoproperty:: FetchInfo.domain_name .. versionadded:: 2.0.0 -.. attribute:: FetchInfo.internal_size - - This read-only attribute returns the internal size of the column as - mandated by the Python Database API. +.. autoproperty:: FetchInfo.domain_schema -.. attribute:: FetchInfo.is_json + .. versionadded:: 2.0.0 - This read-only attribute returns whether the column is known to contain - JSON data. This will be *True* when the type code is - :data:`oracledb.DB_TYPE_JSON` as well as when an "IS JSON" constraint is - enabled on LOB and VARCHAR2 columns. +.. autoproperty:: FetchInfo.internal_size -.. attribute:: FetchInfo.is_oson +.. autoproperty:: FetchInfo.is_json - This read-only attribute returns whether the column is known to contain - binary encoded `OSON `__ data. This will be *True* - when an "IS JSON FORMAT OSON" check constraint is enabled on BLOB columns. +.. autoproperty:: FetchInfo.is_oson .. versionadded:: 2.1.0 -.. attribute:: FetchInfo.name - - This read-only attribute returns the name of the column as mandated by the - Python Database API. - -.. attribute:: FetchInfo.null_ok +.. autoproperty:: FetchInfo.name - This read-only attribute returns whether nulls are allowed in the column as - mandated by the Python Database API. +.. autoproperty:: FetchInfo.null_ok -.. attribute:: FetchInfo.precision +.. autoproperty:: FetchInfo.precision - This read-only attribute returns the precision of the column as mandated by - the Python Database API. +.. autoproperty:: FetchInfo.scale -.. attribute:: FetchInfo.scale +.. autoproperty:: FetchInfo.type - This read-only attribute returns the scale of the column as mandated by - the Python Database API. +.. autoproperty:: FetchInfo.type_code -.. attribute:: FetchInfo.type - - This read-only attribute returns the type of the column. This will be an - :ref:`Oracle Object Type ` if the column contains Oracle - objects; otherwise, it will be one of the :ref:`database type constants - ` defined at the module level. - - -.. 
attribute:: FetchInfo.type_code - - This read-only attribute returns the type of the column as mandated by the - Python Database API. The type will be one of the :ref:`database type - constants ` defined at the module level. - -.. attribute:: FetchInfo.vector_dimensions - - This read-only attribute returns the number of dimensions required by - VECTOR columns. If the column is not a VECTOR column or allows for any - number of dimensions, the value returned is *None*. +.. autoproperty:: FetchInfo.vector_dimensions .. versionadded:: 2.2.0 -.. attribute:: FetchInfo.vector_format - - This read-only attribute returns the storage format used by VECTOR - columns. The value of this attribute can be: - - - :data:`oracledb.VECTOR_FORMAT_BINARY` which represents 8-bit unsigned - integers - - :data:`oracledb.VECTOR_FORMAT_INT8` which represents 8-bit signed - integers - - :data:`oracledb.VECTOR_FORMAT_FLOAT32` which represents 32-bit - floating-point numbers - - :data:`oracledb.VECTOR_FORMAT_FLOAT64` which represents 64-bit - floating-point numbers - - If the column is not a VECTOR column or allows for any type of storage, - the value returned is *None*. +.. autoproperty:: FetchInfo.vector_format .. versionadded:: 2.2.0 -.. attribute:: FetchInfo.vector_is_sparse - - This read-only attribute returns a boolean that indicates whether the - vector is sparse or not. - - If the column contains vectors that are SPARSE, the value returned is - True. If the column contains vectors that are DENSE, the value returned is - False. If the column is not a VECTOR column, the value returned is ``None``. +.. autoproperty:: FetchInfo.vector_is_sparse .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/lob.rst b/doc/src/api_manual/lob.rst index 5fcdceb9..8fe6bb59 100644 --- a/doc/src/api_manual/lob.rst +++ b/doc/src/api_manual/lob.rst @@ -4,88 +4,54 @@ API: LOB Objects **************** -A LOB object can be created with :meth:`Connection.createlob()`. See -:ref:`lobdata` for more information about using LOBs. +.. currentmodule:: oracledb -This object is returned by default whenever Oracle :data:`CLOB`, :data:`BLOB` -and :data:`BFILE` columns are fetched. +LOB Class +========= -.. dbapiobjectextension:: +.. autoclass:: LOB -LOB Methods -=========== - -.. method:: LOB.close() - - Closes the LOB. Call this when writing is completed so that the indexes - associated with the LOB can be updated -- but only if :meth:`~LOB.open()` - was called first. - -.. method:: LOB.fileexists() + A LOB object should be created with :meth:`Connection.createlob()`. - Returns a boolean indicating if the file referenced by the BFILE type LOB - exists. + This object is returned by default whenever Oracle :data:`CLOB`, + :data:`BLOB`, and :data:`BFILE` columns are fetched. -.. method:: LOB.getchunksize() + This type object is the Python type of :data:`DB_TYPE_BLOB`, + :data:`DB_TYPE_BFILE`, :data:`DB_TYPE_CLOB` and :data:`DB_TYPE_NCLOB` data + that is returned from cursors. - Returns the chunk size for the internal LOB. Reading and writing to the LOB - in chunks of multiples of this size will improve performance. + .. dbapiobjectextension:: -.. method:: LOB.getfilename() + See :ref:`lobdata` for more information about using LOBs. - Returns a two-tuple consisting of the directory alias and file name for a - BFILE type LOB. - -.. method:: LOB.isopen() - - Returns a boolean indicating if the LOB has been opened using the method - :meth:`~LOB.open()`. - -.. method:: LOB.open() +LOB Methods +=========== - Opens the LOB for writing. 
This will improve performance when writing to a - LOB in chunks and there are functional or extensible indexes associated - with the LOB. If this method is not called, each write will perform an open - internally followed by a close after the write has been completed. +.. automethod:: LOB.close -.. method:: LOB.read([offset=1, [amount]]) +.. automethod:: LOB.fileexists - Returns a portion (or all) of the data in the LOB object. Note that the - amount and offset are in bytes for BLOB and BFILE type LOBs and in UCS-2 - code points for CLOB and NCLOB type LOBs. UCS-2 code points are equivalent - to characters for all but supplemental characters. If supplemental - characters are in the LOB, the offset and amount will have to be chosen - carefully to avoid splitting a character. +.. automethod:: LOB.getchunksize -.. method:: LOB.setfilename(dir_alias, name) +.. automethod:: LOB.getfilename - Sets the directory alias and name of the BFILE type LOB. +.. automethod:: LOB.isopen -.. method:: LOB.size() +.. automethod:: LOB.open - Returns the size of the data in the LOB object. For BLOB and BFILE type - LOBs, this is the number of bytes. For CLOB and NCLOB type LOBs, this is the - number of UCS-2 code points. UCS-2 code points are equivalent to characters - for all but supplemental characters. +.. automethod:: LOB.read -.. method:: LOB.trim(new_size=0) +.. automethod:: LOB.setfilename - Trims the LOB to the new size. +.. automethod:: LOB.size -.. method:: LOB.write(data, offset=1) +.. automethod:: LOB.trim - Writes the data to the LOB object at the given offset. The offset is in - bytes for BLOB type LOBs and in UCS-2 code points for CLOB and NCLOB type - LOBs. UCS-2 code points are equivalent to characters for all but - supplemental characters. If supplemental characters are in the LOB, the - offset will have to be chosen carefully to avoid splitting a character. - Note that if you want to make the LOB value smaller, you must use the - :meth:`~LOB.trim()` function. +.. automethod:: LOB.write LOB Attributes ============== -.. attribute:: LOB.type +.. autoproperty:: LOB.type - This read-only attribute returns the type of the LOB as one of the - :ref:`database type constants `. + See :ref:`database type constants `. diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 50e1e2b8..cb1868a3 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -11,424 +11,15 @@ API: python-oracledb Module Oracledb Methods ================ -.. function:: Binary(string) +.. autofunction:: Binary - Constructs an object holding a binary (long) string value. +.. autofunction:: clientversion - -.. function:: clientversion() - - Returns the version of the client library being used as a 5-tuple. The five - values are the major version, minor version, update number, patch number, - and port update number. - - This function can only be called when python-oracledb is in Thick - mode. Using it in Thin mode will throw an exception. See - :ref:`enablingthick`. + See :ref:`enablingthick`. .. dbapimethodextension:: -.. 
function:: connect(dsn=None, pool=None, pool_alias=None, conn_class=None, \ - params=None, user=None, proxy_user=None, password=None, \ - newpassword=None, wallet_password=None, access_token=None, host=None, \ - port=1521, protocol="tcp", https_proxy=None, https_proxy_port=0, \ - service_name=None, instance_name=None, sid=None, server_type=None, \ - cclass=None, purity=oracledb.PURITY_DEFAULT, expire_time=0, \ - retry_count=0, retry_delay=1, tcp_connect_timeout=20.0, \ - ssl_server_dn_match=True, ssl_server_cert_dn=None, \ - wallet_location=None, events=False, externalauth=False, \ - mode=oracledb.AUTH_MODE_DEFAULT, disable_oob=False, \ - stmtcachesize=oracledb.defaults.stmtcachesize, edition=None, \ - tag=None, matchanytag=False, config_dir=oracledb.defaults.config_dir, \ - appcontext=[], shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=8192, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=False, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_name=None, handle=0) - - Constructor for creating a connection to the database. Returns a - :ref:`Connection Object `. All parameters are optional and can be - specified as keyword parameters. See :ref:`standaloneconnection` - information about connections. - - Not all parameters apply to both python-oracledb Thin and :ref:`Thick - ` modes. - - Some values, such as the database host name, can be specified as - parameters, as part of the connect string, and in the params object. If a - ``dsn`` (data source name) parameter is passed, the python-oracledb Thick - mode will use the string to connect, otherwise a connection string is - internally constructed from the individual parameters and params object - values, with the individual parameters having precedence. In - python-oracledb's default Thin mode, a connection string is internally used - that contains all relevant values specified. The precedence in Thin mode - is that values in any ``dsn`` parameter override values passed as - individual parameters, which themselves override values set in the - ``params`` parameter object. Similar precedence rules also apply to other - values. - - The ``dsn`` (data source name) parameter is an :ref:`Oracle Net Services - Connection String `. It can also be a string in the format - ``user/password@connect_string``. - - The ``pool`` parameter is expected to be a pool object. This parameter - was deprecated in python-oracledb 3.0.0. Use - :meth:`ConnectionPool.acquire()` instead since the use of this parameter - is the equivalent of calling this method. - - The ``pool_alias`` parameter is expected to be a string which indicates the - name of the previously created pool in the :ref:`connection pool cache - ` from which to acquire the connection. This is identical to - calling :meth:`ConnectionPool.acquire()`. When ``pool_alias`` is used, - ``connect()`` supports the same parameters as - :meth:`~ConnectionPool.acquire()` and has the same behavior. - - The ``conn_class`` parameter is expected to be Connection or a subclass of - Connection. - - The ``params`` parameter is expected to be of type :ref:`ConnectParams - ` and contains connection parameters that will be used when - establishing the connection. 
If this parameter is not specified, the - additional keyword parameters will be used to internally create an instance - of ConnectParams. If both the params parameter and additional keyword - parameters are specified, the values in the keyword parameters have - precedence. Note that if a ``dsn`` is also supplied in python-oracledb Thin - mode, then the values of the parameters specified (if any) within the - ``dsn`` will override the values passed as additional keyword parameters, - which themselves override the values set in the ``params`` parameter - object. - - The ``user`` parameter is expected to be a string which indicates the name - of the user to connect to. This value is used in both the python-oracledb - Thin and Thick modes. - - The ``proxy_user`` parameter is expected to be a string which indicates the - name of the proxy user to connect to. If this value is not specified, it - will be parsed out of user if user is in the form "user[proxy_user]". This - value is used in both the python-oracledb Thin and Thick modes. - - The ``password`` parameter expected to be a string which indicates the - password for the user. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``newpassword`` parameter is expected to be a string which indicates - the new password for the user. The new password will take effect - immediately upon a successful connection to the database. This value is - used in both the python-oracledb Thin and Thick modes. - - The ``wallet_password`` parameter is expected to be a string which - indicates the password to use to decrypt the PEM-encoded wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode. The - ``wallet_password`` parameter is not needed for cwallet.sso files that are - used in the python-oracledb Thick mode. - - The ``access_token`` parameter is expected to be a string or a 2-tuple or - a callable. If it is a string, it specifies an Azure AD OAuth2 token used - for Open Authorization (OAuth 2.0) token based authentication. If it is a - 2-tuple, it specifies the token and private key strings used for Oracle - Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``host`` parameter is expected to be a string which specifies the name - or IP address of the machine hosting the listener, which handles the - initial connection to the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``port`` parameter is expected to be an integer which indicates the - port number on which the listener is listening. The default value is - *1521*. This value is used in both the python-oracledb Thin and Thick - modes. - - The ``protocol`` parameter is expected to be one of the strings *tcp* or - *tcps* which indicates whether to use unencrypted network traffic or - encrypted network traffic (TLS). The default value is *tcp*. This value is - used in both the python-oracledb Thin and Thick modes. - - The ``https_proxy`` parameter is expected to be a string which indicates - the name or IP address of a proxy host to use for tunneling secure - connections. This value is used in both the python-oracledb Thin and Thick - modes. 
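To make the parameter descriptions above concrete, a minimal sketch of a
standalone connection follows; the credentials, host, and service name are
placeholders only:

.. code-block:: python

    import oracledb

    # individual parameters; an equivalent dsn such as
    # "dbhost.example.com:1521/orclpdb" could be passed instead, in which case
    # values inside the dsn take precedence in Thin mode
    connection = oracledb.connect(
        user="hr",
        password="hr_password",
        host="dbhost.example.com",
        port=1521,
        service_name="orclpdb",
    )

    with connection.cursor() as cursor:
        cursor.execute("select sysdate from dual")
        print(cursor.fetchone())

    connection.close()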
- - The ``https_proxy_port`` parameter is expected to be an integer which - indicates the port that is to be used to communicate with the proxy host. - The default value is *0*. This value is used in both the python-oracledb - Thin and Thick modes. - - The ``service_name`` parameter is expected to be a string which indicates - the service name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``instance_name`` parameter is expected to be a string which indicates - the instance name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``sid`` parameter is expected to be a string which indicates the SID of - the database. It is recommended to use ``service_name`` instead. This value - is used in both the python-oracledb Thin and Thick modes. - - The ``server_type`` parameter is expected to be a string that indicates the - type of server connection that should be established. If specified, it - should be one of *dedicated*, *shared*, or *pooled*. This value is used in - both the python-oracledb Thin and Thick modes. - - The ``cclass`` parameter is expected to be a string that identifies the - connection class to use for :ref:`drcp`. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``purity`` parameter is expected to be one of the - :ref:`oracledb.PURITY_* ` constants that identifies the - purity to use for DRCP. This value is used in both the python-oracledb Thin - and Thick modes. The purity will internally default to - :data:`~oracledb.PURITY_SELF` for pooled connections. For standalone - connections, the purity will internally default to - :data:`~oracledb.PURITY_NEW`. - - The ``expire_time`` parameter is expected to be an integer which indicates - the number of minutes between the sending of keepalive probes. If this - parameter is set to a value greater than zero it enables keepalive. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *0*. - - The ``retry_count`` parameter is expected to be an integer that identifies - the number of times that a connection attempt should be retried before the - attempt is terminated. This value is used in both the python-oracledb Thin - and Thick modes. The default value is *0*. - - The ``retry_delay`` parameter is expected to be an integer that identifies - the number of seconds to wait before making a new connection attempt. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *1*. - - The ``tcp_connect_timeout`` parameter is expected to be a float that - indicates the maximum number of seconds to wait for establishing a - connection to the database host. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *20.0*. - - The ``ssl_server_dn_match`` parameter is expected to be a boolean that - indicates whether the server certificate distinguished name (DN) should be - matched in addition to the regular certificate verification that is - performed. Note that if the ``ssl_server_cert_dn`` parameter is not - provided, host name matching is performed instead. This value is used in - both the python-oracledb Thin and Thick modes. The default value is *True*. - - The ``ssl_server_cert_dn`` parameter is expected to be a string that - indicates the distinguished name (DN) which should be matched with the - server. This value is ignored if the ``ssl_server_dn_match`` parameter is - not set to the value *True*. 
This value is used in both the python-oracledb - Thin and Thick modes. - - The ``wallet_location`` parameter is expected to be a string that - identifies the directory where the wallet can be found. In python-oracledb - Thin mode, this must be the directory of the PEM-encoded wallet file, - ewallet.pem. In python-oracledb Thick mode, this must be the directory of - the file, cwallet.sso. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``events`` parameter is expected to be a boolean that specifies whether - the events mode should be enabled. This value is only used in the - python-oracledb Thick mode and is ignored in the Thin mode. This parameter - is needed for continuous query notification and high availability event - notifications. The default value is *False*. - - The ``externalauth`` parameter is a boolean that specifies whether external - authentication should be used. This value is only used in the - python-oracledb Thick mode and is ignored in the Thin mode. The default - value is *False*. For standalone connections, external authentication - occurs when the ``user`` and ``password`` attributes are not used. If these - attributes are not used, you can optionally set the ``externalauth`` - attribute to *True*, which may aid code auditing. - - If the ``mode`` parameter is specified, it must be one of the - :ref:`connection authorization modes ` - which are defined at the module level. This value is used in both the - python-oracledb Thin and Thick modes. The default value is - :data:`oracledb.AUTH_MODE_DEFAULT`. - - The ``disable_oob`` parameter is expected to be a boolean that indicates - whether out-of-band breaks should be disabled. This value is only used - in the python-oracledb Thin mode and has no effect on Windows which - does not support this functionality. The default value is *False*. - - The ``stmtcachesize`` parameter is expected to be an integer which - specifies the initial size of the statement cache. This value is used in - both the python-oracledb Thin and Thick modes. The default is the value of - :attr:`defaults.stmtcachesize`. - - The ``edition`` parameter is expected to be a string that indicates the - edition to use for the connection. It requires Oracle Database 11.2, or - later. This parameter cannot be used simultaneously with the ``cclass`` - parameter. - - The ``tag`` parameter is expected to be a string that identifies the type - of connection that should be returned from a pool. This value is only used - in the python-oracledb Thick mode and is ignored in the Thin mode. - - The ``matchanytag`` parameter is expected to be a boolean specifying - whether any tag can be used when acquiring a connection from the pool. This - value is only used in the python-oracledb Thick mode when acquiring a - connection from a pool. This value is ignored in the python-oracledb Thin - mode. The default value is *False*. - - The ``config_dir`` parameter is expected to be a string that indicates the - directory in which :ref:`optional configuration files ` are - found. The default is the value of :attr:`defaults.config_dir`. - - The ``appcontext`` parameter is expected to be a list of 3-tuples that - identifies the application context used by the connection. This parameter - should contain namespace, name, and value and each entry in the tuple - should be a string. - - The ``shardingkey`` parameter and ``supershardingkey`` parameters, if - specified, are expected to be a sequence of values which identifies the - database shard to connect to. 
The key values can be a list of strings, - numbers, bytes, or dates. These values are only used in the - python-oracledb Thick mode and are ignored in the Thin mode. See - :ref:`connsharding`. - - The ``debug_jdwp`` parameter is expected to be a string with the format - `host=;port=` that specifies the host and port of the PL/SQL - debugger. This allows using the Java Debug Wire Protocol (JDWP) to debug - PL/SQL code called by python-oracledb. This value is only used in the - python-oracledb Thin mode. For python-oracledb Thick mode, set the - ``ORA_DEBUG_JDWP`` environment variable which has the same syntax. For more - information, see :ref:`applntracing`. - - The ``connection_id_prefix`` parameter is expected to be a string and is - added to the beginning of the generated ``connection_id`` that is sent to - the database for `tracing `__. This value - is only used in the python-oracledb Thin mode. - - The ``ssl_context`` parameter is expected to be an `SSLContext object - `__ which is used - for connecting to the database using TLS. This SSL context will be - modified to include the private key or any certificates found in a - separately supplied wallet. This parameter should only be specified if - the default SSLContext object cannot be used. This value is only used in - the python-oracledb Thin mode. - - The ``sdu`` parameter is expected to be an integer that returns the - requested size of the Session Data Unit (SDU), in bytes. The value tunes - internal buffers used for communication to the database. Bigger values can - increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is negotiated - down to the lower of this value and the database network SDU configuration - value. See the `Database Net Services documentation `__ for more details. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *8192* bytes. - - The ``pool_boundary`` parameter is expected to be one of the strings - *statement* or *transaction* which indicates when pooled :ref:`DRCP ` - or PRCP connections can be returned to the pool. If the value is - *statement*, then pooled DRCP or PRCP connections are implicitly released - back to the DRCP or PRCP pool when the connection is stateless (that is, - there are no active cursors, active transactions, temporary tables, or - temporary LOBs). If the value is *transaction*, then pooled DRCP or PRCP - connections are implicitly released back to the DRCP or PRCP pool when - either one of the methods :meth:`Connection.commit()` or - :meth:`Connection.rollback()` are called. This parameter requires the use - of DRCP or PRCP with Oracle Database 23ai (or later). See - :ref:`implicitconnpool` for more information. This value is used in both - the python-oracledb Thin and Thick modes. - - The ``use_tcp_fast_open`` parameter is expected to be a boolean which - indicates whether to use TCP Fast Open which is an `Oracle Autonomous - Database Serverless (ADB-S) `__ specific feature that can - reduce the latency in round-trips to the database after a connection has - been established. This feature is only available with certain versions of - ADB-S. This value is used in both python-oracledb Thin and Thick modes. - The default value is *False*. - - The ``ssl_version`` parameter is expected to be one of the constants - *ssl.TLSVersion.TLSv1_2* or *ssl.TLSVersion.TLSv1_3* which identifies the - TLS protocol version used. 
These constants are defined in the Python - `ssl `__ module. This - parameter can be specified when establishing connections with the protocol - *tcps*. This value is used in both python-oracledb Thin and Thick modes. - The value *ssl.TLSVersion.TLSv1_3* requires Oracle Database 23ai. If you - are using python-oracledb Thick mode, Oracle Client 23ai is additionally - required. - - The ``use_sni`` parameter is expected to be a boolean which indicates - whether to use the TLS Server Name Indication (SNI) extension to bypass the - second TLS negotiation that would otherwise be required. This parameter is - used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `Database Net - Services documentation - `__ for more details. - - The ``program`` parameter is expected to be a string which specifies the - name of the executable program or application connected to Oracle - Database. This value is only used in the python-oracledb Thin mode. The - default is the value of :attr:`defaults.program`. - - The ``machine`` parameter is expected to be a string which specifies the - machine name of the client connecting to Oracle Database. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.machine`. - - The ``terminal`` parameter is expected to be a string which specifies the - terminal identifier from which the connection originates. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.terminal`. - - The ``osuser`` parameter is expected to be a string which specifies the - operating system user that initiates the database connection. This value - is only used in the python-oracledb Thin mode. The default value is the - value of :attr:`defaults.osuser`. - - The ``driver_name`` parameter is expected to be a string which specifies - the driver used by the client to connect to Oracle Database. This value - is used in both the python-oracledb Thin and Thick modes. The default is - the value of :attr:`defaults.driver_name`. - - The ``thick_mode_dsn_passthrough`` parameter is expected to be a boolean - which indicates whether the connect string should be passed unchanged to - the Oracle Client libraries for parsing when using python-oracledb Thick - mode. If this parameter is set to *False* in Thick mode, connect strings - are parsed by python-oracledb itself and a generated connect descriptor is - sent to the Oracle Client libraries. This value is only used in the - python-oracledb Thick mode. The default value is the value of - :attr:`defaults.thick_mode_dsn_passthrough`. For more information, see - :ref:`usingconfigfiles`. - - The ``extra_auth_params`` parameter is expected to be a dictionary - containing the configuration parameters necessary for Oracle Database - authentication using :ref:`OCI ` or :ref:`Azure - ` cloud native authentication plugins. This value is - used in both the python-oracledb Thin and Thick modes. See - :ref:`tokenauth`. - - The ``pool_name`` parameter is expected to be a string which specifies the - name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This parameter can be used in both python-oracledb Thin and Thick - modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` - value in effect is *True*, it can only be used if the ``dsn`` parameter is - not specified. 
For Thick mode, you may prefer to set the Oracle Net - Services parameter `POOL_NAME `__ parameter in the - :ref:`easy connect string ` or - :ref:`connect descriptor `. See - :ref:`DRCP Pool Names `. - - If the ``handle`` parameter is specified, it must be of type OCISvcCtx\* - and is only of use when embedding Python in an application (like - PowerBuilder) which has already made the connection. The connection thus - created should *never* be used after the source handle has been closed or - destroyed. This value is only used in the python-oracledb Thick mode and - is ignored in the Thin mode. It should be used with extreme caution. The - default value is *0*. +.. autofunction:: connect .. versionchanged:: 3.2.0 @@ -466,330 +57,7 @@ Oracledb Methods The ``connection_id_prefix`` parameter was added. -.. function:: connect_async(dsn=None, pool=None, pool_alias=None, \ - conn_class=None, params=None, user=None, proxy_user=None, \ - password=None, newpassword=None, wallet_password=None, \ - access_token=None, host=None, port=1521, protocol="tcp", \ - https_proxy=None, https_proxy_port=0, service_name=None, \ - instance_name=None, sid=None, server_type=None, cclass=None, \ - purity=oracledb.PURITY_DEFAULT, expire_time=0, retry_count=0, \ - retry_delay=1, tcp_connect_timeout=20.0, ssl_server_dn_match=True, \ - ssl_server_cert_dn=None, wallet_location=None, events=False, \ - externalauth=False, mode=oracledb.AUTH_MODE_DEFAULT, \ - disable_oob=False, stmtcachesize=oracledb.defaults.stmtcachesize, \ - edition=None, tag=None, matchanytag=False, \ - config_dir=oracledb.defaults.config_dir, appcontext=[], \ - shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=8192, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=False, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_name=None, handle=0) - - Constructor for creating a connection to the database. Returns an - :ref:`AsyncConnection Object `. All parameters are optional - and can be specified as keyword parameters. See - :ref:`standaloneconnection` information about connections. - - This method can only be used in python-oracledb Thin mode. - - When connecting to Oracle Autonomous Database, use Python 3.11, or later. - - .. versionadded:: 2.0.0 - - Some values, such as the database host name, can be specified as - parameters, as part of the connect string, and in the params object. - The precedence is that values in the ``dsn`` parameter override values - passed as individual parameters, which themselves override values set in - the ``params`` parameter object. Similar precedence rules also apply to - other values. - - The ``dsn`` (data source name) parameter is an :ref:`Oracle Net Services - Connection String `. It can also be a string in the format - ``user/password@connect_string``. - - The ``pool`` parameter is expected to be an AsyncConnectionPool object. - This parameter was deprecated in python-oracledb 3.0.0. Use - :meth:`AsyncConnectionPool.acquire()` instead since the - use of this parameter is the equivalent of calling this method. 
- - The ``pool_alias`` parameter is expected to be a string which indicates the - name of the previously created pool in the :ref:`connection pool cache - ` from which to acquire the connection. This is identical to - calling :meth:`AsyncConnectionPool.acquire()`. When ``pool_alias`` is used, - ``connect_async()`` supports the same parameters as - :meth:`~AsyncConnectionPool.acquire()` and has the same behavior. - - The ``conn_class`` parameter is expected to be AsyncConnection or a - subclass of AsyncConnection. - - The ``params`` parameter is expected to be of type :ref:`ConnectParams - ` and contains connection parameters that will be used when - establishing the connection. If this parameter is not specified, the - additional keyword parameters will be used to create an instance of - ConnectParams. If both the params parameter and additional keyword - parameters are specified, the values in the keyword parameters have - precedence. Note that if a ``dsn`` is also supplied, then the values of the - parameters specified (if any) within the ``dsn`` will override the values - passed as additional keyword parameters, which themselves override the - values set in the ``params`` parameter object. - - The ``user`` parameter is expected to be a string which indicates the name - of the user to connect to. - - The ``proxy_user`` parameter is expected to be a string which indicates the - name of the proxy user to connect to. If this value is not specified, it - will be parsed out of user if user is in the form "user[proxy_user]". - - The ``password`` parameter expected to be a string which indicates the - password for the user. - - The ``newpassword`` parameter is expected to be a string which indicates - the new password for the user. The new password will take effect - immediately upon a successful connection to the database. - - The ``wallet_password`` parameter is expected to be a string which - indicates the password to use to decrypt the PEM-encoded wallet, if it is - encrypted. - - The ``access_token`` parameter is expected to be a string or a 2-tuple or - a callable. If it is a string, it specifies an Azure AD OAuth2 token used - for Open Authorization (OAuth 2.0) token based authentication. If it is a - 2-tuple, it specifies the token and private key strings used for Oracle - Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired. - - The ``host`` parameter is expected to be a string which specifies the name - or IP address of the machine hosting the listener, which handles the - initial connection to the database. - - The ``port`` parameter is expected to be an integer which indicates the - port number on which the listener is listening. The default value is - *1521*. - - The ``protocol`` parameter is expected to be one of the strings *tcp* or - *tcps* which indicates whether to use unencrypted network traffic or - encrypted network traffic (TLS). The default value is *tcp*. - - The ``https_proxy`` parameter is expected to be a string which indicates - the name or IP address of a proxy host to use for tunneling secure - connections. - - The ``https_proxy_port`` parameter is expected to be an integer which - indicates the port that is to be used to communicate with the proxy host. - The default value is *0*. 
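A short asyncio sketch using the parameters described above; the credentials
and connect string are placeholders:

.. code-block:: python

    import asyncio
    import oracledb

    async def main():
        connection = await oracledb.connect_async(
            user="hr",
            password="hr_password",
            dsn="dbhost.example.com:1521/orclpdb",
        )
        cursor = connection.cursor()
        await cursor.execute("select user from dual")
        print(await cursor.fetchone())
        await connection.close()

    asyncio.run(main())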
- - The ``service_name`` parameter is expected to be a string which indicates - the service name of the database. - - The ``instance_name`` parameter is expected to be a string which indicates - the instance name of the database. - - The ``sid`` parameter is expected to be a string which indicates the SID of - the database. It is recommended to use ``service_name`` instead. - - The ``server_type`` parameter is expected to be a string that indicates the - type of server connection that should be established. If specified, it - should be one of *dedicated*, *shared*, or *pooled*. - - The ``cclass`` parameter is expected to be a string that identifies the - connection class to use for :ref:`drcp`. - - The ``purity`` parameter is expected to be one of the - :ref:`oracledb.PURITY_* ` constants that identifies the - purity to use for DRCP. The purity will internally default to - :data:`~oracledb.PURITY_SELF` for pooled connections. For standalone - connections, the purity will internally default to - :data:`~oracledb.PURITY_NEW`. - - The ``expire_time`` parameter is expected to be an integer which indicates - the number of minutes between the sending of keepalive probes. If this - parameter is set to a value greater than zero it enables keepalive. The - default value is *0*. - - The ``retry_count`` parameter is expected to be an integer that identifies - the number of times that a connection attempt should be retried before the - attempt is terminated. The default value is *0*. - - The ``retry_delay`` parameter is expected to be an integer that identifies - the number of seconds to wait before making a new connection attempt. The - default value is *1*. - - The ``tcp_connect_timeout`` parameter is expected to be a float that - indicates the maximum number of seconds to wait for establishing a - connection to the database host. The default value is *20.0*. - - The ``ssl_server_dn_match`` parameter is expected to be a boolean that - indicates whether the server certificate distinguished name (DN) should be - matched in addition to the regular certificate verification that is - performed. Note that if the ``ssl_server_cert_dn`` parameter is not - provided, host name matching is performed instead. The default value is - *True*. - - The ``ssl_server_cert_dn`` parameter is expected to be a string that - indicates the distinguished name (DN) which should be matched with the - server. This value is ignored if the ``ssl_server_dn_match`` parameter is - not set to the value *True*. - - The ``wallet_location`` parameter is expected to be a string that - identifies the directory where the wallet can be found. In python-oracledb - Thin mode, this must be the directory of the PEM-encoded wallet file, - ewallet.pem. - - The ``events`` parameter is ignored in the python-oracledb Thin mode. - - The ``externalauth`` parameter is ignored in the python-oracledb Thin mode. - - If the ``mode`` parameter is specified, it must be one of the - :ref:`connection authorization modes ` - which are defined at the module level. The default value is - :data:`oracledb.AUTH_MODE_DEFAULT`. - - The ``disable_oob`` parameter is expected to be a boolean that indicates - whether out-of-band breaks should be disabled. This value has no effect on - Windows which does not support this functionality. The default value is - *False*. - - The ``stmtcachesize`` parameter is expected to be an integer which - specifies the initial size of the statement cache. The default is the - value of :attr:`defaults.stmtcachesize`. 
- - The ``tag`` parameter is ignored in the python-oracledb Thin mode. - - The ``matchanytag`` parameter is ignored in the python-oracledb Thin mode. - - The ``config_dir`` parameter is expected to be a string that indicates the - directory in which :ref:`optional configuration files ` are - found. The default is the value of :attr:`defaults.config_dir`. - - The ``appcontext`` parameter is expected to be a list of 3-tuples that - identifies the application context used by the connection. This parameter - should contain namespace, name, and value and each entry in the tuple - should be a string. - - The ``shardingkey`` parameter and ``supershardingkey`` parameters are - ignored in the python-oracledb Thin mode. - - The ``debug_jdwp`` parameter is expected to be a string with the format - `host=;port=` that specifies the host and port of the PL/SQL - debugger. This allows using the Java Debug Wire Protocol (JDWP) to debug - PL/SQL code called by python-oracledb. - - The ``connection_id_prefix`` parameter is expected to be a string and is - added to the beginning of the generated ``connection_id`` that is sent to - the database for `tracing `__. - - The ``ssl_context`` parameter is expected to be an SSLContext object used - for connecting to the database using TLS. This SSL context will be - modified to include the private key or any certificates found in a - separately supplied wallet. This parameter should only be specified if - the default SSLContext object cannot be used. - - The ``sdu`` parameter is expected to be an integer that returns the - requested size of the Session Data Unit (SDU), in bytes. The value tunes - internal buffers used for communication to the database. Bigger values can - increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is negotiated - down to the lower of this value and the database network SDU configuration - value. See the `Database Net Services documentation `__ for more details. The default value is *8192* bytes. - - The ``pool_boundary`` parameter is expected to be one of the strings - *statement* or *transaction* which indicates when pooled :ref:`DRCP ` - or PRCP connections can be returned to the pool. If the value is - *statement*, then pooled DRCP or PRCP connections are implicitly released - back to the DRCP or PRCP pool when the connection is stateless (that is, - there are no active cursors, active transactions, temporary tables, or - temporary LOBs). If the value is *transaction*, then pooled DRCP or PRCP - connections are implicitly released back to the DRCP or PRCP pool when - either one of the methods :meth:`AsyncConnection.commit()` or - :meth:`AsyncConnection.rollback()` are called. This parameter requires the - use of DRCP or PRCP with Oracle Database 23ai (or later). See - :ref:`implicitconnpool` for more information. This value is used in both - the python-oracledb Thin and Thick modes. - - The ``use_tcp_fast_open`` parameter is expected to be a boolean which - indicates whether to use TCP Fast Open which is an `Oracle Autonomous - Database Serverless (ADB-S) `__ specific feature that can - reduce the latency in round-trips to the database after a connection has - been established. This feature is only available with certain versions of - ADB-S. This value is used in both python-oracledb Thin and Thick modes. - The default value is *False*. 
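As an aside, the ``appcontext`` format described above is a list of
(namespace, name, value) string 3-tuples. A minimal sketch follows; the
context names and values are placeholders, and the code is assumed to run
inside an ``async def`` coroutine:

.. code-block:: python

    context = [
        ("CLIENTCONTEXT", "module", "nightly_load"),
        ("CLIENTCONTEXT", "priority", "low"),
    ]
    connection = await oracledb.connect_async(
        user="hr",
        password="hr_password",
        dsn="dbhost.example.com:1521/orclpdb",
        appcontext=context,
    )
    cursor = connection.cursor()
    # the values can then be read back in SQL or PL/SQL
    await cursor.execute(
        "select sys_context('CLIENTCONTEXT', 'module') from dual"
    )
    print(await cursor.fetchone())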
- - The ``ssl_version`` parameter is expected to be one of the constants - *ssl.TLSVersion.TLSv1_2* or *ssl.TLSVersion.TLSv1_3* which identifies the - TLS protocol version used. These constants are defined in the Python - `ssl `__ module. This - parameter can be specified when establishing connections with the protocol - *tcps*. This value is used in both python-oracledb Thin and Thick modes. - The value *ssl.TLSVersion.TLSv1_3* requires Oracle Database 23ai. If you - are using python-oracledb Thick mode, Oracle Client 23ai is additionally - required. - - The ``use_sni`` parameter is expected to be a boolean which indicates - whether to use the TLS Server Name Indication (SNI) extension to bypass the - second TLS negotiation that would otherwise be required. This parameter is - used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `Database Net - Services documentation - `__ for more details. - - The ``program`` parameter is expected to be a string which specifies the - name of the executable program or application connected to Oracle - Database. This value is only used in the python-oracledb Thin mode. The - default is the value of :attr:`defaults.program`. - - The ``machine`` parameter is expected to be a string which specifies the - machine name of the client connecting to Oracle Database. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.machine`. - - The ``terminal`` parameter is expected to be a string which specifies the - terminal identifier from which the connection originates. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.terminal`. - - The ``osuser`` parameter is expected to be a string which specifies the - operating system user that initiates the database connection. This value - is only used in the python-oracledb Thin mode. The default value is the - value of :attr:`defaults.osuser`. - - The ``driver_name`` parameter is expected to be a string which specifies - the driver used by the client to connect to Oracle Database. This value - is used in both the python-oracledb Thin and Thick modes. The default is - the value of :attr:`defaults.driver_name`. - - The ``extra_auth_params`` parameter is expected to be a dictionary - containing the configuration parameters necessary for Oracle Database - authentication using :ref:`OCI ` or :ref:`Azure - ` cloud native authentication plugins. - This value is used in both the python-oracledb Thin and Thick modes. See - :ref:`tokenauth`. - - The ``pool_name`` parameter is expected to be a string which specifies the - name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This parameter can be used in both python-oracledb Thin and Thick - modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` - value in effect is *True*, it can only be used if the ``dsn`` parameter is - not specified. For Thick mode, you may prefer to set the Oracle Net - Services parameter `POOL_NAME `__ parameter in the - :ref:`easy connect string ` or - :ref:`connect descriptor `. See - :ref:`DRCP Pool Names `. - - The ``thick_mode_dsn_passthrough`` and ``handle`` parameters are ignored in - python-oracledb Thin mode. +.. autofunction:: connect_async .. versionchanged:: 3.2.0 @@ -827,873 +95,11 @@ Oracledb Methods The ``connection_id_prefix`` parameter was added. -.. 
function:: ConnectParams(user=None, proxy_user=None, password=None, \ - newpassword=None, wallet_password=None, access_token=None, host=None, \ - port=1521, protocol="tcp", https_proxy=None, https_proxy_port=0, \ - service_name=None, instance_name=None, sid=None, server_type=None, \ - cclass=None, purity=oracledb.PURITY_DEFAULT, expire_time=0, \ - retry_count=0, retry_delay=1, tcp_connect_timeout=20.0, \ - ssl_server_dn_match=True, ssl_server_cert_dn=None, \ - wallet_location=None, events=False, externalauth=False, \ - mode=oracledb.AUTH_MODE_DEFAULT, disable_oob=False, \ - stmtcachesize=oracledb.defaults.stmtcachesize, edition=None, \ - tag=None, matchanytag=False, config_dir=oracledb.defaults.config_dir, \ - appcontext=[], shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=8192, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=False, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_name=None, handle=0) - - Contains all the parameters that can be used to establish a connection to - the database. - - Creates and returns a :ref:`ConnectParams Object `. The object - can be passed to :meth:`oracledb.connect()`. - - All the parameters are optional. - - The ``user`` parameter is expected to be a string which indicates the name - of the user to connect to. This value is used in both the python-oracledb - Thin and :ref:`Thick ` modes. - - The ``proxy_user`` parameter is expected to be a string which indicates the - name of the proxy user to connect to. If this value is not specified, it - will be parsed out of user if user is in the form "user[proxy_user]". This - value is used in both the python-oracledb Thin and Thick modes. - - The ``password`` parameter expected to be a string which indicates the - password for the user. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``newpassword`` parameter is expected to be a string which indicates - the new password for the user. The new password will take effect - immediately upon a successful connection to the database. This value is - used in both the python-oracledb Thin and Thick modes. - - The ``wallet_password`` parameter is expected to be a string which - indicates the password to use to decrypt the PEM-encoded wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode. The - ``wallet_password`` parameter is not needed for cwallet.sso files that are - used in the python-oracledb Thick mode. - - The ``access_token`` parameter is expected to be a string or a 2-tuple or - a callable. If it is a string, it specifies an Azure AD OAuth2 token used - for Open Authorization (OAuth 2.0) token based authentication. If it is a - 2-tuple, it specifies the token and private key strings used for Oracle - Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired. This value is used in both the - python-oracledb Thin and Thick modes. 
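For illustration, a ConnectParams object built with some of the parameters
described above can be reused across connections; the values shown are
placeholders:

.. code-block:: python

    import oracledb

    params = oracledb.ConnectParams(
        host="dbhost.example.com",
        port=1521,
        service_name="orclpdb",
    )
    print(params.get_connect_string())

    connection = oracledb.connect(
        user="hr",
        password="hr_password",
        params=params,
    )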
- - The ``host`` parameter is expected to be a string which specifies the name - or IP address of the machine hosting the listener, which handles the - initial connection to the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``port`` parameter is expected to be an integer which indicates the - port number on which the listener is listening. The default value is - *1521*. This value is used in both the python-oracledb Thin and Thick - modes. - - The ``protocol`` parameter is expected to be one of the strings *tcp* or - *tcps* which indicates whether to use unencrypted network traffic or - encrypted network traffic (TLS). The default value is *tcp*. This value is - used in both the python-oracledb Thin and Thick modes. - - The ``https_proxy`` parameter is expected to be a string which indicates - the name or IP address of a proxy host to use for tunneling secure - connections. This value is used in both the python-oracledb Thin and Thick - modes. - - The ``https_proxy_port`` parameter is expected to be an integer which - indicates the port that is to be used to communicate with the proxy host. - The default value is *0*. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``service_name`` parameter is expected to be a string which indicates - the service name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``instance_name`` parameter is expected to be a string which indicates - the instance name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``sid`` parameter is expected to be a string which indicates the SID of - the database. It is recommended to use ``service_name`` instead. This value - is used in both the python-oracledb Thin and Thick modes. - - The ``server_type`` parameter is expected to be a string that indicates the - type of server connection that should be established. If specified, it - should be one of *dedicated*, *shared*, or *pooled*. This value is used in - both the python-oracledb Thin and Thick modes. - - The ``cclass`` parameter is expected to be a string that identifies the - connection class to use for :ref:`drcp`. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``purity`` parameter is expected to be one of the - :ref:`oracledb.PURITY_* ` constants that identifies the - purity to use for DRCP. This value is used in both the python-oracledb Thin - and Thick modes. The purity will internally default to - :data:`~oracledb.PURITY_SELF` for pooled connections . For standalone - connections, the purity will internally default to - :data:`~oracledb.PURITY_NEW`. - - The ``expire_time`` parameter is expected to be an integer which indicates - the number of minutes between the sending of keepalive probes. If this - parameter is set to a value greater than zero it enables keepalive. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *0*. - - The ``retry_count`` parameter is expected to be an integer that identifies - the number of times that a connection attempt should be retried before the - attempt is terminated. This value is used in both the python-oracledb Thin - and Thick modes. The default value is *0*. - - The ``retry_delay`` parameter is expected to be an integer that identifies - the number of seconds to wait before making a new connection attempt. This - value is used in both the python-oracledb Thin and Thick modes. 
The default - value is *1*. - - The ``tcp_connect_timeout`` parameter is expected to be a float that - indicates the maximum number of seconds to wait for establishing a - connection to the database host. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *20.0*. - - The ``ssl_server_dn_match`` parameter is expected to be a boolean that - indicates whether the server certificate distinguished name (DN) should be - matched in addition to the regular certificate verification that is - performed. Note that if the ``ssl_server_cert_dn`` parameter is not - provided, host name matching is performed instead. This value is used in - both the python-oracledb Thin and Thick modes. The default value is *True*. - - The ``ssl_server_cert_dn`` parameter is expected to be a string that - indicates the distinguished name (DN) which should be matched with the - server. This value is ignored if the ``ssl_server_dn_match`` parameter is - not set to the value *True*. This value is used in both the python-oracledb - Thin and Thick modes. - - The ``wallet_location`` parameter is expected to be a string that - identifies the directory where the wallet can be found. In python-oracledb - Thin mode, this must be the directory of the PEM-encoded wallet file, - ewallet.pem. In python-oracledb Thick mode, this must be the directory of - the file, cwallet.sso. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``events`` parameter is expected to be a boolean that specifies whether - the events mode should be enabled. This value is only used in the - python-oracledb Thick mode. This parameter is needed for continuous - query notification and high availability event notifications. The default - value is *False*. - - The ``externalauth`` parameter is a boolean that specifies whether external - authentication should be used. This value is only used in the - python-oracledb Thick mode. The default value is *False*. For standalone - connections, external authentication occurs when the ``user`` and - ``password`` attributes are not used. If these attributes are not used, you - can optionally set the ``externalauth`` attribute to *True*, which may aid - code auditing. - - The ``mode`` parameter is expected to be an integer that identifies the - authorization mode to use. This value is used in both the python-oracledb - Thin and Thick modes.The default value is - :data:`oracledb.AUTH_MODE_DEFAULT`. - - The ``disable_oob`` parameter is expected to be a boolean that indicates - whether out-of-band breaks should be disabled. This value is only used - in the python-oracledb Thin mode and has no effect on Windows which - does not support this functionality. The default value is *False*. - - The ``stmtcachesize`` parameter is expected to be an integer that - identifies the initial size of the statement cache. This value is used in - both the python-oracledb Thin and Thick modes. The default is the value of - :attr:`defaults.stmtcachesize`. - - The ``edition`` parameter is expected to be a string that indicates the - edition to use for the connection. It requires Oracle Database 11.2, or - later. This parameter cannot be used simultaneously with the ``cclass`` - parameter. - - The ``tag`` parameter is expected to be a string that identifies the type of - connection that should be returned from a pool. This value is only used - in the python-oracledb Thick mode. 
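The TLS-related settings above can be collected once in a ``ConnectParams``
object and reused for several connections. The sketch below uses placeholder
host, credential, and wallet values; ``ConnectParams.get_connect_string()``
shows the descriptor that python-oracledb would generate from them.

.. code-block:: python

    import oracledb

    params = oracledb.ConnectParams(
        protocol="tcps",
        host="db.example.com",                 # placeholder
        port=1522,
        service_name="orclpdb",
        ssl_server_dn_match=True,
        wallet_location="/opt/oracle/wallet",  # directory holding ewallet.pem in Thin mode
        retry_count=3,
        retry_delay=2,
        tcp_connect_timeout=10.0,
    )

    print(params.get_connect_string())         # inspect the generated descriptor

    connection = oracledb.connect(user="app_user", password="app_password",
                                  params=params)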
- - The ``matchanytag`` parameter is expected to be a boolean specifying - whether any tag can be used when acquiring a connection from the pool. This - value is only used in the python-oracledb Thick mode when acquiring a - connection from a pool. The default value is *False*. - - The ``config_dir`` parameter is expected to be a string that indicates the - directory in which the :ref:`tnsnames.ora ` configuration file - is located. - - The ``appcontext`` parameter is expected to be a list of 3-tuples that - identifies the application context used by the connection. This parameter - should contain namespace, name, and value and each entry in the tuple - should be a string. - - The ``shardingkey`` parameter and ``supershardingkey`` parameters, if - specified, are expected to be a sequence of values which identifies the - database shard to connect to. The key values can be a list of strings, - numbers, bytes, or dates. These values are only used in the - python-oracledb Thick mode and are ignored in the Thin mode. See - :ref:`connsharding`. - - The ``debug_jdwp`` parameter is expected to be a string with the format - `host=;port=` that specifies the host and port of the PL/SQL - debugger. This allows using the Java Debug Wire Protocol (JDWP) to debug - PL/SQL code invoked by python-oracledb. This value is only used in the - python-oracledb Thin mode. For python-oracledb Thick mode, set the - ``ORA_DEBUG_JDWP`` environment variable which has the same syntax. For more - information, see :ref:`applntracing`. - - The ``connection_id_prefix`` parameter is expected to be a string and is - added to the beginning of the generated ``connection_id`` that is sent to - the database for `tracing `__. This value - is only used in the python-oracledb Thin mode. - - The ``ssl_context`` parameter is expected to be an `SSLContext object - `__ which is used - for connecting to the database using TLS. This SSL context will be - modified to include the private key or any certificates found in a - separately supplied wallet. This parameter should only be specified if - the default SSLContext object cannot be used. This value is only used in - the python-oracledb Thin mode. - - The ``sdu`` parameter is expected to be an integer that returns the - requested size of the Session Data Unit (SDU), in bytes. The value tunes - internal buffers used for communication to the database. Bigger values can - increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is negotiated - down to the lower of this value and the database network SDU configuration - value. See the `Database Net Services documentation `__ for more details. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *8192* bytes. - - The ``pool_boundary`` parameter is expected to be one of the strings - *statement* or *transaction* which indicates when pooled :ref:`DRCP ` - or PRCP connections can be returned to the pool. If the value is - *statement*, then pooled DRCP or PRCP connections are implicitly released - back to the DRCP or PRCP pool when the connection is stateless (that is, - there are no active cursors, active transactions, temporary tables, or - temporary LOBs). If the value is *transaction*, then pooled DRCP or PRCP - connections are implicitly released back to the DRCP or PRCP pool when - either one of the methods :meth:`Connection.commit()` or - :meth:`Connection.rollback()` are called. 
This parameter requires the use - of DRCP or PRCP with Oracle Database 23ai (or later). See - :ref:`implicitconnpool` for more information. This value is used in both - the python-oracledb Thin and Thick modes. - - The ``use_tcp_fast_open`` parameter is expected to be a boolean which - indicates whether to use TCP Fast Open which is an `Oracle Autonomous - Database Serverless (ADB-S) `__ specific feature that can - reduce the latency in round-trips to the database after a connection has - been established. This feature is only available with certain versions of - ADB-S. This value is used in both python-oracledb Thin and Thick modes. - The default value is *False*. - - The ``ssl_version`` parameter is expected to be one of the constants - *ssl.TLSVersion.TLSv1_2* or *ssl.TLSVersion.TLSv1_3* which identifies the - TLS protocol version used. These constants are defined in the Python - `ssl `__ module. This - parameter can be specified when establishing connections with the protocol - "tcps". This value is used in both python-oracledb Thin and Thick modes. - The value *ssl.TLSVersion.TLSv1_3* requires Oracle Database 23ai. If you - are using python-oracledb Thick mode, Oracle Client 23ai is additionally - required. - - The ``use_sni`` parameter is expected to be a boolean which indicates - whether to use the TLS Server Name Indication (SNI) extension to bypass the - second TLS negotiation that would otherwise be required. This parameter is - used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `Database Net - Services documentation - `__ for more details. - - The ``program`` parameter is expected to be a string which specifies the - name of the executable program or application connected to Oracle - Database. This value is only used in the python-oracledb Thin mode. The - default is the value of :attr:`defaults.program`. - - The ``machine`` parameter is expected to be a string which specifies the - machine name of the client connecting to Oracle Database. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.machine`. - - The ``terminal`` parameter is expected to be a string which specifies the - terminal identifier from which the connection originates. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.terminal`. - - The ``osuser`` parameter is expected to be a string which specifies the - operating system user that initiates the database connection. This value - is only used in the python-oracledb Thin mode. The default value is the - value of :attr:`defaults.osuser`. - - The ``driver_name`` parameter is expected to be a string which specifies - the driver used by the client to connect to Oracle Database. This value - is used in both the python-oracledb Thin and Thick modes. The default is - the value of :attr:`defaults.driver_name`. - - The ``thick_mode_dsn_passthrough`` parameter is expected to be a boolean - which indicates whether the connect string should be passed unchanged to - the Oracle Client libraries for parsing when using python-oracledb Thick - mode. If this parameter is set to *False* in Thick mode, connect strings - are parsed by python-oracledb itself and a generated connect descriptor is - sent to the Oracle Client libraries. This value is only used in the - python-oracledb Thick mode. The default value is the value of - :attr:`defaults.thick_mode_dsn_passthrough`. 
For more information, see - :ref:`usingconfigfiles`. - - The ``extra_auth_params`` parameter is expected to be a dictionary - containing the configuration parameters necessary for Oracle Database - authentication using :ref:`OCI ` or :ref:`Azure - ` cloud native authentication plugins. This value is - used in both the python-oracledb Thin and Thick modes. See - :ref:`tokenauth`. - - The ``pool_name`` parameter is expected to be a string which specifies the - name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This value is used in both python-oracledb Thin and Thick modes. - See :ref:`DRCP Pool Names `. - - The ``handle`` parameter is expected to be an integer which represents a - pointer to a valid service context handle. This value is only used in the - python-oracledb Thick mode. It should be used with extreme caution. The - default value is *0*. - - .. versionchanged:: 3.2.0 - - The ``pool_name`` parameter was added. - - .. versionchanged:: 3.0.0 - - The ``instance_name``, ``use_sni``, ``thick_mode_dsn_passthrough`` and - ``extra_auth_params`` parameters were added. - - .. versionchanged:: 2.5.0 - - The ``program``, ``machine``, ``terminal``, ``osuser``, and - ``driver_name`` parameters were added. Support for ``edition`` and - ``appcontext`` was added to python-oracledb Thin mode. - - .. versionchanged:: 2.3.0 - - The default value of the ``retry_delay`` parameter was changed from 0 - seconds to 1 second. The default value of the ``tcp_connect_timeout`` - parameter was changed from 60.0 seconds to 20.0 seconds. The - ``ssl_version`` parameter was added. - - .. versionchanged:: 2.1.0 - - The ``pool_boundary`` and ``use_tcp_fast_open`` parameters were added. - - .. versionchanged:: 2.0.0 - - The ``ssl_context`` and ``sdu`` parameters were added. - - .. versionchanged:: 1.4.0 - - The ``connection_id_prefix`` parameter was added. - -.. function:: create_pipeline() - - Creates a :ref:`pipeline object ` which can be used to - process a set of operations against a database. +.. autofunction:: create_pipeline .. versionadded:: 2.4.0 -.. 
function:: create_pool(dsn=None, pool_class=oracledb.ConnectionPool, \ - pool_alias=None, params=None, min=1, max=2, increment=1, \ - connectiontype=oracledb.Connection, \ - getmode=oracledb.POOL_GETMODE_WAIT, homogeneous=True, timeout=0, \ - wait_timeout=0, max_lifetime_session=0, session_callback=None, \ - max_sessions_per_shard=0, soda_metadata_cache=False, ping_interval=60, \ - ping_timeout=5000, user=None, proxy_user=None, password=None, \ - newpassword=None, wallet_password=None, access_token=None, host=None, \ - port=1521, protocol="tcp", https_proxy=None, https_proxy_port=0, \ - service_name=None, instance_name=None, sid=None, server_type=None, \ - cclass=None, purity=oracledb.PURITY_DEFAULT, expire_time=0, \ - retry_count=0, retry_delay=1, tcp_connect_timeout=20.0, \ - ssl_server_dn_match=True, ssl_server_cert_dn=None, \ - wallet_location=None, events=False, externalauth=False, \ - mode=oracledb.AUTH_MODE_DEFAULT, disable_oob=False, \ - stmtcachesize=oracledb.defaults.stmtcachesize, edition=None, \ - tag=None, matchanytag=False, config_dir=oracledb.defaults.config_dir, \ - appcontext=[], shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=8192, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=False, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_name=None, handle=0) - - Creates a connection pool with the supplied parameters and returns the - :ref:`ConnectionPool object ` for the pool. See :ref:`Connection - pooling ` for more information. - - This function is the equivalent of the ``cx_Oracle.SessionPool()`` - function. The use of ``SessionPool()`` has been deprecated in - python-oracledb. - - Not all parameters apply to both python-oracledb Thin and :ref:`Thick - ` modes. - - Some values, such as the database host name, can be specified as - parameters, as part of the connect string, and in the params object. If a - ``dsn`` (data source name) parameter is passed, the python-oracledb Thick - mode will use the string to connect, otherwise a connection string is - internally constructed from the individual parameters and params object - values, with the individual parameters having precedence. In - python-oracledb's default Thin mode, a connection string is internally used - that contains all relevant values specified. The precedence in Thin mode - is that values in any ``dsn`` parameter override values passed as - individual parameters, which themselves override values set in the - ``params`` parameter object. Similar precedence rules also apply to other - values. - - Python-oracledb connection pools must be created, used and closed within - the same process. Sharing pools or connections across processes has - unpredictable behavior. Using connection pools in multi-threaded - architectures is supported. Multi-process architectures that cannot be - converted to threading may get some benefit from :ref:`drcp`. - - In python-oracledb Thick mode, connection pooling is handled by Oracle's - `Session pooling `__ technology. - This allows python-oracledb applications to support features like - `Application Continuity `__. - - The ``user``, ``password``, and ``dsn`` parameters are the same as for - :meth:`oracledb.connect()`. 
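A minimal pool-creation sketch, assuming placeholder credentials and DSN,
might look like the following; connections are borrowed with
:meth:`ConnectionPool.acquire()` and returned to the pool when the ``with``
block ends.

.. code-block:: python

    import oracledb

    pool = oracledb.create_pool(
        user="app_user", password="app_password",   # placeholders
        dsn="dbhost.example.com/orclpdb",
        min=2, max=5, increment=1,
        getmode=oracledb.POOL_GETMODE_WAIT,
    )

    with pool.acquire() as connection:
        with connection.cursor() as cursor:
            cursor.execute("select sysdate from dual")
            print(cursor.fetchone())

    pool.close()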
- - The ``pool_class`` parameter is expected to be a - :ref:`ConnectionPool Object ` or a subclass of ConnectionPool. - - The ``pool_alias`` parameter is expected to be a string representing the - name used to store and reference the pool in the python-oracledb connection - pool cache. If this parameter is not specified, then the pool will not be - added to the cache. The value of this parameter can be used with the - :meth:`oracledb.get_pool()` and :meth:`oracledb.connect()` methods to - access the pool. See :ref:`connpoolcache`. - - The ``params`` parameter is expected to be of type :ref:`PoolParams - ` and contains parameters that are used to create the pool. - If this parameter is not specified, the additional keyword parameters will - be used to create an instance of PoolParams. If both the params parameter - and additional keyword parameters are specified, the values in the keyword - parameters have precedence. Note that if a ``dsn`` is also supplied, then - in the python-oracledb Thin mode, the values of the parameters specified - (if any) within the ``dsn`` will override the values passed as additional - keyword parameters, which themselves override the values set in the - ``params`` parameter object. - - The ``min``, ``max`` and ``increment`` parameters control pool growth - behavior. A fixed pool size where ``min`` equals ``max`` is - :ref:`recommended ` to help prevent connection storms and to - help overall system stability. The ``min`` parameter is the number of - connections opened when the pool is created. The default value of the - ``min`` parameter is *1*. The ``increment`` parameter is the number of - connections that are opened whenever a connection request exceeds the - number of currently open connections. The default value of the - ``increment`` parameter is *1*. The ``max`` parameter is the maximum number - of connections that can be open in the connection pool. The default value - of the ``max`` parameter is *2*. - - If the ``connectiontype`` parameter is specified, all calls to - :meth:`ConnectionPool.acquire()` will create connection objects of that - type, rather than the base type defined at the module level. - - The ``getmode`` parameter determines the behavior of - :meth:`ConnectionPool.acquire()`. One of the constants - :data:`oracledb.POOL_GETMODE_WAIT`, :data:`oracledb.POOL_GETMODE_NOWAIT`, - :data:`oracledb.POOL_GETMODE_FORCEGET`, or - :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. The default value is - :data:`oracledb.POOL_GETMODE_WAIT`. - - The ``homogeneous`` parameter is a boolean that indicates whether the - connections are homogeneous (same user) or heterogeneous (multiple - users). The default value is *True*. - - The ``timeout`` parameter is the length of time (in seconds) that a - connection may remain idle in the pool before it is terminated. This - applies only when the pool has more than ``min`` connections open, allowing - it to shrink to the specified minimum size. The default value is *0* - seconds. A value of *0* means there is no limit. - - The ``wait_timeout`` parameter is the length of time (in milliseconds) that - a caller should wait when acquiring a connection from the pool with - ``getmode`` set to :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. The default - value is *0* milliseconds. - - The ``max_lifetime_session`` parameter is the length of time (in seconds) - that a pooled connection may exist since first being created. The default - value is *0*. A value of *0* means that there is no limit. 
Connections - become candidates for termination when they are acquired or released back - to the pool and have existed for longer than ``max_lifetime_session`` - seconds. In python-oracledb Thick mode, Oracle Client libraries 12.1 or - later must be used and, prior to Oracle Client 21, cleanup only occurs when - the pool is accessed. - - The ``session_callback`` parameter is a callable that is invoked when a - connection is returned from the pool for the first time, or when the - connection tag differs from the one requested. - - The ``max_sessions_per_shard`` parameter is the maximum number of - connections that may be associated with a particular shard. This value is - only used in the python-oracledb Thick mode and is ignored in the - python-oracledb Thin mode. The default value is *0*. - - The ``soda_metadata_cache`` parameter is a boolean that indicates whether - or not the SODA metadata cache should be enabled. This value is only used - in the python-oracledb Thick mode and is ignored in the python-oracledb - Thin mode. The default value is *False*. - - The ``ping_interval`` parameter is the length of time (in seconds) after - which an unused connection in the pool will be a candidate for pinging when - :meth:`ConnectionPool.acquire()` is called. If the ping to the database - indicates the connection is not alive a replacement connection will be - returned by :meth:`~ConnectionPool.acquire()`. If ``ping_interval`` is a - negative value, then the ping functionality will be disabled. The default - value is *60* seconds. - - The ``ping_timeout`` parameter is the maximum length of time (in - milliseconds) that :meth:`ConnectionPool.acquire()` waits for a connection - to respond to any internal ping to the database. If the ping does not - respond within the specified time, then the connection is destroyed and - :meth:`~ConnectionPool.acquire()` returns a different connection. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *5000* milliseconds. - - The ``proxy_user`` parameter is expected to be a string which indicates the - name of the proxy user to connect to. If this value is not specified, it - will be parsed out of user if user is in the form "user[proxy_user]". This - value is used in both the python-oracledb Thin and Thick modes. - - The ``newpassword`` parameter is expected to be a string which indicates - the new password for the user. The new password will take effect - immediately upon a successful connection to the database. This value is - used in both the python-oracledb Thin and Thick modes. - - The ``wallet_password`` parameter is expected to be a string which - indicates the password to use to decrypt the PEM-encoded wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode. The - ``wallet_password`` parameter is not needed for cwallet.sso files that are - used in the python-oracledb Thick mode. - - The ``access_token`` parameter is expected to be a string or a 2-tuple or - a callable. If it is a string, it specifies an Azure AD OAuth2 token used - for Open Authorization (OAuth 2.0) token based authentication. If it is a - 2-tuple, it specifies the token and private key strings used for Oracle - Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. 
If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``host`` parameter is expected to be a string which specifies the name - or IP address of the machine hosting the listener, which handles the - initial connection to the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``port`` parameter is expected to be an integer which indicates the - port number on which the listener is listening. The default value is - *1521*. This value is used in both the python-oracledb Thin and Thick - modes. - - The ``protocol`` parameter is expected to be one of the strings *tcp* or - *tcps* which indicates whether to use unencrypted network traffic or - encrypted network traffic (TLS). The default value is *tcp*. This value is - used in both the python-oracledb Thin and Thick modes. - - The ``https_proxy`` parameter is expected to be a string which indicates - the name or IP address of a proxy host to use for tunneling secure - connections. This value is used in both the python-oracledb Thin and Thick - modes. - - The ``https_proxy_port`` parameter is expected to be an integer which - indicates the port that is to be used to communicate with the proxy host. - The default value is *0*. This value is used in both the python-oracledb - Thin and Thick modes. - - The ``service_name`` parameter is expected to be a string which indicates - the service name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``instance_name`` parameter is expected to be a string which indicates - the instance name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``sid`` parameter is expected to be a string which indicates the SID of - the database. It is recommended to use ``service_name`` instead. This value - is used in both the python-oracledb Thin and Thick modes. - - The ``server_type`` parameter is expected to be a string that indicates the - type of server connection that should be established. If specified, it - should be one of *dedicated*, *shared*, or *pooled*. This value is used in - both the python-oracledb Thin and Thick modes. - - The ``cclass`` parameter is expected to be a string that identifies the - connection class to use for :ref:`drcp`. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``purity`` parameter is expected to be one of the - :ref:`oracledb.PURITY_* ` constants that identifies the - purity to use for DRCP. This value is used in both the python-oracledb Thin - and Thick modes. The purity will internally default to - :data:`~oracledb.PURITY_SELF` for pooled connections. - - The ``expire_time`` parameter is expected to be an integer which indicates - the number of minutes between the sending of keepalive probes. If this - parameter is set to a value greater than zero it enables keepalive. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *0* minutes. - - The ``retry_count`` parameter is expected to be an integer that identifies - the number of times that a connection attempt should be retried before the - attempt is terminated. This value is used in both the python-oracledb Thin - and Thick modes. The default value is *0*. 
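As one way the ``server_type``, ``cclass``, and ``purity`` parameters can be
combined, the sketch below requests DRCP pooled servers with self purity. The
connection details are placeholders and DRCP must already be configured in
the target database.

.. code-block:: python

    import oracledb

    pool = oracledb.create_pool(
        user="app_user", password="app_password",   # placeholders
        host="dbhost.example.com", port=1521, service_name="orclpdb",
        server_type="pooled",              # use DRCP pooled servers
        cclass="MYAPP",                    # DRCP connection class
        purity=oracledb.PURITY_SELF,       # reuse sessions within the class
        min=2, max=5, increment=1,
    )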
- - The ``retry_delay`` parameter is expected to be an integer that identifies - the number of seconds to wait before making a new connection attempt. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *1* seconds. - - The ``tcp_connect_timeout`` parameter is expected to be a float that - indicates the maximum number of seconds to wait for establishing a - connection to the database host. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *20.0* seconds. - - The ``ssl_server_dn_match`` parameter is expected to be a boolean that - indicates whether the server certificate distinguished name (DN) should be - matched in addition to the regular certificate verification that is - performed. Note that if the ``ssl_server_cert_dn`` parameter is not - provided, host name matching is performed instead. This value is used in - both the python-oracledb Thin and Thick modes. The default value is *True*. - - The ``ssl_server_cert_dn`` parameter is expected to be a string that - indicates the distinguished name (DN) which should be matched with the - server. This value is ignored if the ``ssl_server_dn_match`` parameter is - not set to the value *True*. This value is used in both the python-oracledb - Thin and Thick modes. - - The ``wallet_location`` parameter is expected to be a string that - identifies the directory where the wallet can be found. In python-oracledb - Thin mode, this must be the directory of the PEM-encoded wallet file, - ewallet.pem. In python-oracledb Thick mode, this must be the directory of - the file, cwallet.sso. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``events`` parameter is expected to be a boolean that specifies whether - the events mode should be enabled. This value is only used in the - python-oracledb Thick mode and is ignored in the Thin mode. This parameter - is needed for continuous query notification and high availability event - notifications. The default value is *False*. - - The ``externalauth`` parameter is a boolean that determines whether to use - external authentication. This value is only used in python-oracledb Thick - mode and is ignored in Thin mode. The default value is *False*. For pooled - connections in Thick mode, external authentication requires the use of a - heterogeneous pool. For this reason, you must set the ``homogeneous`` - parameter to *False*. See :ref:`extauth`. - - If the ``mode`` parameter is specified, it must be one of the - :ref:`connection authorization modes ` - which are defined at the module level. This value is used in both the - python-oracledb Thin and Thick modes.The default value is - :data:`oracledb.AUTH_MODE_DEFAULT`. - - The ``disable_oob`` parameter is expected to be a boolean that indicates - whether out-of-band breaks should be disabled. This value is only used - in the python-oracledb Thin mode and has no effect on Windows which - does not support this functionality. The default value is *False*. - - The ``stmtcachesize`` parameter is expected to be an integer which - specifies the initial size of the statement cache. This value is used in - both the python-oracledb Thin and Thick modes. The default is the value of - :attr:`defaults.stmtcachesize`. - - The ``edition`` parameter is expected to be a string that indicates the - edition to use for the connection. It requires Oracle Database 11.2, or - later. This parameter cannot be used simultaneously with the ``cclass`` - parameter. 
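As a possible combination of the ``externalauth`` and ``homogeneous``
parameters, the Thick mode sketch below relies on the operating system or
wallet to supply credentials; the DSN is a placeholder and Oracle Client
libraries must be available for :meth:`oracledb.init_oracle_client()`.

.. code-block:: python

    import oracledb

    oracledb.init_oracle_client()    # external authentication pools need Thick mode

    pool = oracledb.create_pool(
        dsn="dbhost.example.com/orclpdb",   # placeholder
        externalauth=True,
        homogeneous=False,                  # heterogeneous pool, as noted above
    )

    with pool.acquire() as connection:      # credentials come from the external source
        print(connection.version)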
- - The ``tag`` parameter is expected to be a string that identifies the type - of connection that should be returned from a pool. This value is only used - in the python-oracledb Thick mode and is ignored in the Thin mode. - - The ``matchanytag`` parameter is expected to be a boolean specifying - whether any tag can be used when acquiring a connection from the pool. This - value is only used in the python-oracledb Thick mode when acquiring a - connection from a pool. This value is ignored in the python-oracledb Thin - mode. The default value is *False*. - - The ``config_dir`` parameter is expected to be a string that indicates the - directory in which the :ref:`tnsnames.ora ` configuration file - is located. The default is the value of :attr:`defaults.config_dir`. - - The ``appcontext`` parameter is expected to be a list of 3-tuples that - identifies the application context used by the connection. This parameter - should contain namespace, name, and value and each entry in the tuple - should be a string. - - The ``shardingkey`` parameter and ``supershardingkey`` parameters, if - specified, are expected to be a sequence of values which identifies the - database shard to connect to. The key values can be a list of strings, - numbers, bytes, or dates. These values are only used in the - python-oracledb Thick mode and are ignored in the Thin mode. See - :ref:`connsharding`. - - The ``debug_jdwp`` parameter is expected to be a string with the format - `host=;port=` that specifies the host and port of the PL/SQL - debugger. This allows using the Java Debug Wire Protocol (JDWP) to debug - PL/SQL code invoked by python-oracledb. This value is only used in the - python-oracledb Thin mode. For python-oracledb Thick mode, set the - ``ORA_DEBUG_JDWP`` environment variable which has the same syntax. For more - information, see :ref:`applntracing`. - - The ``connection_id_prefix`` parameter is expected to be a string and is - added to the beginning of the generated ``connection_id`` that is sent to - the database for `tracing `__. This value - is only used in the python-oracledb Thin mode. - - The ``ssl_context`` parameter is expected to be an `SSLContext object - `__ which is used - for connecting to the database using TLS. This SSL context will be - modified to include the private key or any certificates found in a - separately supplied wallet. This parameter should only be specified if - the default SSLContext object cannot be used. This value is only used in - the python-oracledb Thin mode. - - The ``sdu`` parameter is expected to be an integer that returns the - requested size of the Session Data Unit (SDU), in bytes. The value tunes - internal buffers used for communication to the database. Bigger values can - increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is negotiated - down to the lower of this value and the database network SDU configuration - value. See the `Database Net Services documentation `__ for more details. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *8192* bytes. - - The ``pool_boundary`` parameter is expected to be one of the strings - *statement* or *transaction* which indicates when pooled :ref:`DRCP ` - or PRCP connections can be returned to the pool. 
If the value is - *statement*, then pooled DRCP or PRCP connections are implicitly released - back to the DRCP or PRCP pool when the connection is stateless (that is, - there are no active cursors, active transactions, temporary tables, or - temporary LOBs). If the value is *transaction*, then pooled DRCP or PRCP - connections are implicitly released back to the DRCP or PRCP pool when - either one of the methods :meth:`Connection.commit()` or - :meth:`Connection.rollback()` are called. This parameter requires the use - of DRCP or PRCP with Oracle Database 23ai (or later). See - :ref:`implicitconnpool` for more information. This value is used in both - the python-oracledb Thin and Thick modes. - - The ``use_tcp_fast_open`` parameter is expected to be a boolean which - indicates whether to use TCP Fast Open which is an `Oracle Autonomous - Database Serverless (ADB-S) `__ specific feature that can - reduce the latency in round-trips to the database after a connection has - been established. This feature is only available with certain versions of - ADB-S. This value is used in both python-oracledb Thin and Thick modes. - The default value is *False*. - - The ``ssl_version`` parameter is expected to be one of the constants - *ssl.TLSVersion.TLSv1_2* or *ssl.TLSVersion.TLSv1_3* which identifies the - TLS protocol version used. These constants are defined in the Python - `ssl `__ module. This - parameter can be specified when establishing connections with the protocol - "tcps". This value is used in both python-oracledb Thin and Thick modes. - The value *ssl.TLSVersion.TLSv1_3* requires Oracle Database 23ai. If you - are using python-oracledb Thick mode, Oracle Client 23ai is additionally - required. - - The ``use_sni`` parameter is expected to be a boolean which indicates - whether to use the TLS Server Name Indication (SNI) extension to bypass the - second TLS negotiation that would otherwise be required. This parameter is - used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `Database Net - Services documentation - `__ for more details. - - The ``program`` parameter is expected to be a string which specifies the - name of the executable program or application connected to Oracle - Database. This value is only used in the python-oracledb Thin mode. The - default is the value of :attr:`defaults.program`. - - The ``machine`` parameter is expected to be a string which specifies the - machine name of the client connecting to Oracle Database. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.machine`. - - The ``terminal`` parameter is expected to be a string which specifies the - terminal identifier from which the connection originates. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.terminal`. - - The ``osuser`` parameter is expected to be a string which specifies the - operating system user that initiates the database connection. This value - is only used in the python-oracledb Thin mode. The default value is the - value of :attr:`defaults.osuser`. - - The ``driver_name`` parameter is expected to be a string which specifies - the driver used by the client to connect to Oracle Database. This value - is used in both the python-oracledb Thin and Thick modes. The default is - the value of :attr:`defaults.driver_name`. 
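The session metadata parameters above (``program``, ``machine``, ``osuser``,
and ``driver_name``) can be set per pool; the values in this sketch are
illustrative only and override the corresponding :attr:`defaults` attributes
for this pool alone.

.. code-block:: python

    import oracledb

    pool = oracledb.create_pool(
        user="app_user", password="app_password",   # placeholders
        dsn="dbhost.example.com/orclpdb",
        program="inventory-service",          # reported to the database in Thin mode
        machine="web01.example.com",
        osuser="svc_inventory",
        driver_name="inventory-service : 1.0",
        min=1, max=2, increment=1,
    )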
- - The ``thick_mode_dsn_passthrough`` parameter is expected to be a boolean - which indicates whether the connect string should be passed unchanged to - the Oracle Client libraries for parsing when using python-oracledb Thick - mode. If this parameter is set to *False* in Thick mode, connect strings - are parsed by python-oracledb itself and a generated connect descriptor is - sent to the Oracle Client libraries. This value is only used in the - python-oracledb Thick mode. The default value is - :attr:`defaults.thick_mode_dsn_passthrough`. For more information, see - :ref:`usingconfigfiles`. - - The ``extra_auth_params`` parameter is expected to be a dictionary - containing the configuration parameters necessary for Oracle Database - authentication using :ref:`OCI ` or :ref:`Azure - ` cloud native authentication plugins. This value is - used in both the python-oracledb Thin and Thick modes. See - :ref:`tokenauth`. - - The ``pool_name`` parameter is expected to be a string which specifies the - name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This parameter can be used in both python-oracledb Thin and Thick - modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` - value in effect is *True*, it can only be used if the ``dsn`` parameter is - not specified. For Thick mode, you may prefer to set the Oracle Net - Services parameter `POOL_NAME `__ parameter in the - :ref:`easy connect string ` or - :ref:`connect descriptor `. See - :ref:`DRCP Pool Names `. - - If the ``handle`` parameter is specified, it must be of type OCISvcCtx\* - and is only of use when embedding Python in an application (like - PowerBuilder) which has already made the connection. The connection thus - created should *never* be used after the source handle has been closed or - destroyed. This value is only used in the python-oracledb Thick mode and - is ignored in the Thin mode. It should be used with extreme caution. The - default value is *0*. +.. autofunction:: create_pool .. versionchanged:: 3.2.0 @@ -1730,394 +136,7 @@ Oracledb Methods The ``connection_id_prefix`` parameter was added. -.. 
function:: create_pool_async(dsn=None, \ - pool_class=oracledb.AsyncConnectionPool, pool_alias=None, \ - params=None, min=1, max=2, increment=1, \ - connectiontype=oracledb.AsyncConnection, \ - getmode=oracledb.POOL_GETMODE_WAIT, homogeneous=True, timeout=0, \ - wait_timeout=0, max_lifetime_session=0, session_callback=None, \ - max_sessions_per_shard=0, soda_metadata_cache=False, ping_interval=60, \ - ping_timeout=5000, user=None, proxy_user=None, password=None, \ - newpassword=None, wallet_password=None, access_token=None, host=None, \ - port=1521, protocol="tcp", https_proxy=None, https_proxy_port=0, \ - service_name=None, instance_name=None, sid=None, server_type=None, \ - cclass=None, purity=oracledb.PURITY_DEFAULT, expire_time=0, \ - retry_count=0, retry_delay=1, tcp_connect_timeout=20.0, \ - ssl_server_dn_match=True, ssl_server_cert_dn=None, \ - wallet_location=None, events=False, externalauth=False, \ - mode=oracledb.AUTH_MODE_DEFAULT, disable_oob=False, \ - stmtcachesize=oracledb.defaults.stmtcachesize, edition=None, \ - tag=None, matchanytag=False, config_dir=oracledb.defaults.config_dir, \ - appcontext=[], shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=8192, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=False, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_name=None, handle=0) - - Creates a connection pool with the supplied parameters and returns the - :ref:`AsyncConnectionPool object ` for the pool. - ``create_pool_async()`` is a synchronous method. See - :ref:`Connection pooling ` for more information. - - This method can only be used in python-oracledb Thin mode. - - When connecting to Oracle Autonomous Database, use Python 3.11, or later. - - .. versionadded:: 2.0.0 - - Some values, such as the database host name, can be specified as - parameters, as part of the connect string, and in the params object. - The precedence is that values in the ``dsn`` parameter override values - passed as individual parameters, which themselves override values set in - the ``params`` parameter object. Similar precedence rules also apply to - other values. - - The ``user``, ``password``, and ``dsn`` parameters are the same as for - :meth:`oracledb.connect()`. - - The ``pool_class`` parameter is expected to be an - :ref:`AsyncConnectionPool Object ` or a subclass of - AsyncConnectionPool. - - The ``pool_alias`` parameter is expected to be a string representing the - name used to store and reference the pool in the python-oracledb connection - pool cache. If this parameter is not specified, then the pool will not be - added to the cache. The value of this parameter can be used with the - :meth:`oracledb.get_pool()` and :meth:`oracledb.connect_async()` methods to - access the pool. See :ref:`connpoolcache`. - - The ``params`` parameter is expected to be of type :ref:`PoolParams - ` and contains parameters that are used to create the pool. - If this parameter is not specified, the additional keyword parameters will - be used to create an instance of PoolParams. If both the params parameter - and additional keyword parameters are specified, the values in the keyword - parameters have precedence. 
Note that if a ``dsn`` is also supplied, then - the values of the parameters specified (if any) within the ``dsn`` will - override the values passed as additional keyword parameters, which - themselves override the values set in the ``params`` parameter object. - - The ``min``, ``max`` and ``increment`` parameters control pool growth - behavior. A fixed pool size where ``min`` equals ``max`` is - :ref:`recommended ` to help prevent connection storms and to - help overall system stability. The ``min`` parameter is the number of - connections opened when the pool is created. The default value of the - ``min`` parameter is *1*. The ``increment`` parameter is the number of - connections that are opened whenever a connection request exceeds the - number of currently open connections. The default value of the - ``increment`` parameter is *1*. The ``max`` parameter is the maximum number - of connections that can be open in the connection pool. The default value - of the ``max`` parameter is *2*. - - If the ``connectiontype`` parameter is specified, all calls to - :meth:`AsyncConnectionPool.acquire()` will create connection objects of - that type, rather than the base type defined at the module level. - - The ``getmode`` parameter determines the behavior of - :meth:`AsyncConnectionPool.acquire()`. One of the constants - :data:`oracledb.POOL_GETMODE_WAIT`, :data:`oracledb.POOL_GETMODE_NOWAIT`, - :data:`oracledb.POOL_GETMODE_FORCEGET`, or - :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. The default value is - :data:`oracledb.POOL_GETMODE_WAIT`. - - The ``homogeneous`` parameter is a boolean that indicates whether the - connections are homogeneous (same user) or heterogeneous (multiple - users). The default value is *True*. - - The ``timeout`` parameter is the length of time (in seconds) that a - connection may remain idle in the pool before it is terminated. This - applies only when the pool has more than ``min`` connections open, allowing - it to shrink to the specified minimum size. The default value is *0* - seconds. A value of *0* means there is no limit. - - The ``wait_timeout`` parameter is the length of time (in milliseconds) that - a caller should wait when acquiring a connection from the pool with - ``getmode`` set to :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. The default - value is *0* milliseconds. - - The ``max_lifetime_session`` parameter is the length of time (in seconds) - that a pooled connection may exist since first being created. The default - value is *0*. A value of *0* means that there is no limit. Connections - become candidates for termination when they are acquired or released back - to the pool and have existed for longer than ``max_lifetime_session`` - seconds. In python-oracledb Thick mode, Oracle Client libraries 12.1 or - later must be used and, prior to Oracle Client 21, cleanup only occurs when - the pool is accessed. - - The ``session_callback`` parameter is a callable that is invoked when a - connection is returned from the pool for the first time, or when the - connection tag differs from the one requested. - - The ``max_sessions_per_shard`` parameter is ignored in the python-oracledb - Thin mode. - - The ``soda_metadata_cache`` parameter is ignored in the python-oracledb - Thin mode. - - The ``ping_interval`` parameter is the length of time (in seconds) after - which an unused connection in the pool will be a candidate for pinging when - :meth:`AsyncConnectionPool.acquire()` is called. 
If the ping to the - database indicates the connection is not alive a replacement connection - will be returned by :meth:`~AsyncConnectionPool.acquire()`. If - ``ping_interval`` is a negative value, then the ping functionality will be - disabled. The default value is *60* seconds. - - The ``ping_timeout`` parameter is the maximum length of time (in - milliseconds) that :meth:`AsyncConnectionPool.acquire()` waits for a - connection to respond to any internal ping to the database. If the ping - does not respond within the specified time, then the connection is - destroyed and :meth:`~AsyncConnectionPool.acquire()` returns a different - connection. This value is used in both the python-oracledb Thin and Thick - modes. The default value is *5000* milliseconds. - - The ``proxy_user`` parameter is expected to be a string which indicates the - name of the proxy user to connect to. If this value is not specified, it - will be parsed out of user if user is in the form "user[proxy_user]". - - The ``newpassword`` parameter is expected to be a string which indicates - the new password for the user. The new password will take effect - immediately upon a successful connection to the database. - - The ``wallet_password`` parameter is expected to be a string which - indicates the password to use to decrypt the PEM-encoded wallet, if it is - encrypted. - - The ``access_token`` parameter is expected to be a string or a 2-tuple or - a callable. If it is a string, it specifies an Azure AD OAuth2 token used - for Open Authorization (OAuth 2.0) token based authentication. If it is a - 2-tuple, it specifies the token and private key strings used for Oracle - Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired. - - The ``host`` parameter is expected to be a string which specifies the name - or IP address of the machine hosting the listener, which handles the - initial connection to the database. - - The ``port`` parameter is expected to be an integer which indicates the - port number on which the listener is listening. The default value is - *1521*. - - The ``protocol`` parameter is expected to be one of the strings *tcp* or - *tcps* which indicates whether to use unencrypted network traffic or - encrypted network traffic (TLS). The default value is *tcp*. - - The ``https_proxy`` parameter is expected to be a string which indicates - the name or IP address of a proxy host to use for tunneling secure - connections. - - The ``https_proxy_port`` parameter is expected to be an integer which - indicates the port that is to be used to communicate with the proxy host. - The default value is *0*. - - The ``service_name`` parameter is expected to be a string which indicates - the service name of the database. - - The ``instance_name`` parameter is expected to be a string which indicates - the instance name of the database. - - The ``sid`` parameter is expected to be a string which indicates the SID of - the database. It is recommended to use ``service_name`` instead. - - The ``server_type`` parameter is expected to be a string that indicates the - type of server connection that should be established. If specified, it - should be one of *dedicated*, *shared*, or *pooled*. 
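Putting the asynchronous pool parameters together, a minimal sketch with
placeholder credentials and host follows. Note that ``create_pool_async()``
itself is synchronous, while acquiring and using connections is awaited.

.. code-block:: python

    import asyncio
    import oracledb

    async def main():
        pool = oracledb.create_pool_async(
            user="app_user", password="app_password",    # placeholders
            host="dbhost.example.com", port=1521, service_name="orclpdb",
            min=1, max=4, increment=1,
        )
        async with pool.acquire() as connection:
            with connection.cursor() as cursor:
                await cursor.execute("select user from dual")
                print(await cursor.fetchone())
        await pool.close()

    asyncio.run(main())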
- - The ``cclass`` parameter is expected to be a string that identifies the - connection class to use for :ref:`drcp`. - - The ``purity`` parameter is expected to be one of the - :ref:`oracledb.PURITY_* ` constants that identifies the - purity to use for DRCP. The purity will internally default to - :data:`~oracledb.PURITY_SELF` for pooled connections. - - The ``expire_time`` parameter is expected to be an integer which indicates - the number of minutes between the sending of keepalive probes. If this - parameter is set to a value greater than zero it enables keepalive. The - default value is *0* minutes. - - The ``retry_count`` parameter is expected to be an integer that identifies - the number of times that a connection attempt should be retried before the - attempt is terminated. The default value is *0*. - - The ``retry_delay`` parameter is expected to be an integer that identifies - the number of seconds to wait before making a new connection attempt. The - default value is *1* seconds. - - The ``tcp_connect_timeout`` parameter is expected to be a float that - indicates the maximum number of seconds to wait for establishing a - connection to the database host. The default value is *20.0* seconds. - - The ``ssl_server_dn_match`` parameter is expected to be a boolean that - indicates whether the server certificate distinguished name (DN) should be - matched in addition to the regular certificate verification that is - performed. Note that if the ``ssl_server_cert_dn`` parameter is not - provided, host name matching is performed instead. The default value is - *True*. - - The ``ssl_server_cert_dn`` parameter is expected to be a string that - indicates the distinguished name (DN) which should be matched with the - server. This value is ignored if the ``ssl_server_dn_match`` parameter is - not set to the value *True*. - - The ``wallet_location`` parameter is expected to be a string that - identifies the directory where the wallet can be found. In python-oracledb - Thin mode, this must be the directory of the PEM-encoded wallet file, - ewallet.pem. - - The ``events`` parameter is ignored in the python-oracledb Thin mode. - - The ``externalauth`` parameter is ignored in the python-oracledb Thin mode. - - If the ``mode`` parameter is specified, it must be one of the - :ref:`connection authorization modes ` - which are defined at the module level. The default value is - :data:`oracledb.AUTH_MODE_DEFAULT`. - - The ``disable_oob`` parameter is expected to be a boolean that indicates - whether out-of-band breaks should be disabled. This value has no effect - on Windows which does not support this functionality. The default value - is *False*. - - The ``stmtcachesize`` parameter is expected to be an integer which - specifies the initial size of the statement cache. The default is the - value of :attr:`defaults.stmtcachesize`. - - The ``tag`` parameter is ignored in the python-oracledb Thin mode. - - The ``matchanytag`` parameter is ignored in the python-oracledb Thin mode. - - The ``config_dir`` parameter is expected to be a string that indicates the - directory in which the :ref:`tnsnames.ora ` configuration file - is located. - - The ``appcontext`` parameter is expected to be a list of 3-tuples that - identifies the application context used by the connection. This parameter - should contain namespace, name, and value and each entry in the tuple - should be a string. - - The ``shardingkey`` parameter and ``supershardingkey`` parameters are - ignored in the python-oracledb Thin mode. 
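The ``appcontext`` format is easiest to see in a short sketch; the
``CLIENTCONTEXT`` namespace and the attribute names below are illustrative
only.

.. code-block:: python

    import oracledb

    app_ctx = [
        # (namespace, name, value) -- all three entries are strings
        ("CLIENTCONTEXT", "app_module", "orders"),
        ("CLIENTCONTEXT", "app_action", "nightly_batch"),
    ]

    pool = oracledb.create_pool_async(
        user="app_user", password="app_password",    # placeholders
        dsn="dbhost.example.com/orclpdb",
        appcontext=app_ctx,
    )

Inside the database, such values can then be read with
``SYS_CONTEXT('CLIENTCONTEXT', 'app_module')``.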
- - The ``debug_jdwp`` parameter is expected to be a string with the format - `host=;port=` that specifies the host and port of the PL/SQL - debugger. This allows using the Java Debug Wire Protocol (JDWP) to debug - PL/SQL code invoked by python-oracledb. - - The ``connection_id_prefix`` parameter is expected to be a string and is - added to the beginning of the generated ``connection_id`` that is sent to - the database for `tracing `__. - - The ``ssl_context`` parameter is expected to be an SSLContext object used - for connecting to the database using TLS. This SSL context will be - modified to include the private key or any certificates found in a - separately supplied wallet. This parameter should only be specified if - the default SSLContext object cannot be used. - - The ``sdu`` parameter is expected to be an integer that returns the - requested size of the Session Data Unit (SDU), in bytes. The value tunes - internal buffers used for communication to the database. Bigger values can - increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is negotiated - down to the lower of this value and the database network SDU configuration - value. See the `Database Net Services documentation `__ for more details. The default value is *8192* bytes. - - The ``pool_boundary`` parameter is expected to be one of the strings - *statement* or *transaction* which indicates when pooled :ref:`DRCP ` - or PRCP connections can be returned to the pool. If the value is - *statement*, then pooled DRCP or PRCP connections are implicitly released - back to the DRCP or PRCP pool when the connection is stateless (that is, - there are no active cursors, active transactions, temporary tables, or - temporary LOBs). If the value is *transaction*, then pooled DRCP or PRCP - connections are implicitly released back to the DRCP or PRCP pool when - either one of the methods :meth:`AsyncConnection.commit()` or - :meth:`AsyncConnection.rollback()` are called. This parameter requires the - use of DRCP or PRCP with Oracle Database 23ai (or later). See - :ref:`implicitconnpool` for more information. This value is used in both - the python-oracledb Thin and Thick modes. - - The ``use_tcp_fast_open`` parameter is expected to be a boolean which - indicates whether to use TCP Fast Open which is an `Oracle Autonomous - Database Serverless (ADB-S) `__ specific feature that can - reduce the latency in round-trips to the database after a connection has - been established. This feature is only available with certain versions of - ADB-S. This value is used in both python-oracledb Thin and Thick modes. - The default value is *False*. - - The ``ssl_version`` parameter is expected to be one of the constants - *ssl.TLSVersion.TLSv1_2* or *ssl.TLSVersion.TLSv1_3* which identifies the - TLS protocol version used. These constants are defined in the Python - `ssl `__ module. This - parameter can be specified when establishing connections with the protocol - *tcps*. This value is used in both python-oracledb Thin and Thick modes. - The value *ssl.TLSVersion.TLSv1_3* requires Oracle Database 23ai. If you - are using python-oracledb Thick mode, Oracle Client 23ai is additionally - required. - - The ``use_sni`` parameter is expected to be a boolean which indicates - whether to use the TLS Server Name Indication (SNI) extension to bypass the - second TLS negotiation that would otherwise be required. 
This parameter is - used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `Database Net - Services documentation - `__ for more details. - - The ``program`` parameter is expected to be a string which specifies the - name of the executable program or application connected to Oracle - Database. This value is only used in the python-oracledb Thin mode. The - default is the value of :attr:`defaults.program`. - - The ``machine`` parameter is expected to be a string which specifies the - machine name of the client connecting to Oracle Database. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.machine`. - - The ``terminal`` parameter is expected to be a string which specifies the - terminal identifier from which the connection originates. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.terminal`. - - The ``osuser`` parameter is expected to be a string which specifies the - operating system user that initiates the database connection. This value - is only used in the python-oracledb Thin mode. The default value is the - value of :attr:`defaults.osuser`. - - The ``driver_name`` parameter is expected to be a string which specifies - the driver used by the client to connect to Oracle Database. This value - is used in both the python-oracledb Thin and Thick modes. The default is - the value of :attr:`defaults.driver_name`. - - The ``extra_auth_params`` parameter is expected to be a dictionary - containing the configuration parameters necessary for Oracle Database - authentication using :ref:`OCI ` or :ref:`Azure - ` cloud native authentication plugins. This value is - used in both the python-oracledb Thin and Thick modes. See - :ref:`tokenauth`. - - The ``pool_name`` parameter is expected to be a string which specifies the - name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This parameter can be used in both python-oracledb Thin and Thick - modes. However, in Thick mode, when the ``thick_mode_dsn_passthrough`` - value in effect is *True*, it can only be used if the ``dsn`` parameter is - not specified. For Thick mode, you may prefer to set the Oracle Net - Services parameter `POOL_NAME `__ parameter in the - :ref:`easy connect string ` or - :ref:`connect descriptor `. See - :ref:`DRCP Pool Names `. - - The ``handle`` and ``thick_mode_dsn_passthrough`` parameters are ignored in - python-oracledb Thin mode. +.. autofunction:: create_pool_async .. versionchanged:: 3.2.0 @@ -2154,148 +173,27 @@ Oracledb Methods The ``connection_id_prefix`` parameter was added. -.. function:: Date(year, month, day) - - Constructs an object holding a date value. - +.. autofunction:: Date -.. function:: DateFromTicks(ticks) +.. autofunction:: DateFromTicks - Constructs an object holding a date value from the given ticks value - (number of seconds since the epoch; see the documentation of the standard - Python time module for details). - - -.. function:: enable_thin_mode() - - Makes python-oracledb be in Thin mode. After this method is called, Thick - mode cannot be enabled. If python-oracledb is already in Thick mode, then - calling ``enable_thin_mode()`` will fail. If Thin mode connections have - already been opened, or a connection pool created in Thin mode, then - calling ``enable_thin_mode()`` is a no-op. 
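A minimal sketch of the start-up pattern this enables, calling the function
once before any worker threads open standalone connections (the motivation is
expanded on below); the worker body and connect details are hypothetical:

.. code-block:: python

    import threading

    import oracledb

    # Fix the driver mode once, before any thread opens a connection.
    oracledb.enable_thin_mode()

    def worker():
        # Hypothetical standalone connection created concurrently by each thread.
        conn = oracledb.connect(user="scott", password="tiger",
                                dsn="dbhost.example.com/orclpdb")
        try:
            with conn.cursor() as cursor:
                cursor.execute("select user from dual")
                print(cursor.fetchone())
        finally:
            conn.close()

    threads = [threading.Thread(target=worker) for _ in range(4)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()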
- - Since python-oracledb defaults to Thin mode, almost all applications do not - need to call this method. However, because it bypasses python-oracledb's - internal mode-determination heuristic, it may be useful for applications - with multiple threads that concurrently create :ref:`standalone connections - ` when the application starts. +.. autofunction:: enable_thin_mode See :ref:`enablingthin` for more information. .. versionadded:: 2.5.0 -.. function:: from_arrow(obj) - - This method converts a data frame to a :ref:`DataFrame ` - or :ref:`ArrowArray ` instance. - - If ``obj`` supports the Arrow PyCapsule interface ``__arrow_c_stream__`` - method, then ``from_arrow()`` returns the instance as a :ref:`DataFrame - `. If ``obj`` does not support that method, but does - support ``__arrow_c_array__``, then an :ref:`ArrowArray - ` is returned. +.. autofunction:: from_arrow .. versionadded:: 3.3.0 -.. function:: get_pool(pool_alias) - - Returns a :ref:`ConnectionPool object ` from the python-oracledb - pool cache. The pool must have been previously created by passing the same - ``pool_alias`` value to :meth:`oracledb.create_pool()` or - :meth:`oracledb.create_pool_async()`. - - If a pool with the given name does not exist, *None* is returned. +.. autofunction:: get_pool See :ref:`connpoolcache` for more information. .. versionadded:: 3.0.0 -.. function:: init_oracle_client(lib_dir=None, config_dir=None, \ - error_url=None, driver_name=None) - - Enables python-oracledb Thick mode by initializing the Oracle Client - library, see :ref:`enablingthick`. If a standalone connection or pool has - already been created in Thin mode, ``init_oracle_client()`` will raise an - exception and python-oracledb will remain in Thin mode. - - If a standalone connection or pool has *not* already been created in Thin - mode, but ``init_oracle_client()`` raises an exception, python-oracledb - will remain in Thin mode but further calls to ``init_oracle_client()`` can - be made, if desired. - - The ``init_oracle_client()`` method can be called multiple times in each - Python process as long as the arguments are the same each time. - - The ``lib_dir`` parameter is a string or a bytes object that specifies the - directory containing Oracle Client libraries. If the ``lib_dir`` parameter - is set, then the specified directory is the only one searched for the - Oracle Client libraries; otherwise, the operating system library search - path is used to locate the Oracle Client library. If you are using Python - 3.11 and later, then the value specified in this parameter is encoded - using `locale.getencoding() `__. For all other Python versions, the encoding - "utf-8" is used. If a bytes object is specified in this parameter, then - this value will be used as is without any encoding. - - The ``config_dir`` parameter is a string or a bytes object that specifies - the directory in which the - :ref:`Optional Oracle Net Configuration ` and - :ref:`Optional Oracle Client Configuration ` files reside. - If the ``config_dir`` parameter is set, then the specified directory is - used to find Oracle Client library configuration files. This is - equivalent to setting the environment variable ``TNS_ADMIN`` and overrides - any value already set in ``TNS_ADMIN``. If this parameter is not set, the - :ref:`Oracle standard ` way of locating Oracle Client - library configuration files is used. If you are using Python 3.11 and - later, then the value specified in this parameter is encoded using - `locale.getencoding() `__. 
For all other Python versions, the encoding - "utf-8" is used. If a bytes object is specified in this parameter, then - this value will be used as is without any encoding. - - The ``error_url`` parameter is a string that specifies the URL which is - included in the python-oracledb exception message if the Oracle Client - libraries cannot be loaded. If the ``error_url`` parameter is set, then - the specified value is included in the message of the exception raised - when the Oracle Client library cannot be loaded; otherwise, the - :ref:`installation` URL is included. This parameter lets your application - display custom installation instructions. - - The ``driver_name`` parameter is a string that specifies the driver name - value. If the ``driver_name`` parameter is set, then the specified value - can be found in database views that give information about connections. - For example, it is in the CLIENT_DRIVER column of the - V$SESSION_CONNECT_INFO view. From Oracle Database 12.2, the name displayed - can be 30 characters. The standard is to set this value to ``" : - version>"``, where is the name of the driver and is its - version. There should be a single space character before and after the - colon. If this parameter is not set, then the value specified in - :attr:`oracledb.defaults.driver_name ` is used. If - the value of this attribute is *None*, then the default value in - python-oracledb Thick mode is like "python-oracledb thk : ". See - :ref:`otherinit`. - - At successful completion of a call to ``oracledb.init_oracle_client()``, - the attribute :attr:`defaults.config_dir` will be set as determined below - (first one wins): - - - the value of the ``oracledb.init_oracle_client()`` parameter - ``config_dir``, if one was passed. - - - the value of :attr:`defaults.config_dir` if it has one. I.e. - :attr:`defaults.config_dir` remains unchanged after - ``oracledb.init_oracle_client()`` completes. - - - the value of the environment variable ``$TNS_ADMIN``, if it is set. - - - the value of ``$ORACLE_HOME/network/admin`` if the environment variable - ``$ORACLE_HOME`` is set. - - - the directory of the loaded Oracle Client library, appended with - ``network/admin``. Note this directory is not determinable on AIX. - - - otherwise the value *None* is used. (Leaving :attr:`defaults.config_dir` - unchanged). +.. autofunction:: init_oracle_client .. dbapimethodextension:: @@ -2313,36 +211,13 @@ Oracledb Methods "utf-8" is used. These values may also be supplied as a ``bytes`` object, in which case they will be used as is. -.. function:: is_thin_mode() - - Returns a boolean indicating if python-oracledb is in Thin mode. - - Immediately after python-oracledb is imported, this function will return - *True* indicating that python-oracledb defaults to Thin mode. If a call to - :func:`oracledb.init_oracle_client()` returns successfully, then a - subsequent call to ``is_thin_mode()`` will return False indicating that - Thick mode is enabled. Once the first standalone connection or connection - pool is created, or a successful call to ``oracledb.init_oracle_client()`` - is made, or :meth:`oracledb.enable_thin_mode()` is called, then - python-oracledb’s mode is fixed and the value returned by - ``is_thin_mode()`` will never change for the lifetime of the process. - - The attribute :attr:`Connection.thin` can be used to check a connection's - mode. The attribute :attr:`ConnectionPool.thin` can be used to check a - pool's mode. +.. autofunction:: is_thin_mode .. dbapimethodextension:: .. 
versionadded:: 1.1.0 - -.. function:: makedsn(host, port, sid=None, service_name=None, region=None, \ - sharding_key=None, super_sharding_key=None) - - Returns a string suitable for use as the ``dsn`` parameter for - :meth:`~oracledb.connect()`. This string is identical to the strings that - are defined by the Oracle names server or defined in the ``tnsnames.ora`` - file. +.. autofunction:: makedsn .. deprecated:: python-oracledb 1.0 @@ -2350,480 +225,7 @@ Oracledb Methods .. dbapimethodextension:: -.. function:: PoolParams(min=1, max=2, increment=1, connectiontype=None, \ - getmode=oracledb.POOL_GETMODE_WAIT, homogeneous=True, timeout=0, \ - wait_timeout=0, max_lifetime_session=0, session_callback=None, \ - max_sessions_per_shard=0, soda_metadata_cache=False, \ - ping_interval=60, ping_timeout=5000, user=None, proxy_user=Nonde, \ - password=None, newpassword=None, wallet_password=None, \ - access_token=None, host=None, port=1521, protocol="tcp", \ - https_proxy=None, https_proxy_port=0, service_name=None, \ - instance_name=None, sid=None, server_type=None, cclass=None, \ - purity=oracledb.PURITY_DEFAULT, expire_time=0, retry_count=0, \ - retry_delay=1, tcp_connect_timeout=20.0, ssl_server_dn_match=True, \ - ssl_server_cert_dn=None, wallet_location=None, events=False, \ - externalauth=False, mode=oracledb.AUTH_MODE_DEFAULT, \ - disable_oob=False, stmtcachesize=oracledb.defaults.stmtcachesize, \ - edition=None, tag=None, matchanytag=False, \ - config_dir=oracledb.defaults.config_dir, appcontext=[], \ - shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=8192, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=False, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_name=None, handle=0) - - Creates and returns a :ref:`PoolParams Object `. The object - can be passed to :meth:`oracledb.create_pool()`. - - All the parameters are optional. - - The ``min`` parameter is the minimum number of connections that the pool - should contain. The default value is *1*. - - The ``max`` parameter is the maximum number of connections that the pool - should contain. The default value is *2*. - - The ``increment`` parameter is the number of connections that should be - added to the pool whenever a new connection needs to be created. The - default value is *1*. - - The ``connectiontype`` parameter is the class of the connection that should - be returned during calls to :meth:`ConnectionPool.acquire()`. It must be a - Connection or a subclass of Connection. - - The ``getmode`` parameter determines the behavior of - :meth:`ConnectionPool.acquire()`. One of the constants - :data:`oracledb.POOL_GETMODE_WAIT`, :data:`oracledb.POOL_GETMODE_NOWAIT`, - :data:`oracledb.POOL_GETMODE_FORCEGET`, or - :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. The default value is - :data:`oracledb.POOL_GETMODE_WAIT`. - - The ``homogeneous`` parameter is a boolean that indicates whether the - connections are homogeneous (same user) or heterogeneous (multiple users). - The default value is *True*. - - The ``timeout`` parameter is the length of time (in seconds) that a - connection may remain idle in the pool before it is terminated. 
This - applies only when the pool has more than ``min`` connections open, allowing - it to shrink to the specified minimum size. The default value is *0* - seconds. A value of *0* means there is no limit. - - The ``wait_timeout`` parameter is the length of time (in milliseconds) that - a caller should wait when acquiring a connection from the pool with - ``getmode`` set to :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. The default - value is *0* milliseconds. - - The ``max_lifetime_session`` parameter is the length of time (in seconds) - that a pooled connection may exist since first being created. The default - value is *0*. A value of *0* means that there is no limit. Connections - become candidates for termination when they are acquired or released back - to the pool and have existed for longer than ``max_lifetime_session`` - seconds. In python-oracledb Thick mode, Oracle Client libraries 12.1 or - later must be used and, prior to Oracle Client 21, cleanup only occurs when - the pool is accessed. - - The ``session_callback`` parameter is a callable that is invoked when a - connection is returned from the pool for the first time, or when the - connection tag differs from the one requested. - - The ``max_sessions_per_shard`` parameter is the maximum number of - connections that may be associated with a particular shard. The default - value is *0*. - - The ``soda_metadata_cache`` parameter is a boolean that indicates whether - or not the SODA metadata cache should be enabled. The default value is - *False*. - - The ``ping_interval`` parameter is the length of time (in seconds) after - which an unused connection in the pool will be a candidate for pinging when - :meth:`ConnectionPool.acquire()` is called. If the ping to the database - indicates the connection is not alive a replacement connection will be - returned by :meth:`ConnectionPool.acquire()`. If ping_interval is a - negative value, then the ping functionality will be disabled. The default - value is *60* seconds. - - The ``ping_timeout`` parameter is the maximum length of time (in - milliseconds) that :meth:`ConnectionPool.acquire()` waits for a connection - to respond to any internal ping to the database. If the ping does not - respond within the specified time, then the connection is destroyed and - :meth:`~ConnectionPool.acquire()` returns a different connection. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *5000* milliseconds. - - The ``user`` parameter is expected to be a string which indicates the name - of the user to connect to. This value is used in both the python-oracledb - Thin and Thick modes. - - The ``proxy_user`` parameter is expected to be a string which indicates the - name of the proxy user to connect to. If this value is not specified, it - will be parsed out of user if user is in the form "user[proxy_user]". This - value is used in both the python-oracledb Thin and Thick modes. - - The ``password`` parameter expected to be a string which indicates the - password for the user. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``newpassword`` parameter is expected to be a string which indicates - the new password for the user. The new password will take effect - immediately upon a successful connection to the database. This value is - used in both the python-oracledb Thin and Thick modes. 
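To make the sizing, timeout, and ping parameters above concrete, here is one
way a parameter object might be built and handed to
:meth:`oracledb.create_pool()`; the callback body, credentials, and connect
string are placeholders:

.. code-block:: python

    import oracledb

    def init_session(connection, requested_tag):
        # Hypothetical per-connection setup, run the first time a pooled
        # connection is returned to the application.
        with connection.cursor() as cursor:
            cursor.execute("alter session set time_zone = 'UTC'")

    params = oracledb.PoolParams(
        min=1, max=8, increment=2,
        getmode=oracledb.POOL_GETMODE_WAIT,
        timeout=300,                # close idle connections above "min" after 5 minutes
        max_lifetime_session=3600,  # recycle connections after an hour
        ping_interval=60,           # ping connections idle for a minute before reuse
        session_callback=init_session,
    )

    pool = oracledb.create_pool(user="scott", password="tiger",
                                dsn="dbhost.example.com/orclpdb", params=params)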
- - The ``wallet_password`` parameter is expected to be a string which - indicates the password to use to decrypt the PEM-encoded wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode. The - ``wallet_password`` parameter is not needed for cwallet.sso files that are - used in the python-oracledb Thick mode. - - The ``access_token`` parameter is expected to be a string or a 2-tuple or - a callable. If it is a string, it specifies an Azure AD OAuth2 token used - for Open Authorization (OAuth 2.0) token based authentication. If it is a - 2-tuple, it specifies the token and private key strings used for Oracle - Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``host`` parameter is expected to be a string which specifies the name - or IP address of the machine hosting the listener, which handles the - initial connection to the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``port`` parameter is expected to be an integer which indicates the - port number on which the listener is listening. The default value is - *1521*. This value is used in both the python-oracledb Thin and Thick - modes. - - The ``protocol`` parameter is expected to be one of the strings *tcp* or - *tcps* which indicates whether to use unencrypted network traffic or - encrypted network traffic (TLS). The default value is *tcp*. This value is - used in both the python-oracledb Thin and Thick modes. - - The ``https_proxy`` parameter is expected to be a string which indicates - the name or IP address of a proxy host to use for tunneling secure - connections. This value is used in both the python-oracledb Thin and Thick - modes. - - The ``https_proxy_port`` parameter is expected to be an integer which - indicates the port that is to be used to communicate with the proxy host. - The default value is *0*. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``service_name`` parameter is expected to be a string which indicates - the service name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``instance_name`` parameter is expected to be a string which indicates - the instance name of the database. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``sid`` parameter is expected to be a string which indicates the SID of - the database. It is recommended to use ``service_name`` instead. This value - is used in both the python-oracledb Thin and Thick modes. - - The ``server_type`` parameter is expected to be a string that indicates the - type of server connection that should be established. If specified, it - should be one of *dedicated*, *shared*, or *pooled*. This value is used in - both the python-oracledb Thin and Thick modes. - - The ``cclass`` parameter is expected to be a string that identifies the - connection class to use for :ref:`drcp`. This value is used in both the - python-oracledb Thin and Thick modes. - - The ``purity`` parameter is expected to be one of the - :ref:`oracledb.PURITY_* ` constants that identifies the - purity to use for DRCP. 
This value is used in both the python-oracledb Thin - and Thick modes. Internally pooled connections will default to a purity of - :data:`~oracledb.PURITY_SELF`. - - The ``expire_time`` parameter is expected to be an integer which indicates - the number of minutes between the sending of keepalive probes. If this - parameter is set to a value greater than zero it enables keepalive. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *0* minutes. - - The ``retry_count`` parameter is expected to be an integer that identifies - the number of times that a connection attempt should be retried before the - attempt is terminated. This value is used in both the python-oracledb Thin - and Thick modes. The default value is *0*. - - The ``retry_delay`` parameter is expected to be an integer that identifies - the number of seconds to wait before making a new connection attempt. This - value is used in both the python-oracledb Thin and Thick modes. The default - value is *1* seconds. - - The ``tcp_connect_timeout`` parameter is expected to be a float that - indicates the maximum number of seconds to wait for establishing a - connection to the database host. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *20.0* seconds. - - The ``ssl_server_dn_match`` parameter is expected to be a boolean that - indicates whether the server certificate distinguished name (DN) should be - matched in addition to the regular certificate verification that is - performed. Note that if the ssl_server_cert_dn parameter is not provided, - host name matching is performed instead. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *True*. - - The ``ssl_server_cert_dn`` parameter is expected to be a string that - indicates the distinguished name (DN) which should be matched with the - server. This value is ignored if the ssl_server_dn_match parameter is not - set to the value *True*. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``wallet_location`` parameter is expected to be a string that - identifies the directory where the wallet can be found. In python-oracledb - Thin mode, this must be the directory of the PEM-encoded wallet file, - ewallet.pem. In python-oracledb Thick mode, this must be the directory of - the file, cwallet.sso. This value is used in both the python-oracledb Thin - and Thick modes. - - The ``externalauth`` parameter is a boolean that determines whether to use - external authentication. This value is only used in the python-oracledb - Thick mode. The default value is *False*. - - The ``events`` parameter is expected to be a boolean that specifies whether - the events mode should be enabled. This value is only used in the - python-oracledb Thick mode. This parameter is needed for continuous - query notification and high availability event notifications. The default - value is *False*. - - The ``mode`` parameter is expected to be an integer that identifies the - authorization mode to use. This value is used in both the python-oracledb - Thin and Thick modes.The default value is - :data:`oracledb.AUTH_MODE_DEFAULT`. - - The ``disable_oob`` parameter is expected to be a boolean that indicates - whether out-of-band breaks should be disabled. This value is only used - in the python-oracledb Thin mode and has no effect on Windows which - does not support this functionality. The default value is *False*. 
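For the TLS-related settings just described, a parameter object for a ``tcps``
connection might look like the following sketch; the wallet directory, host,
and service details are examples only:

.. code-block:: python

    import oracledb

    params = oracledb.PoolParams(
        protocol="tcps",
        host="dbhost.example.com",
        port=1522,
        service_name="salespdb",
        ssl_server_dn_match=True,                    # match the server DN (or host name) during TLS
        wallet_location="/opt/oracle/wallets/mydb",  # directory containing ewallet.pem
        expire_time=2,                               # keepalive probes every 2 minutes
    )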
- - The ``stmtcachesize`` parameter is expected to be an integer that - identifies the initial size of the statement cache. This value is used in - both the python-oracledb Thin and Thick modes. The default is the value of - :attr:`defaults.stmtcachesize`. - - The ``edition`` parameter is expected to be a string that indicates the - edition to use for the connection. It requires Oracle Database 11.2, or - later. This parameter cannot be used simultaneously with the ``cclass`` - parameter. - - The ``tag`` parameter is expected to be a string that identifies the type - of connection that should be returned from a pool. This value is only used - in the python-oracledb Thick mode. - - The ``matchanytag`` parameter is expected to be a boolean specifying - whether any tag can be used when acquiring a connection from the pool. This - value is only used in the python-oracledb Thick mode when acquiring a - connection from a pool. The default value is *False*. - - The ``config_dir`` parameter is expected to be a string that indicates the - directory in which the :ref:`tnsnames.ora ` configuration file - is located. - - The ``appcontext`` parameter is expected to be a list of 3-tuples that - identifies the application context used by the connection. This parameter - should contain namespace, name, and value and each entry in the tuple - should be a string. - - The ``shardingkey`` parameter and ``supershardingkey`` parameters, if - specified, are expected to be a sequence of values which identifies the - database shard to connect to. The key values can be a list of strings, - numbers, bytes, or dates. These values are only used in the - python-oracledb Thick mode and are ignored in the Thin mode. See - :ref:`connsharding`. - - The ``debug_jdwp`` parameter is expected to be a string with the format - `host=;port=` that specifies the host and port of the PL/SQL - debugger. This allows using the Java Debug Wire Protocol (JDWP) to debug - PL/SQL code invoked by python-oracledb. This value is only used in the - python-oracledb Thin mode. For python-oracledb Thick mode, set the - ``ORA_DEBUG_JDWP`` environment variable which has the same syntax. For more - information, see :ref:`jdwp`. - - The ``connection_id_prefix`` parameter is expected to be a string and is - added to the beginning of the generated ``connection_id`` that is sent to - the database for `tracing `__. This value - is only used in the python-oracledb Thin mode. - - The ``ssl_context`` parameter is expected to be an `SSLContext object - `__ which is used - for connecting to the database using TLS. This SSL context will be - modified to include the private key or any certificates found in a - separately supplied wallet. This parameter should only be specified if - the default SSLContext object cannot be used. This value is only used in - the python-oracledb Thin mode. - - The ``sdu`` parameter is expected to be an integer that returns the - requested size of the Session Data Unit (SDU), in bytes. The value tunes - internal buffers used for communication to the database. Bigger values can - increase throughput for large queries or bulk data loads, but at the cost - of higher memory use. The SDU size that will actually be used is negotiated - down to the lower of this value and the database network SDU configuration - value. See the `Database Net Services documentation `__ for more details. This value is used in both the - python-oracledb Thin and Thick modes. The default value is *8192* bytes. 
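As an illustration of the ``appcontext`` format described above, each entry is
a (namespace, name, value) 3-tuple of strings; the context names and values
below are invented:

.. code-block:: python

    import oracledb

    # Hypothetical application context entries applied to each connection.
    app_context = [
        ("CLIENTCONTEXT", "module", "order-entry"),
        ("CLIENTCONTEXT", "region", "emea"),
    ]

    params = oracledb.PoolParams(appcontext=app_context)

Values set this way can then be read inside the database with ``SYS_CONTEXT``.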
- - The ``pool_boundary`` parameter is expected to be one of the strings - *statement* or *transaction* which indicates when pooled :ref:`DRCP ` - or PRCP connections can be returned to the pool. If the value is - *statement*, then pooled DRCP or PRCP connections are implicitly released - back to the DRCP or PRCP pool when the connection is stateless (that is, - there are no active cursors, active transactions, temporary tables, or - temporary LOBs). If the value is *transaction*, then pooled DRCP or PRCP - connections are implicitly released back to the DRCP or PRCP pool when - either one of the methods :meth:`Connection.commit()` or - :meth:`Connection.rollback()` are called. This parameter requires the use - of DRCP or PRCP with Oracle Database 23ai (or later). See - :ref:`implicitconnpool` for more information. This value is used in both - the python-oracledb Thin and Thick modes. - - The ``use_tcp_fast_open`` parameter is expected to be a boolean which - indicates whether to use TCP Fast Open which is an `Oracle Autonomous - Database Serverless (ADB-S) `__ specific feature that can - reduce the latency in round-trips to the database after a connection has - been established. This feature is only available with certain versions of - ADB-S. This value is used in both python-oracledb Thin and Thick modes. - The default value is *False*. - - The ``ssl_version`` parameter is expected to be one of the constants - *ssl.TLSVersion.TLSv1_2* or *ssl.TLSVersion.TLSv1_3* which identifies the - TLS protocol version used. These constants are defined in the Python - `ssl `__ module. This - parameter can be specified when establishing connections with the protocol - "tcps". This value is used in both python-oracledb Thin and Thick modes. - The value *ssl.TLSVersion.TLSv1_3* requires Oracle Database 23ai. If you - are using python-oracledb Thick mode, Oracle Client 23ai is additionally - required. - - The ``use_sni`` parameter is expected to be a boolean which indicates - whether to use the TLS Server Name Indication (SNI) extension to bypass the - second TLS negotiation that would otherwise be required. This parameter is - used in both python-oracledb Thin and Thick modes. This parameter requires - Oracle Database 23.7. The default value is *False*. See the `Database Net - Services documentation - `__ for more details. - - The ``program`` parameter is expected to be a string which specifies the - name of the executable program or application connected to Oracle - Database. This value is only used in the python-oracledb Thin mode. The - default is the value of :attr:`defaults.program`. - - The ``machine`` parameter is expected to be a string which specifies the - machine name of the client connecting to Oracle Database. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.machine`. - - The ``terminal`` parameter is expected to be a string which specifies the - terminal identifier from which the connection originates. This value is - only used in the python-oracledb Thin mode. The default is the value of - :attr:`defaults.terminal`. - - The ``osuser`` parameter is expected to be a string which specifies the - operating system user that initiates the database connection. This value - is only used in the python-oracledb Thin mode. The default value is the - value of :attr:`defaults.osuser`. - - The ``driver_name`` parameter is expected to be a string which specifies - the driver used by the client to connect to Oracle Database. 
This value - is used in both the python-oracledb Thin and Thick modes. The default is - the value of :attr:`defaults.driver_name`. - - The ``thick_mode_dsn_passthrough`` parameter is expected to be a boolean - which indicates whether the connect string should be passed unchanged to - the Oracle Client libraries for parsing when using python-oracledb Thick - mode. If this parameter is set to *False* in Thick mode, connect strings - are parsed by python-oracledb itself and a generated connect descriptor is - sent to the Oracle Client libraries. This value is only used in the - python-oracledb Thick mode. The default value is - :attr:`defaults.thick_mode_dsn_passthrough`. For more information, see - :ref:`usingconfigfiles`. - - The ``extra_auth_params`` parameter is expected to be a dictionary - containing the configuration parameters necessary for Oracle Database - authentication using :ref:`OCI ` or :ref:`Azure - ` cloud native authentication plugins. This value is - used in both the python-oracledb Thin and Thick modes. See - :ref:`tokenauth`. - - The ``pool_name`` parameter is expected to be a string which specifies the - name of the pool when using multiple DRCP pools with Oracle Database 23.4 - or later. This value is used in both python-oracledb Thin and Thick modes. - See :ref:`DRCP Pool Names `. - - The ``handle`` parameter is expected to be an integer which represents a - pointer to a valid service context handle. This value is only used in the - python-oracledb Thick mode. It should be used with extreme caution. The - default value is *0*. - - .. versionchanged:: 3.2.0 - - The ``pool_name`` parameter was added. - - .. versionchanged:: 3.0.0 - - The ``use_sni``, ``instance_name``, ``thick_mode_dsn_passthrough``, - ``extra_auth_params``, and ``instance_name`` parameters were added. - - .. versionchanged:: 2.5.0 - - The ``program``, ``machine``, ``terminal``, ``osuser``, and - ``driver_name`` parameters were added. Support for ``edition`` and - ``appcontext`` was added to python-oracledb Thin mode. - - .. versionchanged:: 2.3.0 - - The default value of the ``retry_delay`` parameter was changed from *0* - seconds to *1* second. The default value of the ``tcp_connect_timeout`` - parameter was changed from *60.0* seconds to *20.0* seconds. The - ``ping_timeout`` and ``ssl_version`` parameters were added. - - .. versionchanged:: 2.1.0 - - The ``pool_boundary`` and ``use_tcp_fast_open`` parameters were added. - - .. versionchanged:: 2.0.0 - - The ``ssl_context`` and ``sdu`` parameters were added. - - .. versionchanged:: 1.4.0 - - The ``connection_id_prefix`` parameter was added. - -.. function:: SparseVector(num_dimensions, indices, values) - - Creates and returns a :ref:`SparseVector object `. - - The ``num_dimensions`` parameter is the number of dimensions contained in - the vector. - - The ``indices`` parameter is the indices (zero-based) of non-zero values - in the vector. - - The ``values`` parameter is the non-zero values stored in the vector. - - .. versionadded:: 3.0.0 - -.. function:: register_params_hook(hook_function) - - Registers a user parameter hook function that will be called internally by - python-oracledb prior to connection or pool creation. The hook function - accepts a copy of the parameters that will be used to create the pool or - standalone connection and may modify them. 
For example, the cloud native - authentication plugins modify the "access_token" parameter with a function - that will acquire the token using information found in the - "extra_auth_parms" parameter. - - Multiple hooks may be registered. They will be invoked in order of - registration. +.. autofunction:: register_params_hook To unregister a user function, use :meth:`oracledb.unregister_params_hook`. @@ -2833,22 +235,7 @@ Oracledb Methods .. versionadded:: 3.0.0 -.. function:: register_password_type(password_type, hook_function) - - Registers a user password hook function that will be called internally by - python-oracledb when a password is supplied as a dictionary containing the - given ``password_type`` as the key "type". The hook function is called for - passwords specified as the ``password``, ``newpassword`` and - ``wallet_parameter`` parameters in calls to :meth:`oracledb.connect()`, - :meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, and - :meth:`oracledb.create_pool_async()`. - - Your hook function is expected to accept the dictionary supplied by the - application and return the valid password. - - Calling :meth:`~oracledb.register_password_type()` with the - ``hook_function`` parameter set to *None* will result in a previously - registered user function being removed and the default behavior restored. +.. autofunction:: register_password_type See :ref:`registerpasswordtype`. @@ -2856,68 +243,7 @@ Oracledb Methods .. versionadded:: 3.0.0 -.. function:: register_protocol(protocol, hook_function) - - Registers a user protocol hook function that will be called internally by - python-oracledb Thin mode prior to connection or pool creation. The hook - function will be invoked when :func:`oracledb.connect`, - :func:`oracledb.create_pool`, :meth:`oracledb.connect_async()`, or - :meth:`oracledb.create_pool_async()` are called with a ``dsn`` parameter - value prefixed with the specified protocol. The user function will also be - invoked when :meth:`ConnectParams.parse_connect_string()` is called in Thin - or Thick modes with a similar ``connect_string`` parameter value. - - Your hook function is expected to construct valid connection details. For - example, if a hook function is registered for the "ldaps" protocol, then - calling :func:`oracledb.connect` with a connection string prefixed with - "ldaps://" will invoke the function. The function can then perform LDAP - lookup to retrieve and set the actual database information that will be - used internally by python-oracledb to complete the connection creation. - - The ``protocol`` parameter is a string that will be matched against the - prefix appearing before "://" in connection strings. - - The ``hook_function`` parameter should be a function with the signature:: - - hook_function(protocol, protocol_arg, params) - - The hook function will be called with the following arguments: - - - The ``protocol`` parameter is the value that was registered. - - - The ``protocol_arg`` parameter is the section after "://" in the - connection string used in the connection or pool creation call, or passed - to :meth:`~ConnectParams.parse_connect_string()`. - - - The ``params`` parameter is an instance of :ref:`ConnectParams - `. 
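A minimal hook sketch following the signature above; the ``db-alias`` protocol
name and the lookup table are purely illustrative:

.. code-block:: python

    import oracledb

    def alias_hook(protocol, protocol_arg, params):
        # "protocol_arg" is the text after "://" in the connect string.
        # A real hook might perform an LDAP or configuration lookup; this
        # one uses a hard-coded mapping.
        known_aliases = {"sales": "dbhost.example.com:1521/salespdb"}
        params.parse_connect_string(known_aliases[protocol_arg])

    oracledb.register_protocol("db-alias", alias_hook)

    # Connect strings such as "db-alias://sales" now invoke alias_hook()
    # before python-oracledb completes the connection or pool creation.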
- - When your hook function is invoked internally prior to connection or pool - creation, ``params`` will be the ConnectParams instance originally passed - to the :func:`oracledb.connect`, :func:`oracledb.create_pool`, - :meth:`oracledb.connect_async()`, or :meth:`oracledb.create_pool_async()` - call, if such an instance was passed. Otherwise it will be a new - ConnectParams instance. The hook function should parse ``protocol`` and - ``protocol_arg`` and take any desired action to update ``params`` - :ref:`attributes ` with appropriate connection - parameters. Attributes can be set using :meth:`ConnectParams.set()` or - :meth:`ConnectParams.parse_connect_string()`. The ConnectParams instance - will then be used to complete the connection or pool creation. - - When your hook function is invoked by - :meth:`ConnectParams.parse_connect_string()`, then ``params`` will be the - invoking ConnectParams instance that you can update using - :meth:`ConnectParams.set()` or - :meth:`ConnectParams.parse_connect_string()`. - - Internal hook functions for the "tcp" and "tcps" protocols are - pre-registered but can be overridden if needed. If any other protocol has - not been registered, then connecting will result in the error ``DPY-4021: - invalid protocol``. - - Calling :meth:`~oracledb.register_protocol()` with the ``hook_function`` - parameter set to *None* will result in a previously registered user function - being removed and the default behavior restored. +.. autofunction:: register_protocol See :ref:`registerprotocolhook` for more information. @@ -2925,41 +251,15 @@ Oracledb Methods .. versionadded:: 2.5.0 -.. function:: Time(hour, minute, second) - - Constructs an object holding a time value. - - .. note:: - - A time-only data type is not supported by Oracle Database. Calling this - function raises a NotSupportedError exception. - - -.. function:: TimeFromTicks(ticks) - - Constructs an object holding a time value from the given ticks value - (number of seconds since the epoch; see the documentation of the standard - Python time module for details). +.. autofunction:: Time - .. note:: - - A time-only data type is not supported by Oracle Database. Calling this - function raises a NotSupportedError exception. - -.. function:: Timestamp(year, month, day, hour, minute, second) - - Constructs an object holding a time stamp value. +.. autofunction:: TimeFromTicks -.. function:: TimestampFromTicks(ticks) +.. autofunction:: Timestamp - Constructs an object holding a time stamp value from the given ticks value - (number of seconds since the epoch; see the documentation of the standard - Python time module for details). +.. autofunction:: TimestampFromTicks -.. function:: unregister_params_hook(hook_function) - - Unregisters a user parameter function that was earlier registered with a - call to :meth:`oracledb.register_params_hook()`. +.. autofunction:: unregister_params_hook .. dbapimethodextension:: @@ -3551,6 +851,7 @@ parameter of the :meth:`Connection.shutdown()` method. should be prohibited and no new transactions should be allowed. It then waits for only local active transactions to complete. +.. _eventtypes: Event Types ----------- @@ -4353,13 +1654,6 @@ All of these types are extensions to the DB API definition. This type object is the Python type of the :ref:`database type constants `. - -.. data:: LOB - - This type object is the Python type of :data:`DB_TYPE_BLOB`, - :data:`DB_TYPE_BFILE`, :data:`DB_TYPE_CLOB` and :data:`DB_TYPE_NCLOB` data - that is returned from cursors. - .. 
_tpcconstants: Two-Phase Commit (TPC) Constants diff --git a/doc/src/api_manual/pipeline.rst b/doc/src/api_manual/pipeline.rst index 326fe95b..b9536e26 100644 --- a/doc/src/api_manual/pipeline.rst +++ b/doc/src/api_manual/pipeline.rst @@ -4,6 +4,8 @@ API: Pipeline Objects ********************* +.. currentmodule:: oracledb + Pipelining is only supported in python-oracledb Thin mode with :ref:`asyncio `. See :ref:`pipelining` for more information about pipelining. @@ -16,248 +18,158 @@ information about pipelining. .. _pipelineobjs: -Pipeline Objects -================ +Pipeline Class +============== + +.. autoclass:: Pipeline -Pipeline objects represent a pipeline used to execute multiple database -operations. A Pipeline object is created by calling -:meth:`oracledb.create_pipeline()`. + Pipeline objects represent a pipeline used to execute multiple database + operations. A Pipeline object is created by calling + :meth:`oracledb.create_pipeline()`. .. _pipelinemethods: Pipeline Methods ---------------- -.. method:: Pipeline.add_callfunc(name, return_type, parameters=None, keyword_parameters=None) +.. automethod:: Pipeline.add_callfunc - Adds an operation to the pipeline that calls a stored PL/SQL function with - the given parameters and return type. The created - :ref:`PipelineOp object ` is also returned from this - function. :ref:`pipelineopattrs` can be used to examine the operation, if - needed. + :ref:`pipelineopattrs` can be used to examine the operation, if needed. - When the Pipeline is executed, the - :ref:`PipelineOpResult object ` that is returned for - this operation will have the :attr:`~PipelineOpResult.return_value` - attribute populated with the return value of the PL/SQL function if the - call completes successfully. + .. seealso:: -.. method:: Pipeline.add_callproc(name, parameters=None, keyword_parameters=None) + :ref:`PipelineOp object ` and + :ref:`PipelineOpResult object ` - Adds an operation that calls a stored procedure with the given parameters. - The created :ref:`PipelineOp object ` is also returned - from this function. :ref:`pipelineopattrs` can be used to examine the - operation, if needed. +.. automethod:: Pipeline.add_callproc -.. method:: Pipeline.add_commit() + :ref:`pipelineopattrs` can be used to examine the operation, if needed. - Adds an operation that performs a commit. + .. seealso:: -.. method:: Pipeline.add_execute(statement, parameters=None) + :ref:`PipelineOp object ` - Adds an operation that executes a statement with the given parameters. - The created :ref:`PipelineOp object ` is also returned - from this function. :ref:`pipelineopattrs` can be used to examine the - operation, if needed. +.. automethod:: Pipeline.add_commit - Do not use this for queries that return rows. Instead use - :meth:`Pipeline.add_fetchall()`, :meth:`Pipeline.add_fetchmany()`, or - :meth:`Pipeline.add_fetchone()`. +.. automethod:: Pipeline.add_execute -.. method:: Pipeline.add_executemany(statement, parameters) + :ref:`pipelineopattrs` can be used to examine the operation, if needed. - Adds an operation that executes a SQL statement once using all bind value - mappings or sequences found in the sequence parameters. This can be used to - insert, update, or delete multiple rows in a table. It can also invoke a - PL/SQL procedure multiple times. See :ref:`batchstmnt`. + .. seealso:: - The created :ref:`PipelineOp object ` is also returned from - this function. :ref:`pipelineopattrs` can be used to examine the operation, - if needed. 
+ :ref:`PipelineOp object ` - The ``parameters`` parameter can be a list of tuples, where each tuple item - maps to one bind variable placeholder in ``statement``. It can also be a - list of dictionaries, where the keys match the bind variable placeholder - names in ``statement``. If there are no bind values, or values have - previously been bound, the ``parameters`` value can be an integer - specifying the number of iterations. +.. automethod:: Pipeline.add_executemany -.. method:: Pipeline.add_fetchall(statement, parameters=None, arraysize=None, rowfactory=None) + :ref:`pipelineopattrs` can be used to examine the operation, if needed. - Adds an operation that executes a query and returns all of the rows from - the result set. The created :ref:`PipelineOp object ` is - also returned from this function. :ref:`pipelineopattrs` can be used to - examine the operation, if needed. + .. seealso:: - When the Pipeline is executed, the :ref:`PipelineOpResult - object ` that is returned for this operation will - have the :attr:`~PipelineOpResult.rows` attribute populated with the list - of rows returned by the query. + :ref:`batchstmnt` and :ref:`PipelineOp object ` - The default value for ``arraysize`` is :attr:`defaults.arraysize`. +.. automethod:: Pipeline.add_fetchall - Internally, this operation's :attr:`Cursor.prefetchrows` size is set to the - value of the explicit or default ``arraysize`` parameter value. + :ref:`pipelineopattrs` can be used to examine the operation, if needed. -.. method:: Pipeline.add_fetchmany(statement, parameters=None, num_rows=None, rowfactory=None) + .. seealso:: - Adds an operation that executes a query and returns up to the specified - number of rows from the result set. The created - :ref:`PipelineOp object ` is also returned from this - function. :ref:`pipelineopattrs` can be used to examine the operation, if - needed. + :ref:`PipelineOp object ` and + :ref:`PipelineOpResult object ` - When the Pipeline is executed, the - :ref:`PipelineOpResult object ` that is returned for - this operation will have the :attr:`~PipelineOpResult.rows` attribute - populated with the list of rows returned by the query. +.. automethod:: Pipeline.add_fetchmany - The default value for ``num_rows`` is the value of - :attr:`defaults.arraysize`. + :ref:`pipelineopattrs` can be used to examine the operation, if needed. - Internally, this operation's :attr:`Cursor.prefetchrows` size is set to the - value of the explicit or default ``num_rows`` parameter, allowing all rows - to be fetched in one :ref:`round-trip ` + .. seealso:: - Since only one fetch is performed for a query operation, consider adding a - ``FETCH NEXT`` clause to the statement to prevent the database processing - rows that will never be fetched, see :ref:`rowlimit`. + :ref:`PipelineOp object `, + :ref:`PipelineOpResult object `, + :ref:`roundtrips`, and :ref:`rowlimit` -.. method:: Pipeline.add_fetchone(statement, parameters=None, rowfactory=None) +.. automethod:: Pipeline.add_fetchone - Adds an operation that executes a query and returns the first row of the - result set if one exists. The created - :ref:`PipelineOp object ` is also returned from this - function. :ref:`pipelineopattrs` can be used to examine the operation, if - needed. + :ref:`pipelineopattrs` can be used to examine the operation, if needed. 
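A rough end-to-end sketch of queueing fetch operations and running them in one
pipeline; the credentials and queried tables are placeholders:

.. code-block:: python

    import asyncio
    import oracledb

    async def main():
        # Hypothetical credentials and connect string; replace with real values.
        conn = await oracledb.connect_async(user="scott", password="tiger",
                                            dsn="dbhost.example.com/orclpdb")
        pipeline = oracledb.create_pipeline()
        pipeline.add_fetchone("select sysdate from dual")
        pipeline.add_fetchall(
            "select owner, table_name from all_tables fetch next 5 rows only"
        )
        results = await conn.run_pipeline(pipeline)
        for result in results:
            print(result.operation.op_type, result.rows)
        await conn.close()

    asyncio.run(main())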
- When the Pipeline is executed, the - :ref:`PipelineOpResult object ` that is returned for - this operation will have the :attr:`~PipelineOpResult.rows` attribute - populated with this row if the query is performed successfully. + .. seealso:: - Internally, this operation's :attr:`Cursor.prefetchrows` and - :attr:`Cursor.arraysize` sizes will be set to 1. - - Since only one fetch is performed for a query operation, consider adding a - ``WHERE`` condition or using a ``FETCH NEXT`` clause in the statement to - prevent the database processing rows that will never be fetched, see - :ref:`rowlimit`. + :ref:`PipelineOp object `, + :ref:`PipelineOpResult object `, and + :ref:`rowlimit` Pipeline Attributes ------------------- -.. attribute:: Pipeline.operations - - This read-only attribute returns the list of operations associated with - the pipeline. +.. autoproperty:: Pipeline.operations .. _pipelineopobjs: -PipelineOp Objects -================== +PipelineOp Class +================ + +.. autoclass:: PipelineOp -PipelineOp objects are created by calling the methods in the -:ref:`Pipeline class `. + A PipelineOp object should be created by calling the methods in the + :ref:`Pipeline class `. .. _pipelineopattrs: PipelineOp Attributes --------------------- -.. attribute:: PipelineOp.arraysize - - This read-only attribute returns the :ref:`array size ` that - will be used when fetching query rows with :meth:`Pipeline.add_fetchall()`. - For all other operations, the value returned is *0*. - -.. attribute:: PipelineOp.keyword_parameters - - This read-only attribute returns the keyword parameters to the stored - procedure or function being called by the operation, if applicable. +.. autoproperty:: PipelineOp.arraysize -.. attribute:: PipelineOp.name + .. seealso:: - This read-only attribute returns the name of the stored procedure or - function being called by the operation, if applicable. + :ref:`tuningfetch` -.. attribute:: PipelineOp.num_rows +.. autoproperty:: PipelineOp.keyword_parameters - This read-only attribute returns the number of rows to fetch when - performing a query of a specific number of rows. For all other operations, - the value returned is *0*. +.. autoproperty:: PipelineOp.name -.. attribute:: PipelineOp.op_type +.. autoproperty:: PipelineOp.num_rows - This read-only attribute returns the type of operation that is taking - place. See :ref:`pipeline-operation-types` for types of operations. +.. autoproperty:: PipelineOp.op_type -.. attribute:: PipelineOp.parameters + See :ref:`pipeline-operation-types` for types of operations. - This read-only attribute returns the parameters to the stored procedure or - function or the parameters bound to the statement being executed by the - operation, if applicable. +.. autoproperty:: PipelineOp.parameters -.. attribute:: PipelineOp.return_type +.. autoproperty:: PipelineOp.return_type - This read-only attribute returns the return type of the stored function - being called by the operation, if applicable. +.. autoproperty:: PipelineOp.rowfactory -.. attribute:: PipelineOp.rowfactory - - This read-only attribute returns the row factory callable function to be - used in a query executed by the operation, if applicable. - -.. attribute:: PipelineOp.statement - - This read-only attribute returns the statement being executed by the - operation, if applicable. +.. autoproperty:: PipelineOp.statement .. 
_pipelineopresultobjs: PipelineOpResult Objects ======================== -When :meth:`AsyncConnection.run_pipeline()` is called, it returns a list of -PipelineOpResult objects. These objects contain the results of the executed -:ref:`PipelineOp objects ` operations. +.. autoclass:: PipelineOpResult + + When :meth:`AsyncConnection.run_pipeline()` is called, it returns a list of + PipelineOpResult objects. These objects contain the results of the executed + :ref:`PipelineOp objects ` operations. PipelineOpResult Attributes --------------------------- -.. attribute:: PipelineOpResult.columns - - This read-only attribute is a list of :ref:`FetchInfo` - objects. This attribute will be *None* for operations that do not return - rows. +.. autoproperty:: PipelineOpResult.columns .. versionadded:: 2.5.0 -.. attribute:: PipelineOpResult.error +.. autoproperty:: PipelineOpResult.error - This read-only attribute returns the error that occurred when running this - operation. If no error occurred, then the value *None* is returned. +.. autoproperty:: PipelineOpResult.operation -.. attribute:: PipelineOpResult.operation +.. autoproperty:: PipelineOpResult.return_value - This read-only attribute returns the :ref:`PipelineOp ` - operation object that generated the result. +.. autoproperty:: PipelineOpResult.rows -.. attribute:: PipelineOpResult.return_value +.. autoproperty:: PipelineOpResult.warning - This read-only attribute returns the return value of the called PL/SQL - function, if a function was called for the operation. - -.. attribute:: PipelineOpResult.rows - - This read-only attribute returns the rows that were fetched by the - operation, if a query was executed. - -.. attribute:: PipelineOpResult.warning + .. versionadded:: 2.5.0 - This read-only attribute returns any warning that was encountered when - running this operation. If no warning was encountered, then the value - *None* is returned. See :ref:`PL/SQL Compilation Warnings - `. + .. seealso:: - .. versionadded:: 2.5.0 + :ref:`PL/SQL Compilation Warnings ` diff --git a/doc/src/api_manual/pool_params.rst b/doc/src/api_manual/pool_params.rst index 76d3168d..bdebeb3c 100644 --- a/doc/src/api_manual/pool_params.rst +++ b/doc/src/api_manual/pool_params.rst @@ -4,215 +4,162 @@ API: PoolParams Objects *********************** -A PoolParams object can be created with :meth:`oracledb.PoolParams()`. The -PoolParams class is a subclass of the :ref:`ConnectParams Class `. -In addition to the parameters and attributes of the ConnectParams class, the -PoolParams class also contains new parameters and attributes. +.. currentmodule:: oracledb -See :ref:`usingpoolparams` for more information. +PoolParams Class +================ -.. _poolparamsmeth: +.. autoclass:: PoolParams -PoolParams Methods -================== + The PoolParams class is a subclass of the :ref:`ConnectParams Class + `. In addition to the parameters and attributes of the + ConnectParams class, the PoolParams class also contains new parameters and + attributes. + + See :ref:`usingpoolparams` for more information. + + .. versionchanged:: 3.2.0 + + The ``pool_name`` parameter was added. + + .. versionchanged:: 3.0.0 + + The ``use_sni``, ``instance_name``, ``thick_mode_dsn_passthrough``, + ``extra_auth_params``, and ``instance_name`` parameters were added. -.. method:: PoolParams.copy() + .. versionchanged:: 2.5.0 - Creates a copy of the parameters and returns it. + The ``program``, ``machine``, ``terminal``, ``osuser``, and + ``driver_name`` parameters were added. 
Support for ``edition`` and + ``appcontext`` was added to python-oracledb Thin mode. -.. method:: PoolParams.get_connect_string() + .. versionchanged:: 2.3.0 - Returns the connection string associated with the PoolParams instance. + The default value of the ``retry_delay`` parameter was changed from *0* + seconds to *1* second. The default value of the ``tcp_connect_timeout`` + parameter was changed from *60.0* seconds to *20.0* seconds. The + ``ping_timeout`` and ``ssl_version`` parameters were added. -.. method:: PoolParams.parse_connect_string(connect_string) + .. versionchanged:: 2.1.0 - Parses the connect string into its components and stores the parameters. + The ``pool_boundary`` and ``use_tcp_fast_open`` parameters were added. - The connect string can be an Easy Connect string, name-value pairs, or a - simple alias which is looked up in ``tnsnames.ora``. Parameters that are - found in the connect string override any currently stored values. + .. versionchanged:: 2.0.0 -.. method:: PoolParams.set(min=None, max=None, increment=None, \ - connectiontype=None, getmode=None, homogeneous=None, timeout=None, \ - wait_timeout=None, max_lifetime_session=None, session_callback=None, \ - max_sessions_per_shard=None, soda_metadata_cache=None, \ - ping_interval=None, ping_timeout=None, user=None, proxy_user=None, \ - password=None, newpassword=None, wallet_password=None, \ - access_token=None, host=None, port=None, protocol=None, \ - https_proxy=None, https_proxy_port=None, service_name=None, \ - instance_name=None, sid=None, server_type=None, cclass=None, \ - purity=None, expire_time=None, retry_count=None, retry_delay=None, \ - tcp_connect_timeout=None, ssl_server_dn_match=None, \ - ssl_server_cert_dn=None, wallet_location=None, events=None, \ - externalauth=None, mode=None, disable_oob=None, stmtcachesize=None, \ - edition=None, tag=None, matchanytag=None, config_dir=None, \ - appcontext=[], shardingkey=[], supershardingkey=[], debug_jdwp=None, \ - connection_id_prefix=None, ssl_context=None, sdu=None, \ - pool_boundary=None, use_tcp_fast_open=False, ssl_version=None, \ - program=oracledb.defaults.program, machine=oracledb.defaults.machine, \ - terminal=oracledb.defaults.terminal, osuser=oracledb.defaults.osuser, \ - driver_name=oracledb.defaults.driver_name, use_sni=None, \ - thick_mode_dsn_passthrough=oracledb.defaults.thick_mode_dsn_passthrough, \ - extra_auth_params=None, pool_name=None, handle=None) + The ``ssl_context`` and ``sdu`` parameters were added. + + .. versionchanged:: 1.4.0 + + The ``connection_id_prefix`` parameter was added. + + +.. _poolparamsmeth: + +PoolParams Methods +================== - Sets one or more of the parameters. +.. automethod:: PoolParams.copy - .. versionchanged:: 3.2.0 +.. automethod:: PoolParams.get_connect_string - The ``pool_name`` parameter was added. +.. automethod:: PoolParams.parse_connect_string - .. versionchanged:: 3.0.0 +.. automethod:: PoolParams.set - The ``use_sni``, ``thick_mode_dsn_passthrough``, - ``extra_auth_params`` and ``instance_name`` parameters were added. + .. versionchanged:: 3.2.0 - .. versionchanged:: 2.5.0 + The ``pool_name`` parameter was added. - The ``program``, ``machine``, ``terminal``, ``osuser``, and - ``driver_name`` parameters were added. Support for ``edition`` and - ``appcontext`` was added to python-oracledb Thin mode. + .. versionchanged:: 3.0.0 - .. versionchanged:: 2.3.0 + The ``use_sni``, ``thick_mode_dsn_passthrough``, + ``extra_auth_params`` and ``instance_name`` parameters were added. 
- The ``ping_timeout`` and ``ssl_version`` parameters were added. + .. versionchanged:: 2.5.0 - .. versionchanged:: 2.1.0 + The ``program``, ``machine``, ``terminal``, ``osuser``, and + ``driver_name`` parameters were added. Support for ``edition`` and + ``appcontext`` was added to python-oracledb Thin mode. - The ``pool_boundary`` and ``use_tcp_fast_open`` parameters were added. + .. versionchanged:: 2.3.0 + + The ``ping_timeout`` and ``ssl_version`` parameters were added. + + .. versionchanged:: 2.1.0 + + The ``pool_boundary`` and ``use_tcp_fast_open`` parameters were added. .. _poolparamsattr: PoolParams Attributes ===================== -.. attribute:: PoolParams.connectiontype +All properties are read only. - This read-only attribute specifies the class of the connection that should - be returned during calls to :meth:`ConnectionPool.acquire()`. It must be - Connection or a subclass of Connection. This attribute is of type - Type["oracledb.connection"]. The default value is ``oracledb.Connection``. +.. autoproperty:: PoolParams.connectiontype This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: PoolParams.getmode - - This read-write attribute is an integer that determines the behavior of - :meth:`ConnectionPool.acquire()`. The value of this attribute can be one of - the constants :data:`oracledb.POOL_GETMODE_WAIT`, - :data:`oracledb.POOL_GETMODE_NOWAIT`, :data:`oracledb.POOL_GETMODE_FORCEGET`, - or :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. The default value is - :data:`oracledb.POOL_GETMODE_WAIT`. +.. autoproperty:: PoolParams.getmode This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: PoolParams.homogeneous - - This read-only attribute is a boolean which indicates whether the - connections are :ref:`homogeneous ` (same user) or - heterogeneous (multiple users). The default value is *True*. +.. autoproperty:: PoolParams.homogeneous This attribute is only supported in python-oracledb Thick mode. The python-oracledb Thin mode supports only homogeneous modes. -.. attribute:: PoolParams.increment - - This read-only attribute specifies the number of connections that should - be added to the pool whenever a new connection needs to be created. The - default value is *1*. +.. autoproperty:: PoolParams.increment This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: PoolParams.min - - This read-only attribute is an integer that specifies the minimum number - of connections that the pool should contain. The default value is *1*. +.. autoproperty:: PoolParams.min This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: PoolParams.max - - This read-only attribute specifies the maximum number of connections that - the pool should contain. The default value is *2*. +.. autoproperty:: PoolParams.max This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: PoolParams.max_lifetime_session +.. autoproperty:: PoolParams.max_lifetime_session - This read-only attribute is the maximum length of time (in seconds) that a - pooled connection may exist since first being created. A value of *0* means - there is no limit. Connections become candidates for termination when they - are acquired or released back to the pool, and have existed for longer than + Connections become candidates for termination when they are acquired or + released back to the pool, and have existed for longer than ``max_lifetime_session`` seconds. 
Connections that are in active use will not be closed. In python-oracledb Thick mode, Oracle Client libraries 12.1 or later must be used and, prior to Oracle Client 21, cleanup only occurs when the pool is accessed. -.. attribute:: PoolParams.max_sessions_per_shard - - This read-only attribute is an integer that determines the maximum number - of connections that may be associated with a particular shard. The default - value is *0*. +.. autoproperty:: PoolParams.max_sessions_per_shard This attribute is only supported in python-oracledb Thick mode. -.. attribute:: PoolParams.ping_interval - - This read-only attribute is an integer that specifies the length of time - (in seconds) after which an unused connection in the pool will be a - candidate for pinging when :meth:`ConnectionPool.acquire()` is called. - If the ping to the database indicates that the connection is not alive, - then a replacement connection will be returned by - :meth:`ConnectionPool.acquire()`. If the ``ping_interval`` is a negative - value, then the ping functionality will be disabled. The default value is - *60* seconds. +.. autoproperty:: PoolParams.ping_interval - This attribute is supported in both python-oracledb Thin and Thick modes. - -.. attribute:: PoolParams.ping_timeout + This attribute is supported in both python-oracledb Thin and Thick modes. - This read-only attribute is an integer that specifies the maximum length of - time (in milliseconds) that :meth:`ConnectionPool.acquire()` waits for a - connection to respond to any internal ping to the database. If the ping - does not respond within the specified time, then the connection is - destroyed and :meth:`~ConnectionPool.acquire()` returns a different - connection. The default value is *5000* milliseconds. +.. autoproperty:: PoolParams.ping_timeout This attribute is supported in both python-oracledb Thin and Thick modes. - .. versionadded:: 2.3.0 + .. versionadded:: 2.3.0 -.. attribute:: PoolParams.session_callback - - This read-only attribute specifies a callback that is invoked when a - connection is returned from the pool for the first time, or when the - connection tag differs from the one requested. +.. autoproperty:: PoolParams.session_callback This attribute is supported in both python-oracledb Thin and Thick modes. -.. attribute:: PoolParams.soda_metadata_cache - - This read-only attribute is a boolean that indicates whether SODA - metadata cache should be enabled or not. The default value is *False*. +.. autoproperty:: PoolParams.soda_metadata_cache This attribute is only supported in python-oracledb Thick mode. -.. attribute:: PoolParams.timeout +.. autoproperty:: PoolParams.timeout - This read-only attribute is an integer that specifies the length of time - (in seconds) that a connection may remain idle in the pool before it is - terminated. This applies only when the pool has more than ``min`` - connections open, allowing it to shrink to the specified minimum size. The - default value is *0* seconds. A value of *0* means that there is no maximum - time. + This applies only when the pool has more than ``min`` connections open, + allowing it to shrink to the specified minimum size. The default value is + *0* seconds. A value of *0* means that there is no maximum time. This attribute is supported in both python-oracledb Thin and Thick modes. -.. 
attribute:: PoolParams.wait_timeout - - This read-only attribute is an integer that specifies the length of time - (in milliseconds) that a caller should wait when acquiring a connection - from the pool with :attr:`~PoolParams.getmode` set to - :data:`~oracledb.POOLGETMODE_TIMEDWAIT`. The default value is *0* - milliseconds. +.. autoproperty:: PoolParams.wait_timeout This attribute is supported in both python-oracledb Thin and Thick modes. diff --git a/doc/src/api_manual/soda.rst b/doc/src/api_manual/soda.rst index 8a7cb4cb..7f70e46e 100644 --- a/doc/src/api_manual/soda.rst +++ b/doc/src/api_manual/soda.rst @@ -4,11 +4,13 @@ API: SODA ********** +.. currentmodule:: oracledb + `Oracle Database Simple Oracle Document Access (SODA) `__ allows documents to be inserted, queried, and retrieved from Oracle Database -using a set of NoSQL-style python-oracledb methods. By default, documents are JSON -strings. See the :ref:`user manual ` for examples. +using a set of NoSQL-style python-oracledb methods. By default, documents are +JSON strings. See the :ref:`user manual ` for examples. .. note:: @@ -38,7 +40,6 @@ SODA requires Oracle Client 18.3 or higher and Oracle Database 18.1 and higher. SODA APIs are only supported in the python-oracledb Thick mode. See :ref:`enablingthick`. - If you are using Oracle Database 21c (or later) and create new collections you need to do one of the following: @@ -77,583 +78,281 @@ the specification in the collection metadata``. .. _sodadb: -SodaDatabase Objects -==================== +SodaDatabase Class +================== -SODA Database objects are returned by the method -:meth:`Connection.getSodaDatabase()`. +.. autoclass:: SodaDatabase -.. dbapiobjectextension:: + A SodaDatabase object is returned by :meth:`Connection.getSodaDatabase()`. + + .. dbapiobjectextension:: SodaDatabase Methods -------------------- -.. method:: SodaDatabase.createCollection(name, metadata=None, mapMode=False) - - Creates a SODA collection with the given name and returns a new - :ref:`SODA collection object `. If you try to create a - collection, and a collection with the same name and metadata already - exists, then that existing collection is opened without error. - - If ``metadata`` is specified, it is expected to be a string containing - valid JSON or a dictionary that will be transformed into a JSON - string. This JSON permits you to specify the configuration of the - collection including storage options; specifying the presence or absence of - columns for creation timestamp, last modified timestamp and version; - whether the collection can store only JSON documents; and methods of key - and version generation. The default metadata creates a collection that only - supports JSON documents and uses system generated keys. See this - `collection metadata reference `__ for - more information. - - If the ``mapMode`` parameter is set to *True*, the new collection is mapped - to an existing table instead of creating a table. If a collection is - created in this way, dropping the collection will not drop the existing - table either. - - -.. method:: SodaDatabase.createDocument(content, key=None, mediaType="application/json") - - Creates a :ref:`SODA document ` usable for SODA write operations. - You only need to use this method if your collection requires - client-assigned keys or has non-JSON content; otherwise, you can pass your - content directly to SODA write operations. 
SodaDocument attributes - :attr:`~SodaDoc.createdOn`, :attr:`~SodaDoc.lastModified`, and - :attr:`~SodaDoc.version` will be *None*. - - The ``content`` parameter can be a dictionary or list which will be - transformed into a JSON string and then UTF-8 encoded. It can also be a - string which will be UTF-8 encoded or it can be a bytes object which will - be stored unchanged. If a bytes object is provided and the content is - expected to be JSON, note that SODA only supports UTF-8, UTF-16LE and - UTF-16BE encodings. +.. automethod:: SodaDatabase.createCollection - The ``key`` parameter should only be supplied if the collection in which the - document is to be placed requires client-assigned keys. + .. seealso:: - The ``mediaType`` parameter should only be supplied if the collection in which - the document is to be placed supports non-JSON documents and the content - for this document is non-JSON. Using a standard MIME type for this value is - recommended but any string will be accepted. + :ref:`SODA collection object ` and `Oracle Database SODA + Collection Metadata Reference `__ +.. automethod:: SodaDatabase.createDocument -.. method:: SodaDatabase.getCollectionNames(startName=None, limit=0) +.. automethod:: SodaDatabase.getCollectionNames - Returns a list of the names of collections in the database that match the - criteria, in alphabetical order. - - If the ``startName`` parameter is specified, the list of names returned will - start with this value and also contain any names that fall after this value - in alphabetical order. - - If the ``limit`` parameter is specified and is non-zero, the number of - collection names returned will be limited to this value. - - -.. method:: SodaDatabase.openCollection(name) - - Opens an existing collection with the given name and returns a new - :ref:`SODA collection object `. If a collection with that name - does not exist, *None* is returned. +.. automethod:: SodaDatabase.openCollection .. _sodacoll: -SodaCollection Objects -====================== +SodaCollection Class +==================== -SODA Collection objects are used to represent SODA collections and is created -by methods :meth:`SodaDatabase.createCollection()` and -:meth:`SodaDatabase.openCollection()`. +.. autoclass:: SodaCollection -.. dbapiobjectextension:: + A SODA Collection object is used to represent SODA collections and is + created by :meth:`SodaDatabase.createCollection()` and + :meth:`SodaDatabase.openCollection()`. + + .. dbapiobjectextension:: SodaCollection Methods ---------------------- -.. method:: SodaCollection.createIndex(spec) - - Creates an index on a SODA collection. The spec is expected to be a - dictionary or a JSON-encoded string. See this `overview - `__ - for information on indexes in SODA. - - .. note:: - - A commit should be performed before attempting to create an index. - +.. automethod:: SodaCollection.createIndex -.. method:: SodaCollection.drop() + .. seealso:: - Drops the collection from the database, if it exists. Note that if the - collection was created with the ``mapMode`` parameter set to *True* the - underlying table will not be dropped. + `Overview of SODA Indexing `__ - A boolean value is returned indicating if the collection was actually - dropped. +.. automethod:: SodaCollection.drop +.. automethod:: SodaCollection.dropIndex -.. method:: SodaCollection.dropIndex(name, force=False) + .. seealso:: - Drops the index with the specified name, if it exists. 
+ `DROP INDEX statement `__ - The ``force`` parameter, if set to *True*, can be used to force the - dropping of an index that the underlying Oracle Database domain index - does not normally permit. This is only applicable to spatial and JSON - search indexes. See `here `__ for more - information. +.. automethod:: SodaCollection.find - A boolean value is returned indicating if the index was actually dropped. + .. seealso:: + :ref:`SodaOperation object ` -.. method:: SodaCollection.find() +.. automethod:: SodaCollection.getDataGuide - This method is used to begin an operation that will act upon documents in - the collection. It creates and returns a - :ref:`SodaOperation object ` which is used to specify the criteria - and the operation that will be performed on the documents that match that - criteria. + .. seealso:: + :ref:`SODA document object ` -.. method:: SodaCollection.getDataGuide() +.. automethod:: SodaCollection.insertMany - Returns a :ref:`SODA document object ` containing property names, - data types and lengths inferred from the JSON documents in the collection. - It can be useful for exploring the schema of a collection. Note that this - method is only supported for JSON-only collections where a JSON search - index has been created with the 'dataguide' option enabled. If there are - no documents in the collection, *None* is returned. + .. seealso:: -.. method:: SodaCollection.insertMany(docs) + :ref:`SODA document object `. - Inserts a list of documents into the collection at one time. Each of the - input documents can be a dictionary or list or an existing :ref:`SODA - document object `. +.. automethod:: SodaCollection.insertManyAndGet - .. note:: + .. seealso:: - This method requires Oracle Client 18.5 and higher and is available - only as a preview. + :ref:`SODA Document objects `, `MONITOR and NO_MONITOR Hints + `__, and `Monitoring Database Operations + `__ +.. automethod:: SodaCollection.insertOne -.. method:: SodaCollection.insertManyAndGet(docs, hint=None) + .. seealso:: - Similarly to :meth:`~SodaCollection.insertMany()` this method inserts a - list of documents into the collection at one time. The only difference is - that it returns a list of :ref:`SODA Document objects `. Note that - for performance reasons the returned documents do not contain the content. + :ref:`SODA document object ` - The ``hint`` parameter, if specified, supplies a hint to the database when - processing the SODA operation. This is expected to be a string in the same - format as a SQL hint but without any comment characters, for example - ``hint="MONITOR"``. Pass only the hint "MONITOR" (turn on monitoring) - or "NO_MONITOR" (turn off monitoring). See the Oracle Database SQL - Tuning Guide documentation `MONITOR and NO_MONITOR Hints - `__ - and `Monitoring Database Operations - `__ - for more information. +.. automethod:: SodaCollection.insertOneAndGet - .. note:: + .. seealso:: - - This method requires Oracle Client 18.5 and higher. + :ref:`SODA Document object `, `MONITOR and NO_MONITOR Hints + `__, and `Monitoring Database Operations + `__ - - Use of the ``hint`` parameter requires Oracle Client 21.3 or higher - (or Oracle Client 19 from 19.11). - - -.. method:: SodaCollection.insertOne(doc) - - Inserts a given document into the collection. The input document can be a - dictionary or list or an existing :ref:`SODA document object `. - - -.. 
method:: SodaCollection.insertOneAndGet(doc, hint=None) - - Similarly to :meth:`~SodaCollection.insertOne()` this method inserts a - given document into the collection. The only difference is that it - returns a :ref:`SODA Document object `. Note that for performance - reasons the returned document does not contain the content. - - The ``hint`` parameter, if specified, supplies a hint to the database when - processing the SODA operation. This is expected to be a string in the same - format as a SQL hint but without any comment characters, for example - ``hint="MONITOR"``. Pass only the hint "MONITOR" (turn on monitoring) - or "NO_MONITOR" (turn off monitoring). See the Oracle Database SQL - Tuning Guide documentation `MONITOR and NO_MONITOR Hints - `__ - and `Monitoring Database Operations - `__ - for more information. - - .. note:: - - Use of the ``hint`` parameter requires Oracle Client 21.3 or higher - (or Oracle Client 19 from 19.11). - -.. method:: SodaCollection.listIndexes() - - Returns a list of specifications for the indexes found on the collection. - - This method requires Oracle Client 21.3 or later (or Oracle Client 19 from - 19.13). +.. automethod:: SodaCollection.listIndexes .. versionadded:: 1.4.0 -.. method:: SodaCollection.save(doc) - - Saves a document into the collection. This method is equivalent to - :meth:`~SodaCollection.insertOne()` except that if client-assigned keys are - used, and the document with the specified key already exists in the - collection, it will be replaced with the input document. - - This method requires Oracle Client 19.9 or higher in addition to the usual - SODA requirements. - - -.. method:: SodaCollection.saveAndGet(doc, hint=None) - - Saves a document into the collection. This method is equivalent to - :meth:`~SodaCollection.insertOneAndGet()` except that if client-assigned - keys are used, and the document with the specified key already exists in - the collection, it will be replaced with the input document. +.. automethod:: SodaCollection.save - The ``hint`` parameter, if specified, supplies a hint to the database when - processing the SODA operation. This is expected to be a string in the same - format as a SQL hint but without any comment characters, for example - ``hint="MONITOR"``. Pass only the hint "MONITOR" (turn on monitoring) - or "NO_MONITOR" (turn off monitoring). See the Oracle Database SQL - Tuning Guide documentation `MONITOR and NO_MONITOR Hints - `__ - and `Monitoring Database Operations - `__ - for more information. +.. automethod:: SodaCollection.saveAndGet - This method requires Oracle Client 19.9 or higher in addition to the usual - SODA requirements. + .. seealso:: - .. note:: + `MONITOR and NO_MONITOR Hints `__ and + `Monitoring Database Operations `__ - Use of the ``hint`` parameter requires Oracle Client 21.3 or higher - (or Oracle Client 19 from 19.11). - - -.. method:: SodaCollection.truncate() - - Removes all of the documents in the collection, similarly to what is done - for rows in a table by the TRUNCATE TABLE statement. +.. automethod:: SodaCollection.truncate SodaCollection Attributes ------------------------- -.. attribute:: SodaCollection.metadata - - This read-only attribute returns a dictionary containing the metadata that - was used to create the collection. See this `collection metadata reference - `__ for more information. - +.. autoproperty:: SodaCollection.metadata -.. attribute:: SodaCollection.name + .. seealso:: - This read-only attribute returns the name of the collection. 
+ `Oracle Database SODA Collection Metadata Reference `__ +.. autoproperty:: SodaCollection.name .. _sodadoc: -SodaDoc Objects -=============== +SodaDocument Class +================== -SODA Document objects are returned by the methods -:meth:`SodaDatabase.createDocument()`, :meth:`SodaOperation.getDocuments()` and -:meth:`SodaOperation.getOne()` as -well as by iterating over :ref:`SODA document cursors `. +.. autoclass:: SodaDocument -.. dbapiobjectextension:: + A SODA Document object is returned by + :meth:`SodaDatabase.createDocument()`, + :meth:`SodaOperation.getDocuments()`, and + :meth:`SodaOperation.getOne()` as well as by iterating over + :ref:`SODA document cursors `. -SodaDoc Methods ---------------- + .. dbapiobjectextension:: -.. method:: SodaDoc.getContent() - - Returns the content of the document as a dictionary or list. This method - assumes that the content is application/json and will raise an exception if - this is not the case. If there is no content, however, *None* will be - returned. - - -.. method:: SodaDoc.getContentAsBytes() - - Returns the content of the document as a bytes object. If there is no - content, however, *None* will be returned. - - -.. method:: SodaDoc.getContentAsString() - - Returns the content of the document as a string. If the document encoding - is not known, UTF-8 will be used. If there is no content, however, *None* - will be returned. - -SodaDoc Attributes ------------------- - -.. attribute:: SodaDoc.createdOn +SodaDocument Methods +-------------------- - This read-only attribute returns the creation time of the document in - `ISO 8601 `__ - format. Documents created by :meth:`SodaDatabase.createDocument()` or - fetched from collections where this attribute is not stored will return - *None*. +.. automethod:: SodaDocument.getContent -.. attribute:: SodaDoc.key +.. automethod:: SodaDocument.getContentAsBytes - This read-only attribute returns the unique key assigned to this document. - Documents created by :meth:`SodaDatabase.createDocument()` may not have a - value assigned to them and return *None*. +.. automethod:: SodaDocument.getContentAsString +SodaDocument Attributes +----------------------- -.. attribute:: SodaDoc.lastModified +.. autoproperty:: SodaDocument.createdOn - This read-only attribute returns the last modified time of the document in - `ISO 8601 `__ - format. Documents created by :meth:`SodaDatabase.createDocument()` or - fetched from collections where this attribute is not stored will return - *None*. + .. seealso:: + `ISO 8601 `__ -.. attribute:: SodaDoc.mediaType +.. autoproperty:: SodaDocument.key - This read-only attribute returns the media type assigned to the document. - By convention this is expected to be a MIME type but no checks are - performed on this value. If a value is not specified when calling - :meth:`SodaDatabase.createDocument()` or the document is fetched from a - collection where this component is not stored, the string - "application/json" is returned. +.. autoproperty:: SodaDocument.lastModified + .. seealso:: -.. attribute:: SodaDoc.version + `ISO 8601 `__ - This read-only attribute returns the version assigned to this document. - Documents created by :meth:`SodaDatabase.createDocument()` or fetched - from collections where this attribute is not stored will return *None*. +.. autoproperty:: SodaDocument.mediaType +.. autoproperty:: SodaDocument.version .. _sodadoccur: -SodaDocCursor Objects -===================== +SodaDocCursor Class +=================== + +.. 
autoclass:: SodaDocCursor -SODA Document Cursor objects are returned by the method -:meth:`SodaOperation.getCursor()` and implements the iterator protocol. Each -iteration will return a :ref:`SODA document object `. + A SodaDocCursor object is returned by :meth:`SodaOperation.getCursor()` + and implements the iterator protocol. Each iteration will return a + :ref:`SODA document object `. -.. dbapiobjectextension:: + .. dbapiobjectextension:: SodaDocCursor Methods --------------------- -.. method:: SodaDocCursor.close() - - Closes the cursor now, rather than whenever __del__ is called. The cursor - will be unusable from this point forward; an Error exception will be raised - if any operation is attempted with the cursor. - +.. automethod:: SodaDocCursor.close .. _sodaop: -SodaOperation Objects -===================== +SodaOperation Class +=================== + +.. autoclass:: SodaOperation -A SODA Operation object represents an operation that will be performed on all -or some of the documents in a SODA collection. This object is created by the -method :meth:`SodaCollection.find()`. + A SODA Operation object represents an operation that will be performed on + all or some of the documents in a SODA collection. This object is created + by :meth:`SodaCollection.find()`. -.. dbapiobjectextension:: + .. dbapiobjectextension:: SodaOperation Methods --------------------- -.. method:: SodaOperation.count() - - Returns a count of the number of documents in the collection that match - the criteria. If :meth:`~SodaOperation.skip()` or - :meth:`~SodaOperation.limit()` were called on this object, an exception is - raised. - - -.. method:: SodaOperation.fetchArraySize(value) - - This is a tuning method to specify the number of documents that are - internally fetched in batches by calls to :meth:`~SodaOperation.getCursor()` - and :meth:`~SodaOperation.getDocuments()`. It does not affect how many - documents are returned to the application. A value of *0* will use the - default value (*100*). This method is only available in Oracle Client 19.5 - and higher. - - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. - - -.. method:: SodaOperation.filter(value) - - Sets a filter specification for complex document queries and ordering of - JSON documents. Filter specifications must be provided as a dictionary or - JSON-encoded string and can include comparisons, regular expressions, - logical and spatial operators, among others. See the - `overview of SODA filter specifications - `__ - for more information. +.. automethod:: SodaOperation.count - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. +.. automethod:: SodaOperation.fetchArraySize +.. automethod:: SodaOperation.filter -.. method:: SodaOperation.getCursor() + .. seealso:: - Returns a :ref:`SODA Document Cursor object ` that can be used - to iterate over the documents that match the criteria. + `Overview of SODA filter specifications `__ +.. automethod:: SodaOperation.getCursor -.. method:: SodaOperation.getDocuments() + .. seealso:: - Returns a list of :ref:`SODA Document objects ` that match the - criteria. + :ref:`SODA Document Cursor object ` +.. automethod:: SodaOperation.getDocuments -.. method:: SodaOperation.getOne() + .. seealso:: - Returns a single :ref:`SODA Document object ` that matches the - criteria. Note that if multiple documents match the criteria only the first - one is returned. 
+ :ref:`SODA Document objects ` +.. automethod:: SodaOperation.getOne -.. method:: SodaOperation.hint(value) + .. seealso:: - Specifies a hint that will be provided to the SODA operation when it is - performed. This is expected to be a string in the same format as a SQL hint - but without any comment characters, for example ``hint("MONITOR")``. Pass - only the hint "MONITOR" (turn on monitoring) or "NO_MONITOR" (turn off - monitoring). See the Oracle Database SQL Tuning Guide documentation - `MONITOR and NO_MONITOR Hints `__ and - `Monitoring Database Operations `__ for more - information. + :ref:`SODA Document object ` - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. +.. automethod:: SodaOperation.hint - Use of this method requires Oracle Client 21.3 or higher (or Oracle Client - 19 from 19.11). + .. seealso:: -.. method:: SodaOperation.key(value) + Oracle Database SQL Tuning Guide documentation `MONITOR and NO_MONITOR + Hints `__ and `Monitoring Database + Operations `__ - Specifies that the document with the specified key should be returned. - This causes any previous calls made to this method and - :meth:`~SodaOperation.keys()` to be ignored. +.. automethod:: SodaOperation.key - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. +.. automethod:: SodaOperation.keys +.. automethod:: SodaOperation.limit -.. method:: SodaOperation.keys(seq) - - Specifies that documents that match the keys found in the supplied sequence - should be returned. This causes any previous calls made to this method and - :meth:`~SodaOperation.key()` to be ignored. - - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. - - -.. method:: SodaOperation.limit(value) - - Specifies that only the specified number of documents should be returned. - This method is only usable for read operations such as - :meth:`~SodaOperation.getCursor()` and - :meth:`~SodaOperation.getDocuments()`. For write operations, any value set - using this method is ignored. - - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. - - -.. method:: SodaOperation.lock() - - Specifies whether the documents fetched from the collection should be - locked (equivalent to SQL "select for update"). - - The next commit or rollback on the connection made after the operation is - performed will "unlock" the documents. Ensure that the connection is not in - autocommit mode or the documents will be unlocked immediately after the - operation is complete. - - This method should only be used with read operations (other than - :func:`~SodaOperation.count()`) and should not be used in - conjunction with non-terminal methods :meth:`~SodaOperation.skip()` and - :meth:`~SodaOperation.limit()`. - - If this method is specified in conjunction with a write operation this - method is ignored. - - This method is only supported in Oracle Client 21.3 or later (or - Oracle Client 19 from 19.11). +.. automethod:: SodaOperation.lock .. versionadded:: 1.4.0 -.. method:: SodaOperation.remove() - - Removes all of the documents in the collection that match the criteria. The - number of documents that have been removed is returned. - - -.. method:: SodaOperation.replaceOne(doc) - - Replaces a single document in the collection with the specified document. 
- The input document can be a dictionary or list or an existing - :ref:`SODA document object `. A boolean indicating if a document - was replaced or not is returned. - - Currently the method :meth:`~SodaOperation.key()` must be called before - this method can be called. - - -.. method:: SodaOperation.replaceOneAndGet(doc) - - Similarly to :meth:`~SodaOperation.replaceOne()`, this method replaces a - single document in the collection with the specified document. The only - difference is that it returns a :ref:`SODA document object `. - Note that for performance reasons the returned document does not contain - the content. +.. automethod:: SodaOperation.remove +.. automethod:: SodaOperation.replaceOne -.. method:: SodaOperation.skip(value) + .. seealso:: - Specifies the number of documents that match the other criteria that will - be skipped. This method is only usable for read operations such as - :meth:`~SodaOperation.getOne()`, :meth:`~SodaOperation.getCursor()`, and - :meth:`~SodaOperation.getDocuments()`. For write operations, any value set - using this method is ignored. + :ref:`SODA document object ` - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. +.. automethod:: SodaOperation.replaceOneAndGet + .. seealso:: -.. method:: SodaOperation.version(value) + :ref:`SODA document object ` - Specifies that documents with the specified version should be returned. - Typically this is used with :meth:`~SodaOperation.key()` to implement - optimistic locking, so that the write operation called later does not - affect a document that someone else has modified. +.. automethod:: SodaOperation.skip - As a convenience, the SodaOperation object is returned so that further - criteria can be specified by chaining methods together. +.. automethod:: SodaOperation.version diff --git a/doc/src/api_manual/sparse_vector.rst b/doc/src/api_manual/sparse_vector.rst index c597372c..de75bc93 100644 --- a/doc/src/api_manual/sparse_vector.rst +++ b/doc/src/api_manual/sparse_vector.rst @@ -4,27 +4,22 @@ API: SparseVector Objects ************************* -A SparseVector Object stores information about a sparse vector. This object -can be created with :meth:`oracledb.SparseVector()`. +.. currentmodule:: oracledb -See :ref:`sparsevectors` for more information. +SparseVector Class +================== -.. versionadded:: 3.0.0 +.. autoclass:: SparseVector -SparseVector Attributes -======================= - -.. attribute:: SparseVector.indices + See :ref:`sparsevectors` for more information. - This read-only attribute is an array that returns the indices (zero-based) - of non-zero values in the vector. + .. versionadded:: 3.0.0 -.. attribute:: SparseVector.num_dimensions +SparseVector Attributes +======================= - This read-only attribute is an integer that returns the number of - dimensions of the vector. +.. autoproperty:: SparseVector.indices -.. attribute:: SparseVector.values +.. autoproperty:: SparseVector.num_dimensions - This read-only attribute is an array that returns the non-zero values - stored in the vector. +.. autoproperty:: SparseVector.values diff --git a/doc/src/api_manual/subscription.rst b/doc/src/api_manual/subscription.rst index f4e639bf..5371de7d 100644 --- a/doc/src/api_manual/subscription.rst +++ b/doc/src/api_manual/subscription.rst @@ -4,255 +4,137 @@ API: Subscription Objects ************************* -.. dbapiobjectextension:: - -Subscription Methods -==================== - -.. 
method:: Subscription.registerquery(statement, [args]) +.. currentmodule:: oracledb - Registers the query for subsequent notification when tables referenced by - the query are changed. This behaves similarly to :meth:`Cursor.execute()` - but only queries are permitted and the ``args`` parameter must be a - sequence or dictionary. If the ``qos`` parameter included the flag - :data:`oracledb.SUBSCR_QOS_QUERY` when the subscription was created, then - the ID for the registered query is returned; otherwise, *None* is returned. - -Subscription Attributes -======================= - -.. attribute:: Subscription.callback +.. dbapiobjectextension:: - This read-only attribute returns the callback that was registered when the - subscription was created. +Subscription Class +================== +.. autoclass:: Subscription -.. attribute:: Subscription.connection + A Subscription object should be created using + :meth:`Connection.subscribe()`. See :ref:`cqn` for more information. - This read-only attribute returns the connection that was used to register - the subscription when it was created. +Subscription Methods +-------------------- +.. automethod:: Subscription.registerquery -.. attribute:: Subscription.id +Subscription Attributes +----------------------- - This read-only attribute returns the value of the REGID column found in the - database view USER_CHANGE_NOTIFICATION_REGS or the value of the REG_ID - column found in the database view USER_SUBSCR_REGISTRATIONS. For AQ - subscriptions, the value is *0*. +.. autoproperty:: Subscription.callback +.. autoproperty:: Subscription.connection -.. attribute:: Subscription.ip_address +.. autoproperty:: Subscription.id - This read-only attribute returns the IP address used for callback - notifications from the database server. If not set during construction, - this value is *None*. +.. autoproperty:: Subscription.ip_address For consistency and compliance with the PEP 8 naming style, the attribute ``ipAddress`` was renamed to ``ip_address``. The old name will continue to work for a period of time. +.. autoproperty:: Subscription.name -.. attribute:: Subscription.name - - This read-only attribute returns the name used to register the subscription - when it was created. - - -.. attribute:: Subscription.namespace - - This read-only attribute returns the namespace used to register the - subscription when it was created. - +.. autoproperty:: Subscription.namespace -.. attribute:: Subscription.operations +.. autoproperty:: Subscription.operations - This read-only attribute returns the operations that will send - notifications for each table or query that is registered using this - subscription. +.. autoproperty:: Subscription.port +.. autoproperty:: Subscription.protocol -.. attribute:: Subscription.port - - This read-only attribute returns the port used for callback notifications - from the database server. If not set during construction, this value is - *0*. - - -.. attribute:: Subscription.protocol - - This read-only attribute returns the protocol used to register the - subscription when it was created. - - -.. attribute:: Subscription.qos - - This read-only attribute returns the quality of service flags used to - register the subscription when it was created. - - -.. attribute:: Subscription.timeout - - This read-only attribute returns the timeout (in seconds) that was - specified when the subscription was created. A value of *0* indicates that - there is no timeout. +.. autoproperty:: Subscription.qos +.. autoproperty:: Subscription.timeout .. 
_msgobjects: -Message Objects ---------------- +Message Class +============= -Message objects are created when a notification is received. They are passed to -the callback procedure specified when a subscription is created. +.. autoclass:: Message -.. attribute:: Message.consumer_name + A Message object is created when a notification is received. They are + passed to the callback procedure specified when a subscription is created. + +Message Attributes +------------------ - This read-only attribute returns the name of the consumer which generated - the notification. It will be populated if the subscription was created with - the namespace :data:`oracledb.SUBSCR_NAMESPACE_AQ` and the queue is a - multiple consumer queue. +.. autoproperty:: Message.consumer_name For consistency and compliance with the PEP 8 naming style, the attribute ``consumerName`` was renamed to ``consumer_name``. The old name will continue to work for a period of time. +.. autoproperty:: Message.dbname -.. attribute:: Message.dbname - - This read-only attribute returns the name of the database that generated - the notification. - -.. attribute:: Message.msgid - - This read-only attribute returns the message id of the AQ message which - generated the notification. It will only be populated if the subscription - was created with the namespace :data:`oracledb.SUBSCR_NAMESPACE_AQ`. +.. autoproperty:: Message.msgid -.. attribute:: Message.queries +.. autoproperty:: Message.queries - This read-only attribute returns a list of message query objects that give - information about query result sets changed for this notification. This - attribute will be an empty list if the ``qos`` parameter did not include - the flag :data:`~oracledb.SUBSCR_QOS_QUERY` when the subscription was - created. - - -.. attribute:: Message.queue_name - - This read-only attribute returns the name of the queue which generated the - notification. It will only be populated if the subscription was created - with the namespace :data:`oracledb.SUBSCR_NAMESPACE_AQ`. +.. autoproperty:: Message.queue_name For consistency and compliance with the PEP 8 naming style, the attribute ``queueName`` was renamed to ``queue_name``. The old name will continue to work for a period of time. +.. autoproperty:: Message.registered -.. attribute:: Message.registered - - This read-only attribute returns whether the subscription which generated - this notification is still registered with the database. The subscription - is automatically deregistered with the database when the subscription - timeout value is reached or when the first notification is sent (when the - quality of service flag :data:`oracledb.SUBSCR_QOS_DEREG_NFY` is used). - - -.. attribute:: Message.subscription - - This read-only attribute returns the subscription object for which this - notification was generated. - - -.. attribute:: Message.tables - - This read-only attribute returns a list of message table objects that give - information about the tables changed for this notification. This - attribute will be an empty list if the ``qos`` parameter included the flag - :data:`~oracledb.SUBSCR_QOS_QUERY` when the subscription was created. - - -.. attribute:: Message.txid - - This read-only attribute returns the id of the transaction that generated - the notification. - - -.. attribute:: Message.type +.. autoproperty:: Message.subscription - This read-only attribute returns the type of message that has been sent. - See the constants section on event types for additional information. +.. 
autoproperty:: Message.tables +.. autoproperty:: Message.txid -MessageTable Objects --------------------- - -MessageTable objects are created when a notification is received for each table -change. They are accessed in the tables attribute of message objects, and the -tables attribute of message query objects. - - -.. attribute:: MessageTable.name +.. autoproperty:: Message.type - This read-only attribute returns the name of the table that was changed. + See the constants section on :ref:`eventtypes` for additional information. +MessageTable Class +================== -.. attribute:: MessageTable.operation +.. autoclass:: MessageTable - This read-only attribute returns the operation that took place on the table - that was changed. + A MessageTable object is created when a notification is received for each + table change. They are accessed in the tables attribute of message + objects, and the tables attribute of message query objects. +.. autoproperty:: MessageTable.name -.. attribute:: MessageTable.rows - - This read-only attribute returns a list of message row objects that give - information about the rows changed on the table. This value is only filled - in if the ``qos`` parameter to the :meth:`Connection.subscribe()` method - included the flag :data:`~oracledb.SUBSCR_QOS_ROWIDS`. - - -MessageRow Objects ------------------- +.. autoproperty:: MessageTable.operation -MessageRow objects are created when a notification is received for each row -changed in a table. They are found in the rows attribute of message table -objects. +.. autoproperty:: MessageTable.rows +MessageRow Class +================ -.. attribute:: MessageRow.operation - - This read-only attribute returns the operation that took place on the row - that was changed. - - -.. attribute:: MessageRow.rowid - - This read-only attribute returns the rowid of the row that was changed. - - -MessageQuery Objects --------------------- +.. autoclass:: MessageRow -A MessageQuery object is created when a notification is received for a query -result set change. This object is found in the queries attribute of message -objects. + A MessageRow object is created when a notification is received for each + row changed in a table. They are found in the rows attribute of message + table objects. +MessageRow Attributes +--------------------- -.. attribute:: MessageQuery.id +.. autoproperty:: MessageRow.operation - This read-only attribute returns the query id of the query for which the - result set changed. The value will match the value returned by - :meth:`Subscription.registerquery()` when the related query was registered. +.. autoproperty:: MessageRow.rowid +MessageQuery Class +================== -.. attribute:: MessageQuery.operation +.. autoclass:: MessageQuery - This read-only attribute returns the operation that took place on the query - result set that was changed. Valid values for this attribute are - :data:`~oracledb.EVENT_DEREG` and :data:`~oracledb.EVENT_QUERYCHANGE`. + A MessageQuery object is created when a notification is received for a + query result set change. This object is found in the queries attribute of + message objects. +.. autoproperty:: MessageQuery.id -.. attribute:: MessageQuery.tables +.. autoproperty:: MessageQuery.operation - This read-only attribute returns a list of message table objects that give - information about the table changes that caused the query result set to - change for this notification. +.. 
autoproperty:: MessageQuery.tables diff --git a/doc/src/api_manual/variable.rst b/doc/src/api_manual/variable.rst index 0a14e611..e9857e01 100644 --- a/doc/src/api_manual/variable.rst +++ b/doc/src/api_manual/variable.rst @@ -4,108 +4,56 @@ API: Variable Objects ********************* -Variable objects are created with :meth:`Cursor.var()` or -:func:`Cursor.arrayvar()`. +.. currentmodule:: oracledb -.. dbapiobjectextension:: +Variable Class +============== -Variable Methods -================= +.. autoclass:: Var + + An Var object should be created with :meth:`Cursor.var()` or + :meth:`Cursor.arrayvar()`. -.. method:: Variable.getvalue([pos=0]) + .. dbapiobjectextension:: - Returns the value at the given position in the variable. For variables - created using the method :func:`Cursor.arrayvar()` the value returned will - be a list of each of the values in the PL/SQL index-by table. For variables - bound to DML returning statements, the value returned will also be a list - corresponding to the returned data for the given execution of the statement - (as identified by the ``pos`` parameter). +Variable Methods +================= -.. method:: Variable.setvalue(pos, value) +.. automethod:: Var.getvalue - Set the value at the given position in the variable. +.. automethod:: Var.setvalue Variable Attributes =================== -.. attribute:: Variable.actual_elements - - This read-only attribute returns the actual number of elements in the - variable. This corresponds to the number of elements in a PL/SQL index-by - table for variables that are created using the method - :func:`Cursor.arrayvar()`. For all other variables this value will be - identical to the attribute :attr:`~Variable.numElements`. +.. autoproperty:: Var.actual_elements For consistency and compliance with the PEP 8 naming style, the attribute ``actualElements`` was renamed to ``actual_elements``. The old name will continue to work for a period of time. - -.. attribute:: Variable.buffer_size - - This read-only attribute returns the size of the buffer allocated for each - element in bytes. +.. autoproperty:: Var.buffer_size For consistency and compliance with the PEP 8 naming style, the attribute ``bufferSize`` was renamed to ``buffer_size``. The old name will continue to work for a period of time. - -.. attribute:: Variable.convert_nulls - - This read-only attribute returns whether the :attr:`~Variable.outconverter` - method is called when null values are fetched from the database. +.. autoproperty:: Var.convert_nulls .. versionadded:: 1.4.0 -.. attribute:: Variable.inconverter - - This read-only attribute specifies the method used to convert data from - Python to the Oracle database. The method signature is converter(value) - and the expected return value is the value to bind to the database. If this - attribute is *None*, the value is bound directly without any conversion. +.. autoproperty:: Var.inconverter - -.. attribute:: Variable.num_elements - - This read-only attribute returns the number of elements allocated in an - array, or the number of scalar items that can be fetched into the variable - or bound to the variable. +.. autoproperty:: Var.num_elements For consistency and compliance with the PEP 8 naming style, the attribute ``numElements`` was renamed to ``num_elements``. The old name will continue to work for a period of time. +.. autoproperty:: Var.outconverter -.. attribute:: Variable.outconverter - - This read-only attribute specifies the method used to convert data from - the Oracle database to Python. 
The method signature is converter(value) - and the expected return value is the value to return to Python. If this - attribute is *None*, the value is returned directly without any conversion. - - -.. attribute:: Variable.size - - This read-only attribute returns the size of the variable. For strings this - value is the size in characters. For all others, this is same value as the - attribute bufferSize. - - -.. attribute:: Variable.type - - This read-only attribute returns the type of the variable. This will be an - :ref:`Oracle Object Type ` if the variable binds - Oracle objects; otherwise, it will be one of the - :ref:`database type constants `. - - Database type constants are now used when the variable is not used for - binding Oracle objects. - +.. autoproperty:: Var.size -.. attribute:: Variable.values +.. autoproperty:: Var.type - This read-only attribute returns a copy of the value of all actual - positions in the variable as a list. This is the equivalent of calling - :meth:`~Variable.getvalue()` for each valid position and the length will - correspond to the value of the :attr:`~Variable.actualElements` attribute. +.. autoproperty:: Var.values diff --git a/doc/src/conf.py b/doc/src/conf.py index ddc4fef6..9bf9d7ff 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -18,7 +18,7 @@ # If your extensions are in another directory, add it here. sys.path.append(os.path.abspath("_ext")) -# include the path of the source so that autodoc willfunction +# include the path of the source so that autodoc will function sys.path.insert(0, str(pathlib.Path("..", "src").resolve())) # General configuration @@ -37,6 +37,9 @@ # preserve defaults in function signatures autodoc_preserve_defaults = True +# ensure that the constructor documentation is used in class documentation +autoclass_content = "init" + # Add any paths that contain templates here, relative to this directory. templates_path = [".templates"] diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 40c199a1..f547b2aa 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -2,6 +2,8 @@ .. _releasenotes: +.. currentmodule:: oracledb + python-oracledb Release Notes ============================= diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 057583d9..1bb51c30 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -1,5 +1,7 @@ .. _featuresummary: +.. currentmodule:: oracledb + ***************************************************************** Appendix A: Oracle Database Features Supported by python-oracledb ***************************************************************** diff --git a/doc/src/user_guide/appendix_b.rst b/doc/src/user_guide/appendix_b.rst index 191e0440..ef9626d4 100644 --- a/doc/src/user_guide/appendix_b.rst +++ b/doc/src/user_guide/appendix_b.rst @@ -1,5 +1,7 @@ .. _driverdiff: +.. currentmodule:: oracledb + ******************************************************************** Appendix B: Differences between python-oracledb Thin and Thick Modes ******************************************************************** diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index a0a74455..82b5a1e7 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -1,3 +1,5 @@ +.. 
currentmodule:: oracledb + ***************************************************** Appendix C: The python-oracledb and cx_Oracle Drivers ***************************************************** diff --git a/doc/src/user_guide/appendix_d.rst b/doc/src/user_guide/appendix_d.rst index b9e113e4..e2172f3c 100644 --- a/doc/src/user_guide/appendix_d.rst +++ b/doc/src/user_guide/appendix_d.rst @@ -1,5 +1,7 @@ .. _frameworks: +.. currentmodule:: oracledb + ******************************************************* Appendix D: Python Frameworks, SQL Generators, and ORMs ******************************************************* diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index 123de2c5..1105d3a8 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -1,5 +1,7 @@ .. _aqusermanual: +.. currentmodule:: oracledb + ************************************************************ Using Oracle Transactional Event Queues and Advanced Queuing ************************************************************ diff --git a/doc/src/user_guide/asyncio.rst b/doc/src/user_guide/asyncio.rst index 7ce0c212..0d261b0a 100644 --- a/doc/src/user_guide/asyncio.rst +++ b/doc/src/user_guide/asyncio.rst @@ -1,5 +1,7 @@ .. _asyncio: +.. currentmodule:: oracledb + ************************************************** Concurrent Programming with asyncio and Pipelining ************************************************** diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index bc860312..97977583 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -1,5 +1,7 @@ .. _batchstmnt: +.. currentmodule:: oracledb + **************************************** Batch Statement and Bulk Copy Operations **************************************** diff --git a/doc/src/user_guide/bind.rst b/doc/src/user_guide/bind.rst index a5531022..b7ba8c48 100644 --- a/doc/src/user_guide/bind.rst +++ b/doc/src/user_guide/bind.rst @@ -1,5 +1,7 @@ .. _bind: +.. currentmodule:: oracledb + ******************** Using Bind Variables ******************** diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 016e7e6c..ad4a4db4 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -1,5 +1,7 @@ .. _connhandling: +.. currentmodule:: oracledb + ***************************** Connecting to Oracle Database ***************************** diff --git a/doc/src/user_guide/cqn.rst b/doc/src/user_guide/cqn.rst index bfa55f8a..4dca8da6 100644 --- a/doc/src/user_guide/cqn.rst +++ b/doc/src/user_guide/cqn.rst @@ -1,5 +1,7 @@ .. _cqn: +.. currentmodule:: oracledb + ************************************************ Working with Continuous Query Notification (CQN) ************************************************ diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index 73620b07..bcbe5e02 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -1,5 +1,7 @@ .. _dataframeformat: +.. currentmodule:: oracledb + ************************ Working with Data Frames ************************ diff --git a/doc/src/user_guide/exception_handling.rst b/doc/src/user_guide/exception_handling.rst index 1f715a20..13138c61 100644 --- a/doc/src/user_guide/exception_handling.rst +++ b/doc/src/user_guide/exception_handling.rst @@ -1,5 +1,7 @@ .. _exception: +.. 
currentmodule:: oracledb + ******************* Catching Exceptions ******************* diff --git a/doc/src/user_guide/extending.rst b/doc/src/user_guide/extending.rst index 5d324884..4fb43458 100644 --- a/doc/src/user_guide/extending.rst +++ b/doc/src/user_guide/extending.rst @@ -1,5 +1,7 @@ .. _extendingpython-oracledb: +.. currentmodule:: oracledb + ************************* Extending python-oracledb ************************* diff --git a/doc/src/user_guide/globalization.rst b/doc/src/user_guide/globalization.rst index 7f4749fe..80709052 100644 --- a/doc/src/user_guide/globalization.rst +++ b/doc/src/user_guide/globalization.rst @@ -1,5 +1,7 @@ .. _globalization: +.. currentmodule:: oracledb + ******************************** Character Sets and Globalization ******************************** diff --git a/doc/src/user_guide/ha.rst b/doc/src/user_guide/ha.rst index b6d916d4..7777aea3 100644 --- a/doc/src/user_guide/ha.rst +++ b/doc/src/user_guide/ha.rst @@ -1,5 +1,7 @@ .. _highavailability: +.. currentmodule:: oracledb + ************************************** High Availability with python-oracledb ************************************** diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index 1521adca..1e2ac2ee 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -1,5 +1,7 @@ .. _initialization: +.. currentmodule:: oracledb + **************************** Initializing python-oracledb **************************** diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 32e97c23..18accaba 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -1,5 +1,7 @@ .. _installation: +.. currentmodule:: oracledb + *************************** Installing python-oracledb *************************** diff --git a/doc/src/user_guide/introduction.rst b/doc/src/user_guide/introduction.rst index 0124e8f6..8c9b1a6c 100644 --- a/doc/src/user_guide/introduction.rst +++ b/doc/src/user_guide/introduction.rst @@ -1,5 +1,7 @@ .. _introduction: +.. currentmodule:: oracledb + ***************************************************** Introduction to the Python Driver for Oracle Database ***************************************************** diff --git a/doc/src/user_guide/json_data_type.rst b/doc/src/user_guide/json_data_type.rst index 74a1dcf6..c95e6dbf 100644 --- a/doc/src/user_guide/json_data_type.rst +++ b/doc/src/user_guide/json_data_type.rst @@ -1,5 +1,7 @@ .. _jsondatatype: +.. currentmodule:: oracledb + *************** Using JSON Data *************** diff --git a/doc/src/user_guide/lob_data.rst b/doc/src/user_guide/lob_data.rst index 93ff141c..4ac6dc71 100644 --- a/doc/src/user_guide/lob_data.rst +++ b/doc/src/user_guide/lob_data.rst @@ -1,5 +1,7 @@ .. _lobdata: +.. currentmodule:: oracledb + *************************************** Using CLOB, BLOB, NCLOB, and BFILE Data *************************************** diff --git a/doc/src/user_guide/plsql_execution.rst b/doc/src/user_guide/plsql_execution.rst index 918fc16f..ee398b43 100644 --- a/doc/src/user_guide/plsql_execution.rst +++ b/doc/src/user_guide/plsql_execution.rst @@ -1,5 +1,7 @@ .. _plsqlexecution: +.. currentmodule:: oracledb + **************** Executing PL/SQL **************** diff --git a/doc/src/user_guide/soda.rst b/doc/src/user_guide/soda.rst index cdd44c92..4261542c 100644 --- a/doc/src/user_guide/soda.rst +++ b/doc/src/user_guide/soda.rst @@ -1,5 +1,7 @@ .. _sodausermanual: +.. 
currentmodule:: oracledb + ************************************************* Working with Simple Oracle Document Access (SODA) ************************************************* diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 4ecc2ed4..29bade82 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -1,5 +1,7 @@ .. _sqlexecution: +.. currentmodule:: oracledb + ************* Executing SQL ************* diff --git a/doc/src/user_guide/startup.rst b/doc/src/user_guide/startup.rst index c3673dbb..b0fa97d5 100644 --- a/doc/src/user_guide/startup.rst +++ b/doc/src/user_guide/startup.rst @@ -1,5 +1,7 @@ .. _startup: +.. currentmodule:: oracledb + ************************************* Starting and Stopping Oracle Database ************************************* diff --git a/doc/src/user_guide/tracing.rst b/doc/src/user_guide/tracing.rst index 1aa9c145..991847fd 100644 --- a/doc/src/user_guide/tracing.rst +++ b/doc/src/user_guide/tracing.rst @@ -1,5 +1,7 @@ .. _tracingsql: +.. currentmodule:: oracledb + *********************** Tracing python-oracledb *********************** diff --git a/doc/src/user_guide/troubleshooting.rst b/doc/src/user_guide/troubleshooting.rst index f9b1cd95..c651743f 100644 --- a/doc/src/user_guide/troubleshooting.rst +++ b/doc/src/user_guide/troubleshooting.rst @@ -1,5 +1,7 @@ .. _troubleshooting: +.. currentmodule:: oracledb + ********************** Troubleshooting Errors ********************** diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index de5c3e49..d04885d5 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -1,5 +1,7 @@ .. _tuning: +.. currentmodule:: oracledb + *********************** Tuning python-oracledb *********************** @@ -28,8 +30,8 @@ Some general tuning tips are: Use :ref:`bind variables ` to avoid statement reparsing. - Tune :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` for each query, - see :ref:`Tuning Fetch Performance `. + Tune :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` for each SELECT + query, see :ref:`Tuning Fetch Performance `. Do simple optimizations like :ref:`limiting the number of rows ` and avoiding selecting columns not used in the application. @@ -79,103 +81,127 @@ Some general tuning tips are: Tuning Fetch Performance ======================== -To improve application performance and scalability you can adjust the sizes of -python-oracledb's internal query result buffers. Increasing the buffers can -reduce :ref:`round-trips ` to improve the overall speed of fetching -rows across the network from the database. The buffer sizes can be used to tune -the behavior of all python-oracledb :ref:`row fetching methods ` but -do not affect how many rows are returned to your application by those methods. -You should tune the buffers for optimal performance and memory usage. - -Tune "array fetching" with -:attr:`Cursor.arraysize` and tune "row prefetching" with -:attr:`Cursor.prefetchrows`. Set these before calling -:meth:`Cursor.execute()`. The value used for prefetching can also be set in an -``oraaccess.xml`` file, see :ref:`optclientfiles`. In python-oracledb Thick -mode, the internal buffers allocated for ``prefetchrows`` and ``arraysize`` are -separate, so increasing both settings will require more Python process memory. -Queries that return LOBs and similar types will never prefetch rows, so the -``prefetchrows`` value is ignored in those cases. 
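To observe the effect of these buffer settings, the round-trips made by the current session can be counted before and after a fetch. A rough sketch follows; the helper name ``get_round_trips`` is illustrative only, and querying ``V$SESSTAT`` and ``V$STATNAME`` requires the appropriate privileges:

.. code-block:: python

    def get_round_trips(connection):
        # number of round-trips made so far by the current session; call it
        # before and after a fetch and compare the two values
        cursor = connection.cursor()
        cursor.execute("""
            select ss.value
            from v$sesstat ss, v$statname sn
            where ss.sid = sys_context('userenv', 'sid')
              and ss.statistic# = sn.statistic#
              and sn.name = 'SQL*Net roundtrips to/from client'""")
        (round_trips,) = cursor.fetchone()
        return round_trips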
- -The difference between row prefetching and array fetching is when the internal -buffering occurs. Internally python-oracledb performs separate "execute SQL -statement" and "fetch data" steps. Prefetching allows query results to be -returned to the application when the acknowledgment of successful statement -execution is returned from the database. This means that the subsequent -internal "fetch data" operation does not always need to make a round-trip to -the database because rows are already buffered in python-oracledb or in the -Oracle Client libraries. An overhead of prefetching when using the -python-oracledb Thick mode is the need for additional data copies from Oracle -Client's prefetch buffer when fetching the first batch of rows. This cost may -outweigh the benefits of using prefetching in some cases. +To improve application performance and scalability, you can set +:attr:`Cursor.prefetchrows` and :attr:`Cursor.arraysize` to adjust +python-oracledb's internal buffers used for SELECT and REF CURSOR query +results. Increasing the sizes causes bigger batches of rows to be fetched by +each internal request to the database. This reduces the number of +:ref:`round-trips ` required. The performance benefit of larger +buffer sizes can be significant when fetching lots of rows, or when using a +slow network. The database also benefits from reduced overheads. + +Internally, python-oracledb always does row prefetching and array fetching. +Row prefetching is when rows are returned from the database in the round-trip +used for initial statement execution. Rows are then available in an internal +buffer for the application to consume. Without prefetching, the first set of +rows would have to fetched from the database using a separate round-trip. When +all rows in the prefetch buffer have been consumed by the application, the next +set of rows are internally fetched into a buffer using an array fetch which +takes one round-trip. These rows are made available to the application when it +wants them. After the initial array fetch buffer has been emptied, further +array fetches into the buffer occur as needed to completely retrieve all rows +from the database. + +Changing the buffer sizes with :attr:`~Cursor.prefetchrows` and +:attr:`~Cursor.arraysize` affects the internal buffering behavior of +:meth:`Cursor.fetchone()`, :meth:`Cursor.fetchmany()`, and +:meth:`Cursor.fetchall()`. The attribute values do not affect how rows are +returned to the application with the exception of :meth:`Cursor.fetchmany()`, +where :attr:`Cursor.arraysize` is also used as the default for its ``size`` +parameter. + +An overhead of row prefetching in python-oracledb Thick mode is the use of a +separate buffer from the array fetch buffer, increasing the memory +requirements. Also, an additional data copy from Oracle Client's prefetch +buffer is performed. These costs may outweigh the benefits of using prefetching +in some cases. + +SELECT queries that return :ref:`LOB ` objects and similar types will +never prefetch rows, so the :attr:`~Cursor.prefetchrows` value is ignored in +those cases. + +The attributes do not affect data insertion. To reduce round-trips for DML +statements, see :ref:`batchstmnt`. Choosing values for ``arraysize`` and ``prefetchrows`` ------------------------------------------------------ -The best :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` values can be -found by experimenting with your application under the expected load of normal -application use. 
The reduction of round-trips may help performance and overall -system scalability. The documentation in :ref:`round-trips ` shows -how to measure round-trips. +Tune "array fetching" with :attr:`Cursor.arraysize`. Tune "row prefetching" +with :attr:`Cursor.prefetchrows`. Set these before calling +:meth:`Cursor.execute()`. Also, see :ref:`defprefarray`. -Here are some suggestions for tuning: +The best :attr:`~Cursor.arraysize` and :attr:`~Cursor.prefetchrows` values can +be found by benchmarking your application under production load. Here are +starting suggestions for four common scenarios: -* To tune queries that return an unknown, large, number of rows, estimate the - number of rows returned and increase the :attr:`Cursor.arraysize` value for - best performance, memory and round-trip usage. The default is 100. For - example: +* Scenario 1: To tune queries that return an unknown and large number of rows, + increase the :attr:`~Cursor.arraysize` value from its default of 100 until + you are satisfied with performance, memory, and round-trip usage. For example: .. code-block:: python - cur = connection.cursor() + cursor = connection.cursor() - cur.arraysize = 1000 + # cursor.prefetchrows = 2 # generally leave this at its default + cursor.arraysize = 1000 - for row in cur.execute("SELECT * FROM very_big_table"): + for row in cursor.execute("select * from very_big_table"): print(row) - In general for this scenario, leave ``prefetchrows`` at its default value. - If you do change it, then set ``arraysize`` as big, or bigger. Do not make - the sizes unnecessarily large. + In general for this scenario, leave :attr:`~Cursor.prefetchrows` at its + default value. If you do change it, then set :attr:`Cursor.arraysize` to a + bigger value. Do not make the sizes unnecessarily large. -* If you are fetching a fixed number of rows, set ``arraysize`` to the number - of expected rows, and set ``prefetchrows`` to one greater than this value. - Adding one removes the need for a round-trip to check for end-of-fetch. For - example, if you are querying 20 rows, perhaps to :ref:`display a page - ` of data, then set ``prefetchrows`` to 21 and ``arraysize`` to 20: +* Scenario 2: If you are fetching a fixed number of rows, set + :attr:`~Cursor.arraysize` to the number of expected rows, and set + :attr:`~Cursor.prefetchrows` to one greater than this value. Adding one + removes the need for a round-trip to check for end-of-fetch. For example, if + you are using a SELECT query to retrieve 20 rows, perhaps to :ref:`display a + page ` of data, then set :attr:`~Cursor.prefetchrows` to 21 and + :attr:`~Cursor.arraysize` to 20: .. code-block:: python - cur = connection.cursor() + cursor = connection.cursor() - cur.prefetchrows = 21 - cur.arraysize = 20 + cursor.prefetchrows = 21 + cursor.arraysize = 20 - for row in cur.execute(""" - SELECT last_name - FROM employees - ORDER BY last_name - OFFSET 0 ROWS FETCH NEXT 20 ROWS ONLY"""): + for row in cursor.execute(""" + select last_name + from employees + order by last_name + offset 0 rows fetch next 20 rows only"""): print(row) This will return all rows for the query in one round-trip. -* If you know that a query returns just one row then set - :attr:`Cursor.arraysize` to 1 to minimize memory usage. The default prefetch +* Scenario 3: If the number of rows returned by a SELECT statement varies from + execution to execution in your application but is never large, you can use a + similar strategy to Scenario 2. 
+ + Choose :attr:`~Cursor.arraysize` and :attr:`~Cursor.prefetchrows` values that + work well for the expected maximum number of rows. + +* Scenario 4: If you know that a SELECT query returns just one row then set + :attr:`~Cursor.arraysize` to 1 to minimize memory usage. The default prefetch value of 2 allows minimal round-trips for single-row queries: .. code-block:: python - cur = connection.cursor() + cursor = connection.cursor() - cur.arraysize = 1 + cursor.arraysize = 1 - cur.execute("select * from MyTable where id = 1"): - row = cur.fetchone() + cursor.execute("select * from MyTable where id = 1"): + row = cursor.fetchone() print(row) +**Round-trips Required for Fetching Rows** + The following table shows the number of round-trips required to fetch various -numbers of rows with different ``prefetchrows`` and ``arraysize`` values. +numbers of rows with different :attr:`~Cursor.prefetchrows` and +:attr:`~Cursor.arraysize` values. .. list-table-with-summary:: Effect of ``prefetchrows`` and ``arraysize`` on the number of round-trips :header-rows: 1 @@ -223,6 +249,8 @@ numbers of rows with different ``prefetchrows`` and ``arraysize`` values. The number of round-trips will be the same regardless of which :ref:`python-oracledb method ` is used to fetch query results. +.. _defprefarray: + Application Default Prefetchrows and Arraysize Values +++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -247,10 +275,10 @@ first tuning choice. Changing Prefetchrows and Arraysize for Re-executed Statements ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -In python-oracledb, the ``arraysize`` and ``prefetchrows`` values are only -examined when a statement is executed the first time. To change the values for -a re-executed statement, create a new cursor. For example, to change -``arraysize``: +In python-oracledb, the :attr:`~Cursor.prefetchrows` and +:attr:`~Cursor.arraysize` values are only examined when a statement is executed +the first time. To change the values for a re-executed statement, create a new +cursor. For example, to change :attr:`~Cursor.arraysize``: .. code-block:: python @@ -266,12 +294,13 @@ a re-executed statement, create a new cursor. For example, to change Avoiding Premature Prefetching ++++++++++++++++++++++++++++++ -There are two cases that will benefit from setting ``prefetchrows`` to zero: +There are two cases that will benefit from setting :attr:`~Cursor.prefetchrows` +to zero: * When passing a python-oracledb cursor *into* PL/SQL. Setting - ``prefetchrows`` to 0 can stop rows being prematurely (and silently) fetched - into the python-oracledb internal buffer, making those rows unavailable to - the PL/SQL REF CURSOR parameter:: + :attr:`~Cursor.prefetchrows` to 0 can stop rows being prematurely (and + silently) fetched into the python-oracledb internal buffer, making those rows + unavailable to the PL/SQL REF CURSOR parameter:: refcursor = connection.cursor() refcursor.prefetchrows = 0 @@ -281,15 +310,16 @@ There are two cases that will benefit from setting ``prefetchrows`` to zero: * When querying a PL/SQL function that uses PIPE ROW to emit rows at intermittent intervals. By default, several rows needs to be emitted by the function before python-oracledb can return them to the application. Setting - ``prefetchrows`` to 0 helps give a consistent flow of data to the + :attr:`~Cursor.prefetchrows` to 0 helps give a consistent flow of data to the application. 
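For the second case above (a PIPE ROW pipelined function), a minimal sketch might look like the following; the function name ``my_pipelined_fn`` is a placeholder and not part of this patch:

.. code-block:: python

    cursor = connection.cursor()
    cursor.prefetchrows = 0   # no rows are silently buffered during execute()
    cursor.arraysize = 1      # fetch rows individually as they are emitted

    cursor.execute("select * from table(my_pipelined_fn())")
    for row in cursor:
        print(row)            # each row is processed as soon as it arrives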
Tuning Fetching from REF CURSORS -------------------------------- The internal buffering and performance of fetching data from REF CURSORS can be -tuned by setting the value of ``arraysize`` before rows are fetched from the -cursor. The ``prefetchrows`` value is ignored when fetching *from* REF CURSORS. +tuned by setting the value of :attr:`~Cursor.arraysize` before rows are fetched +from the cursor. The :attr:`~Cursor.prefetchrows` value is ignored when +fetching *from* REF CURSORS. For example: @@ -304,7 +334,8 @@ For example: sum_rows += row[0] print(sum_rows) -The ``arraysize`` value can also be set before calling the procedure: +The :attr:`Cursor.arraysize`` value can also be set before calling the +procedure: .. code-block:: python @@ -315,8 +346,6 @@ The ``arraysize`` value can also be set before calling the procedure: for row in ref_cursor: . . . -.. _roundtrips: - Also see `Avoiding Premature Prefetching`_. Tuning Fetching for Data Frames @@ -334,8 +363,8 @@ Parallelizing Data Fetches from a Single Table ---------------------------------------------- Before trying to improve the performance of querying a single table by issuing -multiple SQL queries in multiple threads, where each query extracts a different -range of data, you should do careful benchmarking. +multiple SELECT queries in multiple threads, where each query extracts a +different range of data, you should do careful benchmarking. Factors that will impact such a solution: @@ -375,6 +404,8 @@ Factors that will impact such a solution: - Is it better to execute a single query in Python but use a PARALLEL query hint? Or will this overload the database. +.. _roundtrips: + Database Round-trips ==================== @@ -393,7 +424,7 @@ overall system scalability. Some general tips for reducing round-trips are: * Tune :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` for each - query. + SELECT query. * Use :meth:`Cursor.executemany()` for optimal DML execution. * Only commit when necessary. Use :attr:`Connection.autocommit` on the last statement of a transaction. @@ -607,12 +638,11 @@ Client Result Caching (CRC) Python-oracledb applications can use Oracle Database's `Client Result Cache `__. The CRC enables client-side caching of SQL query -(SELECT statement) results in client memory for immediate use when the same -query is re-executed. This is useful for reducing the cost of queries for -small, mostly static, lookup tables, such as for postal codes. CRC reduces -network :ref:`round-trips `, and also reduces database server CPU -usage. +4C2D-9075-6F639F25425E>`__. The CRC enables client-side caching of SELECT +query results in client memory for immediate use when the same query is +re-executed. This is useful for reducing the cost of queries for small, +mostly static, lookup tables, such as for postal codes. CRC reduces network +:ref:`round-trips `, and also reduces database server CPU usage. .. note:: diff --git a/doc/src/user_guide/two_phase_commit.rst b/doc/src/user_guide/two_phase_commit.rst index d9ac436e..dc703613 100644 --- a/doc/src/user_guide/two_phase_commit.rst +++ b/doc/src/user_guide/two_phase_commit.rst @@ -1,5 +1,7 @@ .. _tpc: +.. currentmodule:: oracledb + ***************************** Using Two-Phase Commits (TPC) ***************************** diff --git a/doc/src/user_guide/txn_management.rst b/doc/src/user_guide/txn_management.rst index 5826b285..3ffed179 100644 --- a/doc/src/user_guide/txn_management.rst +++ b/doc/src/user_guide/txn_management.rst @@ -1,5 +1,7 @@ .. _txnmgmnt: +.. 
currentmodule:: oracledb + ********************* Managing Transactions ********************* diff --git a/doc/src/user_guide/vector_data_type.rst b/doc/src/user_guide/vector_data_type.rst index 1f070c45..b64f7ce8 100644 --- a/doc/src/user_guide/vector_data_type.rst +++ b/doc/src/user_guide/vector_data_type.rst @@ -1,5 +1,7 @@ .. _vectors: +.. currentmodule:: oracledb + ***************** Using VECTOR Data ***************** diff --git a/doc/src/user_guide/xml_data_type.rst b/doc/src/user_guide/xml_data_type.rst index e95b2a4d..fdd104cc 100644 --- a/doc/src/user_guide/xml_data_type.rst +++ b/doc/src/user_guide/xml_data_type.rst @@ -1,5 +1,7 @@ .. _xmldatatype: +.. currentmodule:: oracledb + ****************** Using XMLTYPE Data ****************** diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index dacf46cf..9ec8da65 100644 --- a/src/oracledb/__init__.py +++ b/src/oracledb/__init__.py @@ -230,7 +230,10 @@ from .errors import _Error as _Error -from .defaults import defaults as defaults +from .defaults import ( + defaults as defaults, + Defaults as Defaults, +) from .pipeline import ( Pipeline as Pipeline, @@ -260,12 +263,29 @@ ) from .subscr import ( + Subscription as Subscription, Message as Message, MessageQuery as MessageQuery, MessageRow as MessageRow, MessageTable as MessageTable, ) +from .aq import ( + Queue as Queue, + AsyncQueue as AsyncQueue, + DeqOptions as DeqOptions, + EnqOptions as EnqOptions, + MessageProperties as MessageProperties, +) + +from .soda import ( + SodaDatabase as SodaDatabase, + SodaCollection as SodaCollection, + SodaDocument as SodaDocument, + SodaDocCursor as SodaDocCursor, + SodaOperation as SodaOperation, +) + from .connect_params import ConnectParams as ConnectParams from .pool_params import PoolParams as PoolParams @@ -275,7 +295,11 @@ AsyncLOB as AsyncLOB, ) -from .dbobject import DbObject as DbObject, DbObjectType as DbObjectType +from .dbobject import ( + DbObject as DbObject, + DbObjectAttr as DbObjectAttr, + DbObjectType as DbObjectType, +) from .fetch_info import FetchInfo as FetchInfo @@ -286,19 +310,16 @@ from .driver_mode import is_thin_mode as is_thin_mode from .utils import ( + clientversion as clientversion, enable_thin_mode as enable_thin_mode, from_arrow as from_arrow, + init_oracle_client as init_oracle_client, register_params_hook as register_params_hook, register_password_type as register_password_type, register_protocol as register_protocol, unregister_params_hook as unregister_params_hook, ) -from .thick_impl import ( - clientversion as clientversion, - init_oracle_client as init_oracle_client, -) - from .constructors import ( Binary as Binary, Date as Date, diff --git a/src/oracledb/aq.py b/src/oracledb/aq.py index d67e41e8..7bbd2564 100644 --- a/src/oracledb/aq.py +++ b/src/oracledb/aq.py @@ -32,7 +32,7 @@ import datetime from . import connection as connection_module -from typing import Any, Union, List +from typing import Any, Union from . import errors from .dbobject import DbObject, DbObjectType @@ -72,15 +72,16 @@ def _verify_message(self, message: "MessageProperties") -> None: @property def connection(self) -> "connection_module.Connection": """ - Returns the connection on which the queue was created. + This read-only attribute returns a reference to the connection object + on which the queue was created. """ return self._connection @property def deqoptions(self) -> "DeqOptions": """ - Returns the options that will be used when dequeuing messages from the - queue. 
+ This read-only attribute returns a reference to the options that will + be used when dequeuing messages from the queue. """ return self._deq_options @@ -94,8 +95,8 @@ def deqOptions(self) -> "DeqOptions": @property def enqoptions(self) -> "EnqOptions": """ - Returns the options that will be used when enqueuing messages into the - queue. + This read-only attribute returns a reference to the options that will + be used when enqueuing messages into the queue. """ return self._enq_options @@ -109,15 +110,16 @@ def enqOptions(self) -> "EnqOptions": @property def name(self) -> str: """ - Returns the name of the queue. + This read-only attribute returns the name of the queue. """ return self._impl.name @property def payload_type(self) -> Union[DbObjectType, None]: """ - Returns the object type for payloads that can be enqueued and dequeued. - If using a raw queue, this returns the value None. + This read-only attribute returns the object type for payloads that can + be enqueued and dequeued. If using a JSON queue, this returns the value + "JSON". If using a raw queue, this returns the value *None*. """ if self._payload_type is None: if self._impl.is_json: @@ -138,7 +140,7 @@ def payloadType(self) -> Union[DbObjectType, None]: class Queue(BaseQueue): - def deqmany(self, max_num_messages: int) -> list: + def deqmany(self, max_num_messages: int) -> list["MessageProperties"]: """ Dequeues up to the specified number of messages from the queue and returns a list of these messages. @@ -154,7 +156,7 @@ def deqmany(self, max_num_messages: int) -> list: message_impls.append(message_impl) return [MessageProperties._from_impl(impl) for impl in message_impls] - def deqMany(self, max_num_messages: int) -> List["MessageProperties"]: + def deqMany(self, max_num_messages: int) -> list["MessageProperties"]: """ Deprecated: use deqmany() instead. """ @@ -175,24 +177,27 @@ def deqOne(self) -> Union["MessageProperties", None]: """ return self.deqone() - def enqmany(self, messages: list) -> None: + def enqmany(self, messages: list["MessageProperties"]) -> None: """ Enqueues multiple messages into the queue. The messages parameter must be a sequence containing message property objects which have all had their payload attribute set to a value that the queue supports. - Warning: calling this function in parallel on different connections - acquired from the same pool may fail due to Oracle bug 29928074. Ensure - that this function is not run in parallel, use standalone connections - or connections from different pools, or make multiple calls to - enqone() instead. The function Queue.deqmany() call is not affected. + Warning: In python-oracledb Thick mode using Oracle Client libraries + prior to 21c, calling :meth:`Queue.enqmany()` in parallel on different + connections acquired from the same connection pool may fail due to + Oracle bug 29928074. To avoid this, do one of: upgrade the client + libraries, ensure that :meth:`Queue.enqmany()` is not run in parallel, + use standalone connections or connections from different pools, or make + multiple calls to :meth:`Queue.enqone()`. The function + :meth:`Queue.deqmany()` call is not affected. """ for message in messages: self._verify_message(message) message_impls = [m._impl for m in messages] self._impl.enq_many(message_impls) - def enqMany(self, messages: list) -> None: + def enqMany(self, messages: list["MessageProperties"]) -> None: """ Deprecated: use enqmany() instead. 
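As a rough usage sketch of ``Queue.enqmany()`` described above, a batch of messages can be enqueued on a single connection; the queue name ``DEMO_RAW_QUEUE`` is a placeholder and a RAW queue is assumed:

.. code-block:: python

    queue = connection.queue("DEMO_RAW_QUEUE")

    # one message property object per payload; strings are encoded as UTF-8
    messages = [
        connection.msgproperties(payload=f"Message {i}") for i in range(4)
    ]

    queue.enqmany(messages)   # enqueue the whole batch in a single operation
    connection.commit()       # make the messages visible to dequeuers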
""" @@ -216,7 +221,9 @@ def enqOne(self, message: "MessageProperties") -> None: class AsyncQueue(BaseQueue): - async def deqmany(self, max_num_messages: int) -> list: + async def deqmany( + self, max_num_messages: int + ) -> list["MessageProperties"]: """ Dequeues up to the specified number of messages from the queue and returns a list of these messages. @@ -233,7 +240,7 @@ async def deqone(self) -> Union["MessageProperties", None]: if message_impl is not None: return MessageProperties._from_impl(message_impl) - async def enqmany(self, messages: list) -> None: + async def enqmany(self, messages: list["MessageProperties"]) -> None: """ Enqueues multiple messages into the queue. The messages parameter must be a sequence containing message property objects which have all had @@ -270,10 +277,10 @@ def _from_impl(cls, impl): @property def condition(self) -> str: """ - Specifies a boolean expression similar to the where clause of a SQL - query. The boolean expression can include conditions on message - properties, user data properties and PL/SQL or SQL functions. The - default is to have no condition specified. + This read-write attribute specifies a boolean expression similar to the + where clause of a SQL query. The boolean expression can include + conditions on message properties, user data properties, and PL/SQL or + SQL functions. The default is to have no condition specified. """ return self._impl.get_condition() @@ -284,10 +291,10 @@ def condition(self, value: str) -> None: @property def consumername(self) -> str: """ - Specifies the name of the consumer. Only messages matching the consumer - name will be accessed. If the queue is not set up for multiple - consumers this attribute should not be set. The default is to have no - consumer name specified. + This read-write attribute specifies the name of the consumer. Only + messages matching the consumer name will be accessed. If the queue is + not set up for multiple consumers this attribute should not be set. The + default is to have no consumer name specified. """ return self._impl.get_consumer_name() @@ -298,11 +305,11 @@ def consumername(self, value: str) -> None: @property def correlation(self) -> str: """ - Specifies the correlation identifier of the message to be dequeued. - Special pattern-matching characters, such as the percent sign (%) and - the underscore (_), can be used. If multiple messages satisfy the - pattern, the order of dequeuing is indeterminate. The default is to - have no correlation specified. + This read-write attribute specifies the correlation identifier of the + message to be dequeued. Special pattern-matching characters, such as + the percent sign (%) and the underscore (_), can be used. If multiple + messages satisfy the pattern, the order of dequeuing is indeterminate. + The default is to have no correlation specified. """ return self._impl.get_correlation() @@ -311,11 +318,13 @@ def correlation(self, value: str) -> None: self._impl.set_correlation(value) @property - def deliverymode(self) -> None: + def deliverymode(self) -> int: """ - Specifies what types of messages should be dequeued. It should be one - of the values MSG_PERSISTENT (default), MSG_BUFFERED or - MSG_PERSISTENT_OR_BUFFERED. + This write-only attribute specifies what types of messages should be + dequeued. It should be one of the values + :data:`~oracledb.MSG_PERSISTENT` (default), + :data:`~oracledb.MSG_BUFFERED`, or + :data:`~oracledb.MSG_PERSISTENT_OR_BUFFERED`. 
""" raise AttributeError("deliverymode can only be written") @@ -326,9 +335,11 @@ def deliverymode(self, value: int) -> None: @property def mode(self) -> int: """ - Specifies the locking behaviour associated with the dequeue operation. - It should be one of the values DEQ_BROWSE, DEQ_LOCKED, DEQ_REMOVE - (default), or DEQ_REMOVE_NODATA. + This read-write attribute specifies the locking behaviour associated + with the dequeue operation. It should be one of the values + :data:`~oracledb.DEQ_BROWSE`, :data:`~oracledb.DEQ_LOCKED`, + :data:`~oracledb.DEQ_REMOVE` (default), or + :data:`~oracledb.DEQ_REMOVE_NODATA`. """ return self._impl.get_mode() @@ -339,8 +350,8 @@ def mode(self, value: int) -> None: @property def msgid(self) -> bytes: """ - Specifies the identifier of the message to be dequeued. The default is - to have no message identifier specified. + This read-write attribute specifies the identifier of the message to + be dequeued. The default is to have no message identifier specified. """ return self._impl.get_message_id() @@ -351,9 +362,10 @@ def msgid(self, value: bytes) -> None: @property def navigation(self) -> int: """ - Specifies the position of the message that is retrieved. It should be - one of the values DEQ_FIRST_MSG, DEQ_NEXT_MSG (default), or - DEQ_NEXT_TRANSACTION. + This read-write attribute specifies the position of the message that is + retrieved. It should be one of the values + :data:`~oracledb.DEQ_FIRST_MSG`, :data:`~oracledb.DEQ_NEXT_MSG` + (default), or :data:`~oracledb.DEQ_NEXT_TRANSACTION`. """ return self._impl.get_navigation() @@ -364,10 +376,11 @@ def navigation(self, value: int) -> None: @property def transformation(self) -> str: """ - Specifies the name of the transformation that must be applied after the - message is dequeued from the database but before it is returned to the - calling application. The transformation must be created using - dbms_transform. The default is to have no transformation specified. + This read-write attribute specifies the name of the transformation that + must be applied after the message is dequeued from the database but + before it is returned to the calling application. The transformation + must be created using dbms_transform. The default is to have no + transformation specified. """ return self._impl.get_transformation() @@ -378,10 +391,12 @@ def transformation(self, value: str) -> None: @property def visibility(self) -> int: """ - Specifies the transactional behavior of the dequeue request. It should - be one of the values DEQ_ON_COMMIT (default) or DEQ_IMMEDIATE. This - attribute is ignored when using the DEQ_BROWSE mode. Note the value of - autocommit is always ignored. + This read-write attribute specifies the transactional behavior of the + dequeue request. It should be one of the values + :data:`~oracledb.DEQ_ON_COMMIT` (default) or + :data:`~oracledb.DEQ_IMMEDIATE`. This attribute is ignored when using + the :data:`~oracledb.DEQ_BROWSE` mode. Note the value of + :attr:`~Connection.autocommit` is always ignored. """ return self._impl.get_visibility() @@ -392,10 +407,11 @@ def visibility(self, value: int) -> None: @property def wait(self) -> int: """ - Specifies the time to wait, in seconds, for a message matching the - search criteria to become available for dequeuing. One of the values - DEQ_NO_WAIT or DEQ_WAIT_FOREVER can also be used. The default is - DEQ_WAIT_FOREVER. + This read-write attribute specifies the time to wait, in seconds, for a + message matching the search criteria to become available for dequeuing. 
+ One of the values :data:`~oracledb.DEQ_NO_WAIT` or + :data:`~oracledb.DEQ_WAIT_FOREVER` can also be used. The default is + :data:`~oracledb.DEQ_WAIT_FOREVER`. """ return self._impl.get_wait() @@ -414,8 +430,10 @@ def _from_impl(cls, impl): @property def deliverymode(self) -> int: """ - Specifies what type of messages should be enqueued. It should be one of - the values MSG_PERSISTENT (default) or MSG_BUFFERED. + This write-only attribute specifies what type of messages should be + enqueued. It should be one of the values + :data:`~oracledb.MSG_PERSISTENT` (default) or + :data:`~oracledb.MSG_BUFFERED`. """ raise AttributeError("deliverymode can only be written") @@ -426,10 +444,10 @@ def deliverymode(self, value: int) -> None: @property def transformation(self) -> str: """ - Specifies the name of the transformation that must be applied before - the message is enqueued into the database. The transformation must be - created using dbms_transform. The default is to have no transformation - specified. + This read-write attribute specifies the name of the transformation that + must be applied before the message is enqueued into the database. The + transformation must be created using dbms_transform. The default is to + have no transformation specified. """ return self._impl.get_transformation() @@ -440,9 +458,11 @@ def transformation(self, value: str) -> None: @property def visibility(self) -> int: """ - Specifies the transactional behavior of the enqueue request. It should - be one of the values ENQ_ON_COMMIT (default) or ENQ_IMMEDIATE. Note the - value of autocommit is ignored. + This read-write attribute specifies the transactional behavior of the + enqueue request. It should be one of the values + :data:`~oracledb.ENQ_ON_COMMIT` (default) or + :data:`~oracledb.ENQ_IMMEDIATE`. Note the value of + :attr:`~Connection.autocommit` is ignored. """ return self._impl.get_visibility() @@ -463,15 +483,16 @@ def _from_impl(cls, impl): @property def attempts(self) -> int: """ - Specifies the number of attempts that have been made to dequeue the - message. + This read-only attribute specifies the number of attempts that have + been made to dequeue the message. """ return self._impl.get_num_attempts() @property def correlation(self) -> str: """ - Specifies the correlation used when the message was enqueued. + This read-write attribute specifies the correlation used when the + message was enqueued. """ return self._impl.get_correlation() @@ -482,9 +503,10 @@ def correlation(self, value: str) -> None: @property def delay(self) -> int: """ - Specifies the number of seconds to delay an enqueued message. Any - integer is acceptable but the constant MSG_NO_DELAY can also be used - indicating that the message is available for immediate dequeuing. + This read-write attribute specifies the number of seconds to delay an + enqueued message. Any integer is acceptable but the constant + :data:`~oracledb.MSG_NO_DELAY` can also be used indicating that the + message is available for immediate dequeuing. """ return self._impl.get_delay() @@ -495,27 +517,31 @@ def delay(self, value: int) -> None: @property def deliverymode(self) -> int: """ - Specifies the type of message that was dequeued. It will be one of the - values MSG_PERSISTENT or MSG_BUFFERED. + This read-only attribute specifies the type of message that was + dequeued. It will be one of the values + :data:`~oracledb.MSG_PERSISTENT` or + :data:`~oracledb.MSG_BUFFERED`. 
""" return self._impl.get_delivery_mode() @property def enqtime(self) -> datetime.datetime: """ - Specifies the time that the message was enqueued. + This read-only attribute specifies the time that the message was + enqueued. """ return self._impl.get_enq_time() @property def exceptionq(self) -> str: """ - Specifies the name of the queue to which the message is moved if it - cannot be processed successfully. Messages are moved if the number of - unsuccessful dequeue attempts has exceeded the maximum number of - retries or if the message has expired. All messages in the exception - queue are in the MSG_EXPIRED state. The default value is the name of - the exception queue associated with the queue table. + This read-write attribute specifies the name of the queue to which the + message is moved if it cannot be processed successfully. Messages are + moved if the number of unsuccessful dequeue attempts has exceeded the + maximum number of retries or if the message has expired. All messages + in the exception queue are in the :data:`~oracledb.MSG_EXPIRED` state. + The default value is the name of the exception queue associated with + the queue table. """ return self._impl.get_exception_queue() @@ -526,11 +552,12 @@ def exceptionq(self, value: str) -> None: @property def expiration(self) -> int: """ - Specifies, in seconds, how long the message is available for dequeuing. - This attribute is an offset from the delay attribute. Expiration - processing requires the queue monitor to be running. Any integer is - accepted but the constant MSG_NO_EXPIRATION can also be used indicating - that the message never expires. + This read-write attribute specifies, in seconds, how long the message + is available for dequeuing. This attribute is an offset from the delay + attribute. Expiration processing requires the queue monitor to be + running. Any integer is accepted but the constant + :data:`~oracledb.MSG_NO_EXPIRATION` can also be used indicating that + the message never expires. """ return self._impl.get_expiration() @@ -541,20 +568,21 @@ def expiration(self, value: int) -> None: @property def msgid(self) -> bytes: """ - Specifies the id of the message in the last queue that enqueued or - dequeued this message. If the message has never been dequeued or - enqueued, the value will be `None`. + This read-only attribute specifies the id of the message in the last + queue that enqueued or dequeued this message. If the message has never + been dequeued or enqueued, the value will be `None`. """ return self._impl.get_message_id() @property def payload(self) -> Union[bytes, DbObject]: """ - Specifies the payload that will be enqueued or the payload that was - dequeued when using a queue. When enqueuing, the value is checked to - ensure that it conforms to the type expected by that queue. For RAW - queues, the value can be a bytes object or a string. If the value is a - string it will be converted to bytes in the encoding UTF-8. + This read-write attribute specifies the payload that will be enqueued + or the payload that was dequeued when using a queue. When enqueuing, + the value is checked to ensure that it conforms to the type expected + by that queue. For RAW queues, the value can be a bytes object or a + string. If the value is a string it will be converted to bytes in the + encoding UTF-8. """ return self._impl.payload @@ -575,9 +603,9 @@ def payload(self, value: Any) -> None: @property def priority(self) -> int: """ - Specifies the priority of the message. A smaller number indicates a - higher priority. 
The priority can be any integer, including negative - numbers. The default value is zero. + This read-write attribute specifies the priority of the message. A + smaller number indicates a higher priority. The priority can be any + integer, including negative numbers. The default value is 0. """ return self._impl.get_priority() @@ -586,13 +614,14 @@ def priority(self, value: int) -> None: self._impl.set_priority(value) @property - def recipients(self) -> list: + def recipients(self) -> list[str]: """ - A list of recipient names can be associated with a message at the time - a message is enqueued. This allows a limited set of recipients to - dequeue each message. The recipient list associated with the message - overrides the queue subscriber list, if there is one. The recipient - names need not be in the subscriber list but can be, if desired. + This read-write attribute specifies a list of recipient names that can + be associated with a message at the time a message is enqueued. This + allows a limited set of recipients to dequeue each message. The + recipient list associated with the message overrides the queue + subscriber list, if there is one. The recipient names need not be in + the subscriber list but can be, if desired. To dequeue a message, the consumername attribute can be set to one of the recipient names. The original message recipient list is not @@ -615,8 +644,9 @@ def recipients(self, value: list) -> None: @property def state(self) -> int: """ - Specifies the state of the message at the time of the dequeue. It will - be one of the values MSG_WAITING, MSG_READY, MSG_PROCESSED or - MSG_EXPIRED. + This read-only attribute specifies the state of the message at the time + of the dequeue. It will be one of the values + :data:`~oracledb.MSG_WAITING`, :data:`~oracledb.MSG_READY`, + :data:`~oracledb.MSG_PROCESSED`, or :data:`~oracledb.MSG_EXPIRED`. """ return self._impl.get_state() diff --git a/src/oracledb/connect_params.py b/src/oracledb/connect_params.py index c4b4db90..e0bfde82 100644 --- a/src/oracledb/connect_params.py +++ b/src/oracledb/connect_params.py @@ -239,8 +239,7 @@ def __init__( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the - tuple should be a string. This value is only used in thick mode - (default: None) + tuple should be a string (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick @@ -398,7 +397,7 @@ def appcontext(self) -> list: """ Application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple should be - a string. This value is only used in thick mode. + a string. """ return self._impl.appcontext @@ -832,7 +831,7 @@ def wallet_location(self) -> Union[list, str]: def copy(self) -> "ConnectParams": """ - Creates a copy of the parameters and returns it. + Creates a copy of the ConnectParams instance and returns it. """ params = ConnectParams.__new__(ConnectParams) params._impl = self._impl.copy() @@ -840,36 +839,38 @@ def copy(self) -> "ConnectParams": def get_connect_string(self) -> str: """ - Returns a connect string generated from the parameters. + Returns the connection string associated with the instance. 
""" return self._impl.get_connect_string() def get_network_service_names(self) -> list: """ - Returns a list of the network service names found in the tnsnames.ora - file found in the configuration directory associated with the - parameters. If no such file exists, an error is raised. + Returns a list of the network service names found in the + :ref:`tnsnames.ora ` file which is inside the directory + that can be identified by the attribute + :attr:`~ConnectParams.config_dir`. If a tnsnames.ora file does not + exist, then an exception is raised. """ return self._impl.get_network_service_names() def parse_connect_string(self, connect_string: str) -> None: """ Parses the connect string into its components and stores the - parameters. The connect string could be an Easy Connect string, - name-value pairs or a simple alias which is looked up in tnsnames.ora. - Any parameters found in the connect string override any currently - stored values. + parameters. + + The ``connect string`` parameter can be an Easy Connect string, + name-value pairs, or a simple alias which is looked up in + ``tnsnames.ora``. Parameters that are found in the connect string + override any currently stored values. """ self._impl.parse_connect_string(connect_string) def parse_dsn_with_credentials(self, dsn: str) -> tuple: """ - Parses a dsn in the form /@ or in the + Parses a DSN in the form /@ or in the form / and returns a 3-tuple containing the parsed user, password and connect string. Empty strings are returned as the - value None. This is done automatically when a value is passed to - the dsn parameter but no value is passed to the user password when - creating a standalone connection or connection pool. + value *None*. """ return self._impl.parse_dsn_with_credentials(dsn) @@ -932,8 +933,9 @@ def set( handle: Optional[int] = None, ): """ - All parameters are optional. A brief description of each parameter - follows: + Sets the values for one or more of the parameters of a ConnectParams + object. All parameters are optional. A brief description of each + parameter follows: - user: the name of the user to connect to @@ -1052,7 +1054,7 @@ def set( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the - tuple should be a string. This value is only used in thick mode + tuple should be a string - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick @@ -1135,24 +1137,27 @@ def set( def set_from_config(self, config: dict) -> None: """ - Sets the property values based on the supplied configuration. The - configuration consists of a dictionary with the following keys, all of - which are optional: "connect_descriptor", "user", "password" and "pyo". + Sets the property values based on the specified configuration. This + method is intended for use with Centralized Configuration Providers. + + The ``config`` parameter is a dictionary which consists of the + following optional keys: "connect_descriptor", "user", "password", and + "pyo". - If the "connect_descriptor" key is supplied, it is expected to be a - string, which will be parsed and the properties found within it stored - in the parameters. + If the key "connect_descriptor" is specified, it is expected to be a + string, which will be parsed and the properties found within it are + stored in the ConnectParams instance. 
- If the "user" or "password" keys are supplied, and the parameters do - not already have a user or password, these values will be stored; - otherwise, they will be ignored. The "user" key is expected to be a - string. The "password" key may be a string or it may be a dictionary - containing the keys "type" and "value" which will be used to determine - the actual password. + If the keys "user" or "password" are specified, and the parameters do + not already have a user or password set, these values will be stored; + otherwise, they will be ignored. The key "user" is expected to be a + string. The "key" password may be a string or it may be a dictionary + which will be examined by a :ref:`registered password type handler + ` to determine the actual password. - If the "pyo" key is supplied, it is expected to be a dictionary + If the key "pyo" is specified, it is expected to be a dictionary containing keys corresponding to property names. Any property names - accepted by the parameters will be stored; all other values will be - ignored. + accepted by the ConnectParams class will be stored in the ConnectParams + instance; all other values will be ignored. """ self._impl.set_from_config(config) diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index a9da0ebd..d1c0d1c3 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -51,7 +51,7 @@ from .dataframe import DataFrame from .dbobject import DbObjectType, DbObject from .lob import AsyncLOB, LOB -from .pipeline import Pipeline +from .pipeline import Pipeline, PipelineOpResult from .soda import SodaDatabase from .subscr import Subscription from .utils import normalize_sessionless_transaction_id @@ -491,7 +491,7 @@ def msgproperties( ) -> MessageProperties: """ Returns an object specifying the properties of messages used in - advanced queuing. See :ref:`msgproperties` for more information. + advanced queuing. Each of the parameters are optional. If specified, they act as a shortcut for setting each of the equivalently named properties. @@ -520,10 +520,10 @@ def queue( payload_type: Optional[Union[DbObjectType, str]] = None, *, payloadType: Optional[DbObjectType] = None, - ) -> Queue: + ) -> Union[Queue, AsyncQueue]: """ - Creates a :ref:`queue ` which is used to enqueue and dequeue - messages in Advanced Queuing. + Creates a queue which is used to enqueue and dequeue messages in + Advanced Queuing. The ``name`` parameter is expected to be a string identifying the queue in which messages are to be enqueued or dequeued. @@ -851,27 +851,6 @@ def __init__( ) -> None: """ Constructor for creating a connection to the database. - - The dsn parameter (data source name) can be a string in the format - user/password@connect_string or can simply be the connect string (in - which case authentication credentials such as the username and password - need to be specified separately). See the documentation on connection - strings for more information. - - The pool parameter is expected to be a pool object and the use of this - parameter is the equivalent of calling acquire() on the pool. - - The params parameter is expected to be of type ConnectParams and - contains connection parameters that will be used when establishing the - connection. See the documentation on ConnectParams for more - information. If this parameter is not specified, the additional keyword - parameters will be used to create an instance of ConnectParams. 
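A minimal sketch of supplying connection parameters through a ``ConnectParams`` object, as described above, might be the following; the host, service name, and credentials are placeholders:

.. code-block:: python

    import oracledb

    params = oracledb.ConnectParams(
        host="dbhost.example.com", port=1521, service_name="orclpdb1"
    )

    connection = oracledb.connect(
        user="my_user", password="my_password", params=params
    )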
If both - the params parameter and additional keyword parameters are specified, - the values in the keyword parameters have precedence. Note that if a - dsn is also supplied, then in the python-oracledb Thin mode, the values - of the parameters specified (if any) within the dsn will override the - values passed as additional keyword parameters, which themselves - override the values set in the params parameter object. """ super().__init__() @@ -1872,7 +1851,7 @@ def connect( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string. This value is only used in thick mode (default: None) + should be a string (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode @@ -1967,8 +1946,7 @@ def __init__( kwargs: dict, ) -> None: """ - Constructor for asynchronous connection pool. Not intended to be used - directly but only indirectly through async_connect(). + Constructor for creating an asynchronous connection to the database. """ super().__init__() self._pool = pool @@ -2492,12 +2470,11 @@ async def run_pipeline( self, pipeline: Pipeline, continue_on_error: bool = False, - ) -> list: + ) -> list[PipelineOpResult]: """ - Runs all of the operations in the :ref:`pipeline ` and - returns a list of :ref:`PipelineOpResult Objects - `, each entry corresponding to an operation - executed in the pipeline. + Runs all of the operations in the pipeline and returns a list of + PipelineOpResult, each entry corresponding to an operation executed in + the pipeline. The ``continue_on_error`` parameter determines whether operations should continue to run after an error has occurred. If this parameter @@ -2976,7 +2953,7 @@ def connect_async( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string. This value is only used in thick mode (default: None) + should be a string (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode diff --git a/src/oracledb/constructors.py b/src/oracledb/constructors.py index bad890b9..b2759a87 100644 --- a/src/oracledb/constructors.py +++ b/src/oracledb/constructors.py @@ -29,22 +29,23 @@ # ----------------------------------------------------------------------------- import datetime +from typing import Any from . import errors -# synonyms for the types mandated by the database API -Binary = bytes -Date = datetime.date -Timestamp = datetime.datetime +def Binary(value: Any) -> bytes: + """ + Constructs an object holding a binary (long) string value. + """ + return bytes(value) -def Time(hour: int, minute: int, second: int) -> None: + +def Date(year: int, month: int, day: int) -> datetime.date: """ - Constructor mandated by the database API for creating a time value. Since - Oracle doesn't support time only values, an exception is raised when this - method is called. + Constructs an object holding a date value. 
""" - errors._raise_err(errors.ERR_TIME_NOT_SUPPORTED) + return datetime.date(year, month, day) def DateFromTicks(ticks: float) -> datetime.date: @@ -56,6 +57,15 @@ def DateFromTicks(ticks: float) -> datetime.date: return datetime.date.fromtimestamp(ticks) +def Time(hour: int, minute: int, second: int) -> None: + """ + Constructor mandated by the database API for creating a time value. Since + Oracle doesn't support time only values, an exception is raised when this + method is called. + """ + errors._raise_err(errors.ERR_TIME_NOT_SUPPORTED) + + def TimeFromTicks(ticks: float) -> None: """ Constructor mandated by the database API for creating a time value given @@ -66,6 +76,20 @@ def TimeFromTicks(ticks: float) -> None: errors._raise_err(errors.ERR_TIME_NOT_SUPPORTED) +def Timestamp( + year: int, + month: int, + day: int, + hour: int = 0, + minute: int = 0, + second: int = 0, +) -> datetime.datetime: + """ + Constructs an object holding a time stamp value. + """ + return datetime.datetime(year, month, day, hour, minute, second) + + def TimestampFromTicks(ticks: float) -> datetime.datetime: """ Constructor mandated by the database API for creating a timestamp value diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index e0eea0f0..3b4008d9 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -260,21 +260,12 @@ def close(self) -> None: self._impl = None @property - def connection(self) -> "connection_module.Connection": - """ - This read-only attribute returns a reference to the connection object - on which the cursor was created. - """ - return self._connection - - @property - def description(self) -> FetchInfo: + def description(self) -> Union[list[FetchInfo], None]: """ This read-only attribute contains information about the columns used in - a query. It is a sequence of :ref:`FetchInfo ` objects, - one per column. This attribute will be *None* for statements that are - not SELECT or WITH statements, or if the cursor has not had - :meth:`execute()` invoked yet. + a query. It is a list of FetchInfo objects, one per column. This + attribute will be *None* for statements that are not SELECT or WITH + statements, or if the cursor has not had :meth:`execute()` invoked yet. """ self._verify_open() if self._impl.is_query(self): @@ -761,6 +752,14 @@ def callproc( v.get_value(0) for v in self._impl.bind_vars[: len(parameters)] ] + @property + def connection(self) -> "connection_module.Connection": + """ + This read-only attribute returns a reference to the connection object + on which the cursor was created. + """ + return self._connection + def execute( self, statement: Optional[str], @@ -1076,6 +1075,14 @@ async def callproc( v.get_value(0) for v in self._impl.bind_vars[: len(parameters)] ] + @property + def connection(self) -> "connection_module.AsyncConnection": + """ + This read-only attribute returns a reference to the connection object + on which the cursor was created. + """ + return self._connection + async def execute( self, statement: Optional[str], diff --git a/src/oracledb/dataframe.py b/src/oracledb/dataframe.py index c263073c..aacc766c 100644 --- a/src/oracledb/dataframe.py +++ b/src/oracledb/dataframe.py @@ -29,8 +29,6 @@ # array data to other data frame libraries. # ----------------------------------------------------------------------------- -from typing import List - from .arrow_array import ArrowArray from .arrow_impl import DataFrameImpl from . 
import errors @@ -73,23 +71,24 @@ def __arrow_c_stream__(self, requested_schema=None): raise NotImplementedError("requested_schema") return self._impl.get_stream_capsule() - def column_arrays(self) -> List: + def column_arrays(self) -> list[ArrowArray]: """ - Returns a list of the Arrow arrays corresponding to each column in the - data frame. + Returns a list of ArrowArray objects, each containing a select list + column. """ return self._arrays - def column_names(self) -> List[str]: + def column_names(self) -> list[str]: """ - Returns a list of the names of the columns in the data frame. + Returns a list of the column names in the data frame. """ return [a.name for a in self._arrays] def get_column(self, i: int) -> ArrowArray: """ - Returns a column from the data frame given its zero-based index. If the - index is out of range, an IndexError exception is raised. + Returns an :ref:`ArrowArray ` object for the + column at the given index ``i``. If the index is out of range, an + IndexError exception is raised. """ if i < 0 or i >= self.num_columns(): raise IndexError( @@ -100,8 +99,9 @@ def get_column(self, i: int) -> ArrowArray: def get_column_by_name(self, name: str) -> ArrowArray: """ - Returns a column from the data frame given the name of the column. If - the column name is not found, a KeyError exception is raised. + Returns an :ref:`ArrowArray ` object for the + column with the given name ``name``. If the column name is not found, + a KeyError exception is raised. """ try: return self._arrays_by_name[name] diff --git a/src/oracledb/dbobject.py b/src/oracledb/dbobject.py index 336d6f10..da571c80 100644 --- a/src/oracledb/dbobject.py +++ b/src/oracledb/dbobject.py @@ -86,17 +86,17 @@ def _from_impl(cls, impl): def append(self, element: Any) -> None: """ - Append an element to the collection object. If no elements exist in the - collection, this creates an element at index 0; otherwise, it creates - an element immediately following the highest index available in the - collection. + Appends an element to the collection object. If no elements exist in + the collection, this creates an element at index 0; otherwise, it + creates an element immediately following the highest index available in + the collection. """ self._impl.append(element) def asdict(self) -> dict: """ - Return a dictionary where the collection’s indexes are the keys and the - elements are its values. + Returns a dictionary where the collection’s indexes are the keys and + the elements are its values. """ self._ensure_is_collection() result = {} @@ -108,13 +108,13 @@ def asdict(self) -> dict: def aslist(self) -> list: """ - Return a list of each of the collection’s elements in index order. + Returns a list of each of the collection’s elements in index order. """ return list(self) def copy(self) -> "DbObject": """ - Create a copy of the object and return it. + Creates a copy of the object and returns it. """ copied_impl = self._impl.copy() return DbObject._from_impl(copied_impl) @@ -140,7 +140,7 @@ def exists(self, index: int) -> bool: def extend(self, seq: list) -> None: """ - Append all of the elements in the sequence to the collection. This is + Appends all of the elements in the sequence to the collection. This is the equivalent of performing append() for each element found in the sequence. """ @@ -150,7 +150,7 @@ def extend(self, seq: list) -> None: def first(self) -> int: """ - Return the index of the first element in the collection. If the + Returns the index of the first element in the collection. 
If the collection is empty, None is returned. """ self._ensure_is_collection() @@ -158,7 +158,7 @@ def first(self) -> int: def getelement(self, index: int) -> Any: """ - Return the element at the specified index of the collection. If no + Returns the element at the specified index of the collection. If no element exists at that index, an exception is raised. """ self._ensure_is_collection() @@ -166,7 +166,7 @@ def getelement(self, index: int) -> Any: def last(self) -> int: """ - Return the index of the last element in the collection. If the + Returns the index of the last element in the collection. If the collection is empty, None is returned. """ self._ensure_is_collection() @@ -174,7 +174,7 @@ def last(self) -> int: def next(self, index: int) -> int: """ - Return the index of the next element in the collection following the + Returns the index of the next element in the collection following the specified index. If there are no elements in the collection following the specified index, None is returned. """ @@ -183,7 +183,7 @@ def next(self, index: int) -> int: def prev(self, index: int) -> int: """ - Return the index of the element in the collection preceding the + Returns the index of the element in the collection preceding the specified index. If there are no elements in the collection preceding the specified index, None is returned. """ @@ -192,7 +192,7 @@ def prev(self, index: int) -> int: def setelement(self, index: int, value: Any) -> None: """ - Set the value in the collection at the specified index to the given + Sets the value in the collection at the specified index to the given value. """ self._ensure_is_collection() @@ -200,14 +200,15 @@ def setelement(self, index: int, value: Any) -> None: def size(self) -> int: """ - Return the number of elements in the collection. + Returns the number of elements in the collection. """ self._ensure_is_collection() return self._impl.get_size() def trim(self, num: int) -> None: """ - Remove the specified number of elements from the end of the collection. + Removes the specified number of elements from the end of the + collection. """ self._ensure_is_collection() self._impl.trim(num) @@ -215,7 +216,8 @@ def trim(self, num: int) -> None: @property def type(self) -> "DbObjectType": """ - Returns an ObjectType corresponding to the type of the object. + This read-only attribute returns an ObjectType corresponding to the + type of the object. """ if self._type is None: self._type = DbObjectType._from_impl(self._impl.type) @@ -238,9 +240,10 @@ def _from_impl(cls, impl): @property def max_size(self) -> Union[int, None]: """ - Returns the max size of the attribute (in bytes) for attributes of type + This read-only attribute returns the maximum size (in bytes) of the + attribute when the attribute's type is one of DB_TYPE_RAW, DB_TYPE_CHAR, DB_TYPE_NCHAR, DB_TYPE_VARCHAR and - DB_TYPE_NVARCHAR. + DB_TYPE_NVARCHAR. For all other types, the value returned is None. """ if self._impl.max_size: return self._impl.max_size @@ -255,7 +258,9 @@ def name(self) -> str: @property def precision(self) -> Union[int, None]: """ - Returns the precision of the attribute. + This read-only attribute returns the precision of the attribute when + the attribute's type is DB_TYPE_NUMBER. For all other types, the value + returned is None. """ if self._impl.precision or self._impl.scale: return self._impl.precision @@ -263,7 +268,9 @@ def precision(self) -> Union[int, None]: @property def scale(self) -> Union[int, None]: """ - Returns the scale of the column.
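The ``max_size``, ``precision`` and ``scale`` attribute docstrings above describe metadata that is usually read while introspecting an object type. A short sketch, assuming ``conn`` is an open ``Connection`` and ``UDT_BUILDING`` is a placeholder object type name::

    typ = conn.gettype("UDT_BUILDING")
    for attr in typ.attributes:
        # precision and scale are None unless the attribute is DB_TYPE_NUMBER
        print(attr.name, attr.type, attr.max_size, attr.precision, attr.scale)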
+ This read-only attribute returns the scale of the attribute when the + attribute's type is DB_TYPE_NUMBER. For all other types, the value + returned is None. """ if self._impl.precision or self._impl.scale: return self._impl.scale @@ -286,7 +293,11 @@ def type(self) -> Union["DbObjectType", DbType]: class DbObjectType: __module__ = MODULE_NAME - def __call__(self, value=None): + def __call__(self, value: Sequence = None) -> DbObject: + """ + The object type may be called directly and serves as an alternative way + of calling :meth:`~DbObjectType.newobject()`. + """ return self.newobject(value) def __eq__(self, other): @@ -312,7 +323,7 @@ def _get_full_name(self): return self._impl._get_fqn() @property - def attributes(self) -> list: + def attributes(self) -> list["DbObjectAttr"]: """ This read-only attribute returns a list of the attributes that make up the object type. @@ -356,7 +367,7 @@ def element_type(self) -> Union["DbObjectType", DbType]: def newobject(self, value: Sequence = None) -> DbObject: """ - Return a new Oracle object of the given type. This object can then be + Returns a new Oracle object of the given type. This object can then be modified by setting its attributes and then bound to a cursor for interaction with Oracle. If the object type refers to a collection, a sequence may be passed and the collection will be initialized with the diff --git a/src/oracledb/defaults.py b/src/oracledb/defaults.py index 614dd484..be0e85f5 100644 --- a/src/oracledb/defaults.py +++ b/src/oracledb/defaults.py @@ -47,7 +47,13 @@ def __init__(self) -> None: @property def arraysize(self) -> int: """ - Specifies the default arraysize to use when cursors are created. + This read-write attribute specifies the default arraysize to use when + cursors are created. + + This value is the default for :attr:`Cursor.arraysize` and + :attr:`AsyncCursor.arraysize`. + + This attribute has an initial value of *100*. """ return self._impl.arraysize @@ -58,7 +64,12 @@ def arraysize(self, value: int): @property def config_dir(self) -> str: """ - Specifies the directory to search for tnsnames.ora. + This read-write attribute specifies the directory in which the optional + configuration file ``tnsnames.ora`` will be read in python-oracledb + Thin mode. + + This attribute is used in python-oracledb Thin mode. It is also used in + Thick mode if :attr:`defaults.thick_mode_dsn_passthrough` is *False*. """ return self._impl.config_dir @@ -69,8 +80,19 @@ def config_dir(self, value: str): @property def fetch_lobs(self) -> bool: """ - Specifies whether queries that contain LOBs should return LOB objects - or their contents instead. + This read-write attribute specifies whether queries that contain LOBs + should return LOB objects or their contents instead. + + When the value of this attribute is *True*, then queries to LOB columns + return LOB locators. When the value of this attribute is *False*, then + CLOBs and NCLOBs are fetched as strings, and BLOBs are fetched as + bytes. If LOBs are larger than 1 GB, then this attribute should be set + to *True* and the LOBs should be streamed. + + The value of ``oracledb.defaults.fetch_lobs`` does not affect LOBs + returned as OUT binds. + + This attribute has an initial value of *True*. """ return self._impl.fetch_lobs @@ -81,8 +103,13 @@ def fetch_lobs(self, value: bool): @property def fetch_decimals(self) -> bool: """ - Specifies whether queries that contain numbers should return - decimal.Decimal objects or floating point numbers. 
+ This read-write attribute specifies whether queries that contain + numbers should be fetched as `decimal.Decimal `__ objects or floating point + numbers. This can help avoid issues with converting numbers from Oracle + Database's decimal format to Python's binary format. + + This attribute has an initial value of *False*. """ return self._impl.fetch_decimals @@ -93,8 +120,18 @@ def fetch_decimals(self, value: bool): @property def prefetchrows(self) -> int: """ - Specifies the default number of rows to prefetch when cursors are - executed. + This read-write attribute specifies the default number of rows to + prefetch when cursors are executed. + + This value is the default for :attr:`Cursor.prefetchrows` and + :attr:`AsyncCursor.prefetchrows`. + + This attribute is ignored when using :meth:`Connection.fetch_df_all()` + or :meth:`Connection.fetch_df_batches()` since these methods always set + the internal prefetch size to their relevant ``arraysize`` or ``size`` + parameter value. + + This attribute has an initial value of *2*. """ return self._impl.prefetchrows @@ -105,7 +142,15 @@ def prefetchrows(self, value: int): @property def stmtcachesize(self) -> int: """ - Specifies the default size of the statement cache. + This read-write attribute specifies the default size of the statement + cache. + + This value is the default for :attr:`Connection.stmtcachesize`, + :attr:`ConnectionPool.stmtcachesize`, + :attr:`AsyncConnection.stmtcachesize`, and + :attr:`AsyncConnectionPool.stmtcachesize`. + + This attribute has an initial value of *20*. """ return self._impl.stmtcachesize @@ -116,7 +161,15 @@ def stmtcachesize(self, value: int): @property def program(self) -> str: """ - Specifies the program name connected to the Oracle Database. + This read-write attribute specifies the program name connected to + Oracle Database. This is the value used in the PROGRAM column of the + V$SESSION view. + + This attribute has an initial value that is populated by + `sys.executable `__. + + This attribute is only used in python-oracledb Thin mode. """ return self._impl.program @@ -129,7 +182,14 @@ def program(self, value: str): @property def machine(self) -> str: """ - Specifies the machine name connected to the Oracle Database. + This read-write attribute specifies the machine name of the client + connecting to Oracle Database. This is the value used in the MACHINE + column of the V$SESSION view. + + This attribute takes the host name where the application is running as + its initial value. + + This attribute is only used in python-oracledb Thin mode. """ return self._impl.machine @@ -142,7 +202,13 @@ def machine(self, value: str): @property def terminal(self) -> str: """ - Specifies the terminal identifier from which the connection originates. + This read-write attribute specifies the terminal identifier from which + the connection originates. This is the value used in the TERMINAL + column of the V$SESSION view. + + This attribute has an initial value of *unknown*. + + This attribute is only used in python-oracledb Thin mode. """ return self._impl.terminal @@ -153,8 +219,13 @@ def terminal(self, value: str): @property def osuser(self) -> str: """ - Specifies the os user that initiates the connection to the - Oracle Database. + This read-write attribute specifies the operating system user that + initiates the database connection. This is the value used in the OSUSER + column of the V$SESSION view. + + This attribute takes the login name of the user as its initial value. 
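The ``defaults`` attributes documented in these hunks take effect process-wide and are normally set once, before any connection or pool is created. A brief sketch using only attributes shown above (the values are illustrative)::

    import oracledb

    oracledb.defaults.arraysize = 500        # default for Cursor.arraysize
    oracledb.defaults.fetch_lobs = False     # fetch CLOBs as str and BLOBs as bytes
    oracledb.defaults.fetch_decimals = True  # fetch NUMBER as decimal.Decimal
    oracledb.defaults.program = "my-app"     # reported in V$SESSION.PROGRAM (Thin mode)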
+ + This attribute is only used in python-oracledb Thin mode. """ return self._impl.osuser @@ -167,7 +238,23 @@ def osuser(self, value: str): @property def driver_name(self) -> str: """ - Specifies the driver used for the connection. + This read-write attribute specifies the driver used by the client to + connect to Oracle Database. This is the value used in the CLIENT_DRIVER + column of the V$SESSION_CONNECT_INFO view. + + This attribute has an initial value of *None*. It is used as required + in python-oracledb Thick and Thin mode. + + In python-oracledb Thick mode, this attribute is used if the + ``driver_name`` parameter is not specified in + :meth:`oracledb.init_oracle_client()`. In Thin mode, this attribute is + used if the ``driver_name`` parameter is not specified in + :meth:`oracledb.connect()`, :meth:`oracledb.connect_async()`, + :meth:`oracledb.create_pool()`, or + :meth:`oracledb.create_pool_async()`. If the value of this attribute is + *None*, the value set when connecting in python-oracledb Thick mode is + like "python-oracledb thk : " and in Thin mode is like + "python-oracledb thn : ". """ return self._impl.driver_name @@ -178,8 +265,19 @@ def driver_name(self, value: str): @property def thick_mode_dsn_passthrough(self) -> str: """ - Specifies whether to pass connect strings to the Oracle Client - libraries unchanged when using thick mode. + This read-write attribute determines whether + :ref:`connection strings ` passed as the ``dsn`` parameter to + :meth:`oracledb.connect()`, :meth:`oracledb.create_pool()`, + :meth:`oracledb.connect_async()`, and + :meth:`oracledb.create_pool_async()` in python-oracledb Thick mode will + be parsed by Oracle Client libraries or by python-oracledb itself. + + The value of ``thick_mode_dsn_passthrough`` is ignored in + python-oracledb Thin mode, which always parses all connect strings + (including reading a :ref:`tnsnames.ora ` file, if + required). + + This attribute has an initial value of *True*. """ return self._impl.thick_mode_dsn_passthrough diff --git a/src/oracledb/driver_mode.py b/src/oracledb/driver_mode.py index 42594a9f..e1abd501 100644 --- a/src/oracledb/driver_mode.py +++ b/src/oracledb/driver_mode.py @@ -119,18 +119,21 @@ def get_manager(requested_thin_mode=None): def is_thin_mode() -> bool: """ - Return a boolean specifying whether the driver is using thin mode (True) or - thick mode (False). + Returns a boolean indicating if python-oracledb is in Thin mode. Immediately after python-oracledb is imported, this function will return - True indicating that python-oracledb defaults to Thin mode. If - oracledb.init_oracle_client() is called successfully, then a subsequent - call to is_thin_mode() will return False indicating that Thick mode is - enabled. Once the first standalone connection or connection pool is - created successfully, or a call to oracledb.init_oracle_client() is made - successfully, then python-oracledb's mode is fixed and the value returned - by is_thin_mode() will never change for the lifetime of the process. - + *True* indicating that python-oracledb defaults to Thin mode. If a call to + :func:`oracledb.init_oracle_client()` returns successfully, then a + subsequent call to ``is_thin_mode()`` will return False indicating that + Thick mode is enabled. 
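The mode checks described here can be made both at the module level and on an individual connection. A minimal sketch (the credentials and DSN are placeholders)::

    import oracledb

    print(oracledb.is_thin_mode())      # True until Thick mode is enabled

    conn = oracledb.connect(
        user="hr", password="secret", dsn="localhost/orclpdb1"
    )
    print(conn.thin)                    # mode used by this connection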
Once the first standalone connection or connection + pool is created, or a successful call to ``oracledb.init_oracle_client()`` + is made, or :meth:`oracledb.enable_thin_mode()` is called, then + python-oracledb’s mode is fixed and the value returned by + ``is_thin_mode()`` will never change for the lifetime of the process. + + The attribute :attr:`Connection.thin` can be used to check a connection's + mode. The attribute :attr:`ConnectionPool.thin` can be used to check a + pool's mode. """ if manager.thin_mode is not None: return manager.thin_mode diff --git a/src/oracledb/dsn.py b/src/oracledb/dsn.py index 7404307b..81f48a7e 100644 --- a/src/oracledb/dsn.py +++ b/src/oracledb/dsn.py @@ -52,8 +52,9 @@ def makedsn( super_sharding_key: str = None, ) -> str: """ - Return a string suitable for use as the dsn parameter for connect(). This - string is identical to the strings that are defined in the tnsnames.ora + Returns a string suitable for use as the ``dsn`` parameter for + :meth:`~oracledb.connect()`. This string is identical to the strings that + are defined by the Oracle names server or defined in the ``tnsnames.ora`` file. """ connect_data_parts = [] diff --git a/src/oracledb/fetch_info.py b/src/oracledb/fetch_info.py index c693267b..94f106d8 100644 --- a/src/oracledb/fetch_info.py +++ b/src/oracledb/fetch_info.py @@ -104,15 +104,19 @@ def _from_impl(cls, impl): @property def annotations(self) -> Union[dict, None]: """ - Returns a dictionary of the annotations associated with the column, if - applicable. + This read-only attribute returns a dictionary containing the + `annotations `__ associated with the + fetched column. If there are no annotations, the value *None* is + returned. Annotations require Oracle Database 23ai. If using + python-oracledb Thick mode, Oracle Client 23ai is also required. """ return self._impl.annotations @property def display_size(self) -> Union[int, None]: """ - Returns the display size of the column. + This read-only attribute returns the display size of the column. """ if self._impl.max_size > 0: return self._impl.max_size @@ -141,22 +145,36 @@ def display_size(self) -> Union[int, None]: @property def domain_name(self) -> Union[str, None]: """ - Returns the name of the domain, if applicable. + This read-only attribute returns the name of the `data use case domain + `__ associated with the fetched column. If + there is no data use case domain, the value *None* is returned. `Data + use case domains `__ require Oracle + Database 23ai. If using python-oracledb Thick mode, Oracle Client 23ai + is also required """ return self._impl.domain_name @property def domain_schema(self) -> Union[str, None]: """ - Returns the name of the schema in which the domain is found, if - applicable. + This read-only attribute returns the schema of the `data use case + domain `__ associated with the fetched + column. If there is no data use case domain, the value *None* is + returned. `Data use case domains `__ + require Oracle Database 23ai. If using python-oracledb Thick mode, + Oracle Client 23ai is also required. """ return self._impl.domain_schema @property def internal_size(self) -> Union[int, None]: """ - Returns the size in bytes of the column. + This read-only attribute returns the internal size of the column as + mandated by the Python Database API. """ if self._impl.max_size > 0: return self._impl.buffer_size @@ -164,35 +182,45 @@ def internal_size(self) -> Union[int, None]: @property def is_json(self) -> bool: """ - Returns whether the column contains JSON. 
+ This read-only attribute returns whether the column is known to contain + JSON data. This will be *True* when the type code is + :data:`oracledb.DB_TYPE_JSON` as well as when an "IS JSON" constraint + is enabled on LOB and VARCHAR2 columns. """ return self._impl.is_json @property def is_oson(self) -> bool: """ - Returns whether the column contains OSON encoded bytes. + This read-only attribute returns whether the column is known to contain + binary encoded `OSON `__ data. This + will be *True* when an "IS JSON FORMAT OSON" check constraint is + enabled on BLOB columns. """ return self._impl.is_oson @property def name(self) -> str: """ - Returns the name of the column. + This read-only attribute returns the name of the column as mandated by + the Python Database API. """ return self._impl.name @property def null_ok(self) -> bool: """ - Returns whether nulls or permitted or not in the column. + This read-only attribute returns whether nulls are allowed in the + column as mandated by the Python Database API. """ return self._impl.nulls_allowed @property def precision(self) -> Union[int, None]: """ - Returns the precision of the column. + This read-only attribute returns the precision of the column as + mandated by the Python Database API. """ if self._impl.precision or self._impl.scale: return self._impl.precision @@ -200,7 +228,8 @@ def precision(self) -> Union[int, None]: @property def scale(self) -> Union[int, None]: """ - Returns the scale of the column. + This read-only attribute returns the scale of the column as mandated by + the Python Database API. """ if self._impl.precision or self._impl.scale: return self._impl.scale @@ -208,8 +237,10 @@ def scale(self) -> Union[int, None]: @property def type(self) -> Union[DbType, DbObjectType]: """ - Returns the type of the column, as either a database object type or a - database type. + This read-only attribute returns the type of the column. This will be + an :ref:`Oracle Object Type ` if the column contains + Oracle objects; otherwise, it will be one of the + :ref:`database type constants ` defined at the module level. """ if self._type is None: if self._impl.objtype is not None: @@ -221,16 +252,18 @@ def type(self) -> Union[DbType, DbObjectType]: @property def type_code(self) -> DbType: """ - Returns the type of the column. + This read-only attribute returns the type of the column as mandated by + the Python Database API. The type will be one of the + :ref:`database type constants ` defined at the module level. """ return self._impl.dbtype @property - def vector_dimensions(self) -> [int, None]: + def vector_dimensions(self) -> Union[int, None]: """ - Returns the number of dimensions required by vector columns. If the - column is not a vector column or allows for any number of dimensions, - the value returned is None. + This read-only attribute returns the number of dimensions required by + VECTOR columns. If the column is not a VECTOR column or allows for any + number of dimensions, the value returned is *None*. """ if self._impl.dbtype is DB_TYPE_VECTOR: flags = self._impl.vector_flags @@ -238,11 +271,22 @@ def vector_dimensions(self) -> [int, None]: return self._impl.vector_dimensions @property - def vector_format(self) -> [oracledb.VectorFormat, None]: + def vector_format(self) -> Union[oracledb.VectorFormat, None]: """ - Returns the storage type required by vector columns. If the column is - not a vector column or allows for any type of storage, the value - returned is None. 
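The ``FetchInfo`` attributes in these hunks are normally examined through ``Cursor.description`` after a query has been executed. A small sketch, assuming ``cursor`` is an open cursor and the table name is a placeholder::

    cursor.execute("select employee_id, last_name, salary from employees")
    for info in cursor.description:
        print(info.name, info.type_code, info.precision, info.scale, info.null_ok)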
+ This read-only attribute returns the storage type used by VECTOR + columns. The value of this attribute can be: + + - :data:`oracledb.VECTOR_FORMAT_BINARY` which represents 8-bit unsigned + integers + - :data:`oracledb.VECTOR_FORMAT_INT8` which represents 8-bit signed + integers + - :data:`oracledb.VECTOR_FORMAT_FLOAT32` which represents 32-bit + floating-point numbers + - :data:`oracledb.VECTOR_FORMAT_FLOAT64` which represents 64-bit + floating-point numbers + + If the column is not a VECTOR column or allows for any type of storage, + the value returned is *None*. """ if ( self._impl.dbtype is DB_TYPE_VECTOR @@ -253,8 +297,13 @@ def vector_format(self) -> [oracledb.VectorFormat, None]: @property def vector_is_sparse(self) -> Union[bool, None]: """ - Returns a boolean indicating if the vector is sparse or not. If the - column is not a vector column, the value returned is None. + This read-only attribute returns a boolean indicating if the vector is + sparse or not. + + If the column contains vectors that are SPARSE, the value returned is + *True*. If the column contains vectors that are DENSE, the value + returned is *False*. If the column is not a VECTOR column, the value + returned is *None*. """ if self._impl.dbtype is DB_TYPE_VECTOR: flags = self._impl.vector_flags diff --git a/src/oracledb/lob.py b/src/oracledb/lob.py index 01dbd5a1..97a01a23 100644 --- a/src/oracledb/lob.py +++ b/src/oracledb/lob.py @@ -28,9 +28,9 @@ # Contains the LOB class for managing BLOB, CLOB, NCLOB and BFILE data. # ----------------------------------------------------------------------------- -from typing import Any, Union +from typing import Optional, Union -from .base_impl import DB_TYPE_BFILE, DB_TYPE_BLOB +from .base_impl import DbType, DB_TYPE_BFILE, DB_TYPE_BLOB from . import __name__ as MODULE_NAME from . import errors @@ -51,7 +51,7 @@ def _check_not_bfile(self): def _check_value_to_write(self, value): """ - Check the value to write and return the actual value to write. + Checks the value to write and returns the actual value to write. Character LOBs must write strings but can accept UTF-8 encoded bytes (which will be decoded to strings). Binary LOBs must write bytes but can accept strings (which will be encoded in UTF-8). @@ -78,7 +78,7 @@ def _from_impl(cls, impl): def getfilename(self) -> tuple: """ - Return a two-tuple consisting of the directory alias and file name for + Returns a two-tuple consisting of the directory alias and file name for a BFILE type LOB. """ self._check_is_bfile() @@ -86,15 +86,16 @@ def getfilename(self) -> tuple: def setfilename(self, dir_alias: str, name: str) -> None: """ - Set the directory alias and name of a BFILE type LOB. + Sets the directory alias and name of a BFILE type LOB. """ self._check_is_bfile() self._impl.set_file_name(dir_alias, name) @property - def type(self) -> Any: + def type(self) -> DbType: """ - Returns the type of the LOB as one of the database type constants. + This read-only attribute returns the type of the LOB as one of the + database type constants. """ return self._impl.dbtype @@ -111,7 +112,7 @@ def __str__(self): def close(self) -> None: """ - Close the LOB. Call this when writing is completed so that the indexes + Closes the LOB. Call this when writing is completed so that the indexes associated with the LOB can be updated -– but only if open() was called first. 
""" @@ -119,7 +120,7 @@ def close(self) -> None: def fileexists(self) -> bool: """ - Return a boolean indicating if the file referenced by a BFILE type LOB + Returns a boolean indicating if the file referenced by a BFILE type LOB exists. """ self._check_is_bfile() @@ -127,7 +128,7 @@ def fileexists(self) -> bool: def getchunksize(self) -> int: """ - Return the chunk size for the LOB. Reading and writing to the LOB in + Returns the chunk size for the LOB. Reading and writing to the LOB in chunks of multiples of this size will improve performance. """ self._check_not_bfile() @@ -135,24 +136,26 @@ def getchunksize(self) -> int: def isopen(self) -> bool: """ - Return a boolean indicating if the LOB has been opened using the method - open(). + Returns a boolean indicating if the LOB has been opened using the + method open(). """ return self._impl.get_is_open() def open(self) -> None: """ - Open the LOB for writing. This will improve performance when writing to - the LOB in chunks and there are functional or extensible indexes + Opens the LOB for writing. This will improve performance when writing + to the LOB in chunks and there are functional or extensible indexes associated with the LOB. If this method is not called, each write will perform an open internally followed by a close after the write has been completed. """ self._impl.open() - def read(self, offset: int = 1, amount: int = None) -> Union[str, bytes]: + def read( + self, offset: int = 1, amount: Optional[int] = None + ) -> Union[str, bytes]: """ - Return a portion (or all) of the data in the LOB. Note that the amount + Returns a portion (or all) of the data in the LOB. Note that the amount and offset are in bytes for BLOB and BFILE type LOBs and in UCS-2 code points for CLOB and NCLOB type LOBs. UCS-2 code points are equivalent to characters for all but supplemental characters. If supplemental @@ -173,16 +176,18 @@ def read(self, offset: int = 1, amount: int = None) -> Union[str, bytes]: def size(self) -> int: """ - Returns the size of the data in the LOB. For BLOB and BFILE type LOBs - this is the number of bytes. For CLOB and NCLOB type LOBs this is the + Returns the size of the data in the LOB. For BLOB and BFILE type LOBs, + this is the number of bytes. For CLOB and NCLOB type LOBs, this is the number of UCS-2 code points. UCS-2 code points are equivalent to characters for all but supplemental characters. """ return self._impl.get_size() - def trim(self, new_size: int = 0, *, newSize: int = None) -> None: + def trim( + self, new_size: int = 0, *, newSize: Optional[int] = None + ) -> None: """ - Trim the LOB to the new size (the second parameter is deprecated and + Trims the LOB to the new size (the second parameter is deprecated and should not be used). """ self._check_not_bfile() @@ -198,7 +203,7 @@ def trim(self, new_size: int = 0, *, newSize: int = None) -> None: def write(self, data: Union[str, bytes], offset: int = 1) -> None: """ - Write the data to the LOB at the given offset. The offset is in bytes + Writes the data to the LOB at the given offset. The offset is in bytes for BLOB type LOBs and in UCS-2 code points for CLOB and NCLOB type LOBs. UCS-2 code points are equivalent to characters for all but supplemental characters. If supplemental characters are in the LOB, the @@ -215,7 +220,7 @@ class AsyncLOB(BaseLOB): async def close(self) -> None: """ - Close the LOB. Call this when writing is completed so that the indexes + Closes the LOB. 
Call this when writing is completed so that the indexes associated with the LOB can be updated -– but only if open() was called first. """ @@ -223,7 +228,7 @@ async def close(self) -> None: async def fileexists(self) -> bool: """ - Return a boolean indicating if the file referenced by a BFILE type LOB + Returns a boolean indicating if the file referenced by a BFILE type LOB exists. """ self._check_is_bfile() @@ -231,7 +236,7 @@ async def fileexists(self) -> bool: async def getchunksize(self) -> int: """ - Return the chunk size for the LOB. Reading and writing to the LOB in + Returns the chunk size for the LOB. Reading and writing to the LOB in chunks of multiples of this size will improve performance. """ self._check_not_bfile() @@ -239,15 +244,15 @@ async def getchunksize(self) -> int: async def isopen(self) -> bool: """ - Return a boolean indicating if the LOB has been opened using the method - open(). + Returns a boolean indicating if the LOB has been opened using the + method open(). """ return await self._impl.get_is_open() async def open(self) -> None: """ - Open the LOB for writing. This will improve performance when writing to - the LOB in chunks and there are functional or extensible indexes + Opens the LOB for writing. This will improve performance when writing + to the LOB in chunks and there are functional or extensible indexes associated with the LOB. If this method is not called, each write will perform an open internally followed by a close after the write has been completed. @@ -255,10 +260,10 @@ async def open(self) -> None: await self._impl.open() async def read( - self, offset: int = 1, amount: int = None + self, offset: int = 1, amount: Optional[int] = None ) -> Union[str, bytes]: """ - Return a portion (or all) of the data in the LOB. Note that the amount + Returns a portion (or all) of the data in the LOB. Note that the amount and offset are in bytes for BLOB and BFILE type LOBs and in UCS-2 code points for CLOB and NCLOB type LOBs. UCS-2 code points are equivalent to characters for all but supplemental characters. If supplemental @@ -284,9 +289,11 @@ async def size(self) -> int: """ return await self._impl.get_size() - async def trim(self, new_size: int = 0, *, newSize: int = None) -> None: + async def trim( + self, new_size: int = 0, *, newSize: Optional[int] = None + ) -> None: """ - Trim the LOB to the new size (the second parameter is deprecated and + Trims the LOB to the new size (the second parameter is deprecated and should not be used). """ self._check_not_bfile() @@ -302,7 +309,7 @@ async def trim(self, new_size: int = 0, *, newSize: int = None) -> None: async def write(self, data: Union[str, bytes], offset: int = 1) -> None: """ - Write the data to the LOB at the given offset. The offset is in bytes + Writes the data to the LOB at the given offset. The offset is in bytes for BLOB type LOBs and in UCS-2 code points for CLOB and NCLOB type LOBs. UCS-2 code points are equivalent to characters for all but supplemental characters. If supplemental characters are in the LOB, the diff --git a/src/oracledb/pipeline.py b/src/oracledb/pipeline.py index 613d72d6..4422f86f 100644 --- a/src/oracledb/pipeline.py +++ b/src/oracledb/pipeline.py @@ -28,7 +28,7 @@ # Contains the Pipeline class used for executing multiple operations. # ----------------------------------------------------------------------------- -from typing import Any, Callable, Union +from typing import Any, Callable, Optional, Union from . import __name__ as MODULE_NAME from . 
import utils @@ -61,71 +61,75 @@ def _create_result(self): @property def arraysize(self) -> int: """ - Returns the array size to use when fetching all of the rows in a query. - For all other operations the value returned is 0. + This read-only attribute returns the array size that will be used when + fetching query rows with :meth:`Pipeline.add_fetchall()`. For all other + operations, the value returned is *0*. """ return self._impl.arraysize @property def keyword_parameters(self) -> Any: """ - Returns the keyword parameters to the stored procedure or function - being called by the operation, if applicable. + This read-only attribute returns the keyword parameters to the stored + procedure or function being called by the operation, if applicable. """ return self._impl.keyword_parameters @property def name(self) -> Union[str, None]: """ - Returns the name of the stored procedure or function being called by - the operation, if applicable. + This read-only attribute returns the name of the stored procedure or + function being called by the operation, if applicable. """ return self._impl.name @property def num_rows(self) -> int: """ - Returns the number of rows to fetch when performing a query of a - specific number of rows. For all operations, the value returned is 0. + This read-only attribute returns the number of rows to fetch when + performing a query of a specific number of rows. For all other + operations, the value returned is *0*. """ return self._impl.num_rows @property def op_type(self) -> PipelineOpType: """ - Returns the type of operation that is taking place. + This read-only attribute returns the type of operation that is taking + place. """ return PipelineOpType(self._impl.op_type) @property def parameters(self) -> Any: """ - Returns the parameters to the stored procedure or function or the - parameters bound to the statement being executed by the operation, if - applicable. + This read-only attribute returns the parameters to the stored procedure + or function or the parameters bound to the statement being executed by + the operation, if applicable. """ return self._impl.parameters @property def return_type(self) -> Any: """ - Returns the return type of the stored function being called by the - operation, if applicable. + This read-only attribute returns the return type of the stored function + being called by the operation, if applicable. """ return self._impl.return_type @property def rowfactory(self) -> Union[Callable, None]: """ - Returns the row factory callable function to be used in a query - executed by the operation, if applicable. + This read-only attribute returns the row factory callable function to + be used in a query executed by the operation, if applicable. """ return self._impl.rowfactory @property def statement(self) -> Union[str, None]: """ - Returns the statement being executed by the operation, if applicable. + This read-only attribute returns the statement being executed by the + operation, if applicable. """ return self._impl.statement @@ -141,10 +145,10 @@ def __repr__(self): ) @property - def columns(self) -> Union[list, None]: + def columns(self) -> Union[list[FetchInfo], None]: """ - Returns a list of FetchInfo instances containing metadata about an - executed query, or the value None, if no fetch operation took place. + This read-only attribute is a list of FetchInfo objects. This + attribute will be *None* for operations that do not return rows. 
""" if self._impl.fetch_metadata is not None: return [FetchInfo._from_impl(i) for i in self._impl.fetch_metadata] @@ -152,39 +156,42 @@ def columns(self) -> Union[list, None]: @property def error(self) -> Union[_Error, None]: """ - Returns the error that occurred when running this operation, or the - value None, if no error occurred. + This read-only attribute returns the error that occurred when running + this operation. If no error occurred, then the value *None* is + returned. """ return self._impl.error @property def operation(self) -> PipelineOp: """ - Returns the operation associated with the result. + This read-only attribute returns the PipelineOp operation object that + generated the result. """ return self._operation @property def return_value(self) -> Any: """ - Returns the return value of the called function, if a function was - called for the operation. + This read-only attribute returns the return value of the called PL/SQL + function, if a function was called for the operation. """ return self._impl.return_value @property def rows(self) -> Union[list, None]: """ - Returns the rows that were fetched by the operation, if a query was - executed. + This read-only attribute returns the rows that were fetched by the + operation, if a query was executed. """ return self._impl.rows @property def warning(self) -> Union[_Error, None]: """ - Returns the warning that was encountered when running this operation, - or the value None, if no warning was encountered. + This read-only attribute returns any warning that was encountered when + running this operation. If no warning was encountered, then the value + *None* is returned. """ return self._impl.warning @@ -213,14 +220,19 @@ def add_callfunc( self, name: str, return_type: Any, - parameters: Union[list, tuple] = None, - keyword_parameters: dict = None, + parameters: Optional[Union[list, tuple]] = None, + keyword_parameters: Optional[dict] = None, ) -> PipelineOp: """ - Adds an operation that calls a stored function with the given - parameters and return type. The PipelineOpResult object that is - returned will have the "return_value" attribute populated with the - return value of the function if the call completes successfully. + Adds an operation to the pipeline that calls a stored PL/SQL function + with the given parameters and return type. The created PipelineOp + object is also returned from this function. + + When the Pipeline is executed, the PipelineOpResult object that is + returned for this operation will have the + :attr:`~PipelineOpResult.return_value` attribute populated with the + return value of the PL/SQL function if the call completes + successfully. """ utils.verify_stored_proc_args(parameters, keyword_parameters) op_impl = PipelineOpImpl( @@ -235,12 +247,13 @@ def add_callfunc( def add_callproc( self, name: str, - parameters: Union[list, tuple] = None, - keyword_parameters: dict = None, + parameters: Optional[Union[list, tuple]] = None, + keyword_parameters: Optional[dict] = None, ) -> PipelineOp: """ Adds an operation that calls a stored procedure with the given - parameters. + parameters. The created PipelineOp object is also returned from + this function. 
""" utils.verify_stored_proc_args(parameters, keyword_parameters) op_impl = PipelineOpImpl( @@ -261,10 +274,15 @@ def add_commit(self) -> PipelineOp: def add_execute( self, statement: str, - parameters: Union[list, tuple, dict] = None, + parameters: Optional[Union[list, tuple, dict]] = None, ) -> PipelineOp: """ Adds an operation that executes a statement with the given parameters. + The created PipelineOp object is also returned from this function. + + Do not use this for queries that return rows. Instead use + :meth:`Pipeline.add_fetchall()`, :meth:`Pipeline.add_fetchmany()`, or + :meth:`Pipeline.add_fetchone()`. """ op_impl = PipelineOpImpl( op_type=PipelineOpType.EXECUTE, @@ -275,12 +293,23 @@ def add_execute( def add_executemany( self, - statement: Union[str, None], + statement: str, parameters: Union[list, int], ) -> PipelineOp: """ - Adds an operation that executes a statement multiple times with the - given list of parameters (or number of iterations). + Adds an operation that executes a SQL statement once using all bind + value mappings or sequences found in the sequence parameters. This can + be used to insert, update, or delete multiple rows in a table. It can + also invoke a PL/SQL procedure multiple times. + + The created PipelineOp object is also returned from this function. + + The ``parameters`` parameter can be a list of tuples, where each tuple + item maps to one bind variable placeholder in ``statement``. It can + also be a list of dictionaries, where the keys match the bind variable + placeholder names in ``statement``. If there are no bind values, or + values have previously been bound, the ``parameters`` value can be an + integer specifying the number of iterations. """ op_impl = PipelineOpImpl( op_type=PipelineOpType.EXECUTE_MANY, @@ -292,15 +321,24 @@ def add_executemany( def add_fetchall( self, statement: str, - parameters: Union[list, tuple, dict] = None, - arraysize: int = None, - rowfactory: Callable = None, + parameters: Optional[Union[list, tuple, dict]] = None, + arraysize: Optional[int] = None, + rowfactory: Optional[Callable] = None, ) -> PipelineOp: """ - Adds an operation that executes a query and returns up to the - specified number of rows from the result set. The PipelineOpResult - object that is returned will have the "return_value" attribute - populated with the list of rows returned by the query. + Adds an operation that executes a query and returns all of the rows + from the result set. The created PipelineOp object is also returned + from this function. + + When the Pipeline is executed, the PipelineOpResult object that is + returned for this operation will have the + :attr:`~PipelineOpResult.rows` attribute populated with the list of + rows returned by the query. + + The default value for ``arraysize`` is :attr:`defaults.arraysize`. + + Internally, this operation's :attr:`Cursor.prefetchrows` size is set + to the value of the explicit or default ``arraysize`` parameter value. """ if arraysize is None: arraysize = defaults.arraysize @@ -316,15 +354,30 @@ def add_fetchall( def add_fetchmany( self, statement: str, - parameters: Union[list, tuple, dict] = None, - num_rows: int = None, - rowfactory: Callable = None, + parameters: Optional[Union[list, tuple, dict]] = None, + num_rows: Optional[int] = None, + rowfactory: Optional[Callable] = None, ) -> PipelineOp: """ Adds an operation that executes a query and returns up to the specified - number of rows from the result set. 
The PipelineOpResult object that is - returned will have the "return_value" attribute populated with the list - of rows returned by the query. + number of rows from the result set. The created PipelineOp object is + also returned from this function. + + When the Pipeline is executed, the PipelineOpResult object that is + returned for this operation will have the + :attr:`~PipelineOpResult.rows` attribute populated with the list of + rows returned by the query. + + The default value for ``num_rows`` is the value of + :attr:`defaults.arraysize`. + + Internally, this operation's :attr:`Cursor.prefetchrows` size is set to + the value of the explicit or default ``num_rows`` parameter, allowing + all rows to be fetched in one round-trip. + + Since only one fetch is performed for a query operation, consider + adding a ``FETCH NEXT`` clause to the statement to prevent the + database processing rows that will never be fetched. """ if num_rows is None: num_rows = defaults.arraysize @@ -340,15 +393,26 @@ def add_fetchmany( def add_fetchone( self, statement: str, - parameters: Union[list, tuple, dict] = None, - rowfactory: Callable = None, + parameters: Optional[Union[list, tuple, dict]] = None, + rowfactory: Optional[Callable] = None, ) -> PipelineOp: """ Adds an operation that executes a query and returns the first row of - the result set if one exists (or None, if no rows exist). The - PipelineOpResult object that is returned will have the "return_value" - attribute populated with this row if the query is performed - successfully. + the result set if one exists (or *None*, if no rows exist). The + created PipelineOp object is also returned from this function. + + When the Pipeline is executed, the PipelineOpResult object that is + returned for this operation will have the + :attr:`~PipelineOpResult.rows` attribute populated with this row if the + query is performed successfully. + + Internally, this operation's :attr:`Cursor.prefetchrows` and + :attr:`Cursor.arraysize` sizes will be set to *1*. + + Since only one fetch is performed for a query operation, consider + adding a ``WHERE`` condition or using a ``FETCH NEXT`` clause in the + statement to prevent the database processing rows that will never be + fetched. """ op_impl = PipelineOpImpl( op_type=PipelineOpType.FETCH_ONE, @@ -359,9 +423,10 @@ def add_fetchone( return self._add_op(op_impl) @property - def operations(self) -> list: + def operations(self) -> list[PipelineOp]: """ - Returns the list of operations associated with the pipeline. + This read-only attribute returns the list of operations associated with + the pipeline. """ return self._operations diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index d848d131..b0c68001 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -60,29 +60,7 @@ def __init__( **kwargs, ) -> None: """ - Constructor for creating a connection pool. Connection pooling creates - a pool of available connections to the database, allowing applications - to acquire a connection very quickly. It is of primary use in a server - where connections are requested in rapid succession and used for a - short period of time, for example in a web server. - - The dsn parameter (data source name) can be a string in the format - user/password@connect_string or can simply be the connect string (in - which case authentication credentials such as the username and password - need to be specified separately). See the documentation on connection - strings for more information. 
- - The params parameter is expected to be of type PoolParams and contains - parameters that are used to create the pool. See the documentation on - PoolParams for more information. If this parameter is not specified, - the additional keyword parameters will be used to create an instance of - PoolParams. If both the params parameter and additional keyword - parameters are specified, the values in the keyword parameters have - precedence. Note that if a dsn is also supplied, then in the - python-oracledb Thin mode, the values of the parameters specified - (if any) within the dsn will override the values passed as additional - keyword parameters, which themselves override the values set in the - params parameter object. + Constructor for creating a connection pool. """ if params is None: params_impl = base_impl.PoolParamsImpl() @@ -123,8 +101,8 @@ def _verify_open(self) -> None: @property def busy(self) -> int: """ - Returns the number of connections that have been acquired from the pool - and have not yet been returned to the pool. + This read-only attribute returns the number of connections currently + acquired. """ self._verify_open() return self._impl.get_busy_count() @@ -132,14 +110,27 @@ def busy(self) -> int: @property def dsn(self) -> str: """ - Returns the connection string (TNS entry) of the database to which - connections in the pool have been established. + This read-only attribute returns the TNS entry of the database to which + a connection has been established. """ self._verify_open() return self._impl.dsn @property def getmode(self) -> oracledb.PoolGetMode: + """ + This read-write attribute determines how connections are returned from + the pool. If :data:`~oracledb.POOL_GETMODE_FORCEGET` is specified, a + new connection will be returned even if there are no free connections + in the pool. :data:`~oracledb.POOL_GETMODE_NOWAIT` will raise an + exception if there are no free connections available in the pool. + If :data:`~oracledb.POOL_GETMODE_WAIT` is specified and there are no + free connections in the pool, the caller will wait until a free + connection is available. :data:`~oracledb.POOL_GETMODE_TIMEDWAIT` uses + the value of :data:`~ConnectionPool.wait_timeout` to determine how long + the caller should wait for a connection to become available before + returning an error. + """ self._verify_open() return oracledb.PoolGetMode(self._impl.get_getmode()) @@ -151,8 +142,9 @@ def getmode(self, value: oracledb.PoolGetMode) -> None: @property def homogeneous(self) -> bool: """ - Returns a boolean indicating if the pool is homogeneous or not. If the - pool is not homogeneous, different authentication can be used for each + This read-only boolean attribute indicates whether the pool is + considered :ref:`homogeneous ` or not. If the pool is + not homogeneous, different authentication can be used for each connection acquired from the pool. """ self._verify_open() @@ -161,8 +153,8 @@ def homogeneous(self) -> bool: @property def increment(self) -> int: """ - Returns the number of connections that will be created when additional - connections need to be created to satisfy requests. + This read-only attribute returns the number of connections that will be + established when additional connections need to be created. """ self._verify_open() return self._impl.increment @@ -170,7 +162,8 @@ def increment(self) -> int: @property def max(self) -> int: """ - Returns the maximum number of connections that the pool can control.
+ This read-only attribute returns the maximum number of connections that + the pool can control. """ self._verify_open() return self._impl.max @@ -178,14 +171,15 @@ def max(self) -> int: @property def max_lifetime_session(self) -> int: """ - Returns the maximum length of time (in seconds) that a pooled - connection may exist. Connections that are in use will not be closed. - They become candidates for termination only when they are released back - to the pool and have existed for longer than max_lifetime_session - seconds. Note that termination only occurs when the pool is accessed. A - value of 0 means that there is no maximum length of time that a pooled - connection may exist. This attribute is only available in Oracle - Database 12.1. + This read-write attribute is the maximum length of time (in seconds) + that a pooled connection may exist since first being created. A value + of *0* means there is no limit. Connections become candidates for + termination when they are acquired or released back to the pool, and + have existed for longer than ``max_lifetime_session`` seconds. + Connections that are in active use will not be closed. In + python-oracledb Thick mode, Oracle Client libraries 12.1 or later must + be used and, prior to Oracle Client 21, cleanup only occurs when the + pool is accessed. """ self._verify_open() return self._impl.get_max_lifetime_session() @@ -198,13 +192,13 @@ def max_lifetime_session(self, value: int) -> None: @property def max_sessions_per_shard(self) -> int: """ - Returns the number of sessions that can be created per shard in the - pool. Setting this attribute greater than zero specifies the maximum - number of sessions in the pool that can be used for any given shard in - a sharded database. This lets connections in the pool be balanced - across the shards. A value of zero will not set any maximum number of - sessions for each shard. This attribute is only available in Oracle - Client 18.3 and higher. + This read-write attribute returns the number of sessions that can be + created per shard in the pool. Setting this attribute greater than zero + specifies the maximum number of sessions in the pool that can be used + for any given shard in a sharded database. This lets connections in the + pool be balanced across the shards. A value of *0* will not set any + maximum number of sessions for each shard. This attribute is only + available in Oracle Client 18.3 and higher. """ self._verify_open() return self._impl.get_max_sessions_per_shard() @@ -217,8 +211,9 @@ def max_sessions_per_shard(self, value: int) -> None: @property def min(self) -> int: """ - Returns the minimum number of connections that the pool will control. - These are created when the pool is first created. + This read-only attribute returns the number of connections with which + the connection pool was created and the minimum number of connections + that will be controlled by the connection pool. """ self._verify_open() return self._impl.min @@ -226,8 +221,8 @@ def min(self) -> int: @property def name(self) -> str: """ - Returns the name assigned to the pool by Oracle. This attribute is only - relevant in python-oracledb thick mode. + This read-only attribute returns the name assigned to the pool by + Oracle. """ self._verify_open() return self._impl.name @@ -235,7 +230,8 @@ def name(self) -> str: @property def opened(self) -> int: """ - Returns the number of connections currently opened by the pool. + This read-only attribute returns the number of connections currently + opened by the pool. 
""" self._verify_open() return self._impl.get_open_count() @@ -243,15 +239,15 @@ def opened(self) -> int: @property def ping_interval(self) -> int: """ - Returns the pool ping interval in seconds. When a connection is - acquired from the pool, a check is first made to see how long it - has been since the connection was put into the pool. If - this idle time exceeds ping_interval, then a round-trip ping to the - database is performed. If the connection is unusable, it is discarded - and a different connection is selected to be returned by - SessionPool.acquire(). Setting ping_interval to a negative value - disables pinging. Setting it to 0 forces a ping for every aquire() - and is not recommended. + This read-write integer attribute specifies the pool ping interval in + seconds. When a connection is acquired from the pool, a check is first + made to see how long it has been since the connection was put into the + pool. If this idle time exceeds ``ping_interval``, then a + :ref:`round-trip ` ping to the database is performed. If + the connection is unusable, it is discarded and a different connection + is selected to be returned by :meth:`acquire()`. Setting + ``ping_interval`` to a negative value disables pinging. Setting it to + *0* forces a ping for every :meth:`acquire()` and is not recommended. """ self._verify_open() return self._impl.get_ping_interval() @@ -263,12 +259,13 @@ def ping_interval(self, value: int) -> None: @property def soda_metadata_cache(self) -> bool: """ - Specifies whether the SODA metadata cache is enabled or not. Enabling - the cache significantly improves the performance of methods - SodaDatabase.createCollection() (when not specifying a value for the - metadata parameter) and SodaDatabase.openCollection(). Note that the - cache can become out of date if changes to the metadata of cached - collections are made externally. + This read-write boolean attribute returns whether the SODA metadata + cache is enabled or not. Enabling the cache significantly improves the + performance of methods :meth:`SodaDatabase.createCollection()` (when + not specifying a value for the ``metadata`` parameter) and + :meth:`SodaDatabase.openCollection()`. Note that the cache can become + out of date if changes to the metadata of cached collections are made + externally. """ self._verify_open() return self._impl.get_soda_metadata_cache() @@ -284,10 +281,10 @@ def soda_metadata_cache(self, value: bool) -> None: @property def stmtcachesize(self) -> int: """ - Specifies the size of the statement cache that will be used as the - starting point for any connections that are created by the pool. Once a + This read-write attribute specifies the size of the statement cache + that will be used for connections obtained from the pool. Once a connection is created, that connection’s statement cache size can only - be changed by setting the stmtcachesize attribute on the connection + be changed by setting the ``stmtcachesize`` attribute on the connection itself. """ self._verify_open() @@ -301,8 +298,12 @@ def stmtcachesize(self, value: int) -> None: @property def thin(self) -> bool: """ - Returns a boolean indicating if the pool was created in - python-oracledb's thin mode (True) or thick mode (False). + This read-only attribute returns a boolean which indicates the + python-oracledb mode in which the pool was created. If the value of + this attribute is *True*, it indicates that the pool was created in the + python-oracledb Thin mode. 
If the value of this attribute is *False*, + it indicates that the pool was created in the python-oracledb Thick + mode. """ self._verify_open() return not isinstance(self._impl, thick_impl.ThickPoolImpl) @@ -310,11 +311,12 @@ def thin(self) -> bool: @property def timeout(self) -> int: """ - Specifies the time (in seconds) after which idle connections will be - terminated in order to maintain an optimum number of open connections. - A value of 0 means that no idle connections are terminated. Note that - in thick mode with older Oracle Client libraries termination only - occurs when the pool is accessed. + This read-write attribute specifies the time (in seconds) after which + idle connections will be terminated in order to maintain an optimum + number of open connections. A value of *0* means that no idle + connections are terminated. Note that in python-oracledb Thick mode + with older Oracle Client Libraries, the termination only occurs when + the pool is accessed. """ self._verify_open() return self._impl.get_timeout() @@ -334,7 +336,8 @@ def tnsentry(self) -> str: @property def username(self) -> str: """ - Returns the name of the user which was used to create the pool. + This read-only attribute returns the name of the user which established + the connection to the database. """ self._verify_open() return self._impl.username @@ -342,10 +345,11 @@ def username(self) -> str: @property def wait_timeout(self) -> int: """ - Specifies the time (in milliseconds) that the caller should wait for a - connection to become available in the pool before returning with an - error. This value is only used if the getmode parameter used to create - the pool was POOL_GETMODE_TIMEDWAIT. + This read-write attribute specifies the time (in milliseconds) that the + caller should wait for a connection to become available in the pool + before returning with an error. This value is only used if the + ``getmode`` parameter to :meth:`oracledb.create_pool()` was the value + :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. """ self._verify_open() return self._impl.get_wait_timeout() @@ -389,31 +393,39 @@ def acquire( supershardingkey: Optional[list] = None, ) -> "connection_module.Connection": """ - Acquire a connection from the pool and return it. + Acquires a connection from the session pool and returns a + :ref:`connection object `. - If the pool is homogeneous, the user and password parameters cannot be - specified. If they are, an exception will be raised. + If the pool is :ref:`homogeneous `, the ``user`` and + ``password`` parameters cannot be specified. If they are, an exception + will be raised. - The cclass parameter, if specified, should be a string corresponding to - the connection class for database resident connection pooling (DRCP). + The ``cclass`` parameter, if specified, should be a string + corresponding to the connection class for :ref:`drcp`. - The purity parameter is expected to be one of PURITY_DEFAULT, - PURITY_NEW, or PURITY_SELF. + The ``purity`` parameter is expected to be one of + :data:`~oracledb.PURITY_NEW`, :data:`~oracledb.PURITY_SELF`, or + :data:`~oracledb.PURITY_DEFAULT`. - The tag parameter, if specified, is expected to be a string with - name=value pairs like “k1=v1;k2=v2” and will limit the connections that - can be returned from a pool unless the matchanytag parameter is - set to True. In that case connections with the specified tag will be - preferred over others, but if no such connections are available a - connection with a different tag may be returned instead. 
In any case, - untagged connections will always be returned if no connections with the - specified tag are available. Connections are tagged when they are - released back to the pool. + The ``tag`` parameter, if specified, is expected to be a string with + name=value pairs like "k1=v1;k2=v2" and will limit the connections that + can be returned from a connection pool unless the ``matchanytag`` + parameter is set to *True*. In that case, connections with the + specified tag will be preferred over others, but if no such connections + are available, then a connection with a different tag may be returned + instead. In any case, untagged connections will always be returned if + no connections with the specified tag are available. Connections are + tagged when they are :meth:`released ` back to + the pool. - The shardingkey and supershardingkey parameters, if specified, are - expected to be a sequence of values which will be used to identify the - database shard to connect to. The key values can be strings, numbers, - bytes or dates. + The ``shardingkey`` and ``supershardingkey`` parameters, if specified, + are expected to be a sequence of values which will be used to identify + the database shard to connect to. The key values can be strings, + numbers, bytes, or dates. See :ref:`connsharding`. + + When using the :ref:`connection pool cache `, calling + :meth:`oracledb.connect()` with a ``pool_alias`` parameter is the same + as calling ``pool.acquire()``. """ self._verify_open() @@ -432,11 +444,12 @@ def acquire( def close(self, force: bool = False) -> None: """ - Close the pool now, rather than when the last reference to it is + Closes the pool now, rather than when the last reference to it is released, which makes it unusable for further work. If any connections have been acquired and not released back to the - pool, this method will fail unless the force parameter is set to True. + pool, this method will fail unless the ``force`` parameter is set to + *True*. """ self._verify_open() self._impl.close(force) @@ -446,8 +459,8 @@ def close(self, force: bool = False) -> None: def drop(self, connection: "connection_module.Connection") -> None: """ - Drop the connection from the pool, which is useful if the connection is - no longer usable (such as when the database session is killed). + Drops the connection from the pool which is useful if the connection is + no longer usable (such as when the session is killed). """ self._verify_open() if not isinstance(connection, connection_module.Connection): @@ -457,42 +470,6 @@ def drop(self, connection: "connection_module.Connection") -> None: self._impl.drop(connection._impl) connection._impl = None - def release( - self, - connection: "connection_module.Connection", - tag: Optional[str] = None, - ) -> None: - """ - Release the connection back to the pool now, rather than whenever - __del__ is called. The connection will be unusable from this point - forward; an Error exception will be raised if any operation is - attempted with the connection. Any cursors or LOBs created by the - connection will also be marked unusable and an Error exception will be - raised if any operation is attempted with them. - - Internally, references to the connection are held by cursor objects, - LOB objects, etc. Once all of these references are released, the - connection itself will be released back to the pool automatically. 
- Either control references to these related objects carefully or - explicitly release connections back to the pool in order to ensure - sufficient resources are available. - - If the tag is not None, it is expected to be a string with name=value - pairs like “k1=v1;k2=v2” and will override the value in the property - Connection.tag. If either Connection.tag or the tag parameter are not - None, the connection will be retagged when it is released back to the - pool. - """ - self._verify_open() - if not isinstance(connection, connection_module.Connection): - message = "connection must be an instance of oracledb.Connection" - raise TypeError(message) - connection._verify_connected() - if tag is not None: - connection.tag = tag - self._impl.return_connection(connection._impl) - connection._impl = None - def reconfigure( self, min: Optional[int] = None, @@ -508,41 +485,52 @@ def reconfigure( ping_interval: Optional[int] = None, ) -> None: """ - Reconfigures various parameters of a connection pool. The pool size - can be altered with reconfigure() by passing values for min, max - or increment. The getmode, timeout, wait_timeout, - max_lifetime_session, max_sessions_per_shard, soda_metadata_cache, - stmtcachesize and ping_interval can be set directly or by using - reconfigure(). All parameters are optional. Unspecified parameters - will leave those pool attributes unchanged. The parameters are - processed in two stages. After any size change has been processed, - reconfiguration on the other parameters is done sequentially. If - an error such as an invalid value occurs when changing one attribute, - then an exception will be generated but any already changed - attributes will retain their new values. - - During reconfiguration of a pool's size, the behavior of acquire() - depends on the getmode in effect when acquire() is called: - - * With mode POOL_GETMODE_FORCEGET, an acquire() call will wait until - the pool has been reconfigured. - - * With mode POOL_GETMODE__TIMEDWAIT, an acquire() call will try to - acquire a connection in the time specified by pool.wait_timeout and - return an error if the time taken exceeds that value. - - * With mode POOL_GETMODE_WAIT, an acquire() call will wait until after - the pool has been reconfigured and a connection is available. - - * With mode POOL_GETMODE_NOWAIT, if the number of busy connections is - less than the pool size, acquire() will return a new connection - after pool reconfiguration is complete. - - Closing connections with pool.release() or connection.close() will - wait until any pool size reconfiguration is complete. - - Closing the connection pool with pool.close() will wait until + Reconfigures various parameters of a connection pool. The pool size can + be altered with ``reconfigure()`` by passing values for + :data:`~ConnectionPool.min`, :data:`~ConnectionPool.max` or + :data:`~ConnectionPool.increment`. The + :data:`~ConnectionPool.getmode`, :data:`~ConnectionPool.timeout`, + :data:`~ConnectionPool.wait_timeout`, + :data:`~ConnectionPool.max_lifetime_session`, + :data:`~ConnectionPool.max_sessions_per_shard`, + :data:`~ConnectionPool.soda_metadata_cache`, + :data:`~ConnectionPool.stmtcachesize` and + :data:`~ConnectionPool.ping_interval` attributes can be set directly or + with ``reconfigure()``. + + All parameters are optional. Unspecified parameters will leave those + pool attributes unchanged. The parameters are processed in two stages. 
+ After any size change has been processed, reconfiguration on the other + parameters is done sequentially. If an error such as an invalid value + occurs when changing one attribute, then an exception will be generated + but any already changed attributes will retain their new values. + + During reconfiguration of a pool's size, the behavior of + :meth:`ConnectionPool.acquire()` depends on the ``getmode`` in effect + when ``acquire()`` is called: + + * With mode :data:`~oracledb.POOL_GETMODE_FORCEGET`, an ``acquire()`` + call will wait until the pool has been reconfigured. + + * With mode :data:`~oracledb.POOL_GETMODE_TIMEDWAIT`, an ``acquire()`` + call will try to acquire a connection in the time specified by + pool.wait_timeout and return an error if the time taken exceeds that + value. + + * With mode :data:`~oracledb.POOL_GETMODE_WAIT`, an ``acquire()`` call + will wait until after the pool has been reconfigured and a connection + is available. + + * With mode :data:`~oracledb.POOL_GETMODE_NOWAIT`, if the number of + busy connections is less than the pool size, ``acquire()`` will + return a new connection after pool reconfiguration is complete. + + Closing connections with :meth:`ConnectionPool.release()` or + :meth:`Connection.close()` will wait until any pool size reconfiguration is complete. + + Closing the connection pool with :meth:`ConnectionPool.close()` will + wait until reconfiguration is complete. """ if min is None: @@ -570,6 +558,42 @@ def reconfigure( if ping_interval is not None: self.ping_interval = ping_interval + def release( + self, + connection: "connection_module.Connection", + tag: Optional[str] = None, + ) -> None: + """ + Releases the connection back to the pool now, rather than whenever + __del__ is called. The connection will be unusable from this point + forward; an Error exception will be raised if any operation is + attempted with the connection. Any cursors or LOBs created by the + connection will also be marked unusable and an Error exception will be + raised if any operation is attempted with them. + + Internally, references to the connection are held by cursor objects, + LOB objects, etc. Once all of these references are released, the + connection itself will be released back to the pool automatically. + Either control references to these related objects carefully or + explicitly release connections back to the pool in order to ensure + sufficient resources are available. + + If the tag is not *None*, it is expected to be a string with name=value + pairs like "k1=v1;k2=v2" and will override the value in the property + :attr:`Connection.tag`. If either :attr:`Connection.tag` or the tag + parameter are not *None*, the connection will be retagged when it is + released back to the pool. + """ + self._verify_open() + if not isinstance(connection, connection_module.Connection): + message = "connection must be an instance of oracledb.Connection" + raise TypeError(message) + connection._verify_connected() + if tag is not None: + connection.tag = tag + self._impl.return_connection(connection._impl) + connection._impl = None + def _pool_factory( f: Callable[..., ConnectionPool], @@ -888,7 +912,7 @@ def create_pool( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string. 
This value is only used in thick mode (default: None) + should be a string (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode @@ -998,31 +1022,22 @@ def acquire( supershardingkey: Optional[list] = None, ) -> "connection_module.AsyncConnection": """ - Acquire a connection from the pool and return it. - - If the pool is homogeneous, the user and password parameters cannot be - specified. If they are, an exception will be raised. + Acquires a connection from the pool and returns an :ref:`asynchronous + connection object `. - The cclass parameter, if specified, should be a string corresponding to - the connection class for database resident connection pooling (DRCP). + If the pool is :ref:`homogeneous `, the ``user`` and + ``password`` parameters cannot be specified. If they are, an exception + will be raised. - The purity parameter is expected to be one of PURITY_DEFAULT, - PURITY_NEW, or PURITY_SELF. + The ``cclass`` parameter, if specified, should be a string + corresponding to the connection class for :ref:`drcp`. - The tag parameter, if specified, is expected to be a string with - name=value pairs like “k1=v1;k2=v2” and will limit the connections that - can be returned from a pool unless the matchanytag parameter is - set to True. In that case connections with the specified tag will be - preferred over others, but if no such connections are available a - connection with a different tag may be returned instead. In any case, - untagged connections will always be returned if no connections with the - specified tag are available. Connections are tagged when they are - released back to the pool. + The ``purity`` parameter is expected to be one of + :data:`~oracledb.PURITY_NEW`, :data:`~oracledb.PURITY_SELF`, or + :data:`~oracledb.PURITY_DEFAULT`. - The shardingkey and supershardingkey parameters, if specified, are - expected to be a sequence of values which will be used to identify the - database shard to connect to. The key values can be strings, numbers, - bytes or dates. + The ``tag``, ``matchanytag``, ``shardingkey``, and ``supershardingkey`` + parameters are ignored in python-oracledb Thin mode. """ self._verify_open() @@ -1041,11 +1056,12 @@ def acquire( async def close(self, force: bool = False) -> None: """ - Close the pool now, rather than when the last reference to it is + Closes the pool now, rather than when the last reference to it is released, which makes it unusable for further work. If any connections have been acquired and not released back to the - pool, this method will fail unless the force parameter is set to True. + pool, this method will fail unless the ``force`` parameter is set to + *True*. """ self._verify_open() await self._impl.close(force) @@ -1055,8 +1071,8 @@ async def close(self, force: bool = False) -> None: async def drop(self, connection: "connection_module.Connection") -> None: """ - Drop the connection from the pool, which is useful if the connection is - no longer usable (such as when the database session is killed). + Drops the connection from the pool which is useful if the connection is + no longer usable (such as when the session is killed). """ self._verify_open() if not isinstance(connection, connection_module.AsyncConnection): @@ -1074,25 +1090,13 @@ async def release( tag: Optional[str] = None, ) -> None: """ - Release the connection back to the pool now, rather than whenever - __del__ is called. 
The connection will be unusable from this point - forward; an Error exception will be raised if any operation is - attempted with the connection. Any cursors or LOBs created by the - connection will also be marked unusable and an Error exception will be - raised if any operation is attempted with them. + Releases the connection back to the pool now. The connection will be + unusable from this point forward. An Error exception will be raised if + any operation is attempted with the connection. Any cursors or LOBs + created by the connection will also be marked unusable and an Error + exception will be raised if any operation is attempted with them. - Internally, references to the connection are held by cursor objects, - LOB objects, etc. Once all of these references are released, the - connection itself will be released back to the pool automatically. - Either control references to these related objects carefully or - explicitly release connections back to the pool in order to ensure - sufficient resources are available. - - If the tag is not None, it is expected to be a string with name=value - pairs like “k1=v1;k2=v2” and will override the value in the property - Connection.tag. If either Connection.tag or the tag parameter are not - None, the connection will be retagged when it is released back to the - pool. + The ``tag`` parameter is ignored in python-oracledb Thin mode. """ self._verify_open() if not isinstance(connection, connection_module.AsyncConnection): @@ -1425,7 +1429,7 @@ def create_pool_async( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string. This value is only used in thick mode (default: None) + should be a string (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode @@ -1545,8 +1549,11 @@ def get_pool( pool_alias: str, ) -> Union[ConnectionPool, AsyncConnectionPool, None]: """ - Returns the connection pool with the given alias from the python-oracledb - connection pool cache. If a pool with that alias does not exist, the value - "None" will be returned. + Returns a :ref:`ConnectionPool object ` from the python-oracledb + pool cache. The pool must have been previously created by passing the same + ``pool_alias`` value to :meth:`oracledb.create_pool()` or + :meth:`oracledb.create_pool_async()`. + + If a pool with the given name does not exist, *None* is returned. """ return named_pools.pools.get(pool_alias) diff --git a/src/oracledb/pool_params.py b/src/oracledb/pool_params.py index a73323b9..28214f60 100644 --- a/src/oracledb/pool_params.py +++ b/src/oracledb/pool_params.py @@ -308,8 +308,7 @@ def __init__( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the - tuple should be a string. This value is only used in thick mode - (default: None) + tuple should be a string (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick @@ -835,7 +834,7 @@ def set( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the - tuple should be a string. 
This value is only used in thick mode + tuple should be a string - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick diff --git a/src/oracledb/soda.py b/src/oracledb/soda.py index ef589591..1c5bffb2 100644 --- a/src/oracledb/soda.py +++ b/src/oracledb/soda.py @@ -29,7 +29,7 @@ # SodaDatabase, SodaCollection, SodaDocument, SodaDocCursor and SodaOperation. # ----------------------------------------------------------------------------- -from typing import Any, Union, List +from typing import Any, Optional, Union import json from . import errors @@ -66,7 +66,7 @@ def _create_doc_impl( def createCollection( self, name: str, - metadata: Union[str, dict] = None, + metadata: Optional[Union[str, dict]] = None, mapMode: bool = False, ) -> "SodaCollection": """ @@ -75,7 +75,7 @@ def createCollection( with the same name and metadata already exists, then that existing collection is opened without error. - If metadata is specified, it is expected to be a string containing + If ``metadata`` is specified, it is expected to be a string containing valid JSON or a dictionary that will be transformed into a JSON string. This JSON permits you to specify the configuration of the collection including storage options; specifying the presence or absence of @@ -84,10 +84,10 @@ def createCollection( key and version generation. The default metadata creates a collection that only supports JSON documents and uses system generated keys. - If the mapMode parameter is set to True, the new collection is mapped - to an existing table instead of creating a table. If a collection is - created in this way, dropping the collection will not drop the existing - table either. + If the ``mapMode`` parameter is set to *True*, the new collection is + mapped to an existing table instead of creating a table. If a + collection is created in this way, dropping the collection will not + drop the existing table either. """ if metadata is not None and not isinstance(metadata, str): metadata = json.dumps(metadata) @@ -97,46 +97,47 @@ def createCollection( def createDocument( self, content: Any, - key: str = None, + key: Optional[str] = None, mediaType: str = "application/json", ) -> "SodaDocument": """ Creates a SODA document usable for SODA write operations. You only need to use this method if your collection requires client-assigned keys or has non-JSON content; otherwise, you can pass your content directly to - SODA write operations. SodaDocument attributes "createdOn", - "lastModified" and "version" will be None. + SODA write operations. SodaDocument attributes + :attr:`~SodaDoc.createdOn`, :attr:`~SodaDoc.lastModified`, and + :attr:`~SodaDoc.version` will be *None*. - The content parameter can be a dictionary or list which will be + The ``content`` parameter can be a dictionary or list which will be transformed into a JSON string and then UTF-8 encoded. It can also be a string which will be UTF-8 encoded or it can be a bytes object which will be stored unchanged. If a bytes object is provided and the content - is expected to be JSON, note that SODA only supports UTF-8, UTF-16LE + is expected to be JSON, note that SODA only supports UTF-8, UTF-16LE, and UTF-16BE encodings. - The key parameter should only be supplied if the collection in which - the document is to be placed requires client-assigned keys. + The ``key`` parameter should only be supplied if the collection in + which the document is to be placed requires client-assigned keys. 
- The mediaType parameter should only be supplied if the collection in - which the document is to be placed supports non-JSON documents and the - content for this document is non-JSON. Using a standard MIME type for - this value is recommended but any string will be accepted. + The ``mediaType`` parameter should only be supplied if the collection + in which the document is to be placed supports non-JSON documents and + the content for this document is non-JSON. Using a standard MIME type + for this value is recommended but any string will be accepted. """ doc_impl = self._create_doc_impl(content, key, mediaType) return SodaDocument._from_impl(doc_impl) def getCollectionNames( - self, startName: str = None, limit: int = 0 - ) -> List[str]: + self, startName: Optional[str] = None, limit: int = 0 + ) -> list[str]: """ Returns a list of the names of collections in the database that match the criteria, in alphabetical order. - If the startName parameter is specified, the list of names returned + If the ``startName`` parameter is specified, the list of names returned will start with this value and also contain any names that fall after this value in alphabetical order. - If the limit parameter is specified and is non-zero, the number of + If the ``limit`` parameter is specified and is non-zero, the number of collection names returned will be limited to this value. """ return self._impl.get_collection_names(startName, limit) @@ -144,8 +145,8 @@ def getCollectionNames( def openCollection(self, name: str) -> "SodaCollection": """ Opens an existing collection with the given name and returns a new SODA - collection object. If a collection with that name does not exist, None - is returned. + collection object. If a collection with that name does not exist, + *None* is returned. """ collection_impl = self._impl.open_collection(name) if collection_impl is not None: @@ -167,8 +168,10 @@ def _process_doc_arg(self, arg): def createIndex(self, spec: Union[dict, str]) -> None: """ - Creates an index on a SODA collection. The spec is expected to be a - dictionary or a JSON-encoded string. + Creates an index on a SODA collection. + + The ``spec`` parameter is expected to be a dictionary or a JSON-encoded + string. Note that a commit should be performed before attempting to create an index. @@ -182,8 +185,8 @@ def createIndex(self, spec: Union[dict, str]) -> None: def drop(self) -> bool: """ Drops the collection from the database, if it exists. Note that if the - collection was created with mapMode set to True the underlying table - will not be dropped. + collection was created with ``mapMode`` set to *True*, the underlying + table will not be dropped. A boolean value is returned indicating if the collection was actually dropped. @@ -194,10 +197,10 @@ def dropIndex(self, name: str, force: bool = False) -> bool: """ Drops the index with the specified name, if it exists. - The force parameter, if set to True, can be used to force the dropping - of an index that the underlying Oracle Database domain index doesn’t - normally permit. This is only applicable to spatial and JSON search - indexes. See here for more information. + The force parameter, if set to *True*, can be used to force the + dropping of an index that the underlying Oracle Database domain index + does not normally permit. This is only applicable to spatial and JSON + search indexes. A boolean value is returned indicating if the index was actually dropped. 
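For orientation while reviewing these SODA docstring changes, the following minimal sketch shows how the collection methods documented in this hunk (createCollection(), createIndex(), getCollectionNames(), openCollection(), dropIndex(), drop()) fit together. It is an illustration only, not part of the patch: the credentials, DSN, collection name, and index specification are placeholder assumptions, and SODA requires python-oracledb Thick mode.

import oracledb

# SODA is only available in Thick mode, so load the Oracle Client libraries.
oracledb.init_oracle_client()

# Placeholder connection details for the sketch.
connection = oracledb.connect(
    user="hr", password="hr_password", dsn="localhost/orclpdb"
)

soda = connection.getSodaDatabase()

# Creates the collection, or opens it if it already exists with the same metadata.
collection = soda.createCollection("mycollection")

# A commit should be performed before attempting to create an index.
connection.commit()
collection.createIndex(
    {"name": "NAME_IDX", "fields": [{"path": "name", "datatype": "string"}]}
)

print(soda.getCollectionNames())            # names in alphabetical order
existing = soda.openCollection("mycollection")  # returns None if it does not exist

collection.dropIndex("NAME_IDX")            # returns True if the index was dropped
collection.drop()                           # returns True if the collection was dropped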
@@ -206,21 +209,21 @@ def dropIndex(self, name: str, force: bool = False) -> bool: def find(self) -> "SodaOperation": """ - This method is used to begin an operation that will act upon documents - in the collection. It creates and returns a SodaOperation object which - is used to specify the criteria and the operation that will be - performed on the documents that match that criteria. + Begins an operation that will act upon documents in the collection. It + creates and returns a SodaOperation object which is used to specify the + criteria and the operation that will be performed on the documents that + match that criteria. """ return SodaOperation(self) def getDataGuide(self) -> "SodaDocument": """ - Returns a SODA document object containing property names, data types + Returns a SODA document object containing property names, data types, and lengths inferred from the JSON documents in the collection. It can be useful for exploring the schema of a collection. Note that this method is only supported for JSON-only collections where a JSON search index has been created with the ‘dataguide’ option enabled. If there - are no documents in the collection, None is returned. + are no documents in the collection, *None* is returned. """ doc_impl = self._impl.get_data_guide() if doc_impl is not None: @@ -231,24 +234,31 @@ def insertMany(self, docs: list) -> None: Inserts a list of documents into the collection at one time. Each of the input documents can be a dictionary or list or an existing SODA document object. + + This method requires Oracle Client 18.5 (or later) and is available + only as a preview. """ doc_impls = [self._process_doc_arg(d) for d in docs] self._impl.insert_many(doc_impls, hint=None, return_docs=False) - def insertManyAndGet(self, docs: list, hint: str = None) -> list: + def insertManyAndGet( + self, docs: list, hint: Optional[str] = None + ) -> list["SodaDocument"]: """ - Similarly to insertMany() this method inserts a list of documents into - the collection at one time. The only difference is that it returns a - list of SODA Document objects. Note that for performance reasons the - returned documents do not contain the content. + Similar to :meth:`SodaCollection.insertMany()`, this method inserts a + list of documents into the collection at one time. The only difference + is that it returns a list of SODA Document objects. Note that for + performance reasons the returned documents do not contain the content. - The hint parameter, if specified, supplies a hint to the database when - processing the SODA operation. This is expected to be a string in the - same format as SQL hints but without any comment characters, for + The ``hint`` parameter, if specified, supplies a hint to the database + when processing the SODA operation. This is expected to be a string in + the same format as SQL hints but without any comment characters, for example hint="MONITOR". While you could use this to pass any SQL hint, the hints MONITOR (turn on monitoring) and NO_MONITOR (turn off - monitoring) are the most useful. Use of the hint parameter requires - Oracle Client 21.3 or higher (or Oracle Client 19 from 19.11). + monitoring) are the most useful. Use of the ``hint`` parameter requires + Oracle Client 21.3 or later (or Oracle Client 19 from 19.11). + + This method requires Oracle Client 18.5 (or later). 
""" doc_impls = [self._process_doc_arg(d) for d in docs] if hint is not None and not isinstance(hint, str): @@ -266,20 +276,22 @@ def insertOne(self, doc: Any) -> None: doc_impl = self._process_doc_arg(doc) self._impl.insert_one(doc_impl, hint=None, return_doc=False) - def insertOneAndGet(self, doc: Any, hint: str = None) -> "SodaDocument": + def insertOneAndGet( + self, doc: Any, hint: Optional[str] = None + ) -> "SodaDocument": """ - Similarly to insertOne() this method inserts a given document into the - collection. The only difference is that it returns a SODA Document - object. Note that for performance reasons the returned document does - not contain the content. + Similar to :meth:`~SodaCollection.insertOne()`, this method inserts a + given document into the collection. The only difference is that it + returns a SODA Document object. Note that for performance reasons the + returned document does not contain the content. - The hint parameter, if specified, supplies a hint to the database when - processing the SODA operation. This is expected to be a string in the - same format as SQL hints but without any comment characters, for + The ``hint`` parameter, if specified, supplies a hint to the database + when processing the SODA operation. This is expected to be a string in + the same format as SQL hints but without any comment characters, for example hint="MONITOR". While you could use this to pass any SQL hint, the hints MONITOR (turn on monitoring) and NO_MONITOR (turn off - monitoring) are the most useful. Use of the hint parameter requires - Oracle Client 21.3 or higher (or Oracle Client 19 from 19.11). + monitoring) are the most useful. Use of the ``hint`` parameter requires + Oracle Client 21.3 or later (or Oracle Client 19 from 19.11). """ doc_impl = self._process_doc_arg(doc) if hint is not None and not isinstance(hint, str): @@ -291,7 +303,11 @@ def insertOneAndGet(self, doc: Any, hint: str = None) -> "SodaDocument": def listIndexes(self) -> list: """ - Return a list of indexes associated with the collection. + Returns a list of specifications for the indexes found on the + collection. + + This method requires Oracle Client 21.3 or later (or Oracle Client 19 + from 19.13). """ return [json.loads(s) for s in self._impl.list_indexes()] @@ -313,27 +329,36 @@ def name(self) -> str: def save(self, doc: Any) -> None: """ Saves a document into the collection. This method is equivalent to - insertOne() except that if client-assigned keys are used, and the - document with the specified key already exists in the collection, it - will be replaced with the input document. + :meth:`~SodaCollection.insertOne()` except that if client-assigned keys + are used, and the document with the specified key already exists in the + collection, it will be replaced with the input document. + + This method requires Oracle Client 19.9 (or later) in addition to the + usual SODA requirements. """ doc_impl = self._process_doc_arg(doc) self._impl.save(doc_impl, hint=None, return_doc=False) - def saveAndGet(self, doc: Any, hint: str = None) -> "SodaDocument": + def saveAndGet( + self, doc: Any, hint: Optional[str] = None + ) -> "SodaDocument": """ Saves a document into the collection. This method is equivalent to - insertOneAndGet() except that if client-assigned keys are used, and the - document with the specified key already exists in the collection, it - will be replaced with the input document. - - The hint parameter, if specified, supplies a hint to the database when - processing the SODA operation. 
This is expected to be a string in the - same format as SQL hints but without any comment characters, for + :meth:`~SodaCollection.insertOneAndGet()` except that if + client-assigned keys are used, and the document with the specified key + already exists in the collection, it will be replaced with the input + document. + + The ``hint`` parameter, if specified, supplies a hint to the database + when processing the SODA operation. This is expected to be a string in + the same format as SQL hints but without any comment characters, for example hint="MONITOR". While you could use this to pass any SQL hint, the hints MONITOR (turn on monitoring) and NO_MONITOR (turn off - monitoring) are the most useful. Use of the hint parameter requires - Oracle Client 21.3 or higher (or Oracle Client 19 from 19.11). + monitoring) are the most useful. Use of the ``hint`` parameter requires + Oracle Client 21.3 or later (or Oracle Client 19 from 19.11). + + This method requires Oracle Client 19.9 (or later) in addition to the + usual SODA requirements. """ doc_impl = self._process_doc_arg(doc) if hint is not None and not isinstance(hint, str): @@ -360,9 +385,9 @@ def _from_impl(cls, impl): def createdOn(self) -> str: """ This read-only attribute returns the creation time of the document in - ISO 8601 format. Documents created by SodaDatabase.createDocument() or - fetched from collections where this attribute is not stored will return - None. + ISO 8601 format. Documents created by + :meth:`SodaDatabase.createDocument()` or fetched from collections where + this attribute is not stored will return *None*. """ return self._impl.get_created_on() @@ -371,7 +396,7 @@ def getContent(self) -> Union[dict, list]: Returns the content of the document as a dictionary or list. This method assumes that the content is application/json and will raise an exception if this is not the case. If there is no content, however, - None will be returned. + *None* will be returned. """ content, encoding = self._impl.get_content() if isinstance(content, bytes) and self.mediaType == "application/json": @@ -381,7 +406,7 @@ def getContent(self) -> Union[dict, list]: def getContentAsBytes(self) -> bytes: """ Returns the content of the document as a bytes object. If there is no - content, however, None will be returned. + content, however, *None* will be returned. """ content, encoding = self._impl.get_content() if isinstance(content, bytes): @@ -393,7 +418,7 @@ def getContentAsString(self) -> str: """ Returns the content of the document as a string. If the document encoding is not known, UTF-8 will be used. If there is no content, - however, None will be returned. + however, *None* will be returned. """ content, encoding = self._impl.get_content() if isinstance(content, bytes): @@ -405,8 +430,8 @@ def getContentAsString(self) -> str: def key(self) -> str: """ This read-only attribute returns the unique key assigned to this - document. Documents created by SodaDatabase.createDocument() may not - have a value assigned to them and return None. + document. Documents created by :meth:`SodaDatabase.createDocument()` + may not have a value assigned to them and return *None*. """ return self._impl.get_key() @@ -414,9 +439,9 @@ def key(self) -> str: def lastModified(self) -> str: """ This read-only attribute returns the last modified time of the document - in ISO 8601 format. Documents created by SodaDatabase.createDocument() - or fetched from collections where this attribute is not stored will - return None. + in ISO 8601 format. 
Documents created by + :meth:`SodaDatabase.createDocument()` or fetched from collections where + this attribute is not stored will return *None*. """ return self._impl.get_last_modified() @@ -426,9 +451,9 @@ def mediaType(self) -> str: This read-only attribute returns the media type assigned to the document. By convention this is expected to be a MIME type but no checks are performed on this value. If a value is not specified when - calling SodaDatabase.createDocument() or the document is fetched from a - collection where this component is not stored, the string - “application/json” is returned. + calling :meth:`SodaDatabase.createDocument()` or the document is + fetched from a collection where this component is not stored, the + string “application/json” is returned. """ return self._impl.get_media_type() @@ -436,8 +461,8 @@ def mediaType(self) -> str: def version(self) -> str: """ This read-only attribute returns the version assigned to this document. - Documents created by SodaDatabase.createDocument() or fetched from - collections where this attribute is not stored will return None. + Documents created by :meth:`SodaDatabase.createDocument()` or fetched + from collections where this attribute is not stored will return *None*. """ return self._impl.get_version() @@ -462,7 +487,7 @@ def _from_impl(cls, impl): def close(self) -> None: """ - Close the cursor now, rather than whenever __del__ is called. The + Closes the cursor now, rather than whenever __del__ is called. The cursor will be unusable from this point forward; an Error exception will be raised if any operation is attempted with the cursor. """ @@ -488,7 +513,8 @@ def __init__(self, collection: SodaCollection) -> None: def count(self) -> int: """ Returns a count of the number of documents in the collection that match - the criteria. If skip() or limit() were called on this object, an + the criteria. If :meth:`~SodaOperation.skip()` or + :meth:`~SodaOperation.limit()` were called on this object, an exception is raised. """ return self._collection._impl.get_count(self) @@ -496,13 +522,18 @@ def count(self) -> int: def fetchArraySize(self, value: int) -> "SodaOperation": """ This is a tuning method to specify the number of documents that are - internally fetched in batches by calls to getCursor() and - getDocuments(). It does not affect how many documents are returned to - the application. A value of 0 will use the default value (100). This - method is only available in Oracle Client 19.5 and higher. + internally fetched in batches by calls to + :meth:`~SodaOperation.getCursor()` and + :meth:`~SodaOperation.getDocuments()`. It does not affect how many + documents are returned to the application. + + If ``fetchArraySize()`` is not used, or the ``value`` parameter is *0*, + the array size will default to *100*. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. + + This method is only available when using Oracle Client 19.5, or later. """ if not isinstance(value, int) or value < 0: raise TypeError("expecting integer >= 0") @@ -517,8 +548,7 @@ def filter(self, value: Union[dict, str]) -> "SodaOperation": Sets a filter specification for complex document queries and ordering of JSON documents. Filter specifications must be provided as a dictionary or JSON-encoded string and can include comparisons, regular - expressions, logical and spatial operators, among others. See the - overview of SODA filter specifications for more information. 
+ expressions, logical and spatial operators, among others. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. @@ -539,7 +569,7 @@ def getCursor(self) -> "SodaDocCursor": impl = self._collection._impl.get_cursor(self) return SodaDocCursor._from_impl(impl) - def getDocuments(self) -> list: + def getDocuments(self) -> list["SodaDocument"]: """ Returns a list of SodaDocument objects that match the criteria. """ @@ -562,7 +592,7 @@ def hint(self, value: str) -> "SodaOperation": hints but without any comment characters. While you could use this to pass any SQL hint, the hints MONITOR (turn on monitoring) and NO_MONITOR (turn off monitoring) are the most useful. Use of this - method requires Oracle Client 21.3 or higher (or Oracle Client 19 from + method requires Oracle Client 21.3 or later (or Oracle Client 19 from 19.11). As a convenience, the SodaOperation object is returned so that further @@ -577,7 +607,20 @@ def lock(self) -> "SodaOperation": """ Specifies whether the documents fetched from the collection should be locked (equivalent to SQL "select for update"). Use of this method - requires Oracle Client 21.3 or higher (or Oracle Client 19 from 19.11). + requires Oracle Client 21.3 or later (or Oracle Client 19 from 19.11). + + The next commit or rollback on the connection made after the operation + is performed will "unlock" the documents. Ensure that the connection is + not in autocommit mode or the documents will be unlocked immediately + after the operation is complete. + + This method should only be used with read operations (other than + :func:`~SodaOperation.count()`) and should not be used in conjunction + with non-terminal methods :meth:`~SodaOperation.skip()` and + :meth:`~SodaOperation.limit()`. + + If this method is specified in conjunction with a write operation, this + method is ignored. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. @@ -588,8 +631,8 @@ def lock(self) -> "SodaOperation": def key(self, value: str) -> "SodaOperation": """ Specifies that the document with the specified key should be returned. - This causes any previous calls made to this method and keys() to be - ignored. + This causes any previous calls made to this method and + :meth:`~SodaOperation.keys()` to be ignored. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. @@ -604,7 +647,7 @@ def keys(self, value: list) -> "SodaOperation": """ Specifies that documents that match the keys found in the supplied sequence should be returned. This causes any previous calls made to - this method and key() to be ignored. + this method and :meth:`~SodaOperation.key()` to be ignored. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. @@ -621,8 +664,9 @@ def limit(self, value: int) -> "SodaOperation": """ Specifies that only the specified number of documents should be returned. This method is only usable for read operations such as - getCursor() and getDocuments(). For write operations, any value set - using this method is ignored. + :meth:`~SodaOperation.getCursor()` and + :meth:`~SodaOperation.getDocuments()`. For write operations, any value + set using this method is ignored. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. 
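The non-terminal methods documented in this hunk each return the SodaOperation so criteria can be chained before a terminal method runs the operation. A short sketch, assuming ``collection`` is a SodaCollection obtained from ``createCollection()`` or ``openCollection()`` and that the filter values and key are placeholders:

# Chain non-terminal methods (filter, limit, skip, key, ...) and finish
# with a terminal method such as getDocuments(), getOne(), or count().
docs = (
    collection.find()
    .filter({"department": "Sales"})
    .limit(10)
    .getDocuments()
)
for doc in docs:
    print(doc.key, doc.getContent())

# Fetch a single document by key; getOne() returns None if nothing matches.
doc = collection.find().key("key_1").getOne()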
@@ -646,8 +690,8 @@ def replaceOne(self, doc: Any) -> bool: SODA document object. A boolean indicating if a document was replaced or not is returned. - Currently the method key() must be called before this method can be - called. + Currently, the method :meth:`~SodaOperation.key()` must be called + before this method can be called. """ doc_impl = self._collection._process_doc_arg(doc) return self._collection._impl.replace_one( @@ -656,10 +700,10 @@ def replaceOne(self, doc: Any) -> bool: def replaceOneAndGet(self, doc: Any) -> "SodaDocument": """ - Similarly to replaceOne(), this method replaces a single document in - the collection with the specified document. The only difference is that - it returns a SodaDocument object. Note that for performance reasons the - returned document does not contain the content. + Similar to :meth:`~SodaOperation.replaceOne()`, this method replaces a + single document in the collection with the specified document. The only + difference is that it returns a SodaDocument object. Note that for + performance reasons the returned document does not contain the content. """ doc_impl = self._collection._process_doc_arg(doc) return_doc_impl = self._collection._impl.replace_one( @@ -671,8 +715,9 @@ def skip(self, value: int) -> "SodaOperation": """ Specifies the number of documents that match the other criteria that will be skipped. This method is only usable for read operations such as - getCursor() and getDocuments(). For write operations, any value set - using this method is ignored. + :meth:`~SodaOperation.getOne()`, :meth:`~SodaOperation.getCursor()`, + and :meth:`~SodaOperation.getDocuments()`. For write operations, any + value set using this method is ignored. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. @@ -685,9 +730,9 @@ def skip(self, value: int) -> "SodaOperation": def version(self, value: str) -> "SodaOperation": """ Specifies that documents with the specified version should be returned. - Typically this is used with key() to implement optimistic locking, so - that the write operation called later does not affect a document that - someone else has modified. + Typically this is used with :meth:`~SodaOperation.key()` to implement + optimistic locking, so that the write operation called later does not + affect a document that someone else has modified. As a convenience, the SodaOperation object is returned so that further criteria can be specified by chaining methods together. diff --git a/src/oracledb/sparse_vector.py b/src/oracledb/sparse_vector.py index 5cbb2b6a..61d64012 100644 --- a/src/oracledb/sparse_vector.py +++ b/src/oracledb/sparse_vector.py @@ -51,6 +51,17 @@ def __init__( indices: Union[list, array.array], values: Union[list, array.array], ): + """ + Creates and returns a :ref:`SparseVector object `. + + The ``num_dimensions`` parameter is the number of dimensions contained + in the vector. + + The ``indices`` parameter is the indices (zero-based) of non-zero + values in the vector. + + The ``values`` parameter is the non-zero values stored in the vector. + """ if ( not isinstance(indices, array.array) or indices.typecode != ARRAY_TYPE_CODE_UINT32 diff --git a/src/oracledb/subscr.py b/src/oracledb/subscr.py index bc6bbbc2..bc685a36 100644 --- a/src/oracledb/subscr.py +++ b/src/oracledb/subscr.py @@ -30,7 +30,7 @@ # events are detected. 
# ----------------------------------------------------------------------------- -from typing import Callable, Union, List +from typing import Callable, Optional, Union from . import connection @@ -47,34 +47,35 @@ def _from_impl(cls, impl): @property def callback(self) -> Callable: """ - Returns the callback that was registered when the subscription was - created. + This read-only attribute returns the callback that was registered when + the subscription was created. """ return self._impl.callback @property def connection(self) -> "connection.Connection": """ - Returns the connection that was used to register the subscription when - it was created. + This read-only attribute returns the connection that was used to + register the subscription when it was created. """ return self._impl.connection @property def id(self) -> int: """ - Returns the value of REGID found in the database view - USER_CHANGE_NOTIFICATION_REGS or the value of REG_ID found in the - database view USER_SUBSCR_REGISTRATIONS. For AQ subscriptions, this - value is 0. + This read-only attribute returns the value of REGID found in the + database view USER_CHANGE_NOTIFICATION_REGS or the value of REG_ID + found in the database view USER_SUBSCR_REGISTRATIONS. For AQ + subscriptions, this value is *0*. """ return self._impl.id @property def ip_address(self) -> str: """ - Returns the IP address used for callback notifications from the - database server. If not set during construction, this value is None. + This read-only attribute returns the IP address used for callback + notifications from the database server. If not set during construction, + this value is *None*. """ return self._impl.ip_address @@ -88,60 +89,65 @@ def ipAddress(self) -> str: @property def name(self) -> str: """ - Returns the name used to register the subscription when it was created. + This read-only attribute returns the name used to register the + subscription when it was created. """ return self._impl.name @property def namespace(self) -> int: """ - Returns the namespace used to register the subscription when it was - created. + This read-only attribute returns the namespace used to register the + subscription when it was created. """ return self._impl.namespace @property def operations(self) -> int: """ - Returns the operations that will send notifications for each table or - query that is registered using this subscription. + This read-only attribute returns the operations that will send + notifications for each table or query that is registered using this + subscription. """ return self._impl.operations @property def port(self) -> int: """ - Returns the port used for callback notifications from the database - server. If not set during construction, this value is zero. + This read-only attribute returns the port used for callback + notifications from the database server. If not set during + construction, this value is *0*. """ return self._impl.port @property def protocol(self) -> int: """ - Returns the protocol used to register the subscription when it was - created. + This read-only attribute returns the protocol used to register the + subscription when it was created. """ return self._impl.protocol @property def qos(self) -> int: """ - Returns the quality of service flags used to register the subscription - when it was created. + This read-only attribute returns the quality of service flags used to + register the subscription when it was created. 
""" return self._impl.qos def registerquery( - self, statement: str, args: Union[list, dict] = None + self, statement: str, args: Optional[Union[list, dict]] = None ) -> int: """ - Register the query for subsequent notification when tables referenced - by the query are changed. This behaves similarly to cursor.execute() - but only queries are permitted and the args parameter, if specified, - must be a sequence or dictionary. If the qos parameter included the - flag SUBSCR_QOS_QUERY when the subscription was created, then the ID - for the registered query is returned; otherwise, None is returned. + Registers the query for subsequent notification when tables referenced + by the query are changed. This behaves similarly to + :meth:`Cursor.execute()` but only queries are permitted and the + ``args`` parameter, if specified, must be a sequence or dictionary. If + the ``qos`` parameter included the flag + :data:`oracledb.SUBSCR_QOS_QUERY` when the subscription was created, + then the ID for the registered query is returned; otherwise, *None* is + returned. """ if args is not None and not isinstance(args, (list, dict)): raise TypeError("expecting args to be a dictionary or list") @@ -150,9 +156,9 @@ def registerquery( @property def timeout(self) -> int: """ - Returns the timeout (in seconds) that was specified when the - subscription was created. A value of 0 indicates that there is no - timeout. + This read-only attribute returns the timeout (in seconds) that was + specified when the subscription was created. A value of *0* indicates + that there is no timeout. """ return self._impl.timeout @@ -173,9 +179,11 @@ def __init__(self, subscription: Subscription) -> None: @property def consumer_name(self) -> Union[str, None]: """ - Returns the name of the consumer which generated the notification. It - will be populated if the subscription was created with the namespace - SUBSCR_NAMESPACE_AQ and the queue is a multiple consumer queue. + This read-only attribute returns the name of the consumer which + generated the notification. It will be populated if the + subscription was created with the namespace + :data:`oracledb.SUBSCR_NAMESPACE_AQ` and the queue is a multiple + consumer queue. """ return self._consumer_name @@ -189,34 +197,38 @@ def consumerName(self) -> Union[str, None]: @property def dbname(self) -> Union[str, None]: """ - Returns the name of the database that generated the notification. + This read-only attribute returns the name of the database that + generated the notification. """ return self._dbname @property def msgid(self) -> Union[bytes, None]: """ - Returns the message id of the AQ message that generated the - notification. + This read-only attribute returns the message id of the AQ message that + generated the notification. It will only be populated if the + subscription was created with the namespace + :data:`oracledb.SUBSCR_NAMESPACE_AQ`. """ return self._msgid @property - def queries(self) -> List["MessageQuery"]: + def queries(self) -> list["MessageQuery"]: """ - Returns a list of message query objects that give information about - query result sets changed for this notification. This attribute will be - an empty list if the qos parameter did not include the flag - SUBSCR_QOS_QUERY when the subscription was created. + This read-only attribute returns a list of message query objects that + give information about query result sets changed for this notification. 
+ This attribute will be an empty list if the ``qos`` parameter did not + include the flag :data:`~oracledb.SUBSCR_QOS_QUERY` when the + subscription was created. """ return self._queries @property def queue_name(self) -> Union[str, None]: """ - Returns the name of the queue which generated the notification. It will - only be populated if the subscription was created with the namespace - SUBSCR_NAMESPACE_AQ. + This read-only attribute returns the name of the queue which generated + the notification. It will only be populated if the subscription was + created with the namespace :data:`oracledb.SUBSCR_NAMESPACE_AQ`. """ return self._queue_name @@ -230,43 +242,47 @@ def queueName(self) -> Union[str, None]: @property def registered(self) -> bool: """ - Returns whether the subscription which generated this notification is - still registered with the database. The subscription is automatically - deregistered with the database when the subscription timeout value is - reached or when the first notification is sent (when the quality of - service flag SUBSCR_QOS_DEREG_NFY is used). + This read-only attribute returns whether the subscription which + generated this notification is still registered with the database. The + subscription is automatically deregistered with the database when the + subscription timeout value is reached or when the first notification is + sent (when the quality of service flag + :data:`oracledb.SUBSCR_QOS_DEREG_NFY` is used). """ return self._registered @property def subscription(self) -> Subscription: """ - Returns the subscription object for which this notification was - generated. + This read-only attribute returns the subscription object for which this + notification was generated. """ return self._subscription @property - def tables(self) -> List["MessageTable"]: + def tables(self) -> list["MessageTable"]: """ - Returns a list of message table objects that give information about the - tables changed for this notification. This attribute will be an empty - list if the qos parameter included the flag SUBSCR_QOS_QUERY when the - subscription was created. + This read-only attribute returns a list of message table objects that + give information about the tables changed for this notification. This + attribute will be an empty list if the ``qos`` parameter included the + flag :data:`~oracledb.SUBSCR_QOS_QUERY` when the subscription was + created. """ return self._tables @property def txid(self) -> Union[bytes, None]: """ - Returns the id of the transaction that generated the notification. + This read-only attribute returns the id of the transaction that + generated the notification. """ return self._txid @property def type(self) -> int: """ - Returns the type of message that has been sent. + This read-only attribute returns the type of message that has been + sent. """ return self._type @@ -280,27 +296,28 @@ def __init__(self) -> None: @property def id(self) -> int: """ - Returns the query id of the query for which the result set changed. The - value will match the value returned by Subscription.registerquery() - when the related query was registered. + This read-only attribute returns the query id of the query for which + the result set changed. The value will match the value returned by + :meth:`Subscription.registerquery()` when the related query was + registered. """ return self._id @property def operation(self) -> int: """ - Returns the operation that took place on the query result set that was - changed. 
Valid values for this attribute are EVENT_DEREG and - EVENT_QUERYCHANGE. + This read-only attribute returns the operation that took place on the + query result set that was changed. Valid values for this attribute are + :data:`~oracledb.EVENT_DEREG` and :data:`~oracledb.EVENT_QUERYCHANGE`. """ return self._operation @property - def tables(self) -> List["MessageTable"]: + def tables(self) -> list["MessageTable"]: """ - Returns a list of message table objects that give information about the - table changes that caused the query result set to change for this - notification. + This read-only attribute returns a list of message table objects that + give information about the table changes that caused the query result + set to change for this notification. """ return self._tables @@ -313,14 +330,15 @@ def __init__(self) -> None: @property def operation(self) -> int: """ - Returns the operation that took place on the row that was changed. + This read-only attribute returns the operation that took place on the + row that was changed. """ return self._operation @property def rowid(self) -> Union[str, None]: """ - Returns the rowid of the row that was changed. + This read-only attribute returns the rowid of the row that was changed. """ return self._rowid @@ -334,23 +352,26 @@ def __init__(self) -> None: @property def name(self) -> Union[str, None]: """ - Returns the name of the table that was changed. + This read-only attribute returns the name of the table that was + changed. """ return self._name @property def operation(self) -> int: """ - Returns the operation that took place on the table that was changed. + This read-only attribute returns the operation that took place on the + table that was changed. """ return self._operation @property - def rows(self) -> List["MessageRow"]: + def rows(self) -> list["MessageRow"]: """ - Returns a list of message row objects that give information about the - rows changed on the table. This value is only filled in if the qos - parameter to the Connection.subscribe() method included the flag - SUBSCR_QOS_ROWIDS. + This read-only attribute returns a list of message row objects that + give information about the rows changed on the table. This value is + only filled in if the ``qos`` parameter to the + :meth:`Connection.subscribe()` method included the flag + :data:`~oracledb.SUBSCR_QOS_ROWIDS`. """ return self._rows diff --git a/src/oracledb/utils.py b/src/oracledb/utils.py index 134935aa..fd0c49ae 100644 --- a/src/oracledb/utils.py +++ b/src/oracledb/utils.py @@ -28,6 +28,7 @@ # Contains utility classes and methods. # ----------------------------------------------------------------------------- +import functools from typing import Any, Callable, Optional, Union from .arrow_array import ArrowArray @@ -36,22 +37,31 @@ from . import base_impl from . import driver_mode from . import errors +from . import thick_impl import uuid +def clientversion() -> tuple: + """ + This function can only be called when python-oracledb is in Thick mode. + Using it in Thin mode will throw an exception. + """ + return thick_impl.clientversion() + + def enable_thin_mode(): """ Makes python-oracledb be in Thin mode. After this method is called, Thick mode cannot be enabled. If python-oracledb is already in Thick mode, then - calling ``enable_thin_mode()`` will fail. If connections have already been - opened, or a connection pool created, in Thin mode, then calling - ``enable_thin_mode()`` is a no-op. + calling ``enable_thin_mode()`` will fail. 
If Thin mode connections have + already been opened, or a connection pool created in Thin mode, then + calling ``enable_thin_mode()`` is a no-op. Since python-oracledb defaults to Thin mode, almost all applications do not need to call this method. However, because it bypasses python-oracledb's internal mode-determination heuristic, it may be useful for applications - that are using standalone connections in multiple threads to concurrently - create connections when the application starts. + with multiple threads that concurrently create :ref:`standalone connections + ` when the application starts. """ with driver_mode.get_manager(requested_thin_mode=True): pass @@ -59,9 +69,15 @@ def enable_thin_mode(): def from_arrow(obj: Any) -> Union[DataFrame, ArrowArray]: """ - Uses the Arrow PyCapsule interface to return either a DataFrame or - ArrowArray object, depending on what interface is supported by the object - that is supplied to the function. + This method converts a data frame to a + :ref:`DataFrame ` or + :ref:`ArrowArray ` instance. + + If ``obj`` supports the Arrow PyCapsule interface ``__arrow_c_stream__`` + method, then ``from_arrow()`` returns the instance as a :ref:`DataFrame + `. If ``obj`` does not support that method, but does + support ``__arrow_c_array__``, then an :ref:`ArrowArray + ` is returned. """ if hasattr(obj, "__arrow_c_stream__"): return DataFrame._from_arrow(obj) @@ -71,6 +87,100 @@ def from_arrow(obj: Any) -> Union[DataFrame, ArrowArray]: raise ValueError(msg) +def init_oracle_client( + lib_dir: Optional[Union[str, bytes]] = None, + config_dir: Optional[Union[str, bytes]] = None, + error_url: Optional[str] = None, + driver_name: Optional[str] = None, +): + """ + Enables python-oracledb Thick mode by initializing the Oracle Client + library, see :ref:`enablingthick`. If a standalone connection or pool has + already been created in Thin mode, ``init_oracle_client()`` will raise an + exception and python-oracledb will remain in Thin mode. + + If a standalone connection or pool has *not* already been created in Thin + mode, but ``init_oracle_client()`` raises an exception, python-oracledb + will remain in Thin mode but further calls to ``init_oracle_client()`` can + be made, if desired. + + The ``init_oracle_client()`` method can be called multiple times in each + Python process as long as the arguments are the same each time. + + The ``lib_dir`` parameter is a string or a bytes object that specifies the + directory containing Oracle Client libraries. If the ``lib_dir`` parameter + is set, then the specified directory is the only one searched for the + Oracle Client libraries; otherwise, the operating system library search + path is used to locate the Oracle Client library. If you are using Python + 3.11 and later, then the value specified in this parameter is encoded + using `locale.getencoding() `__. For all other Python versions, the encoding + "utf-8" is used. If a bytes object is specified in this parameter, then + this value will be used as is without any encoding. + + The ``config_dir`` parameter is a string or a bytes object that specifies + the directory in which the + :ref:`Optional Oracle Net Configuration ` and + :ref:`Optional Oracle Client Configuration ` files reside. + If the ``config_dir`` parameter is set, then the specified directory is + used to find Oracle Client library configuration files. This is + equivalent to setting the environment variable ``TNS_ADMIN`` and overrides + any value already set in ``TNS_ADMIN``. 
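To make the ``lib_dir`` and ``config_dir`` parameters concrete, a hedged sketch of a typical Thick mode initialization follows (the paths and driver name are examples only, not defaults)::

    import oracledb

    oracledb.init_oracle_client(
        lib_dir="/opt/oracle/instantclient_23_5",  # example Instant Client path
        config_dir="/opt/oracle/config",           # example tnsnames.ora location
        driver_name="myapp : 1.0",
    )
    print(oracledb.is_thin_mode())  # False once Thick mode is enabled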
If this parameter is not set, the + :ref:`Oracle standard ` way of locating Oracle Client + library configuration files is used. If you are using Python 3.11 and + later, then the value specified in this parameter is encoded using + `locale.getencoding() `__. For all other Python versions, the encoding + "utf-8" is used. If a bytes object is specified in this parameter, then + this value will be used as is without any encoding. + + The ``error_url`` parameter is a string that specifies the URL which is + included in the python-oracledb exception message if the Oracle Client + libraries cannot be loaded. If the ``error_url`` parameter is set, then + the specified value is included in the message of the exception raised + when the Oracle Client library cannot be loaded; otherwise, the + :ref:`installation` URL is included. This parameter lets your application + display custom installation instructions. + + The ``driver_name`` parameter is a string that specifies the driver name + value. If the ``driver_name`` parameter is set, then the specified value + can be found in database views that give information about connections. + For example, it is in the CLIENT_DRIVER column of the + V$SESSION_CONNECT_INFO view. From Oracle Database 12.2, the name displayed + can be 30 characters. The standard is to set this value to ``" : + version>"``, where is the name of the driver and is its + version. There should be a single space character before and after the + colon. If this parameter is not set, then the value specified in + :attr:`oracledb.defaults.driver_name ` is used. If + the value of this attribute is *None*, then the default value in + python-oracledb Thick mode is like "python-oracledb thk : ". See + :ref:`otherinit`. + + At successful completion of a call to ``oracledb.init_oracle_client()``, + the attribute :attr:`defaults.config_dir` will be set as determined below + (first one wins): + + - the value of the ``oracledb.init_oracle_client()`` parameter + ``config_dir``, if one was passed. + + - the value of :attr:`defaults.config_dir` if it has one. I.e. + :attr:`defaults.config_dir` remains unchanged after + ``oracledb.init_oracle_client()`` completes. + + - the value of the environment variable ``$TNS_ADMIN``, if it is set. + + - the value of ``$ORACLE_HOME/network/admin`` if the environment variable + ``$ORACLE_HOME`` is set. + + - the directory of the loaded Oracle Client library, appended with + ``network/admin``. Note this directory is not determinable on AIX. + + - otherwise the value *None* is used. (Leaving :attr:`defaults.config_dir` + unchanged). + """ + thick_impl.init_oracle_client(lib_dir, config_dir, error_url, driver_name) + + def normalize_sessionless_transaction_id( value: Optional[Union[bytes, str]] = None, ) -> bytes: @@ -109,6 +219,7 @@ class stored on the parameter class. It first, however, calls the original original method itself does nothing). """ + @functools.wraps(f) def wrapped_f(self, *args, **kwargs): f(self, *args, **kwargs) self._impl = self._impl_class() @@ -127,6 +238,7 @@ def params_setter(f): original method itself does nothing). """ + @functools.wraps(f) def wrapped_f(self, *args, **kwargs): f(self, *args, **kwargs) self._impl.set(kwargs) @@ -136,12 +248,16 @@ def wrapped_f(self, *args, **kwargs): def register_params_hook(hook_function: Callable) -> None: """ - Registers a user function to be called internally prior to connection or - pool creation. 
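As a small illustration of a parameter hook of this kind (the connection details are placeholders and the attribute adjusted below is an arbitrary choice, not a recommendation)::

    import oracledb

    def params_hook(params):
        # "params" is the copy of the ConnectParams object that will be used
        # to create the connection or pool; adjust it as needed
        params.set(expire_time=2)

    oracledb.register_params_hook(params_hook)
    conn = oracledb.connect(user="scott", password="tiger", dsn="dbhost/orclpdb")

    # later, remove the hook again if it is no longer wanted
    oracledb.unregister_params_hook(params_hook)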
The hook function accepts a copy of the parameters that will - be used to create the pool or standalone connection and may modify them. - For example, the cloud native authentication plugins modify the - "access_token" parameter with a function that will acquire the token using - information found in the "extra_auth_parms" parameter. + Registers a user parameter hook function that will be called internally by + python-oracledb prior to connection or pool creation. The hook function + accepts a copy of the parameters that will be used to create the pool or + standalone connection and may modify them. For example, the cloud native + authentication plugins modify the "access_token" parameter with a function + that will acquire the token using information found in the + "extra_auth_parms" parameter. + + Multiple hooks may be registered. They will be invoked in order of + registration. """ if hook_function is None or not callable(hook_function): raise TypeError("hook_function must be a callable and cannot be None") @@ -152,10 +268,20 @@ def register_password_type( password_type: str, hook_function: Callable ) -> None: """ - Registers a user function to be called when a password is provided as a - dictionary containing a key "type" with the specified value. The hook - function is expected to use the dictionary and return the password value. - If the supplied function is None, the registration is removed. + Registers a user password hook function that will be called internally by + python-oracledb when a password is supplied as a dictionary containing the + given ``password_type`` as the key "type". The hook function is called for + passwords specified as the ``password``, ``newpassword`` and + ``wallet_parameter`` parameters in calls to :meth:`oracledb.connect()`, + :meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, and + :meth:`oracledb.create_pool_async()`. + + Your hook function is expected to accept the dictionary supplied by the + application and return the valid password. + + Calling :meth:`~oracledb.register_password_type()` with the + ``hook_function`` parameter set to *None* will result in a previously + registered user function being removed and the default behavior restored. """ if not isinstance(password_type, str): raise TypeError("password_type must be a string") @@ -170,13 +296,66 @@ def register_password_type( def register_protocol(protocol: str, hook_function: Callable) -> None: """ - Registers a user function to be called prior to connection or pool creation - when an Easy Connect connection string prefixed with the specified protocol - is being parsed internally by python-oracledb in Thin mode. The registered - function will also be invoked by ConnectParams.parse_connect_string() in - Thin and Thick modes. Your hook function is expected to find or construct a - valid connection string. If the supplied function is None, the registration - is removed. + Registers a user protocol hook function that will be called internally by + python-oracledb Thin mode prior to connection or pool creation. The hook + function will be invoked when :func:`oracledb.connect`, + :func:`oracledb.create_pool`, :meth:`oracledb.connect_async()`, or + :meth:`oracledb.create_pool_async()` are called with a ``dsn`` parameter + value prefixed with the specified protocol. The user function will also be + invoked when :meth:`ConnectParams.parse_connect_string()` is called in Thin + or Thick modes with a similar ``connect_string`` parameter value. 
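As a brief aside, the password hook registration documented just above might look like this in practice (the base64 scheme and connection details are illustrative only)::

    import base64
    import oracledb

    def base64_password_hook(password: dict) -> str:
        # called whenever a supplied password dict has {"type": "base64"}
        return base64.b64decode(password["value"]).decode()

    oracledb.register_password_type("base64", base64_password_hook)

    conn = oracledb.connect(
        user="scott",
        password={"type": "base64", "value": "dGlnZXI="},  # decodes to "tiger"
        dsn="dbhost/orclpdb",
    )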
+ + Your hook function is expected to construct valid connection details. For + example, if a hook function is registered for the "ldaps" protocol, then + calling :func:`oracledb.connect` with a connection string prefixed with + "ldaps://" will invoke the function. The function can then perform LDAP + lookup to retrieve and set the actual database information that will be + used internally by python-oracledb to complete the connection creation. + + The ``protocol`` parameter is a string that will be matched against the + prefix appearing before "://" in connection strings. + + The ``hook_function`` parameter should be a function with the signature:: + + hook_function(protocol, protocol_arg, params) + + The hook function will be called with the following arguments: + + - The ``protocol`` parameter is the value that was registered. + + - The ``protocol_arg`` parameter is the section after "://" in the + connection string used in the connection or pool creation call, or passed + to :meth:`~ConnectParams.parse_connect_string()`. + + - The ``params`` parameter is an instance of :ref:`ConnectParams + `. + + When your hook function is invoked internally prior to connection or pool + creation, ``params`` will be the ConnectParams instance originally passed + to the :func:`oracledb.connect`, :func:`oracledb.create_pool`, + :meth:`oracledb.connect_async()`, or :meth:`oracledb.create_pool_async()` + call, if such an instance was passed. Otherwise it will be a new + ConnectParams instance. The hook function should parse ``protocol`` and + ``protocol_arg`` and take any desired action to update ``params`` + :ref:`attributes ` with appropriate connection + parameters. Attributes can be set using :meth:`ConnectParams.set()` or + :meth:`ConnectParams.parse_connect_string()`. The ConnectParams instance + will then be used to complete the connection or pool creation. + + When your hook function is invoked by + :meth:`ConnectParams.parse_connect_string()`, then ``params`` will be the + invoking ConnectParams instance that you can update using + :meth:`ConnectParams.set()` or + :meth:`ConnectParams.parse_connect_string()`. + + Internal hook functions for the "tcp" and "tcps" protocols are + pre-registered but can be overridden if needed. If any other protocol has + not been registered, then connecting will result in the error ``DPY-4021: + invalid protocol``. + + Calling :meth:`~oracledb.register_protocol()` with the ``hook_function`` + parameter set to *None* will result in a previously registered user + function being removed and the default behavior restored. """ if not isinstance(protocol, str): raise TypeError("protocol must be a string") @@ -191,8 +370,8 @@ def register_protocol(protocol: str, hook_function: Callable) -> None: def unregister_params_hook(hook_function: Callable) -> None: """ - Unregisters a user function that was earlier registered with a call to - register_params_hook(). + Unregisters a user parameter function that was earlier registered with a + call to :meth:`oracledb.register_params_hook()`. """ base_impl.REGISTERED_PARAMS_HOOKS.remove(hook_function) diff --git a/src/oracledb/var.py b/src/oracledb/var.py index d04a5673..626d46d3 100644 --- a/src/oracledb/var.py +++ b/src/oracledb/var.py @@ -29,7 +29,7 @@ # fetch. These hold the metadata as well as any necessary buffers. 
# ----------------------------------------------------------------------------- -from typing import Any, Callable, Union +from typing import Any, Callable, Optional, Union from .dbobject import DbObjectType from .base_impl import DbType @@ -60,8 +60,8 @@ def actual_elements(self) -> int: This read-only attribute returns the actual number of elements in the variable. This corresponds to the number of elements in a PL/SQL index-by table for variables that are created using the method - Cursor.arrayvar(). For all other variables this value will be identical - to the attribute num_elements. + :meth:`Cursor.arrayvar()`. For all other variables, this value will be + identical to the attribute num_elements. """ if self._impl.is_array: return self._impl.num_elements_in_array @@ -99,22 +99,22 @@ def convert_nulls(self) -> bool: def getvalue(self, pos: int = 0) -> Any: """ - Return the value at the given position in the variable. For variables - created using the method Cursor.arrayvar() the value returned will be a - list of each of the values in the PL/SQL index-by table. For variables - bound to DML returning statements, the value returned will also be a - list corresponding to the returned data for the given execution of the - statement (as identified by the pos parameter). + Returns the value at the given position in the variable. For variables + created using the method :meth:`Cursor.arrayvar()`, the value returned + will be a list of each of the values in the PL/SQL index-by table. For + variables bound to DML returning statements, the value returned will + also be a list corresponding to the returned data for the given + execution of the statement (as identified by the ``pos`` parameter). """ return self._impl.get_value(pos) @property - def inconverter(self) -> Callable: + def inconverter(self) -> Optional[Callable]: """ This read-only attribute specifies the method used to convert data from Python to the Oracle database. The method signature is converter(value) and the expected return value is the value to bind to the database. If - this attribute is None, the value is bound directly without any + this attribute is *None*, the value is bound directly without any conversion. """ return self._impl.inconverter @@ -136,19 +136,19 @@ def numElements(self) -> int: return self.num_elements @property - def outconverter(self) -> Callable: + def outconverter(self) -> Optional[Callable]: """ This read-only attribute specifies the method used to convert data from the Oracle database to Python. The method signature is converter(value) and the expected return value is the value to return to Python. If this - attribute is None, the value is returned directly without any + attribute is *None*, the value is returned directly without any conversion. """ return self._impl.outconverter def setvalue(self, pos: int, value: Any) -> None: """ - Set the value at the given position in the variable. + Sets the value at the given position in the variable. """ self._impl.set_value(pos, value) @@ -156,7 +156,7 @@ def setvalue(self, pos: int, value: Any) -> None: def size(self) -> int: """ This read-only attribute returns the size of the variable. For strings - this value is the size in characters. For all others, this is same + this value is the size in characters. For all others, this is the same value as the attribute buffer_size. 
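A short sketch of ``setvalue()`` and ``getvalue()`` with a bind variable follows (connection details are placeholders)::

    import oracledb

    conn = oracledb.connect(user="scott", password="tiger", dsn="dbhost/orclpdb")
    cursor = conn.cursor()

    # in/out bind: set a value, let PL/SQL change it, then read it back
    var = cursor.var(oracledb.DB_TYPE_NUMBER)
    var.setvalue(0, 25)
    cursor.execute("begin :n := :n * 2; end;", n=var)
    print(var.getvalue())  # the doubled value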
""" return self._impl.metadata.max_size @@ -165,8 +165,12 @@ def size(self) -> int: def type(self) -> Union[DbType, DbObjectType]: """ This read-only attribute returns the type of the variable. This will be - an Oracle Object Type if the variable binds Oracle objects; otherwise, - it will be one of the database type constants. + an :ref:`Oracle Object Type ` if the variable binds + Oracle objects; otherwise, it will be one of the + :ref:`database type constants `. + + Database type constants are now used when the variable is not used for + binding Oracle objects. """ return self._type diff --git a/utils/fields.cfg b/utils/fields.cfg index 8ef1a4f1..5f03c191 100644 --- a/utils/fields.cfg +++ b/utils/fields.cfg @@ -391,7 +391,7 @@ type = list description = application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple should be - a string. This value is only used in thick mode + a string [shardingkey] type = list diff --git a/utils/templates/connect_params.py b/utils/templates/connect_params.py index 8b206af8..ceab8c5e 100644 --- a/utils/templates/connect_params.py +++ b/utils/templates/connect_params.py @@ -81,7 +81,7 @@ def wrapped(self): def copy(self) -> "ConnectParams": """ - Creates a copy of the parameters and returns it. + Creates a copy of the ConnectParams instance and returns it. """ params = ConnectParams.__new__(ConnectParams) params._impl = self._impl.copy() @@ -89,36 +89,38 @@ def copy(self) -> "ConnectParams": def get_connect_string(self) -> str: """ - Returns a connect string generated from the parameters. + Returns the connection string associated with the instance. """ return self._impl.get_connect_string() def get_network_service_names(self) -> list: """ - Returns a list of the network service names found in the tnsnames.ora - file found in the configuration directory associated with the - parameters. If no such file exists, an error is raised. + Returns a list of the network service names found in the + :ref:`tnsnames.ora ` file which is inside the directory + that can be identified by the attribute + :attr:`~ConnectParams.config_dir`. If a tnsnames.ora file does not + exist, then an exception is raised. """ return self._impl.get_network_service_names() def parse_connect_string(self, connect_string: str) -> None: """ Parses the connect string into its components and stores the - parameters. The connect string could be an Easy Connect string, - name-value pairs or a simple alias which is looked up in tnsnames.ora. - Any parameters found in the connect string override any currently - stored values. + parameters. + + The ``connect string`` parameter can be an Easy Connect string, + name-value pairs, or a simple alias which is looked up in + ``tnsnames.ora``. Parameters that are found in the connect string + override any currently stored values. """ self._impl.parse_connect_string(connect_string) def parse_dsn_with_credentials(self, dsn: str) -> tuple: """ - Parses a dsn in the form /@ or in the + Parses a DSN in the form /@ or in the form / and returns a 3-tuple containing the parsed user, password and connect string. Empty strings are returned as the - value None. This is done automatically when a value is passed to - the dsn parameter but no value is passed to the user password when - creating a standalone connection or connection pool. + value *None*. """ return self._impl.parse_dsn_with_credentials(dsn) @@ -127,8 +129,9 @@ def set( # {{ params_setter_args }} ): """ - All parameters are optional. 
A brief description of each parameter - follows: + Sets the values for one or more of the parameters of a ConnectParams + object. All parameters are optional. A brief description of each + parameter follows: # {{ args_help_without_defaults }} """ @@ -136,24 +139,27 @@ def set( def set_from_config(self, config: dict) -> None: """ - Sets the property values based on the supplied configuration. The - configuration consists of a dictionary with the following keys, all of - which are optional: "connect_descriptor", "user", "password" and "pyo". + Sets the property values based on the specified configuration. This + method is intended for use with Centralized Configuration Providers. + + The ``config`` parameter is a dictionary which consists of the + following optional keys: "connect_descriptor", "user", "password", and + "pyo". - If the "connect_descriptor" key is supplied, it is expected to be a - string, which will be parsed and the properties found within it stored - in the parameters. + If the key "connect_descriptor" is specified, it is expected to be a + string, which will be parsed and the properties found within it are + stored in the ConnectParams instance. - If the "user" or "password" keys are supplied, and the parameters do - not already have a user or password, these values will be stored; - otherwise, they will be ignored. The "user" key is expected to be a - string. The "password" key may be a string or it may be a dictionary - containing the keys "type" and "value" which will be used to determine - the actual password. + If the keys "user" or "password" are specified, and the parameters do + not already have a user or password set, these values will be stored; + otherwise, they will be ignored. The key "user" is expected to be a + string. The "key" password may be a string or it may be a dictionary + which will be examined by a :ref:`registered password type handler + ` to determine the actual password. - If the "pyo" key is supplied, it is expected to be a dictionary + If the key "pyo" is specified, it is expected to be a dictionary containing keys corresponding to property names. Any property names - accepted by the parameters will be stored; all other values will be - ignored. + accepted by the ConnectParams class will be stored in the ConnectParams + instance; all other values will be ignored. """ self._impl.set_from_config(config) diff --git a/utils/templates/connection.py b/utils/templates/connection.py index 7cddf7df..bfbaff5f 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -49,7 +49,7 @@ from .dataframe import DataFrame from .dbobject import DbObjectType, DbObject from .lob import AsyncLOB, LOB -from .pipeline import Pipeline +from .pipeline import Pipeline, PipelineOpResult from .soda import SodaDatabase from .subscr import Subscription from .utils import normalize_sessionless_transaction_id @@ -489,7 +489,7 @@ def msgproperties( ) -> MessageProperties: """ Returns an object specifying the properties of messages used in - advanced queuing. See :ref:`msgproperties` for more information. + advanced queuing. Each of the parameters are optional. If specified, they act as a shortcut for setting each of the equivalently named properties. @@ -518,10 +518,10 @@ def queue( payload_type: Optional[Union[DbObjectType, str]] = None, *, payloadType: Optional[DbObjectType] = None, - ) -> Queue: + ) -> Union[Queue, AsyncQueue]: """ - Creates a :ref:`queue ` which is used to enqueue and dequeue - messages in Advanced Queuing. 
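A hedged sketch of these two methods together with the new asyncio support follows (the RAW queue ``DEMO_RAW_QUEUE`` is assumed to already exist, and the connection details are placeholders)::

    import asyncio
    import oracledb

    async def main():
        conn = await oracledb.connect_async(
            user="scott", password="tiger", dsn="dbhost/orclpdb"
        )
        queue = conn.queue("DEMO_RAW_QUEUE")
        await queue.enqone(conn.msgproperties(payload=b"some data"))
        await conn.commit()

        queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
        message = await queue.deqone()
        if message is not None:
            print(message.payload)
        await conn.commit()
        await conn.close()

    asyncio.run(main())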
+ Creates a queue which is used to enqueue and dequeue messages in + Advanced Queuing. The ``name`` parameter is expected to be a string identifying the queue in which messages are to be enqueued or dequeued. @@ -849,27 +849,6 @@ def __init__( ) -> None: """ Constructor for creating a connection to the database. - - The dsn parameter (data source name) can be a string in the format - user/password@connect_string or can simply be the connect string (in - which case authentication credentials such as the username and password - need to be specified separately). See the documentation on connection - strings for more information. - - The pool parameter is expected to be a pool object and the use of this - parameter is the equivalent of calling acquire() on the pool. - - The params parameter is expected to be of type ConnectParams and - contains connection parameters that will be used when establishing the - connection. See the documentation on ConnectParams for more - information. If this parameter is not specified, the additional keyword - parameters will be used to create an instance of ConnectParams. If both - the params parameter and additional keyword parameters are specified, - the values in the keyword parameters have precedence. Note that if a - dsn is also supplied, then in the python-oracledb Thin mode, the values - of the parameters specified (if any) within the dsn will override the - values passed as additional keyword parameters, which themselves - override the values set in the params parameter object. """ super().__init__() @@ -1713,8 +1692,7 @@ def __init__( kwargs: dict, ) -> None: """ - Constructor for asynchronous connection pool. Not intended to be used - directly but only indirectly through async_connect(). + Constructor for creating an asynchronous connection to the database. """ super().__init__() self._pool = pool @@ -2238,12 +2216,11 @@ async def run_pipeline( self, pipeline: Pipeline, continue_on_error: bool = False, - ) -> list: + ) -> list[PipelineOpResult]: """ - Runs all of the operations in the :ref:`pipeline ` and - returns a list of :ref:`PipelineOpResult Objects - `, each entry corresponding to an operation - executed in the pipeline. + Runs all of the operations in the pipeline and returns a list of + PipelineOpResult, each entry corresponding to an operation executed in + the pipeline. The ``continue_on_error`` parameter determines whether operations should continue to run after an error has occurred. If this parameter diff --git a/utils/templates/pool.py b/utils/templates/pool.py index 975cb001..7bb2b7ae 100644 --- a/utils/templates/pool.py +++ b/utils/templates/pool.py @@ -58,29 +58,7 @@ def __init__( **kwargs, ) -> None: """ - Constructor for creating a connection pool. Connection pooling creates - a pool of available connections to the database, allowing applications - to acquire a connection very quickly. It is of primary use in a server - where connections are requested in rapid succession and used for a - short period of time, for example in a web server. - - The dsn parameter (data source name) can be a string in the format - user/password@connect_string or can simply be the connect string (in - which case authentication credentials such as the username and password - need to be specified separately). See the documentation on connection - strings for more information. - - The params parameter is expected to be of type PoolParams and contains - parameters that are used to create the pool. See the documentation on - PoolParams for more information. 
If this parameter is not specified, - the additional keyword parameters will be used to create an instance of - PoolParams. If both the params parameter and additional keyword - parameters are specified, the values in the keyword parameters have - precedence. Note that if a dsn is also supplied, then in the - python-oracledb Thin mode, the values of the parameters specified - (if any) within the dsn will override the values passed as additional - keyword parameters, which themselves override the values set in the - params parameter object. + Constructor for creating a connection pool. """ if params is None: params_impl = base_impl.PoolParamsImpl() @@ -121,8 +99,8 @@ def _verify_open(self) -> None: @property def busy(self) -> int: """ - Returns the number of connections that have been acquired from the pool - and have not yet been returned to the pool. + This read-only attribute returns the number of connections currently + acquired. """ self._verify_open() return self._impl.get_busy_count() @@ -130,14 +108,27 @@ def busy(self) -> int: @property def dsn(self) -> str: """ - Returns the connection string (TNS entry) of the database to which - connections in the pool have been established. + This read-only attribute returns the TNS entry of the database to which + a connection has been established. """ self._verify_open() return self._impl.dsn @property def getmode(self) -> oracledb.PoolGetMode: + """ + This read-write attribute determines how connections are returned from + the pool. If :data:`~oracledb.POOL_GETMODE_FORCEGET` is specified, a + new connection will be returned even if there are no free connections + in the pool. :data:`~oracledb.POOL_GETMODE_NOWAIT` will raise an + exception if there are no free connections are available in the pool. + If :data:`~oracledb.POOL_GETMODE_WAIT` is specified and there are no + free connections in the pool, the caller will wait until a free + connection is available. :data:`~oracledb.POOL_GETMODE_TIMEDWAIT` uses + the value of :data:`~ConnectionPool.wait_timeout` to determine how long + the caller should wait for a connection to become available before + returning an error. + """ self._verify_open() return oracledb.PoolGetMode(self._impl.get_getmode()) @@ -149,8 +140,9 @@ def getmode(self, value: oracledb.PoolGetMode) -> None: @property def homogeneous(self) -> bool: """ - Returns a boolean indicating if the pool is homogeneous or not. If the - pool is not homogeneous, different authentication can be used for each + This read-only boolean attribute indicates whether the pool is + considered :ref:`homogeneous ` or not. If the pool is + not homogeneous, different authentication can be used for each connection acquired from the pool. """ self._verify_open() @@ -159,8 +151,8 @@ def homogeneous(self) -> bool: @property def increment(self) -> int: """ - Returns the number of connections that will be created when additional - connections need to be created to satisfy requests. + This read-only attribute returns the number of connections that will be + established when additional connections need to be created. """ self._verify_open() return self._impl.increment @@ -168,7 +160,8 @@ def increment(self) -> int: @property def max(self) -> int: """ - Returns the maximum number of connections that the pool can control. + This read-only attribute returns the maximum number of connections that + the pool can control. 
""" self._verify_open() return self._impl.max @@ -176,14 +169,15 @@ def max(self) -> int: @property def max_lifetime_session(self) -> int: """ - Returns the maximum length of time (in seconds) that a pooled - connection may exist. Connections that are in use will not be closed. - They become candidates for termination only when they are released back - to the pool and have existed for longer than max_lifetime_session - seconds. Note that termination only occurs when the pool is accessed. A - value of 0 means that there is no maximum length of time that a pooled - connection may exist. This attribute is only available in Oracle - Database 12.1. + This read-write attribute is the maximum length of time (in seconds) + that a pooled connection may exist since first being created. A value + of *0* means there is no limit. Connections become candidates for + termination when they are acquired or released back to the pool, and + have existed for longer than ``max_lifetime_session`` seconds. + Connections that are in active use will not be closed. In + python-oracledb Thick mode, Oracle Client libraries 12.1 or later must + be used and, prior to Oracle Client 21, cleanup only occurs when the + pool is accessed. """ self._verify_open() return self._impl.get_max_lifetime_session() @@ -196,13 +190,13 @@ def max_lifetime_session(self, value: int) -> None: @property def max_sessions_per_shard(self) -> int: """ - Returns the number of sessions that can be created per shard in the - pool. Setting this attribute greater than zero specifies the maximum - number of sessions in the pool that can be used for any given shard in - a sharded database. This lets connections in the pool be balanced - across the shards. A value of zero will not set any maximum number of - sessions for each shard. This attribute is only available in Oracle - Client 18.3 and higher. + This read-write attribute returns the number of sessions that can be + created per shard in the pool. Setting this attribute greater than zero + specifies the maximum number of sessions in the pool that can be used + for any given shard in a sharded database. This lets connections in the + pool be balanced across the shards. A value of *0* will not set any + maximum number of sessions for each shard. This attribute is only + available in Oracle Client 18.3 and higher. """ self._verify_open() return self._impl.get_max_sessions_per_shard() @@ -215,8 +209,9 @@ def max_sessions_per_shard(self, value: int) -> None: @property def min(self) -> int: """ - Returns the minimum number of connections that the pool will control. - These are created when the pool is first created. + This read-only attribute returns the number of connections with which + the connection pool was created and the minimum number of connections + that will be controlled by the connection pool. """ self._verify_open() return self._impl.min @@ -224,8 +219,8 @@ def min(self) -> int: @property def name(self) -> str: """ - Returns the name assigned to the pool by Oracle. This attribute is only - relevant in python-oracledb thick mode. + This read-only attribute returns the name assigned to the pool by + Oracle. """ self._verify_open() return self._impl.name @@ -233,7 +228,8 @@ def name(self) -> str: @property def opened(self) -> int: """ - Returns the number of connections currently opened by the pool. + This read-only attribute returns the number of connections currently + opened by the pool. 
""" self._verify_open() return self._impl.get_open_count() @@ -241,15 +237,15 @@ def opened(self) -> int: @property def ping_interval(self) -> int: """ - Returns the pool ping interval in seconds. When a connection is - acquired from the pool, a check is first made to see how long it - has been since the connection was put into the pool. If - this idle time exceeds ping_interval, then a round-trip ping to the - database is performed. If the connection is unusable, it is discarded - and a different connection is selected to be returned by - SessionPool.acquire(). Setting ping_interval to a negative value - disables pinging. Setting it to 0 forces a ping for every aquire() - and is not recommended. + This read-write integer attribute specifies the pool ping interval in + seconds. When a connection is acquired from the pool, a check is first + made to see how long it has been since the connection was put into the + pool. If this idle time exceeds ``ping_interval``, then a + :ref:`round-trip ` ping to the database is performed. If + the connection is unusable, it is discarded and a different connection + is selected to be returned by :meth:`acquire()`. Setting + ``ping_interval`` to a negative value disables pinging. Setting it to + *0* forces a ping for every :meth:`acquire()` and is not recommended. """ self._verify_open() return self._impl.get_ping_interval() @@ -261,12 +257,13 @@ def ping_interval(self, value: int) -> None: @property def soda_metadata_cache(self) -> bool: """ - Specifies whether the SODA metadata cache is enabled or not. Enabling - the cache significantly improves the performance of methods - SodaDatabase.createCollection() (when not specifying a value for the - metadata parameter) and SodaDatabase.openCollection(). Note that the - cache can become out of date if changes to the metadata of cached - collections are made externally. + This read-write boolean attribute returns whether the SODA metadata + cache is enabled or not. Enabling the cache significantly improves the + performance of methods :meth:`SodaDatabase.createCollection()` (when + not specifying a value for the ``metadata`` parameter) and + :meth:`SodaDatabase.openCollection()`. Note that the cache can become + out of date if changes to the metadata of cached collections are made + externally. """ self._verify_open() return self._impl.get_soda_metadata_cache() @@ -282,10 +279,10 @@ def soda_metadata_cache(self, value: bool) -> None: @property def stmtcachesize(self) -> int: """ - Specifies the size of the statement cache that will be used as the - starting point for any connections that are created by the pool. Once a + This read-write attribute specifies the size of the statement cache + that will be used for connections obtained from the pool. Once a connection is created, that connection’s statement cache size can only - be changed by setting the stmtcachesize attribute on the connection + be changed by setting the ``stmtcachesize`` attribute on the connection itself. """ self._verify_open() @@ -299,8 +296,12 @@ def stmtcachesize(self, value: int) -> None: @property def thin(self) -> bool: """ - Returns a boolean indicating if the pool was created in - python-oracledb's thin mode (True) or thick mode (False). + This read-only attribute returns a boolean which indicates the + python-oracledb mode in which the pool was created. If the value of + this attribute is *True*, it indicates that the pool was created in the + python-oracledb Thin mode. 
If the value of this attribute is *False*, + it indicates that the pool was created in the python-oracledb Thick + mode. """ self._verify_open() return not isinstance(self._impl, thick_impl.ThickPoolImpl) @@ -308,11 +309,12 @@ def thin(self) -> bool: @property def timeout(self) -> int: """ - Specifies the time (in seconds) after which idle connections will be - terminated in order to maintain an optimum number of open connections. - A value of 0 means that no idle connections are terminated. Note that - in thick mode with older Oracle Client libraries termination only - occurs when the pool is accessed. + This read-write attribute specifies the time (in seconds) after which + idle connections will be terminated in order to maintain an optimum + number of open connections. A value of *0* means that no idle + connections are terminated. Note that in python-oracledb Thick mode + with older Oracle Client Libraries, the termination only occurs when + the pool is accessed. """ self._verify_open() return self._impl.get_timeout() @@ -332,7 +334,8 @@ def tnsentry(self) -> str: @property def username(self) -> str: """ - Returns the name of the user which was used to create the pool. + This read-only attribute returns the name of the user which established + the connection to the database. """ self._verify_open() return self._impl.username @@ -340,10 +343,11 @@ def username(self) -> str: @property def wait_timeout(self) -> int: """ - Specifies the time (in milliseconds) that the caller should wait for a - connection to become available in the pool before returning with an - error. This value is only used if the getmode parameter used to create - the pool was POOL_GETMODE_TIMEDWAIT. + This read-write attribute specifies the time (in milliseconds) that the + caller should wait for a connection to become available in the pool + before returning with an error. This value is only used if the + ``getmode`` parameter to :meth:`oracledb.create_pool()` was the value + :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. """ self._verify_open() return self._impl.get_wait_timeout() @@ -387,31 +391,39 @@ def acquire( supershardingkey: Optional[list] = None, ) -> "connection_module.Connection": """ - Acquire a connection from the pool and return it. + Acquires a connection from the session pool and returns a + :ref:`connection object `. - If the pool is homogeneous, the user and password parameters cannot be - specified. If they are, an exception will be raised. + If the pool is :ref:`homogeneous `, the ``user`` and + ``password`` parameters cannot be specified. If they are, an exception + will be raised. - The cclass parameter, if specified, should be a string corresponding to - the connection class for database resident connection pooling (DRCP). + The ``cclass`` parameter, if specified, should be a string + corresponding to the connection class for :ref:`drcp`. - The purity parameter is expected to be one of PURITY_DEFAULT, - PURITY_NEW, or PURITY_SELF. + The ``purity`` parameter is expected to be one of + :data:`~oracledb.PURITY_NEW`, :data:`~oracledb.PURITY_SELF`, or + :data:`~oracledb.PURITY_DEFAULT`. - The tag parameter, if specified, is expected to be a string with - name=value pairs like “k1=v1;k2=v2” and will limit the connections that - can be returned from a pool unless the matchanytag parameter is - set to True. In that case connections with the specified tag will be - preferred over others, but if no such connections are available a - connection with a different tag may be returned instead. 
In any case, - untagged connections will always be returned if no connections with the - specified tag are available. Connections are tagged when they are - released back to the pool. + The ``tag`` parameter, if specified, is expected to be a string with + name=value pairs like "k1=v1;k2=v2" and will limit the connections that + can be returned from a connection pool unless the ``matchanytag`` + parameter is set to *True*. In that case, connections with the + specified tag will be preferred over others, but if no such connections + are available, then a connection with a different tag may be returned + instead. In any case, untagged connections will always be returned if + no connections with the specified tag are available. Connections are + tagged when they are :meth:`released ` back to + the pool. - The shardingkey and supershardingkey parameters, if specified, are - expected to be a sequence of values which will be used to identify the - database shard to connect to. The key values can be strings, numbers, - bytes or dates. + The ``shardingkey`` and ``supershardingkey`` parameters, if specified, + are expected to be a sequence of values which will be used to identify + the database shard to connect to. The key values can be strings, + numbers, bytes, or dates. See :ref:`connsharding`. + + When using the :ref:`connection pool cache `, calling + :meth:`oracledb.connect()` with a ``pool_alias`` parameter is the same + as calling ``pool.acquire()``. """ self._verify_open() @@ -430,11 +442,12 @@ def acquire( def close(self, force: bool = False) -> None: """ - Close the pool now, rather than when the last reference to it is + Closes the pool now, rather than when the last reference to it is released, which makes it unusable for further work. If any connections have been acquired and not released back to the - pool, this method will fail unless the force parameter is set to True. + pool, this method will fail unless the ``force`` parameter is set to + *True*. """ self._verify_open() self._impl.close(force) @@ -444,8 +457,8 @@ def close(self, force: bool = False) -> None: def drop(self, connection: "connection_module.Connection") -> None: """ - Drop the connection from the pool, which is useful if the connection is - no longer usable (such as when the database session is killed). + Drops the connection from the pool which is useful if the connection is + no longer usable (such as when the session is killed). """ self._verify_open() if not isinstance(connection, connection_module.Connection): @@ -455,42 +468,6 @@ def drop(self, connection: "connection_module.Connection") -> None: self._impl.drop(connection._impl) connection._impl = None - def release( - self, - connection: "connection_module.Connection", - tag: Optional[str] = None, - ) -> None: - """ - Release the connection back to the pool now, rather than whenever - __del__ is called. The connection will be unusable from this point - forward; an Error exception will be raised if any operation is - attempted with the connection. Any cursors or LOBs created by the - connection will also be marked unusable and an Error exception will be - raised if any operation is attempted with them. - - Internally, references to the connection are held by cursor objects, - LOB objects, etc. Once all of these references are released, the - connection itself will be released back to the pool automatically. 
- Either control references to these related objects carefully or - explicitly release connections back to the pool in order to ensure - sufficient resources are available. - - If the tag is not None, it is expected to be a string with name=value - pairs like “k1=v1;k2=v2” and will override the value in the property - Connection.tag. If either Connection.tag or the tag parameter are not - None, the connection will be retagged when it is released back to the - pool. - """ - self._verify_open() - if not isinstance(connection, connection_module.Connection): - message = "connection must be an instance of oracledb.Connection" - raise TypeError(message) - connection._verify_connected() - if tag is not None: - connection.tag = tag - self._impl.return_connection(connection._impl) - connection._impl = None - def reconfigure( self, min: Optional[int] = None, @@ -506,41 +483,52 @@ def reconfigure( ping_interval: Optional[int] = None, ) -> None: """ - Reconfigures various parameters of a connection pool. The pool size - can be altered with reconfigure() by passing values for min, max - or increment. The getmode, timeout, wait_timeout, - max_lifetime_session, max_sessions_per_shard, soda_metadata_cache, - stmtcachesize and ping_interval can be set directly or by using - reconfigure(). All parameters are optional. Unspecified parameters - will leave those pool attributes unchanged. The parameters are - processed in two stages. After any size change has been processed, - reconfiguration on the other parameters is done sequentially. If - an error such as an invalid value occurs when changing one attribute, - then an exception will be generated but any already changed - attributes will retain their new values. - - During reconfiguration of a pool's size, the behavior of acquire() - depends on the getmode in effect when acquire() is called: - - * With mode POOL_GETMODE_FORCEGET, an acquire() call will wait until - the pool has been reconfigured. - - * With mode POOL_GETMODE__TIMEDWAIT, an acquire() call will try to - acquire a connection in the time specified by pool.wait_timeout and - return an error if the time taken exceeds that value. - - * With mode POOL_GETMODE_WAIT, an acquire() call will wait until after - the pool has been reconfigured and a connection is available. - - * With mode POOL_GETMODE_NOWAIT, if the number of busy connections is - less than the pool size, acquire() will return a new connection - after pool reconfiguration is complete. - - Closing connections with pool.release() or connection.close() will - wait until any pool size reconfiguration is complete. - - Closing the connection pool with pool.close() will wait until + Reconfigures various parameters of a connection pool. The pool size can + be altered with ``reconfigure()`` by passing values for + :data:`~ConnectionPool.min`, :data:`~ConnectionPool.max` or + :data:`~ConnectionPool.increment`. The + :data:`~ConnectionPool.getmode`, :data:`~ConnectionPool.timeout`, + :data:`~ConnectionPool.wait_timeout`, + :data:`~ConnectionPool.max_lifetime_session`, + :data:`~ConnectionPool.max_sessions_per_shard`, + :data:`~ConnectionPool.soda_metadata_cache`, + :data:`~ConnectionPool.stmtcachesize` and + :data:`~ConnectionPool.ping_interval` attributes can be set directly or + with ``reconfigure()``. + + All parameters are optional. Unspecified parameters will leave those + pool attributes unchanged. The parameters are processed in two stages. 
+ After any size change has been processed, reconfiguration on the other + parameters is done sequentially. If an error such as an invalid value + occurs when changing one attribute, then an exception will be generated + but any already changed attributes will retain their new values. + + During reconfiguration of a pool's size, the behavior of + :meth:`ConnectionPool.acquire()` depends on the ``getmode`` in effect + when ``acquire()`` is called: + + * With mode :data:`~oracledb.POOL_GETMODE_FORCEGET`, an ``acquire()`` + call will wait until the pool has been reconfigured. + + * With mode :data:`~oracledb.POOL_GETMODE_TIMEDWAIT`, an ``acquire()`` + call will try to acquire a connection in the time specified by + pool.wait_timeout and return an error if the time taken exceeds that + value. + + * With mode :data:`~oracledb.POOL_GETMODE_WAIT`, an ``acquire()`` call + will wait until after the pool has been reconfigured and a connection + is available. + + * With mode :data:`~oracledb.POOL_GETMODE_NOWAIT`, if the number of + busy connections is less than the pool size, ``acquire()`` will + return a new connection after pool reconfiguration is complete. + + Closing connections with :meth:`ConnectionPool.release()` or + :meth:`Connection.close()` will wait until any pool size reconfiguration is complete. + + Closing the connection pool with :meth:`ConnectionPool.close()` will + wait until reconfiguration is complete. """ if min is None: @@ -568,6 +556,42 @@ def reconfigure( if ping_interval is not None: self.ping_interval = ping_interval + def release( + self, + connection: "connection_module.Connection", + tag: Optional[str] = None, + ) -> None: + """ + Releases the connection back to the pool now, rather than whenever + __del__ is called. The connection will be unusable from this point + forward; an Error exception will be raised if any operation is + attempted with the connection. Any cursors or LOBs created by the + connection will also be marked unusable and an Error exception will be + raised if any operation is attempted with them. + + Internally, references to the connection are held by cursor objects, + LOB objects, etc. Once all of these references are released, the + connection itself will be released back to the pool automatically. + Either control references to these related objects carefully or + explicitly release connections back to the pool in order to ensure + sufficient resources are available. + + If the tag is not *None*, it is expected to be a string with name=value + pairs like "k1=v1;k2=v2" and will override the value in the property + :attr:`Connection.tag`. If either :attr:`Connection.tag` or the tag + parameter are not *None*, the connection will be retagged when it is + released back to the pool. + """ + self._verify_open() + if not isinstance(connection, connection_module.Connection): + message = "connection must be an instance of oracledb.Connection" + raise TypeError(message) + connection._verify_connected() + if tag is not None: + connection.tag = tag + self._impl.return_connection(connection._impl) + connection._impl = None + def _pool_factory( f: Callable[..., ConnectionPool], @@ -675,31 +699,22 @@ def acquire( supershardingkey: Optional[list] = None, ) -> "connection_module.AsyncConnection": """ - Acquire a connection from the pool and return it. - - If the pool is homogeneous, the user and password parameters cannot be - specified. If they are, an exception will be raised. 
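A brief sketch of the ``reconfigure()`` call described above follows (the new sizes and settings are arbitrary examples, and the connection details are placeholders)::

    import oracledb

    pool = oracledb.create_pool(
        user="scott", password="tiger", dsn="dbhost/orclpdb",
        min=1, max=4, increment=1,
        getmode=oracledb.POOL_GETMODE_WAIT,
    )

    # later: grow the pool and adjust a few settings in a single call
    pool.reconfigure(min=2, max=10, increment=2,
                     ping_interval=60, stmtcachesize=50)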
+ Acquires a connection from the pool and returns an :ref:`asynchronous + connection object `. - The cclass parameter, if specified, should be a string corresponding to - the connection class for database resident connection pooling (DRCP). + If the pool is :ref:`homogeneous `, the ``user`` and + ``password`` parameters cannot be specified. If they are, an exception + will be raised. - The purity parameter is expected to be one of PURITY_DEFAULT, - PURITY_NEW, or PURITY_SELF. + The ``cclass`` parameter, if specified, should be a string + corresponding to the connection class for :ref:`drcp`. - The tag parameter, if specified, is expected to be a string with - name=value pairs like “k1=v1;k2=v2” and will limit the connections that - can be returned from a pool unless the matchanytag parameter is - set to True. In that case connections with the specified tag will be - preferred over others, but if no such connections are available a - connection with a different tag may be returned instead. In any case, - untagged connections will always be returned if no connections with the - specified tag are available. Connections are tagged when they are - released back to the pool. + The ``purity`` parameter is expected to be one of + :data:`~oracledb.PURITY_NEW`, :data:`~oracledb.PURITY_SELF`, or + :data:`~oracledb.PURITY_DEFAULT`. - The shardingkey and supershardingkey parameters, if specified, are - expected to be a sequence of values which will be used to identify the - database shard to connect to. The key values can be strings, numbers, - bytes or dates. + The ``tag``, ``matchanytag``, ``shardingkey``, and ``supershardingkey`` + parameters are ignored in python-oracledb Thin mode. """ self._verify_open() @@ -718,11 +733,12 @@ def acquire( async def close(self, force: bool = False) -> None: """ - Close the pool now, rather than when the last reference to it is + Closes the pool now, rather than when the last reference to it is released, which makes it unusable for further work. If any connections have been acquired and not released back to the - pool, this method will fail unless the force parameter is set to True. + pool, this method will fail unless the ``force`` parameter is set to + *True*. """ self._verify_open() await self._impl.close(force) @@ -732,8 +748,8 @@ async def close(self, force: bool = False) -> None: async def drop(self, connection: "connection_module.Connection") -> None: """ - Drop the connection from the pool, which is useful if the connection is - no longer usable (such as when the database session is killed). + Drops the connection from the pool which is useful if the connection is + no longer usable (such as when the session is killed). """ self._verify_open() if not isinstance(connection, connection_module.AsyncConnection): @@ -751,25 +767,13 @@ async def release( tag: Optional[str] = None, ) -> None: """ - Release the connection back to the pool now, rather than whenever - __del__ is called. The connection will be unusable from this point - forward; an Error exception will be raised if any operation is - attempted with the connection. Any cursors or LOBs created by the - connection will also be marked unusable and an Error exception will be - raised if any operation is attempted with them. + Releases the connection back to the pool now. The connection will be + unusable from this point forward. An Error exception will be raised if + any operation is attempted with the connection. 
Any cursors or LOBs + created by the connection will also be marked unusable and an Error + exception will be raised if any operation is attempted with them. - Internally, references to the connection are held by cursor objects, - LOB objects, etc. Once all of these references are released, the - connection itself will be released back to the pool automatically. - Either control references to these related objects carefully or - explicitly release connections back to the pool in order to ensure - sufficient resources are available. - - If the tag is not None, it is expected to be a string with name=value - pairs like “k1=v1;k2=v2” and will override the value in the property - Connection.tag. If either Connection.tag or the tag parameter are not - None, the connection will be retagged when it is released back to the - pool. + The ``tag`` parameter is ignored in python-oracledb Thin mode. """ self._verify_open() if not isinstance(connection, connection_module.AsyncConnection): @@ -901,8 +905,11 @@ def get_pool( pool_alias: str, ) -> Union[ConnectionPool, AsyncConnectionPool, None]: """ - Returns the connection pool with the given alias from the python-oracledb - connection pool cache. If a pool with that alias does not exist, the value - "None" will be returned. + Returns a :ref:`ConnectionPool object ` from the python-oracledb + pool cache. The pool must have been previously created by passing the same + ``pool_alias`` value to :meth:`oracledb.create_pool()` or + :meth:`oracledb.create_pool_async()`. + + If a pool with the given name does not exist, *None* is returned. """ return named_pools.pools.get(pool_alias) From 7b27d75f2a96ed2293f8b43e4ece077181e926b0 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 5 Aug 2025 12:25:53 -0600 Subject: [PATCH 174/239] Update copyrights for files changed in PR #479. --- samples/bind_insert.py | 2 +- samples/bind_insert_async.py | 2 +- samples/cqn.py | 2 +- samples/json_blob.py | 2 +- samples/json_blob_async.py | 2 +- src/oracledb/driver_mode.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/samples/bind_insert.py b/samples/bind_insert.py index 712e858e..d3fea7b1 100644 --- a/samples/bind_insert.py +++ b/samples/bind_insert.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/bind_insert_async.py b/samples/bind_insert_async.py index 37f0acfb..6330e2e7 100644 --- a/samples/bind_insert_async.py +++ b/samples/bind_insert_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/cqn.py b/samples/cqn.py index d6399f75..6a34d576 100644 --- a/samples/cqn.py +++ b/samples/cqn.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2023, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. 
All rights reserved. # diff --git a/samples/json_blob.py b/samples/json_blob.py index e0d06658..433c1b31 100644 --- a/samples/json_blob.py +++ b/samples/json_blob.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/samples/json_blob_async.py b/samples/json_blob_async.py index 5c9fb45c..8f89fc58 100644 --- a/samples/json_blob_async.py +++ b/samples/json_blob_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/driver_mode.py b/src/oracledb/driver_mode.py index e1abd501..15c93dc9 100644 --- a/src/oracledb/driver_mode.py +++ b/src/oracledb/driver_mode.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2023 Oracle and/or its affiliates. +# Copyright (c) 2021, 2025 Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License From 054e9070d2eb182885ce99e804315175654b5c1d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 5 Aug 2025 16:29:27 -0600 Subject: [PATCH 175/239] Bump copyright on modified files. --- src/oracledb/constructors.py | 2 +- src/oracledb/dbobject.py | 2 +- src/oracledb/dsn.py | 2 +- src/oracledb/fetch_info.py | 2 +- src/oracledb/pipeline.py | 2 +- src/oracledb/soda.py | 2 +- src/oracledb/sparse_vector.py | 2 +- src/oracledb/subscr.py | 2 +- src/oracledb/var.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/src/oracledb/constructors.py b/src/oracledb/constructors.py index b2759a87..8a58245d 100644 --- a/src/oracledb/constructors.py +++ b/src/oracledb/constructors.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2023, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/dbobject.py b/src/oracledb/dbobject.py index da571c80..5b21eaa2 100644 --- a/src/oracledb/dbobject.py +++ b/src/oracledb/dbobject.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/dsn.py b/src/oracledb/dsn.py index 81f48a7e..13bb31cb 100644 --- a/src/oracledb/dsn.py +++ b/src/oracledb/dsn.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. 
+# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/fetch_info.py b/src/oracledb/fetch_info.py index 94f106d8..1d6c3f15 100644 --- a/src/oracledb/fetch_info.py +++ b/src/oracledb/fetch_info.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/pipeline.py b/src/oracledb/pipeline.py index 4422f86f..483ba344 100644 --- a/src/oracledb/pipeline.py +++ b/src/oracledb/pipeline.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/soda.py b/src/oracledb/soda.py index 1c5bffb2..eaa735d1 100644 --- a/src/oracledb/soda.py +++ b/src/oracledb/soda.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/sparse_vector.py b/src/oracledb/sparse_vector.py index 61d64012..13c68231 100644 --- a/src/oracledb/sparse_vector.py +++ b/src/oracledb/sparse_vector.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/subscr.py b/src/oracledb/subscr.py index bc685a36..e01ade42 100644 --- a/src/oracledb/subscr.py +++ b/src/oracledb/subscr.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2023, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License diff --git a/src/oracledb/var.py b/src/oracledb/var.py index 626d46d3..089524b5 100644 --- a/src/oracledb/var.py +++ b/src/oracledb/var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License From cd28b18f4a3f640f0b83e4dcc8c283a82f3bdb22 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 5 Aug 2025 16:29:54 -0600 Subject: [PATCH 176/239] Small tweaks to the links for the oracledb.defaults attributes. --- doc/src/api_manual/connect_params.rst | 47 +++++++++------- doc/src/api_manual/defaults.rst | 2 +- doc/src/api_manual/module.rst | 11 ++-- doc/src/release_notes.rst | 45 ++++++++------- doc/src/user_guide/appendix_c.rst | 11 ++-- doc/src/user_guide/asyncio.rst | 3 +- doc/src/user_guide/connection_handling.rst | 65 +++++++++++++--------- doc/src/user_guide/dataframes.rst | 4 +- doc/src/user_guide/initialization.rst | 6 +- doc/src/user_guide/sql_execution.rst | 11 ++-- doc/src/user_guide/troubleshooting.rst | 14 +++-- doc/src/user_guide/tuning.rst | 35 ++++++------ src/oracledb/connection.py | 27 +++++---- src/oracledb/defaults.py | 4 +- src/oracledb/pipeline.py | 5 +- src/oracledb/utils.py | 15 ++--- utils/templates/connection.py | 27 +++++---- 17 files changed, 190 insertions(+), 142 deletions(-) diff --git a/doc/src/api_manual/connect_params.rst b/doc/src/api_manual/connect_params.rst index 5fbb7b96..cecbf6c4 100644 --- a/doc/src/api_manual/connect_params.rst +++ b/doc/src/api_manual/connect_params.rst @@ -139,9 +139,10 @@ All properties are read only. .. autoproperty:: ConnectParams.driver_name This is an arbitrary value set by the user in the - :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.driver_name` - attribute which is the default value. This is the value shown in the - CLIENT_DRIVER column of the V$SESSION_CONNECT_INFO view. + :meth:`oracledb.ConnectParams()` method or the + :attr:`oracledb.defaults.driver_name ` attribute + which is the default value. This is the value shown in the CLIENT_DRIVER + column of the V$SESSION_CONNECT_INFO view. This attribute is supported in both python-oracledb Thin and Thick modes. @@ -193,9 +194,10 @@ All properties are read only. .. autoproperty:: ConnectParams.machine This is an arbitrary value set by the user in the - :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.machine` - attribute which is the default value. This is the value shown in the - MACHINE column of the V$SESSION view. + :meth:`oracledb.ConnectParams()` method or the + :attr:`oracledb.defaults.machine ` attribute which is the + default value. This is the value shown in the MACHINE column of the + V$SESSION view. This attribute is only supported in python-oracledb Thin mode. @@ -210,9 +212,10 @@ All properties are read only. .. autoproperty:: ConnectParams.osuser This is an arbitrary value set by the user in the - :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.osuser` - attribute which is the default value. This is the value shown in the OSUSER - column of the V$SESSION view. + :meth:`oracledb.ConnectParams()` method or the + :attr:`oracledb.defaults.osuser ` attribute which is the + default value. This is the value shown in the OSUSER column of the + V$SESSION view. This attribute is only supported in python-oracledb Thin mode. @@ -249,9 +252,10 @@ All properties are read only. .. autoproperty:: ConnectParams.program This is an arbitrary value set by the user in the - :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.program` - attribute which is the default value. This is the value shown in the - PROGRAM column of the V$SESSION view. 
+ :meth:`oracledb.ConnectParams()` method or the + :attr:`oracledb.defaults.program ` attribute which is the + default value. This is the value shown in the PROGRAM column of the + V$SESSION view. This attribute is supported in python-oracledb Thin mode. @@ -350,23 +354,24 @@ All properties are read only. The default value of this attribute was changed from *60.0* seconds to *20.0* seconds. -.. autoproperty:: ConnectParams.use_sni - - This attribute is supported in both python-oracledb Thin and Thick modes. - - .. versionadded:: 3.0.0 - .. autoproperty:: ConnectParams.terminal This is an arbitrary value set by the user in the - :meth:`oracledb.ConnectParams()` method or the :attr:`defaults.terminal` - attribute which is the default value. This is the value shown in the - TERMINAL column of the V$SESSION view. + :meth:`oracledb.ConnectParams()` method or the + :attr:`oracledb.defaults.terminal ` attribute which is + the default value. This is the value shown in the TERMINAL column of the + V$SESSION view. This attribute is only supported in python-oracledb Thin mode. .. versionadded:: 2.5.0 +.. autoproperty:: ConnectParams.use_sni + + This attribute is supported in both python-oracledb Thin and Thick modes. + + .. versionadded:: 3.0.0 + .. autoproperty:: ConnectParams.thick_mode_dsn_passthrough This attribute is only supported in python-oracledb Thick mode. diff --git a/doc/src/api_manual/defaults.rst b/doc/src/api_manual/defaults.rst index 9029edf0..04043ec7 100644 --- a/doc/src/api_manual/defaults.rst +++ b/doc/src/api_manual/defaults.rst @@ -50,7 +50,7 @@ Defaults Attributes heuristic. At completion of a call to :meth:`oracledb.init_oracle_client()` in - Thick mode, the value of :attr:`defaults.config_dir` may get changed + Thick mode, the value of :attr:`Defaults.config_dir` may get changed by python-oracledb. .. autoproperty:: Defaults.driver_name diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index cb1868a3..0b9a435f 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -199,8 +199,9 @@ Oracledb Methods .. versionchanged:: 3.0.0 - At completion of the method, the value of :attr:`defaults.config_dir` - may get changed by python-oracledb. + At completion of the method, the value of + :attr:`oracledb.defaults.config_dir ` may get + changed by python-oracledb. .. versionchanged:: 2.5.0 @@ -1905,7 +1906,8 @@ stored configuration information. Python-oracledb then uses this information to connect to Oracle Database. To use this plugin in python-oracledb Thick mode, you must set -:attr:`defaults.thick_mode_dsn_passthrough` to *False*. Alternatively use +:attr:`oracledb.defaults.thick_mode_dsn_passthrough +` to *False*. Alternatively use :meth:`ConnectParams.parse_connect_string()`, see :ref:`usingconnparams`. See :ref:`ociobjstorageprovider` for more information. @@ -1930,7 +1932,8 @@ stored configuration information. Python-oracledb then uses this information to connect to Oracle Database. To use this plugin in python-oracledb Thick mode, you must set -:attr:`defaults.thick_mode_dsn_passthrough` to *False*. Alternatively use +:attr:`oracledb.defaults.thick_mode_dsn_passthrough +` to *False*. Alternatively use :meth:`ConnectParams.parse_connect_string()`, see :ref:`usingconnparams`. See :ref:`azureappstorageprovider` for more information. 
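As an illustrative sketch only (the connect string scheme, bucket path, and credentials below are placeholders, and any provider plugin import that may be required is omitted), a Thick mode application might opt in to driver-side handling of a configuration-provider connect string like this:

.. code-block:: python

    import oracledb

    oracledb.init_oracle_client()  # enable Thick mode

    # Let python-oracledb, rather than the Oracle Client libraries, parse the
    # connect string; calling ConnectParams.parse_connect_string() explicitly
    # is the alternative mentioned above.
    oracledb.defaults.thick_mode_dsn_passthrough = False

    # Placeholder value: the exact scheme and path depend on the
    # configuration provider being used.
    config_dsn = "config-ociobject://objectstorage.example.com/n/mytenancy/b/mybucket/o/myapp"

    connection = oracledb.connect(
        user="hr", password="hr_password", dsn=config_dsn
    )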
diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index f547b2aa..32ed8de5 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -280,7 +280,9 @@ Common Changes - Added support for CLOB, BLOB, and RAW data types - Fixed support for BOOLEAN data type - Fixed bug when NUMBER data is fetched that does not have a precision or - scale specified and :attr:`defaults.fetch_decimals` is set to *True*. + scale specified and + :attr:`oracledb.defaults.fetch_decimals ` is set + to *True*. - More efficient processing when a significant amount of data is duplicated from one row to the next - Avoid memory allocation/free cycles for decimal data @@ -298,7 +300,8 @@ Common Changes #) An error message that links to :ref:`documentation ` on setting up a protocol hook function is now returned by default for LDAP and LDAPS URL connection strings in python-oracledb Thin mode, or when - :attr:`defaults.thick_mode_dsn_passthrough` is *False*. + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. #) Error ``DPY-2062: payload cannot be enqueued since it does not match the payload type supported by the queue`` is now raised when the payload of a message being enqueued is not supported by the queue. Previously, @@ -379,14 +382,16 @@ Thick Mode Changes ++++++++++++++++++ #) At successful completion of a call to :meth:`oracledb.init_oracle_client()`, - the value of :attr:`defaults.config_dir` may get set by python-oracledb in - some cases. For example it might be set to the configuration directory that - is relative to the loaded Oracle Client libraries. + the value of :attr:`oracledb.defaults.config_dir ` may + get set by python-oracledb in some cases. For example it might be set to + the configuration directory that is relative to the loaded Oracle Client + libraries. #) Connect string parsing and :ref:`tnsnames.ora ` file handling can be configured with the new parameter - :attr:`defaults.thick_mode_dsn_passthrough` which can be helpful for - application portability. When it is `False`, python-oracledb Thick mode - behaves similarly to Thin mode. + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` which can be helpful for application + portability. When it is `False`, python-oracledb Thick mode behaves + similarly to Thin mode. #) Fixed bug that caused :attr:`oracledb._Error.isrecoverable` to always be `False`. @@ -428,9 +433,9 @@ Common Changes :attr:`DbObjectAttribute.scale`, and :attr:`DbObjectAttribute.max_size` that provide additional metadata about :ref:`database object attributes `. -#) The attribute :attr:`defaults.config_dir` is now set to - ``$ORACLE_HOME/network/admin`` if the environment variable ``ORACLE_HOME`` - is set and ``TNS_ADMIN`` is *not* set. +#) The attribute :attr:`oracledb.defaults.config_dir ` is + now set to ``$ORACLE_HOME/network/admin`` if the environment variable + ``ORACLE_HOME`` is set and ``TNS_ADMIN`` is *not* set. #) All connect strings are parsed by the driver if the new parameter ``thick_mode_dsn_passthrough`` is set to *True*. Previously, only Thin mode parsed all connect strings and Thick mode passed the connect string @@ -451,8 +456,9 @@ Common Changes :meth:`oracledb.connect_async()`, :meth:`oracledb.create_pool()` and :meth:`oracledb.create_pool_async()` (`issue 438 `__). -#) Fix typing issues with setters for :attr:`defaults.fetch_lobs` and - :attr:`defaults.fetch_decimals` +#) Fix typing issues with setters for + :attr:`oracledb.defaults.fetch_lobs ` and + :attr:`oracledb.defaults.fetch_decimals ` (`issue 458 `__). 
#) Error ``DPY-2053: python-oracledb thin mode cannot be used because thick mode has already been enabled`` is now raised when attempting to use @@ -520,11 +526,11 @@ Thin Mode Changes :attr:`Connection.serial_num` that provide information about the session identifier and session serial number associated with a connection. #) Added attributes - :attr:`oracledb.defaults.driver_name `, - :attr:`oracledb.defaults.machine `, - :attr:`oracledb.defaults.osuser `, - :attr:`oracledb.defaults.program `, and - :attr:`oracledb.defaults.terminal ` to set + :attr:`oracledb.defaults.driver_name `, + :attr:`oracledb.defaults.machine `, + :attr:`oracledb.defaults.osuser `, + :attr:`oracledb.defaults.program `, and + :attr:`oracledb.defaults.terminal ` to set information about the driver name, machine name, operating system user, program name, and terminal name respectively. The ``driver_name``, ``machine``, ``osuser``, ``program``, and ``terminal`` parameters were also @@ -1677,7 +1683,8 @@ Thin Mode Changes #) Fixed bug with handling of redirect data returned by some SCAN listeners (`issue 39 `__). #) Fixed bug with re-execution of SQL that requires a define, such as occurs - when setting `oracledb.defaults.fetch_lobs` to the value `False` + when setting :attr:`oracledb.defaults.fetch_lobs ` to + the value `False` (`issue 41 `__). #) Fixed bug that prevented cursors from implicit results sets from being closed. diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index 82b5a1e7..d892797a 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -349,7 +349,8 @@ need to be made in addition to the common :ref:`commonupgrade`: python-oracledb Thick mode. 2. If the ``config_dir`` parameter of :func:`~oracledb.init_oracle_client` had - been used, then set the new :attr:`defaults.config_dir` attribute to the + been used, then set the new + :attr:`oracledb.defaults.config_dir ` attribute to the desired value or set the ``config_dir`` parameter in your connection or pool creation method call. For example: @@ -368,8 +369,9 @@ need to be made in addition to the common :ref:`commonupgrade`: Also, see :ref:`sqlnetclientconfig`. 3. If the ``driver_name`` parameter of :func:`~oracledb.init_oracle_client` had - been used, then set the new :attr:`defaults.driver_name` attribute to the - desired value or set the ``driver_name`` parameter when connecting. The + been used, then set the new + :attr:`oracledb.defaults.driver_name ` attribute to + the desired value or set the ``driver_name`` parameter when connecting. The convention for this parameter is to separate the product name from the product version by a colon and single blank characters. For example: @@ -390,7 +392,8 @@ need to be made in addition to the common :ref:`commonupgrade`: 5. To connect using a :ref:`TNS Alias ` from a ``tnsnames.ora`` file (see :ref:`optnetfiles`) in python-oracledb Thin mode, you should explicitly set the environment variable ``TNS_ADMIN`` to the directory - containing the file, or set :attr:`defaults.config_dir`, or set the + containing the file, or set + :attr:`oracledb.defaults.config_dir `, or set the ``config_dir`` parameter when connecting. 
A ``tnsnames.ora`` file in a "default" location such as the Instant Client diff --git a/doc/src/user_guide/asyncio.rst b/doc/src/user_guide/asyncio.rst index 0d261b0a..afb20fee 100644 --- a/doc/src/user_guide/asyncio.rst +++ b/doc/src/user_guide/asyncio.rst @@ -445,7 +445,8 @@ To limit the time for a pipeline, use an `asyncio timeout from Python 3.11. To tune fetching of rows with :meth:`Pipeline.add_fetchall()`, set -:attr:`defaults.arraysize` or pass the ``arraysize`` parameter. +:attr:`oracledb.defaults.arraysize ` or pass the +``arraysize`` parameter. Pipelining Examples +++++++++++++++++++ diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index ad4a4db4..3de344fe 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -480,11 +480,13 @@ In python-oracledb Thin mode, an additional :ref:`connection protocol hook function ` is required to handle this connection protocol, see :ref:`ldapconnections`. A connection protocol hook function is also required in python-oracledb Thick mode if -:attr:`defaults.thick_mode_dsn_passthrough` is *False*. +:attr:`oracledb.defaults.thick_mode_dsn_passthrough +` is *False*. To use LDAP URLs in python-oracledb Thick mode applications when -:attr:`defaults.thick_mode_dsn_passthrough` is *True*, the Oracle Client -libraries must be 23.4, or later. +:attr:`oracledb.defaults.thick_mode_dsn_passthrough +` is *True*, the Oracle Client libraries +must be 23.4, or later. .. _configproviderurl: @@ -631,11 +633,13 @@ you in python-oracledb, then you can alter the connection string to include a protocol such as ``tcp://hostname``, or a port number such as ``hostname:1521``. -In python-oracledb Thick mode, when :attr:`defaults.thick_mode_dsn_passthrough` -is *False*, any ``DESCRIPTION``, ``CONNECT_DATA`` and ``SECURITY`` parameters -of a full connect descriptor that are unrecognized by python-oracledb are -passed to the database unchanged. Any Easy Connect parameters that are not -known to python-oracledb are discarded and not passed to the database. +In python-oracledb Thick mode, when +:attr:`oracledb.defaults.thick_mode_dsn_passthrough +` is *False*, any ``DESCRIPTION``, +``CONNECT_DATA`` and ``SECURITY`` parameters of a full connect descriptor that +are unrecognized by python-oracledb are passed to the database unchanged. Any +Easy Connect parameters that are not known to python-oracledb are discarded and +not passed to the database. .. _pyoparams: @@ -922,16 +926,17 @@ The following configuration providers are supported by python-oracledb: To use python-oracledb :ref:`Centralized Configuration Provider ` functionality in Thick mode, you should set -:attr:`defaults.thick_mode_dsn_passthrough` to *False*. Alternatively use +:attr:`oracledb.defaults.thick_mode_dsn_passthrough +` to *False*. Alternatively use :meth:`ConnectParams.parse_connect_string()`, see :ref:`usingconnparams`. -Note: In Thick mode, when :attr:`defaults.thick_mode_dsn_passthrough` is -*True*, it is the Oracle Client libraries that access the configuration -provider when python-oracledb connection or pool creation methods are -invoked. Any python-oracledb parameter section will be ignored. Any Oracle -Client Interface parameter section should be *removed* from the configuration -because its values may be different to those that python-oracledb assumes, and -will cause undefined behavior. 
+Note: In Thick mode, when :attr:`oracledb.defaults.thick_mode_dsn_passthrough +` is *True*, it is the Oracle Client +libraries that access the configuration provider when python-oracledb +connection or pool creation methods are invoked. Any python-oracledb parameter +section will be ignored. Any Oracle Client Interface parameter section should +be *removed* from the configuration because its values may be different to +those that python-oracledb assumes, and will cause undefined behavior. **Precedence of Attributes** @@ -1036,11 +1041,20 @@ The elements of the ``dsn`` parameter are detailed in the table below. * - Parameter - Description * - ``config-file`` - - Indicates that the centralized configuration provider is a file in your local system. + - Indicates that the centralized configuration provider is a file in your + local system. * - - - The file path and name of the JSON file that contains the configuration information. For relative paths, python-oracledb will use the connection or pool creation ``config_dir`` parameter, or :attr:`defaults.config_dir` value, to create an absolute path. + - The file path and name of the JSON file that contains the configuration + information. For relative paths, python-oracledb will use the + connection or pool creation ``config_dir`` parameter, or + :attr:`oracledb.defaults.config_dir ` value, to + create an absolute path. * - ``key`` - - The connection key name used to identify a specific configuration. If this parameter is specified, the file is assumed to contain multiple configurations that are indexed by the key value. If not specified, the file is assumed to contain a single configuration. See the example further below. + - The connection key name used to identify a specific configuration. If + this parameter is specified, the file is assumed to contain multiple + configurations that are indexed by the key value. If not specified, the + file is assumed to contain a single configuration. See the example + further below. **File Configuration Provider Examples** @@ -1696,9 +1710,9 @@ hook function is expected to construct valid connection details, which python-oracledb will use to complete the connection or pool creation. You can also make use of a protocol hook function in python-oracledb Thick mode -connection calls by setting :attr:`defaults.thick_mode_dsn_passthrough` to -*False*. Alternatively use :meth:`ConnectParams.parse_connect_string()`, see -:ref:`usingconnparams`. +connection calls by setting :attr:`oracledb.defaults.thick_mode_dsn_passthrough +` to *False*. Alternatively use +:meth:`ConnectParams.parse_connect_string()`, see :ref:`usingconnparams`. For example, the following hook function handles connection strings prefixed with the ``tcp://`` protocol. When :func:`oracledb.connect()` is called, the @@ -1932,9 +1946,10 @@ connect with an LDAP URL. For example: connection = oracledb.connect(user="scott", password=pw, dsn=ldapurl) To use an LDAP URL in python-oracledb Thick mode when -:attr:`defaults.thick_mode_dsn_passthrough` is *False*, a connection hook -function is required as shown below for Thin mode. This lets LDAP URLs be -utilized when python-oracledb uses any supported Oracle Client library version. +:attr:`oracledb.defaults.thick_mode_dsn_passthrough +` is *False*, a connection hook function +is required as shown below for Thin mode. This lets LDAP URLs be utilized when +python-oracledb uses any supported Oracle Client library version. 
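A minimal hook sketch follows (the connect descriptor is hard-coded purely for illustration; a real hook would query the LDAP server named in the URL before updating the parameters):

.. code-block:: python

    import oracledb

    def ldap_hook(protocol, protocol_arg, params):
        # A real hook would resolve protocol_arg against LDAP; a fixed
        # connect descriptor is used here for illustration only.
        descriptor = (
            "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=dbhost.example.com)"
            "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=orclpdb1)))"
        )
        params.parse_connect_string(descriptor)

    oracledb.register_protocol("ldap", ldap_hook)
    oracledb.register_protocol("ldaps", ldap_hook)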
**Python-oracledb Thin Mode LDAP URLs** diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index bcbe5e02..21f05554 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -161,8 +161,8 @@ When converting Oracle Database NUMBERs: querying an expression that results in a number without precision or scale, then the Arrow data type will be DOUBLE. -- If :attr:`defaults.fetch_decimals` is set to *True*, then the Arrow data - type is DECIMAL128. +- If :attr:`oracledb.defaults.fetch_decimals ` is set + to *True*, then the Arrow data type is DECIMAL128. - If the column has been created with a scale of *0*, and a precision value that is less than or equal to *18*, then the Arrow data type is INT64. diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index 1e2ac2ee..c6b303ea 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -487,9 +487,9 @@ one wins): params = oracledb.ConnectParams(config_dir="/opt/oracle/config") connection = oracledb.connect(user="hr", password=userpwd, dsn="orclpdb", params=params) -- the value of :attr:`defaults.config_dir`, which may have been set explicitly - to a directory, or internally set during initialization to ``$TNS_ADMIN`` or - ``$ORACLE_HOME/network/admin``. +- the value of :attr:`oracledb.defaults.config_dir `, + which may have been set explicitly to a directory, or internally set during + initialization to ``$TNS_ADMIN`` or ``$ORACLE_HOME/network/admin``. .. code-block:: python diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 29bade82..1964e79b 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -562,9 +562,9 @@ Oracle Database uses decimal numbers and these cannot be converted seamlessly to binary number representations like Python floats. In addition, the range of Oracle numbers exceeds that of floating point numbers. Python has decimal objects which do not have these limitations. In python-oracledb you can set -:attr:`defaults.fetch_decimals` so that Decimals are returned to the -application, ensuring that numeric precision is not lost when fetching certain -numbers. +:attr:`oracledb.defaults.fetch_decimals ` so that +Decimals are returned to the application, ensuring that numeric precision is +not lost when fetching certain numbers. The following code sample demonstrates the issue: @@ -594,8 +594,9 @@ This displays ``7.1 * 3 = 21.3`` See `samples/return_numbers_as_decimals.py `__ -An equivalent, longer, older coding idiom to :attr:`defaults.fetch_decimals` is -to use an :ref:`output type handler ` do the conversion. +An equivalent, longer, older coding idiom to setting +:attr:`oracledb.defaults.fetch_decimals ` is to use an +:ref:`output type handler ` to do the conversion. .. code-block:: python diff --git a/doc/src/user_guide/troubleshooting.rst b/doc/src/user_guide/troubleshooting.rst index c651743f..d1ede0d3 100644 --- a/doc/src/user_guide/troubleshooting.rst +++ b/doc/src/user_guide/troubleshooting.rst @@ -443,18 +443,20 @@ syntax. Perform one of the following: c = oracledb.connect(user="hr", password=userpw, dsn="(DESCRIPTION=(ADDRESS=(...))") -- Review the :attr:`defaults.config_dir` documentation for the heuristics used - by python-oracledb to automatically locate :ref:`tnsnames.ora - `. 
Ensure that your file is in an expected location, that the - file is readable by Python, and that any necessary environment variables such - as ``TNS_ADMIN`` are accessible by the Python process. +- Review the :attr:`oracledb.defaults.config_dir ` + documentation for the heuristics used by python-oracledb to automatically + locate :ref:`tnsnames.ora `. Ensure that your file is in an + expected location, that the file is readable by Python, and that any + necessary environment variables such as ``TNS_ADMIN`` are accessible by the + Python process. - If you have problems with the heuristics, then you can explicitly specify the location of :ref:`tnsnames.ora `. For example, if the file is at ``/opt/myconfigdir/tnsnames.ora``, then: - In python-oracledb's default Thin mode, or when - :attr:`defaults.thick_mode_dsn_passthrough` is *False*, you can use: + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*, you can use: .. code-block:: python diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index d04885d5..da0c7dd1 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -254,8 +254,9 @@ The number of round-trips will be the same regardless of which Application Default Prefetchrows and Arraysize Values +++++++++++++++++++++++++++++++++++++++++++++++++++++ -Application-wide defaults can be set using :attr:`defaults.prefetchrows` and -:attr:`defaults.arraysize`, for example: +Application-wide defaults can be set using +:attr:`oracledb.defaults.prefetchrows ` and +:attr:`oracledb.defaults.arraysize `, for example: .. code-block:: python @@ -356,8 +357,9 @@ When fetching :ref:`data frames ` with tuning of data transfer across the network is controlled by the respective methods ``arraysize`` or ``size`` parameters. -Any :attr:`defaults.prefetchrows` value is ignored since these methods always -set the internal prefetch size to the relevant ``arraysize`` or ``size`` value. +Any :attr:`oracledb.defaults.prefetchrows ` value is +ignored since these methods always set the internal prefetch size to the +relevant ``arraysize`` or ``size`` value. Parallelizing Data Fetches from a Single Table ---------------------------------------------- @@ -509,23 +511,24 @@ The python-oracledb Thick mode uses `Oracle Call Interface statement caching Each standalone or pooled connection has its own cache of statements with a default size of 20. The default size of the statement cache can be changed -using the :attr:`defaults.stmtcachesize` attribute. The size can be set when -creating connection pools or standalone connections. In general, set the -statement cache size to the size of the working set of statements being -executed by the application. To manually tune the cache, monitor the general -application load and the `Automatic Workload Repository `__ (AWR) "bytes sent via SQL*Net to client" values. The latter -statistic should benefit from not shipping statement metadata to -python-oracledb. Adjust the statement cache size to your satisfaction. With -Oracle Database 12c (or later), the Thick mode statement cache size can be -automatically tuned using an :ref:`oraaccess.xml ` file. +using the :attr:`oracledb.defaults.stmtcachesize ` +attribute. The size can be set when creating connection pools or standalone +connections. In general, set the statement cache size to the size of the +working set of statements being executed by the application. 
To manually tune +the cache, monitor the general application load and the `Automatic Workload +Repository `__ (AWR) "bytes sent via SQL*Net +to client" values. The latter statistic should benefit from not shipping +statement metadata to python-oracledb. Adjust the statement cache size to your +satisfaction. With Oracle Database 12c (or later), the Thick mode statement +cache size can be automatically tuned using an +:ref:`oraaccess.xml ` file. Setting the Statement Cache --------------------------- The statement cache size can be set globally with -:attr:`defaults.stmtcachesize`: +:attr:`oracledb.defaults.stmtcachesize `: .. code-block:: python diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index d1c0d1c3..0975afd6 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1084,9 +1084,9 @@ def fetch_df_all( The ``arraysize`` parameter can be specified to tune performance of fetching data across the network. It defaults to - :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s - :attr:`Cursor.prefetchrows` size is always set to the value of the - explicit or default ``arraysize`` parameter value. + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set + to the value of the explicit or default ``arraysize`` parameter value. Any LOB fetched must be less than 1 GB. """ @@ -1116,8 +1116,9 @@ def fetch_df_batches( match the bind variable placeholder names in ``statement``. The ``size`` parameter controls the number of records fetched in each - batch. It defaults to :attr:`defaults.arraysize`. Internally, the - ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + batch. It defaults to + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. @@ -2246,7 +2247,8 @@ async def fetchall( """ Executes a query and returns all of the rows. - The default value for ``arraysize`` is :attr:`defaults.arraysize`. + The default value for ``arraysize`` is + :attr:`oracledb.defaults.arraysize `. Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set to the value of the explicit or default ``arraysize`` parameter value. @@ -2280,9 +2282,9 @@ async def fetch_df_all( The ``arraysize`` parameter can be specified to tune performance of fetching data across the network. It defaults to - :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s - :attr:`Cursor.prefetchrows` size is always set to the value of the - explicit or default ``arraysize`` parameter value. + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set + to the value of the explicit or default ``arraysize`` parameter value. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -2310,8 +2312,9 @@ async def fetch_df_batches( match the bind variable placeholder names in ``statement``. The ``size`` parameter controls the number of records fetched in each - batch. It defaults to :attr:`defaults.arraysize`. Internally, the - ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + batch. It defaults to + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. 
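        As an illustrative sketch only (the SQL text, column names, and batch
        size are placeholders), the batches might be consumed with an
        ``async for`` loop::

            async for batch in connection.fetch_df_batches(
                "select id, name from mytable", size=1000
            ):
                ...  # each batch holds at most ``size`` rows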
""" @@ -2338,7 +2341,7 @@ async def fetchmany( Executes a query and returns up to the specified number of rows. The default value for ``num_rows`` is the value of - :attr:`defaults.arraysize`. + :attr:`oracledb.defaults.arraysize `. Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set to the value of the explicit or default ``num_rows`` parameter, diff --git a/src/oracledb/defaults.py b/src/oracledb/defaults.py index be0e85f5..9fdd9182 100644 --- a/src/oracledb/defaults.py +++ b/src/oracledb/defaults.py @@ -69,7 +69,7 @@ def config_dir(self) -> str: Thin mode. This attribute is used in python-oracledb Thin mode. It is also used in - Thick mode if :attr:`defaults.thick_mode_dsn_passthrough` is *False*. + Thick mode if :attr:`Defaults.thick_mode_dsn_passthrough` is *False*. """ return self._impl.config_dir @@ -263,7 +263,7 @@ def driver_name(self, value: str): self._impl.driver_name = value @property - def thick_mode_dsn_passthrough(self) -> str: + def thick_mode_dsn_passthrough(self) -> bool: """ This read-write attribute determines whether :ref:`connection strings ` passed as the ``dsn`` parameter to diff --git a/src/oracledb/pipeline.py b/src/oracledb/pipeline.py index 483ba344..03f71a59 100644 --- a/src/oracledb/pipeline.py +++ b/src/oracledb/pipeline.py @@ -335,7 +335,8 @@ def add_fetchall( :attr:`~PipelineOpResult.rows` attribute populated with the list of rows returned by the query. - The default value for ``arraysize`` is :attr:`defaults.arraysize`. + The default value for ``arraysize`` is + :attr:`oracledb.defaults.arraysize `. Internally, this operation's :attr:`Cursor.prefetchrows` size is set to the value of the explicit or default ``arraysize`` parameter value. @@ -369,7 +370,7 @@ def add_fetchmany( rows returned by the query. The default value for ``num_rows`` is the value of - :attr:`defaults.arraysize`. + :attr:`oracledb.defaults.arraysize `. Internally, this operation's :attr:`Cursor.prefetchrows` size is set to the value of the explicit or default ``num_rows`` parameter, allowing diff --git a/src/oracledb/utils.py b/src/oracledb/utils.py index fd0c49ae..ff976cfa 100644 --- a/src/oracledb/utils.py +++ b/src/oracledb/utils.py @@ -157,15 +157,16 @@ def init_oracle_client( :ref:`otherinit`. At successful completion of a call to ``oracledb.init_oracle_client()``, - the attribute :attr:`defaults.config_dir` will be set as determined below - (first one wins): + the attribute :attr:`oracledb.defaults.config_dir ` + will be set as determined below (first one wins): - the value of the ``oracledb.init_oracle_client()`` parameter ``config_dir``, if one was passed. - - the value of :attr:`defaults.config_dir` if it has one. I.e. - :attr:`defaults.config_dir` remains unchanged after - ``oracledb.init_oracle_client()`` completes. + - the value of :attr:`oracledb.defaults.config_dir ` + if it has one. i.e. + :attr:`oracledb.defaults.config_dir ` remains + unchanged after ``oracledb.init_oracle_client()`` completes. - the value of the environment variable ``$TNS_ADMIN``, if it is set. @@ -175,8 +176,8 @@ def init_oracle_client( - the directory of the loaded Oracle Client library, appended with ``network/admin``. Note this directory is not determinable on AIX. - - otherwise the value *None* is used. (Leaving :attr:`defaults.config_dir` - unchanged). + - otherwise the value *None* is used. (Leaving + :attr:`oracledb.defaults.config_dir ` unchanged). 
""" thick_impl.init_oracle_client(lib_dir, config_dir, error_url, driver_name) diff --git a/utils/templates/connection.py b/utils/templates/connection.py index bfbaff5f..34e259dd 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -1082,9 +1082,9 @@ def fetch_df_all( The ``arraysize`` parameter can be specified to tune performance of fetching data across the network. It defaults to - :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s - :attr:`Cursor.prefetchrows` size is always set to the value of the - explicit or default ``arraysize`` parameter value. + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set + to the value of the explicit or default ``arraysize`` parameter value. Any LOB fetched must be less than 1 GB. """ @@ -1114,8 +1114,9 @@ def fetch_df_batches( match the bind variable placeholder names in ``statement``. The ``size`` parameter controls the number of records fetched in each - batch. It defaults to :attr:`defaults.arraysize`. Internally, the - ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + batch. It defaults to + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. @@ -1992,7 +1993,8 @@ async def fetchall( """ Executes a query and returns all of the rows. - The default value for ``arraysize`` is :attr:`defaults.arraysize`. + The default value for ``arraysize`` is + :attr:`oracledb.defaults.arraysize `. Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set to the value of the explicit or default ``arraysize`` parameter value. @@ -2026,9 +2028,9 @@ async def fetch_df_all( The ``arraysize`` parameter can be specified to tune performance of fetching data across the network. It defaults to - :attr:`defaults.arraysize`. Internally, the ``fetch_df_all()``'s - :attr:`Cursor.prefetchrows` size is always set to the value of the - explicit or default ``arraysize`` parameter value. + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set + to the value of the explicit or default ``arraysize`` parameter value. """ cursor = self.cursor() cursor._impl.fetching_arrow = True @@ -2056,8 +2058,9 @@ async def fetch_df_batches( match the bind variable placeholder names in ``statement``. The ``size`` parameter controls the number of records fetched in each - batch. It defaults to :attr:`defaults.arraysize`. Internally, the - ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and + batch. It defaults to + :attr:`oracledb.defaults.arraysize `. Internally, + the ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. """ @@ -2084,7 +2087,7 @@ async def fetchmany( Executes a query and returns up to the specified number of rows. The default value for ``num_rows`` is the value of - :attr:`defaults.arraysize`. + :attr:`oracledb.defaults.arraysize `. 
Internally, this method's :attr:`AsyncCursor.prefetchrows` size is set to the value of the explicit or default ``num_rows`` parameter, From 857fafa345eba377405f205d38ee05eceef18eee Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 5 Aug 2025 16:30:41 -0600 Subject: [PATCH 177/239] Ensure defaults are included on a separate line in order to avoid having a trailing colon, which is treated specially with RST. --- src/oracledb/connect_params.py | 136 +++++++++++------- src/oracledb/connection.py | 242 +++++++++++++++++++++------------ src/oracledb/pool.py | 163 ++++++++++++++-------- src/oracledb/pool_params.py | 167 +++++++++++++++-------- utils/build_from_template.py | 16 ++- 5 files changed, 465 insertions(+), 259 deletions(-) diff --git a/src/oracledb/connect_params.py b/src/oracledb/connect_params.py index e0bfde82..ff56ddcd 100644 --- a/src/oracledb/connect_params.py +++ b/src/oracledb/connect_params.py @@ -114,20 +114,24 @@ def __init__( All parameters are optional. A brief description of each parameter follows: - - user: the name of the user to connect to (default: None) + - user: the name of the user to connect to + (default: None) - proxy_user: the name of the proxy user to connect to. If this value is not specified, it will be parsed out of user if user is in the - form "user[proxy_user]" (default: None) + form "user[proxy_user]" + (default: None) - - password: the password for the user (default: None) + - password: the password for the user + (default: None) - newpassword: the new password for the user. The new password will take effect immediately upon a successful connection to the database (default: None) - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode (default: None) + encrypted. This value is only used in thin mode + (default: None) - access_token: expected to be a string or a 2-tuple or a callable. If it is a string, it specifies an Azure AD OAuth2 token used for Open @@ -138,10 +142,12 @@ def __init__( either a string or a 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is useful when the pool needs to expand and create new connections but the current authentication token has - expired (default: None) + expired + (default: None) - host: the name or IP address of the machine hosting the database or - the database listener (default: None) + the database listener + (default: None) - port: the port number on which the database listener is listening (default: 1521) @@ -151,58 +157,71 @@ def __init__( (default: "tcp") - https_proxy: the name or IP address of a proxy host to use for - tunneling secure connections (default: None) + tunneling secure connections + (default: None) - https_proxy_port: the port on which to communicate with the proxy - host (default: 0) + host + (default: 0) - - service_name: the service name of the database (default: None) + - service_name: the service name of the database + (default: None) - - instance_name: the instance name of the database (default: None) + - instance_name: the instance name of the database + (default: None) - sid: the system identifier (SID) of the database. Note using a - service_name instead is recommended (default: None) + service_name instead is recommended + (default: None) - server_type: the type of server connection that should be established. 
If specified, it should be one of "dedicated", "shared" - or "pooled" (default: None) + or "pooled" + (default: None) - cclass: connection class to use for Database Resident Connection - Pooling (DRCP) (default: None) + Pooling (DRCP) + (default: None) - purity: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - expire_time: an integer indicating the number of minutes between the sending of keepalive probes. If this parameter is set to a value - greater than zero it enables keepalive (default: 0) + greater than zero it enables keepalive + (default: 0) - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated (default: 0) + retried before the attempt is terminated + (default: 0) - retry_delay: the number of seconds to wait before making a new - connection attempt (default: 1) + connection attempt + (default: 1) - tcp_connect_timeout: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host (default: - 20.0) + to wait for establishing a connection to the database host + (default: 20.0) - ssl_server_dn_match: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching - is performed instead (default: True) + is performed instead + (default: True) - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the - hostname will be used (default: None) + hostname will be used + (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. In thick mode this must be the directory containing - the file cwallet.sso (default: None) + the file cwallet.sso + (default: None) - events: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous @@ -210,27 +229,33 @@ def __init__( (default: False) - externalauth: a boolean indicating whether to use external - authentication (default: False) + authentication + (default: False) - mode: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) + oracledb.AUTH_MODE_SYSDBA + (default: oracledb.AUTH_MODE_DEFAULT) - disable_oob: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality (default: False) + Windows which does not support this functionality + (default: False) - stmtcachesize: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - edition: edition to use for the connection. This parameter cannot be - used simultaneously with the cclass parameter (default: None) + used simultaneously with the cclass parameter + (default: None) - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode (default: None) + pool. This value is only used in thick mode + (default: None) - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. 
This value is only used in - thick mode (default: False) + thick mode + (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. @@ -239,36 +264,43 @@ def __init__( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the - tuple should be a string (default: None) + tuple should be a string + (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick - mode (default: None) + mode + (default: None) - supershardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode (default: None) + thick mode + (default: None) - debug_jdwp: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP - environment variable (default: None) + environment variable + (default: None) - connection_id_prefix: an application specific prefix that is added to - the connection identifier used for tracing (default: None) + the connection identifier used for tracing + (default: None) - ssl_context: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key or any certificates found in a separately supplied wallet. This parameter should only be specified if the default SSLContext object - cannot be used (default: None) + cannot be used + (default: None) - sdu: the requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. The SDU size that will actually be used is negotiated down to the lower of this value and - the database network SDU configuration value (default: 8192) + the database network SDU configuration value + (default: 8192) - pool_boundary: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. @@ -278,27 +310,32 @@ def __init__( - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. 
Please - refer to the ADB-S documentation for more information (default: - False) + refer to the ADB-S documentation for more information + (default: False) - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or - ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: - None) + ssl.TLSVersion.TLSv1_3 indicating which TLS version to use + (default: None) - program: the name of the executable program or application connected - to the Oracle Database (default: oracledb.defaults.program) + to the Oracle Database + (default: oracledb.defaults.program) - machine: the machine name of the client connecting to the Oracle - Database (default: oracledb.defaults.machine) + Database + (default: oracledb.defaults.machine) - terminal: the terminal identifier from which the connection - originates (default: oracledb.defaults.terminal) + originates + (default: oracledb.defaults.terminal) - osuser: the operating system user that initiates the database - connection (default: oracledb.defaults.osuser) + connection + (default: oracledb.defaults.osuser) - driver_name: the driver name used by the client to connect to the - Oracle Database (default: oracledb.defaults.driver_name) + Oracle Database + (default: oracledb.defaults.driver_name) - use_sni: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required @@ -309,19 +346,22 @@ def __init__( parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora - configuration file (default: - oracledb.defaults.thick_mode_dsn_passthrough) + configuration file + (default: oracledb.defaults.thick_mode_dsn_passthrough) - extra_auth_params: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as - the Azure and OCI cloud-native authentication plugins (default: None) + the Azure and OCI cloud-native authentication plugins + (default: None) - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher (default: None) + Oracle Database 23.4 or higher + (default: None) - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with - extreme caution (default: 0) + extreme caution + (default: 0) """ pass diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 0975afd6..a233ec88 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1731,20 +1731,24 @@ def connect( The following parameters are all optional. A brief description of each parameter follows: - - user: the name of the user to connect to (default: None) + - user: the name of the user to connect to + (default: None) - proxy_user: the name of the proxy user to connect to. If this value is not specified, it will be parsed out of user if user is in the form - "user[proxy_user]" (default: None) + "user[proxy_user]" + (default: None) - - password: the password for the user (default: None) + - password: the password for the user + (default: None) - newpassword: the new password for the user. The new password will take - effect immediately upon a successful connection to the database (default: - None) + effect immediately upon a successful connection to the database + (default: None) - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. 
This value is only used in thin mode (default: None) + encrypted. This value is only used in thin mode + (default: None) - access_token: expected to be a string or a 2-tuple or a callable. If it is a string, it specifies an Azure AD OAuth2 token used for Open @@ -1754,73 +1758,88 @@ def connect( authentication. If it is a callable, it returns either a string or a 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is useful when the pool needs to expand and create new connections but the - current authentication token has expired (default: None) + current authentication token has expired + (default: None) - host: the name or IP address of the machine hosting the database or the - database listener (default: None) + database listener + (default: None) - port: the port number on which the database listener is listening (default: 1521) - protocol: one of the strings "tcp" or "tcps" indicating whether to use - unencrypted network traffic or encrypted network traffic (TLS) (default: - "tcp") + unencrypted network traffic or encrypted network traffic (TLS) + (default: "tcp") - https_proxy: the name or IP address of a proxy host to use for tunneling - secure connections (default: None) + secure connections + (default: None) - https_proxy_port: the port on which to communicate with the proxy host (default: 0) - - service_name: the service name of the database (default: None) + - service_name: the service name of the database + (default: None) - - instance_name: the instance name of the database (default: None) + - instance_name: the instance name of the database + (default: None) - sid: the system identifier (SID) of the database. Note using a - service_name instead is recommended (default: None) + service_name instead is recommended + (default: None) - server_type: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" or "pooled" (default: None) - cclass: connection class to use for Database Resident Connection Pooling - (DRCP) (default: None) + (DRCP) + (default: None) - purity: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - expire_time: an integer indicating the number of minutes between the sending of keepalive probes. If this parameter is set to a value greater - than zero it enables keepalive (default: 0) + than zero it enables keepalive + (default: 0) - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated (default: 0) + retried before the attempt is terminated + (default: 0) - retry_delay: the number of seconds to wait before making a new connection - attempt (default: 1) + attempt + (default: 1) - tcp_connect_timeout: a float indicating the maximum number of seconds to - wait for establishing a connection to the database host (default: 20.0) + wait for establishing a connection to the database host + (default: 20.0) - ssl_server_dn_match: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is - performed instead (default: True) + performed instead + (default: True) - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. 
Otherwise the hostname will be used (default: None) + for any verfication. Otherwise the hostname will be used + (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. In thick mode this must be the directory containing the file - cwallet.sso (default: None) + cwallet.sso + (default: None) - events: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous query - notification and high availability event notifications (default: False) + notification and high availability event notifications + (default: False) - externalauth: a boolean indicating whether to use external authentication (default: False) @@ -1830,29 +1849,34 @@ def connect( - disable_oob: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality (default: False) + Windows which does not support this functionality + (default: False) - stmtcachesize: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - edition: edition to use for the connection. This parameter cannot be used - simultaneously with the cclass parameter (default: None) + simultaneously with the cclass parameter + (default: None) - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode (default: None) + pool. This value is only used in thick mode + (default: None) - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode (default: False) + mode + (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use - the config_dir parameter of init_oracle_client() (default: - oracledb.defaults.config_dir) + the config_dir parameter of init_oracle_client() + (default: oracledb.defaults.config_dir) - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string (default: None) + should be a string + (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode @@ -1860,15 +1884,18 @@ def connect( - supershardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode (default: None) + thick mode + (default: None) - debug_jdwp: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable (default: None) + variable + (default: None) - connection_id_prefix: an application specific prefix that is added to the - connection identifier used for tracing (default: None) + connection identifier used for tracing + (default: None) - ssl_context: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key @@ -1881,23 +1908,27 @@ def connect( Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. 
The SDU size that will actually be used is negotiated down to the lower of this value and the - database network SDU configuration value (default: 8192) + database network SDU configuration value + (default: 8192) - pool_boundary: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. This requires - the use of DRCP with Oracle Database 23.4 or higher (default: None) + the use of DRCP with Oracle Database 23.4 or higher + (default: None) - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the - ADB-S documentation for more information (default: False) + ADB-S documentation for more information + (default: False) - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or - ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: - None) + ssl.TLSVersion.TLSv1_3 indicating which TLS version to use + (default: None) - program: the name of the executable program or application connected to - the Oracle Database (default: oracledb.defaults.program) + the Oracle Database + (default: oracledb.defaults.program) - machine: the machine name of the client connecting to the Oracle Database (default: oracledb.defaults.machine) @@ -1909,7 +1940,8 @@ def connect( (default: oracledb.defaults.osuser) - driver_name: the driver name used by the client to connect to the Oracle - Database (default: oracledb.defaults.driver_name) + Database + (default: oracledb.defaults.driver_name) - use_sni: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required @@ -1924,14 +1956,17 @@ def connect( - extra_auth_params: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the - Azure and OCI cloud-native authentication plugins (default: None) + Azure and OCI cloud-native authentication plugins + (default: None) - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher (default: None) + Oracle Database 23.4 or higher + (default: None) - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with - extreme caution (default: 0) + extreme caution + (default: 0) """ pass @@ -2835,20 +2870,24 @@ def connect_async( The following parameters are all optional. A brief description of each parameter follows: - - user: the name of the user to connect to (default: None) + - user: the name of the user to connect to + (default: None) - proxy_user: the name of the proxy user to connect to. If this value is not specified, it will be parsed out of user if user is in the form - "user[proxy_user]" (default: None) + "user[proxy_user]" + (default: None) - - password: the password for the user (default: None) + - password: the password for the user + (default: None) - newpassword: the new password for the user. The new password will take - effect immediately upon a successful connection to the database (default: - None) + effect immediately upon a successful connection to the database + (default: None) - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode (default: None) + encrypted. 
This value is only used in thin mode + (default: None) - access_token: expected to be a string or a 2-tuple or a callable. If it is a string, it specifies an Azure AD OAuth2 token used for Open @@ -2858,73 +2897,88 @@ def connect_async( authentication. If it is a callable, it returns either a string or a 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is useful when the pool needs to expand and create new connections but the - current authentication token has expired (default: None) + current authentication token has expired + (default: None) - host: the name or IP address of the machine hosting the database or the - database listener (default: None) + database listener + (default: None) - port: the port number on which the database listener is listening (default: 1521) - protocol: one of the strings "tcp" or "tcps" indicating whether to use - unencrypted network traffic or encrypted network traffic (TLS) (default: - "tcp") + unencrypted network traffic or encrypted network traffic (TLS) + (default: "tcp") - https_proxy: the name or IP address of a proxy host to use for tunneling - secure connections (default: None) + secure connections + (default: None) - https_proxy_port: the port on which to communicate with the proxy host (default: 0) - - service_name: the service name of the database (default: None) + - service_name: the service name of the database + (default: None) - - instance_name: the instance name of the database (default: None) + - instance_name: the instance name of the database + (default: None) - sid: the system identifier (SID) of the database. Note using a - service_name instead is recommended (default: None) + service_name instead is recommended + (default: None) - server_type: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" or "pooled" (default: None) - cclass: connection class to use for Database Resident Connection Pooling - (DRCP) (default: None) + (DRCP) + (default: None) - purity: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - expire_time: an integer indicating the number of minutes between the sending of keepalive probes. If this parameter is set to a value greater - than zero it enables keepalive (default: 0) + than zero it enables keepalive + (default: 0) - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated (default: 0) + retried before the attempt is terminated + (default: 0) - retry_delay: the number of seconds to wait before making a new connection - attempt (default: 1) + attempt + (default: 1) - tcp_connect_timeout: a float indicating the maximum number of seconds to - wait for establishing a connection to the database host (default: 20.0) + wait for establishing a connection to the database host + (default: 20.0) - ssl_server_dn_match: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is - performed instead (default: True) + performed instead + (default: True) - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. Otherwise the hostname will be used (default: None) + for any verfication. 
Otherwise the hostname will be used + (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. In thick mode this must be the directory containing the file - cwallet.sso (default: None) + cwallet.sso + (default: None) - events: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous query - notification and high availability event notifications (default: False) + notification and high availability event notifications + (default: False) - externalauth: a boolean indicating whether to use external authentication (default: False) @@ -2934,29 +2988,34 @@ def connect_async( - disable_oob: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality (default: False) + Windows which does not support this functionality + (default: False) - stmtcachesize: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - edition: edition to use for the connection. This parameter cannot be used - simultaneously with the cclass parameter (default: None) + simultaneously with the cclass parameter + (default: None) - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode (default: None) + pool. This value is only used in thick mode + (default: None) - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode (default: False) + mode + (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use - the config_dir parameter of init_oracle_client() (default: - oracledb.defaults.config_dir) + the config_dir parameter of init_oracle_client() + (default: oracledb.defaults.config_dir) - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string (default: None) + should be a string + (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode @@ -2964,15 +3023,18 @@ def connect_async( - supershardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode (default: None) + thick mode + (default: None) - debug_jdwp: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable (default: None) + variable + (default: None) - connection_id_prefix: an application specific prefix that is added to the - connection identifier used for tracing (default: None) + connection identifier used for tracing + (default: None) - ssl_context: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key @@ -2985,23 +3047,27 @@ def connect_async( Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. 
The SDU size that will actually be used is negotiated down to the lower of this value and the - database network SDU configuration value (default: 8192) + database network SDU configuration value + (default: 8192) - pool_boundary: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. This requires - the use of DRCP with Oracle Database 23.4 or higher (default: None) + the use of DRCP with Oracle Database 23.4 or higher + (default: None) - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the - ADB-S documentation for more information (default: False) + ADB-S documentation for more information + (default: False) - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or - ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: - None) + ssl.TLSVersion.TLSv1_3 indicating which TLS version to use + (default: None) - program: the name of the executable program or application connected to - the Oracle Database (default: oracledb.defaults.program) + the Oracle Database + (default: oracledb.defaults.program) - machine: the machine name of the client connecting to the Oracle Database (default: oracledb.defaults.machine) @@ -3013,7 +3079,8 @@ def connect_async( (default: oracledb.defaults.osuser) - driver_name: the driver name used by the client to connect to the Oracle - Database (default: oracledb.defaults.driver_name) + Database + (default: oracledb.defaults.driver_name) - use_sni: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required @@ -3028,13 +3095,16 @@ def connect_async( - extra_auth_params: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the - Azure and OCI cloud-native authentication plugins (default: None) + Azure and OCI cloud-native authentication plugins + (default: None) - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher (default: None) + Oracle Database 23.4 or higher + (default: None) - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with - extreme caution (default: 0) + extreme caution + (default: 0) """ pass diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index b0c68001..1e04f0e7 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -736,18 +736,20 @@ def create_pool( The following parameters are all optional. A brief description of each parameter follows: - - min: the minimum number of connections the pool should contain (default: - 1) + - min: the minimum number of connections the pool should contain + (default: 1) - - max: the maximum number of connections the pool should contain (default: - 2) + - max: the maximum number of connections the pool should contain + (default: 2) - increment: the number of connections that should be added to the pool - whenever a new connection needs to be created (default: 1) + whenever a new connection needs to be created + (default: 1) - connectiontype: the class of the connection that should be returned during calls to pool.acquire(). It must be oracledb.Connection or a - subclass of oracledb.Connection (default: None) + subclass of oracledb.Connection + (default: None) - getmode: how pool.acquire() will behave. 
One of the constants oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, @@ -755,56 +757,68 @@ def create_pool( (default: oracledb.POOL_GETMODE_WAIT) - homogeneous: a boolean indicating whether the connections are homogeneous - (same user) or heterogeneous (multiple users) (default: True) + (same user) or heterogeneous (multiple users) + (default: True) - timeout: length of time (in seconds) that a connection may remain idle in the pool before it is terminated. If it is 0 then connections are never - terminated (default: 0) + terminated + (default: 0) - wait_timeout: length of time (in milliseconds) that a caller should wait when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT (default: 0) + oracledb.POOL_GETMODE_TIMEDWAIT + (default: 0) - max_lifetime_session: length of time (in seconds) that connections can remain in the pool. If it is 0 then connections may remain in the pool - indefinitely (default: 0) + indefinitely + (default: 0) - session_callback: a callable that is invoked when a connection is returned from the pool for the first time, or when the connection tag - differs from the one requested (default: None) + differs from the one requested + (default: None) - max_sessions_per_shard: the maximum number of connections that may be - associated with a particular shard (default: 0) + associated with a particular shard + (default: 0) - soda_metadata_cache: boolean indicating whether or not the SODA metadata - cache should be enabled (default: False) + cache should be enabled + (default: False) - ping_interval: length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If ping_interval is a negative value the ping - functionality will be disabled (default: 60) + functionality will be disabled + (default: 60) - ping_timeout: maximum length of time (in milliseconds) to wait for a connection in the pool to respond to an internal ping to the database - before being discarded and replaced during a call to acquire() (default: - 5000) + before being discarded and replaced during a call to acquire() + (default: 5000) - - user: the name of the user to connect to (default: None) + - user: the name of the user to connect to + (default: None) - proxy_user: the name of the proxy user to connect to. If this value is not specified, it will be parsed out of user if user is in the form - "user[proxy_user]" (default: None) + "user[proxy_user]" + (default: None) - - password: the password for the user (default: None) + - password: the password for the user + (default: None) - newpassword: the new password for the user. The new password will take - effect immediately upon a successful connection to the database (default: - None) + effect immediately upon a successful connection to the database + (default: None) - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode (default: None) + encrypted. This value is only used in thin mode + (default: None) - access_token: expected to be a string or a 2-tuple or a callable. If it is a string, it specifies an Azure AD OAuth2 token used for Open @@ -814,73 +828,88 @@ def create_pool( authentication. 
If it is a callable, it returns either a string or a 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is useful when the pool needs to expand and create new connections but the - current authentication token has expired (default: None) + current authentication token has expired + (default: None) - host: the name or IP address of the machine hosting the database or the - database listener (default: None) + database listener + (default: None) - port: the port number on which the database listener is listening (default: 1521) - protocol: one of the strings "tcp" or "tcps" indicating whether to use - unencrypted network traffic or encrypted network traffic (TLS) (default: - "tcp") + unencrypted network traffic or encrypted network traffic (TLS) + (default: "tcp") - https_proxy: the name or IP address of a proxy host to use for tunneling - secure connections (default: None) + secure connections + (default: None) - https_proxy_port: the port on which to communicate with the proxy host (default: 0) - - service_name: the service name of the database (default: None) + - service_name: the service name of the database + (default: None) - - instance_name: the instance name of the database (default: None) + - instance_name: the instance name of the database + (default: None) - sid: the system identifier (SID) of the database. Note using a - service_name instead is recommended (default: None) + service_name instead is recommended + (default: None) - server_type: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" or "pooled" (default: None) - cclass: connection class to use for Database Resident Connection Pooling - (DRCP) (default: None) + (DRCP) + (default: None) - purity: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - expire_time: an integer indicating the number of minutes between the sending of keepalive probes. If this parameter is set to a value greater - than zero it enables keepalive (default: 0) + than zero it enables keepalive + (default: 0) - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated (default: 0) + retried before the attempt is terminated + (default: 0) - retry_delay: the number of seconds to wait before making a new connection - attempt (default: 1) + attempt + (default: 1) - tcp_connect_timeout: a float indicating the maximum number of seconds to - wait for establishing a connection to the database host (default: 20.0) + wait for establishing a connection to the database host + (default: 20.0) - ssl_server_dn_match: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is - performed instead (default: True) + performed instead + (default: True) - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. Otherwise the hostname will be used (default: None) + for any verfication. Otherwise the hostname will be used + (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. 
In thick mode this must be the directory containing the file - cwallet.sso (default: None) + cwallet.sso + (default: None) - events: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous query - notification and high availability event notifications (default: False) + notification and high availability event notifications + (default: False) - externalauth: a boolean indicating whether to use external authentication (default: False) @@ -890,29 +919,34 @@ def create_pool( - disable_oob: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality (default: False) + Windows which does not support this functionality + (default: False) - stmtcachesize: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - edition: edition to use for the connection. This parameter cannot be used - simultaneously with the cclass parameter (default: None) + simultaneously with the cclass parameter + (default: None) - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode (default: None) + pool. This value is only used in thick mode + (default: None) - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode (default: False) + mode + (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use - the config_dir parameter of init_oracle_client() (default: - oracledb.defaults.config_dir) + the config_dir parameter of init_oracle_client() + (default: oracledb.defaults.config_dir) - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string (default: None) + should be a string + (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode @@ -920,15 +954,18 @@ def create_pool( - supershardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode (default: None) + thick mode + (default: None) - debug_jdwp: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable (default: None) + variable + (default: None) - connection_id_prefix: an application specific prefix that is added to the - connection identifier used for tracing (default: None) + connection identifier used for tracing + (default: None) - ssl_context: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key @@ -941,23 +978,27 @@ def create_pool( Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. The SDU size that will actually be used is negotiated down to the lower of this value and the - database network SDU configuration value (default: 8192) + database network SDU configuration value + (default: 8192) - pool_boundary: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. 
This requires - the use of DRCP with Oracle Database 23.4 or higher (default: None) + the use of DRCP with Oracle Database 23.4 or higher + (default: None) - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the - ADB-S documentation for more information (default: False) + ADB-S documentation for more information + (default: False) - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or - ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: - None) + ssl.TLSVersion.TLSv1_3 indicating which TLS version to use + (default: None) - program: the name of the executable program or application connected to - the Oracle Database (default: oracledb.defaults.program) + the Oracle Database + (default: oracledb.defaults.program) - machine: the machine name of the client connecting to the Oracle Database (default: oracledb.defaults.machine) @@ -969,7 +1010,8 @@ def create_pool( (default: oracledb.defaults.osuser) - driver_name: the driver name used by the client to connect to the Oracle - Database (default: oracledb.defaults.driver_name) + Database + (default: oracledb.defaults.driver_name) - use_sni: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required @@ -984,14 +1026,17 @@ def create_pool( - extra_auth_params: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the - Azure and OCI cloud-native authentication plugins (default: None) + Azure and OCI cloud-native authentication plugins + (default: None) - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher (default: None) + Oracle Database 23.4 or higher + (default: None) - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with - extreme caution (default: 0) + extreme caution + (default: 0) """ pass diff --git a/src/oracledb/pool_params.py b/src/oracledb/pool_params.py index 28214f60..349ecfb2 100644 --- a/src/oracledb/pool_params.py +++ b/src/oracledb/pool_params.py @@ -134,11 +134,13 @@ def __init__( (default: 2) - increment: the number of connections that should be added to the pool - whenever a new connection needs to be created (default: 1) + whenever a new connection needs to be created + (default: 1) - connectiontype: the class of the connection that should be returned during calls to pool.acquire(). It must be oracledb.Connection or a - subclass of oracledb.Connection (default: None) + subclass of oracledb.Connection + (default: None) - getmode: how pool.acquire() will behave. One of the constants oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, @@ -146,57 +148,68 @@ def __init__( (default: oracledb.POOL_GETMODE_WAIT) - homogeneous: a boolean indicating whether the connections are - homogeneous (same user) or heterogeneous (multiple users) (default: - True) + homogeneous (same user) or heterogeneous (multiple users) + (default: True) - timeout: length of time (in seconds) that a connection may remain idle in the pool before it is terminated. 
If it is 0 then connections - are never terminated (default: 0) + are never terminated + (default: 0) - wait_timeout: length of time (in milliseconds) that a caller should wait when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT (default: 0) + oracledb.POOL_GETMODE_TIMEDWAIT + (default: 0) - max_lifetime_session: length of time (in seconds) that connections can remain in the pool. If it is 0 then connections may remain in the - pool indefinitely (default: 0) + pool indefinitely + (default: 0) - session_callback: a callable that is invoked when a connection is returned from the pool for the first time, or when the connection tag - differs from the one requested (default: None) + differs from the one requested + (default: None) - max_sessions_per_shard: the maximum number of connections that may be - associated with a particular shard (default: 0) + associated with a particular shard + (default: 0) - soda_metadata_cache: boolean indicating whether or not the SODA - metadata cache should be enabled (default: False) + metadata cache should be enabled + (default: False) - ping_interval: length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If ping_interval is a negative value the ping - functionality will be disabled (default: 60) + functionality will be disabled + (default: 60) - ping_timeout: maximum length of time (in milliseconds) to wait for a connection in the pool to respond to an internal ping to the database before being discarded and replaced during a call to acquire() (default: 5000) - - user: the name of the user to connect to (default: None) + - user: the name of the user to connect to + (default: None) - proxy_user: the name of the proxy user to connect to. If this value is not specified, it will be parsed out of user if user is in the - form "user[proxy_user]" (default: None) + form "user[proxy_user]" + (default: None) - - password: the password for the user (default: None) + - password: the password for the user + (default: None) - newpassword: the new password for the user. The new password will take effect immediately upon a successful connection to the database (default: None) - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode (default: None) + encrypted. This value is only used in thin mode + (default: None) - access_token: expected to be a string or a 2-tuple or a callable. 
If it is a string, it specifies an Azure AD OAuth2 token used for Open @@ -207,10 +220,12 @@ def __init__( either a string or a 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is useful when the pool needs to expand and create new connections but the current authentication token has - expired (default: None) + expired + (default: None) - host: the name or IP address of the machine hosting the database or - the database listener (default: None) + the database listener + (default: None) - port: the port number on which the database listener is listening (default: 1521) @@ -220,58 +235,71 @@ def __init__( (default: "tcp") - https_proxy: the name or IP address of a proxy host to use for - tunneling secure connections (default: None) + tunneling secure connections + (default: None) - https_proxy_port: the port on which to communicate with the proxy - host (default: 0) + host + (default: 0) - - service_name: the service name of the database (default: None) + - service_name: the service name of the database + (default: None) - - instance_name: the instance name of the database (default: None) + - instance_name: the instance name of the database + (default: None) - sid: the system identifier (SID) of the database. Note using a - service_name instead is recommended (default: None) + service_name instead is recommended + (default: None) - server_type: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" - or "pooled" (default: None) + or "pooled" + (default: None) - cclass: connection class to use for Database Resident Connection - Pooling (DRCP) (default: None) + Pooling (DRCP) + (default: None) - purity: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - expire_time: an integer indicating the number of minutes between the sending of keepalive probes. If this parameter is set to a value - greater than zero it enables keepalive (default: 0) + greater than zero it enables keepalive + (default: 0) - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated (default: 0) + retried before the attempt is terminated + (default: 0) - retry_delay: the number of seconds to wait before making a new - connection attempt (default: 1) + connection attempt + (default: 1) - tcp_connect_timeout: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host (default: - 20.0) + to wait for establishing a connection to the database host + (default: 20.0) - ssl_server_dn_match: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching - is performed instead (default: True) + is performed instead + (default: True) - ssl_server_cert_dn: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the - hostname will be used (default: None) + hostname will be used + (default: None) - wallet_location: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. 
In thick mode this must be the directory containing - the file cwallet.sso (default: None) + the file cwallet.sso + (default: None) - events: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous @@ -279,27 +307,33 @@ def __init__( (default: False) - externalauth: a boolean indicating whether to use external - authentication (default: False) + authentication + (default: False) - mode: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) + oracledb.AUTH_MODE_SYSDBA + (default: oracledb.AUTH_MODE_DEFAULT) - disable_oob: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality (default: False) + Windows which does not support this functionality + (default: False) - stmtcachesize: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - edition: edition to use for the connection. This parameter cannot be - used simultaneously with the cclass parameter (default: None) + used simultaneously with the cclass parameter + (default: None) - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode (default: None) + pool. This value is only used in thick mode + (default: None) - matchanytag: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in - thick mode (default: False) + thick mode + (default: False) - config_dir: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. @@ -308,36 +342,43 @@ def __init__( - appcontext: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the - tuple should be a string (default: None) + tuple should be a string + (default: None) - shardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick - mode (default: None) + mode + (default: None) - supershardingkey: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode (default: None) + thick mode + (default: None) - debug_jdwp: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP - environment variable (default: None) + environment variable + (default: None) - connection_id_prefix: an application specific prefix that is added to - the connection identifier used for tracing (default: None) + the connection identifier used for tracing + (default: None) - ssl_context: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key or any certificates found in a separately supplied wallet. This parameter should only be specified if the default SSLContext object - cannot be used (default: None) + cannot be used + (default: None) - sdu: the requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. 
The SDU size that will actually be used is negotiated down to the lower of this value and - the database network SDU configuration value (default: 8192) + the database network SDU configuration value + (default: 8192) - pool_boundary: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. @@ -347,27 +388,32 @@ def __init__( - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please - refer to the ADB-S documentation for more information (default: - False) + refer to the ADB-S documentation for more information + (default: False) - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or - ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: - None) + ssl.TLSVersion.TLSv1_3 indicating which TLS version to use + (default: None) - program: the name of the executable program or application connected - to the Oracle Database (default: oracledb.defaults.program) + to the Oracle Database + (default: oracledb.defaults.program) - machine: the machine name of the client connecting to the Oracle - Database (default: oracledb.defaults.machine) + Database + (default: oracledb.defaults.machine) - terminal: the terminal identifier from which the connection - originates (default: oracledb.defaults.terminal) + originates + (default: oracledb.defaults.terminal) - osuser: the operating system user that initiates the database - connection (default: oracledb.defaults.osuser) + connection + (default: oracledb.defaults.osuser) - driver_name: the driver name used by the client to connect to the - Oracle Database (default: oracledb.defaults.driver_name) + Oracle Database + (default: oracledb.defaults.driver_name) - use_sni: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required @@ -378,19 +424,22 @@ def __init__( parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora - configuration file (default: - oracledb.defaults.thick_mode_dsn_passthrough) + configuration file + (default: oracledb.defaults.thick_mode_dsn_passthrough) - extra_auth_params: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as - the Azure and OCI cloud-native authentication plugins (default: None) + the Azure and OCI cloud-native authentication plugins + (default: None) - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher (default: None) + Oracle Database 23.4 or higher + (default: None) - handle: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with - extreme caution (default: 0) + extreme caution + (default: 0) """ pass diff --git a/utils/build_from_template.py b/utils/build_from_template.py index 61f5f5ef..2e141386 100644 --- a/utils/build_from_template.py +++ b/utils/build_from_template.py @@ -146,11 +146,13 @@ def args_help_with_defaults_content(indent): """ Generates the content for the args_help_with_defaults template tag. 
""" - raw_descriptions = [ - f"- {f.name}: {f.description} (default: {f.default})" - for f in fields - if f.description - ] + raw_descriptions = [] + for f in fields: + if not f.description: + continue + raw_descriptions.append(f"- {f.name}: {f.description}") + raw_descriptions.append(f" (default: {f.default})") + raw_descriptions.append("") descriptions = [ textwrap.fill( d, @@ -158,9 +160,9 @@ def args_help_with_defaults_content(indent): subsequent_indent=indent + " ", width=TEXT_WIDTH, ) - for d in raw_descriptions + for d in raw_descriptions[:-1] ] - return "\n\n".join(descriptions).strip() + return "\n".join(descriptions).strip() def args_help_without_defaults_content(indent): From d349d8460ea40b1f0f2f28fe90d5f28b06913ecf Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 7 Aug 2025 14:30:53 -0600 Subject: [PATCH 178/239] Doc improvements. --- doc/src/api_manual/dataframe.rst | 12 +- doc/src/api_manual/module.rst | 4 +- doc/src/api_manual/pipeline.rst | 38 +-- doc/src/user_guide/dataframes.rst | 46 ++- src/oracledb/__init__.py | 3 + src/oracledb/connect_params.py | 388 +++++++++++----------- src/oracledb/connection.py | 334 ++++++++++--------- src/oracledb/pool.py | 533 +++++++++++++++++------------- src/oracledb/pool_params.py | 486 +++++++++++++-------------- utils/Makefile | 12 +- utils/build_from_template.py | 91 +++-- utils/templates/connection.py | 2 +- 12 files changed, 1006 insertions(+), 943 deletions(-) diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index 044c64d2..664f3f6e 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -1,8 +1,8 @@ .. _oracledataframe: -**************** -API: Data Frames -**************** +********************** +API: DataFrame Objects +********************** .. currentmodule:: oracledb @@ -26,8 +26,10 @@ DataFrame Class .. autoclass:: DataFrame A DataFrame object is returned by the methods - :meth:`Connection.fetch_df_all()` and - :meth:`Connection.fetch_df_batches()`. + :meth:`Connection.fetch_df_all()`, + :meth:`Connection.fetch_df_batches()`, + :meth:`AsyncConnection.fetch_df_all()`, or + :meth:`AsyncConnection.fetch_df_batches()`. Each column in a DataFrame exposes an `Apache Arrow PyCapsule `. diff --git a/doc/src/api_manual/pipeline.rst b/doc/src/api_manual/pipeline.rst index b9536e26..04fe6ec4 100644 --- a/doc/src/api_manual/pipeline.rst +++ b/doc/src/api_manual/pipeline.rst @@ -34,66 +34,30 @@ Pipeline Methods .. automethod:: Pipeline.add_callfunc - :ref:`pipelineopattrs` can be used to examine the operation, if needed. - - .. seealso:: - - :ref:`PipelineOp object ` and - :ref:`PipelineOpResult object ` - .. automethod:: Pipeline.add_callproc - :ref:`pipelineopattrs` can be used to examine the operation, if needed. - - .. seealso:: - - :ref:`PipelineOp object ` - .. automethod:: Pipeline.add_commit .. automethod:: Pipeline.add_execute - :ref:`pipelineopattrs` can be used to examine the operation, if needed. - - .. seealso:: - - :ref:`PipelineOp object ` - .. automethod:: Pipeline.add_executemany - :ref:`pipelineopattrs` can be used to examine the operation, if needed. - .. seealso:: - :ref:`batchstmnt` and :ref:`PipelineOp object ` + :ref:`batchstmnt` .. automethod:: Pipeline.add_fetchall - :ref:`pipelineopattrs` can be used to examine the operation, if needed. - - .. seealso:: - - :ref:`PipelineOp object ` and - :ref:`PipelineOpResult object ` - .. automethod:: Pipeline.add_fetchmany - :ref:`pipelineopattrs` can be used to examine the operation, if needed. - .. 
seealso:: - :ref:`PipelineOp object `, - :ref:`PipelineOpResult object `, :ref:`roundtrips`, and :ref:`rowlimit` .. automethod:: Pipeline.add_fetchone - :ref:`pipelineopattrs` can be used to examine the operation, if needed. - .. seealso:: - :ref:`PipelineOp object `, - :ref:`PipelineOpResult object `, and :ref:`rowlimit` Pipeline Attributes diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index 21f05554..f900ebb1 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -16,8 +16,8 @@ as `Apache PyArrow `__, `__ format. Python-oracledb has a :ref:`DataFrame ` object that exposes -an Apache Arrow PyCapsule Interface. This enables zero-copy data interchanges -to the data frame objects of other libraries. +an Apache Arrow ArrowArrayStream PyCapsule Interface. This enables zero-copy +data interchanges to the data frame objects of other libraries. .. note:: @@ -29,28 +29,30 @@ to the data frame objects of other libraries. Fetching Data Frames ==================== -Data frames can be fetched by using a standard SQL query. +Data frames can be fetched by using a standard SQL query with :ref:`Connection +` or :ref:`AsyncConnection ` methods. Data Frame Queries ------------------ -Python-oracledb has two methods for fetching rows into data frames: +The python-oracledb methods for fetching rows into data frames are: - :meth:`Connection.fetch_df_all()` fetches all rows from a query - :meth:`Connection.fetch_df_batches()` implements an iterator for fetching batches of rows -The methods return python-oracledb :ref:`DataFrame ` -objects. +These methods can also be called from :ref:`AsyncConnection +`. The methods all return python-oracledb :ref:`DataFrame +` objects. For example, to fetch all rows from a query and print some information about the results: .. code-block:: python - sql = "select * from departments" + sql = "select * from departments where department_id > :1" # Adjust arraysize to tune the query fetch performance - odf = connection.fetch_df_all(statement=sql, arraysize=100) + odf = connection.fetch_df_all(statement=sql, parameters=[100], arraysize=100) print(odf.column_names()) print(f"{odf.num_columns()} columns") @@ -60,7 +62,7 @@ With Oracle Database's standard DEPARTMENTS table, this would display:: ['DEPARTMENT_ID', 'DEPARTMENT_NAME', 'MANAGER_ID', 'LOCATION_ID'] 4 columns - 27 rows + 17 rows To fetch in batches, use an iterator: @@ -68,12 +70,12 @@ To fetch in batches, use an iterator: import pyarrow - sql = "select * from departments where department_id < 80" + sql = "select * from departments where department_id < :1" # Adjust "size" to tune the query fetch performance # Here it is small to show iteration - for odf in connection.fetch_df_batches(statement=sql, size=4): - pdf = pyarrow.table(odf).to_pandas() - print(pdf) + for odf in connection.fetch_df_batches(statement=sql, parameters=[80], size=4): + df = pyarrow.table(odf).to_pandas() + print(df) With Oracle Database's standard DEPARTMENTS table, this would display:: @@ -90,6 +92,24 @@ With Oracle Database's standard DEPARTMENTS table, this would display:: Converting to other data frame formats is :ref:`shown later ` in this chapter. +**Asynchronous Data Frame Queries** + +With :ref:`asynchronous programming `, use the appropriate syntax. For +example, to fetch all rows at once: + +.. code-block:: python + + connection = await oracledb.connect_async(...) + odf = await connection.fetch_df_all(sql="select ...", parameters=..., arraysize=...) + +Or to iterate: + +.. 
code-block:: python + + connection = await oracledb.connect_async(...) + async for odf in connection.fetch_df_batches(sql="select ...", parameters=..., size=...): + do_something(odf) + .. _dftypemapping: Data Frame Type Mapping diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index 9ec8da65..51560b75 100644 --- a/src/oracledb/__init__.py +++ b/src/oracledb/__init__.py @@ -43,6 +43,9 @@ from . import base_impl, thick_impl, thin_impl from .base_impl import ( + # type classes + ApiType as ApiType, + DbType as DbType, # database types DB_TYPE_BFILE as DB_TYPE_BFILE, DB_TYPE_BINARY_DOUBLE as DB_TYPE_BINARY_DOUBLE, diff --git a/src/oracledb/connect_params.py b/src/oracledb/connect_params.py index ff56ddcd..e9eb4039 100644 --- a/src/oracledb/connect_params.py +++ b/src/oracledb/connect_params.py @@ -114,28 +114,28 @@ def __init__( All parameters are optional. A brief description of each parameter follows: - - user: the name of the user to connect to + - ``user``: the name of the user to connect to (default: None) - - proxy_user: the name of the proxy user to connect to. If this value - is not specified, it will be parsed out of user if user is in the - form "user[proxy_user]" + - ``proxy_user``: the name of the proxy user to connect to. If this + value is not specified, it will be parsed out of user if user is in + the form "user[proxy_user]" (default: None) - - password: the password for the user + - ``password``: the password for the user (default: None) - - newpassword: the new password for the user. The new password will + - ``newpassword``: the new password for the user. The new password will take effect immediately upon a successful connection to the database (default: None) - - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + - ``wallet_password``: the password to use to decrypt the wallet, if it + is encrypted. This value is only used in thin mode (default: None) - - access_token: expected to be a string or a 2-tuple or a callable. If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: expected to be a string or a 2-tuple or a callable. + If it is a string, it specifies an Azure AD OAuth2 token used for + Open Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. 
If it is a callable, it returns @@ -145,222 +145,224 @@ def __init__( expired (default: None) - - host: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the name or IP address of the machine hosting the database + or the database listener (default: None) - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening (default: 1521) - - protocol: one of the strings "tcp" or "tcps" indicating whether to - use unencrypted network traffic or encrypted network traffic (TLS) + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether + to use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - https_proxy: the name or IP address of a proxy host to use for + - ``https_proxy``: the name or IP address of a proxy host to use for tunneling secure connections (default: None) - - https_proxy_port: the port on which to communicate with the proxy + - ``https_proxy_port``: the port on which to communicate with the proxy host (default: 0) - - service_name: the service name of the database + - ``service_name``: the service name of the database (default: None) - - instance_name: the instance name of the database + - ``instance_name``: the instance name of the database (default: None) - - sid: the system identifier (SID) of the database. Note using a + - ``sid``: the system identifier (SID) of the database. Note using a service_name instead is recommended (default: None) - - server_type: the type of server connection that should be + - ``server_type``: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" or "pooled" (default: None) - - cclass: connection class to use for Database Resident Connection + - ``cclass``: connection class to use for Database Resident Connection Pooling (DRCP) (default: None) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling + (DRCP) (default: oracledb.PURITY_DEFAULT) - - expire_time: an integer indicating the number of minutes between the - sending of keepalive probes. If this parameter is set to a value + - ``expire_time``: an integer indicating the number of minutes between + the sending of keepalive probes. If this parameter is set to a value greater than zero it enables keepalive (default: 0) - - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that a connection attempt should + be retried before the attempt is terminated (default: 0) - - retry_delay: the number of seconds to wait before making a new + - ``retry_delay``: the number of seconds to wait before making a new connection attempt (default: 1) - - tcp_connect_timeout: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + - ``tcp_connect_timeout``: a float indicating the maximum number of + seconds to wait for establishing a connection to the database host (default: 20.0) - - ssl_server_dn_match: boolean indicating whether the server + - ``ssl_server_dn_match``: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. 
Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead (default: True) - - ssl_server_cert_dn: the distinguished name (DN) which should be + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the hostname will be used (default: None) - - wallet_location: the directory where the wallet can be found. In thin - mode this must be the directory containing the PEM-encoded wallet - file ewallet.pem. In thick mode this must be the directory containing - the file cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. In + thin mode this must be the directory containing the PEM-encoded + wallet file ewallet.pem. In thick mode this must be the directory + containing the file cwallet.sso (default: None) - - events: boolean specifying whether events mode should be enabled. + - ``events``: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous query notification and high availability event notifications (default: False) - - externalauth: a boolean indicating whether to use external + - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - mode: authorization mode to use. For example + - ``mode``: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) - - disable_oob: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``disable_oob``: boolean indicating whether out-of-band breaks should + be disabled. This value is only used in thin mode. It has no effect + on Windows which does not support this functionality (default: False) - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - - edition: edition to use for the connection. This parameter cannot be - used simultaneously with the cclass parameter + - ``edition``: edition to use for the connection. This parameter cannot + be used simultaneously with the cclass parameter (default: None) - - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode + - ``tag``: identifies the type of connection that should be returned + from a pool. This value is only used in thick mode (default: None) - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick mode (default: False) - - config_dir: directory in which the optional tnsnames.ora + - ``config_dir``: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use the config_dir parameter of init_oracle_client() (default: oracledb.defaults.config_dir) - - appcontext: application context used by the connection. It should be - a list of 3-tuples (namespace, name, value) and each entry in the + - ``appcontext``: application context used by the connection. 
It should + be a list of 3-tuples (namespace, name, value) and each entry in the tuple should be a string (default: None) - - shardingkey: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. This value is only used in thick - mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that + identify the database shard to connect to. This value is only used in + thick mode (default: None) - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode (default: None) - - debug_jdwp: a string with the format "host=;port=" that - specifies the host and port of the PL/SQL debugger. This value is - only used in thin mode. For thick mode set the ORA_DEBUG_JDWP + - ``debug_jdwp``: a string with the format "host=;port=" + that specifies the host and port of the PL/SQL debugger. This value + is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment variable (default: None) - - connection_id_prefix: an application specific prefix that is added to - the connection identifier used for tracing + - ``connection_id_prefix``: an application specific prefix that is + added to the connection identifier used for tracing (default: None) - - ssl_context: an SSLContext object used for connecting to the database - using TLS. This SSL context will be modified to include the private - key or any certificates found in a separately supplied wallet. This - parameter should only be specified if the default SSLContext object - cannot be used + - ``ssl_context``: an SSLContext object used for connecting to the + database using TLS. This SSL context will be modified to include the + private key or any certificates found in a separately supplied + wallet. This parameter should only be specified if the default + SSLContext object cannot be used (default: None) - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The - value tunes internal buffers used for communication to the database. - Bigger values can increase throughput for large queries or bulk data - loads, but at the cost of higher memory use. The SDU size that will - actually be used is negotiated down to the lower of this value and - the database network SDU configuration value + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. + The value tunes internal buffers used for communication to the + database. Bigger values can increase throughput for large queries or + bulk data loads, but at the cost of higher memory use. The SDU size + that will actually be used is negotiated down to the lower of this + value and the database network SDU configuration value (default: 8192) - - pool_boundary: one of the values "statement" or "transaction" + - ``pool_boundary``: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. This requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. - This is an Oracle Autonomous Database Serverless (ADB-S) specific - property for clients connecting from within OCI Cloud network. Please - refer to the ADB-S documentation for more information + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + open. 
This is an Oracle Autonomous Database Serverless (ADB-S) + specific property for clients connecting from within OCI Cloud + network. Please refer to the ADB-S documentation for more information (default: False) - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - program: the name of the executable program or application connected - to the Oracle Database + - ``program``: the name of the executable program or application + connected to the Oracle Database (default: oracledb.defaults.program) - - machine: the machine name of the client connecting to the Oracle + - ``machine``: the machine name of the client connecting to the Oracle Database (default: oracledb.defaults.machine) - - terminal: the terminal identifier from which the connection + - ``terminal``: the terminal identifier from which the connection originates (default: oracledb.defaults.terminal) - - osuser: the operating system user that initiates the database + - ``osuser``: the operating system user that initiates the database connection (default: oracledb.defaults.osuser) - - driver_name: the driver name used by the client to connect to the + - ``driver_name``: the driver name used by the client to connect to the Oracle Database (default: oracledb.defaults.driver_name) - - use_sni: boolean indicating whether to use the TLS SNI extension to - bypass the second TLS neogiation that would otherwise be required + - ``use_sni``: boolean indicating whether to use the TLS SNI extension + to bypass the second TLS neogiation that would otherwise be required (default: False) - - thick_mode_dsn_passthrough: boolean indicating whether to pass the - connect string to the Oracle Client libraries unchanged without + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + the connect string to the Oracle Client libraries unchanged without parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file (default: oracledb.defaults.thick_mode_dsn_passthrough) - - extra_auth_params: a dictionary containing configuration parameters - necessary for Oracle Database authentication using plugins, such as - the Azure and OCI cloud-native authentication plugins + - ``extra_auth_params``: a dictionary containing configuration + parameters necessary for Oracle Database authentication using + plugins, such as the Azure and OCI cloud-native authentication + plugins (default: None) - - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP + with Oracle Database 23.4 or higher (default: None) - - handle: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. It should be used with - extreme caution + - ``handle``: an integer representing a pointer to a valid service + context handle. This value is only used in thick mode. It should be + used with extreme caution (default: 0) """ pass @@ -977,23 +979,23 @@ def set( object. All parameters are optional. A brief description of each parameter follows: - - user: the name of the user to connect to + - ``user``: the name of the user to connect to - - proxy_user: the name of the proxy user to connect to. 
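As an editorial illustration of how the ConnectParams keyword arguments documented above fit together, the following sketch builds a parameter object and passes it to oracledb.connect(). The host, service name, user, and the PYO_PASSWORD environment variable are placeholders for this example and are not values taken from this patch.

.. code-block:: python

    import os

    import oracledb

    # Placeholder connection details; adjust for your environment.
    params = oracledb.ConnectParams(
        host="dbhost.example.com",
        port=1521,
        service_name="orclpdb",
        expire_time=2,  # send keepalive probes every two minutes
    )

    connection = oracledb.connect(
        user="hr",
        password=os.environ.get("PYO_PASSWORD"),
        params=params,
    )

Keyword arguments passed directly to connect() override the corresponding values held in the params object.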
If this value - is not specified, it will be parsed out of user if user is in the - form "user[proxy_user]" + - ``proxy_user``: the name of the proxy user to connect to. If this + value is not specified, it will be parsed out of user if user is in + the form "user[proxy_user]" - - password: the password for the user + - ``password``: the password for the user - - newpassword: the new password for the user. The new password will + - ``newpassword``: the new password for the user. The new password will take effect immediately upon a successful connection to the database - - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + - ``wallet_password``: the password to use to decrypt the wallet, if it + is encrypted. This value is only used in thin mode - - access_token: expected to be a string or a 2-tuple or a callable. If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: expected to be a string or a 2-tuple or a callable. + If it is a string, it specifies an Azure AD OAuth2 token used for + Open Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. If it is a callable, it returns @@ -1002,176 +1004,178 @@ def set( create new connections but the current authentication token has expired - - host: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the name or IP address of the machine hosting the database + or the database listener - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening - - protocol: one of the strings "tcp" or "tcps" indicating whether to - use unencrypted network traffic or encrypted network traffic (TLS) + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether + to use unencrypted network traffic or encrypted network traffic (TLS) - - https_proxy: the name or IP address of a proxy host to use for + - ``https_proxy``: the name or IP address of a proxy host to use for tunneling secure connections - - https_proxy_port: the port on which to communicate with the proxy + - ``https_proxy_port``: the port on which to communicate with the proxy host - - service_name: the service name of the database + - ``service_name``: the service name of the database - - instance_name: the instance name of the database + - ``instance_name``: the instance name of the database - - sid: the system identifier (SID) of the database. Note using a + - ``sid``: the system identifier (SID) of the database. Note using a service_name instead is recommended - - server_type: the type of server connection that should be + - ``server_type``: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" or "pooled" - - cclass: connection class to use for Database Resident Connection + - ``cclass``: connection class to use for Database Resident Connection Pooling (DRCP) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling + (DRCP) - - expire_time: an integer indicating the number of minutes between the - sending of keepalive probes. 
If this parameter is set to a value + - ``expire_time``: an integer indicating the number of minutes between + the sending of keepalive probes. If this parameter is set to a value greater than zero it enables keepalive - - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that a connection attempt should + be retried before the attempt is terminated - - retry_delay: the number of seconds to wait before making a new + - ``retry_delay``: the number of seconds to wait before making a new connection attempt - - tcp_connect_timeout: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + - ``tcp_connect_timeout``: a float indicating the maximum number of + seconds to wait for establishing a connection to the database host - - ssl_server_dn_match: boolean indicating whether the server + - ``ssl_server_dn_match``: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead - - ssl_server_cert_dn: the distinguished name (DN) which should be + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the hostname will be used - - wallet_location: the directory where the wallet can be found. In thin - mode this must be the directory containing the PEM-encoded wallet - file ewallet.pem. In thick mode this must be the directory containing - the file cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. In + thin mode this must be the directory containing the PEM-encoded + wallet file ewallet.pem. In thick mode this must be the directory + containing the file cwallet.sso - - events: boolean specifying whether events mode should be enabled. + - ``events``: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous query notification and high availability event notifications - - externalauth: a boolean indicating whether to use external + - ``externalauth``: a boolean indicating whether to use external authentication - - mode: authorization mode to use. For example + - ``mode``: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA - - disable_oob: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``disable_oob``: boolean indicating whether out-of-band breaks should + be disabled. This value is only used in thin mode. It has no effect + on Windows which does not support this functionality - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache - - edition: edition to use for the connection. This parameter cannot be - used simultaneously with the cclass parameter + - ``edition``: edition to use for the connection. This parameter cannot + be used simultaneously with the cclass parameter - - tag: identifies the type of connection that should be returned from a - pool. 
This value is only used in thick mode + - ``tag``: identifies the type of connection that should be returned + from a pool. This value is only used in thick mode - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick mode - - config_dir: directory in which the optional tnsnames.ora + - ``config_dir``: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use the config_dir parameter of init_oracle_client() - - appcontext: application context used by the connection. It should be - a list of 3-tuples (namespace, name, value) and each entry in the + - ``appcontext``: application context used by the connection. It should + be a list of 3-tuples (namespace, name, value) and each entry in the tuple should be a string - - shardingkey: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. This value is only used in thick - mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that + identify the database shard to connect to. This value is only used in + thick mode - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode - - debug_jdwp: a string with the format "host=;port=" that - specifies the host and port of the PL/SQL debugger. This value is - only used in thin mode. For thick mode set the ORA_DEBUG_JDWP + - ``debug_jdwp``: a string with the format "host=;port=" + that specifies the host and port of the PL/SQL debugger. This value + is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment variable - - connection_id_prefix: an application specific prefix that is added to - the connection identifier used for tracing + - ``connection_id_prefix``: an application specific prefix that is + added to the connection identifier used for tracing - - ssl_context: an SSLContext object used for connecting to the database - using TLS. This SSL context will be modified to include the private - key or any certificates found in a separately supplied wallet. This - parameter should only be specified if the default SSLContext object - cannot be used + - ``ssl_context``: an SSLContext object used for connecting to the + database using TLS. This SSL context will be modified to include the + private key or any certificates found in a separately supplied + wallet. This parameter should only be specified if the default + SSLContext object cannot be used - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The - value tunes internal buffers used for communication to the database. - Bigger values can increase throughput for large queries or bulk data - loads, but at the cost of higher memory use. The SDU size that will - actually be used is negotiated down to the lower of this value and - the database network SDU configuration value + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. + The value tunes internal buffers used for communication to the + database. Bigger values can increase throughput for large queries or + bulk data loads, but at the cost of higher memory use. 
The SDU size + that will actually be used is negotiated down to the lower of this + value and the database network SDU configuration value - - pool_boundary: one of the values "statement" or "transaction" + - ``pool_boundary``: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. This requires the use of DRCP with Oracle Database 23.4 or higher - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. - This is an Oracle Autonomous Database Serverless (ADB-S) specific - property for clients connecting from within OCI Cloud network. Please - refer to the ADB-S documentation for more information + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + open. This is an Oracle Autonomous Database Serverless (ADB-S) + specific property for clients connecting from within OCI Cloud + network. Please refer to the ADB-S documentation for more information - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use - - program: the name of the executable program or application connected - to the Oracle Database + - ``program``: the name of the executable program or application + connected to the Oracle Database - - machine: the machine name of the client connecting to the Oracle + - ``machine``: the machine name of the client connecting to the Oracle Database - - terminal: the terminal identifier from which the connection + - ``terminal``: the terminal identifier from which the connection originates - - osuser: the operating system user that initiates the database + - ``osuser``: the operating system user that initiates the database connection - - driver_name: the driver name used by the client to connect to the + - ``driver_name``: the driver name used by the client to connect to the Oracle Database - - use_sni: boolean indicating whether to use the TLS SNI extension to - bypass the second TLS neogiation that would otherwise be required + - ``use_sni``: boolean indicating whether to use the TLS SNI extension + to bypass the second TLS neogiation that would otherwise be required - - thick_mode_dsn_passthrough: boolean indicating whether to pass the - connect string to the Oracle Client libraries unchanged without + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + the connect string to the Oracle Client libraries unchanged without parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file - - extra_auth_params: a dictionary containing configuration parameters - necessary for Oracle Database authentication using plugins, such as - the Azure and OCI cloud-native authentication plugins + - ``extra_auth_params``: a dictionary containing configuration + parameters necessary for Oracle Database authentication using + plugins, such as the Azure and OCI cloud-native authentication + plugins - - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP + with Oracle Database 23.4 or higher - - handle: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. 
It should be used with - extreme caution + - ``handle``: an integer representing a pointer to a valid service + context handle. This value is only used in thick mode. It should be + used with extreme caution """ pass diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index a233ec88..dd082a8e 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1731,27 +1731,27 @@ def connect( The following parameters are all optional. A brief description of each parameter follows: - - user: the name of the user to connect to + - ``user``: the name of the user to connect to (default: None) - - proxy_user: the name of the proxy user to connect to. If this value is - not specified, it will be parsed out of user if user is in the form + - ``proxy_user``: the name of the proxy user to connect to. If this value + is not specified, it will be parsed out of user if user is in the form "user[proxy_user]" (default: None) - - password: the password for the user + - ``password``: the password for the user (default: None) - - newpassword: the new password for the user. The new password will take - effect immediately upon a successful connection to the database + - ``newpassword``: the new password for the user. The new password will + take effect immediately upon a successful connection to the database (default: None) - - wallet_password: the password to use to decrypt the wallet, if it is + - ``wallet_password``: the password to use to decrypt the wallet, if it is encrypted. This value is only used in thin mode (default: None) - - access_token: expected to be a string or a 2-tuple or a callable. If it - is a string, it specifies an Azure AD OAuth2 token used for Open + - ``access_token``: expected to be a string or a 2-tuple or a callable. If + it is a string, it specifies an Azure AD OAuth2 token used for Open Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based @@ -1761,149 +1761,153 @@ def connect( current authentication token has expired (default: None) - - host: the name or IP address of the machine hosting the database or the - database listener + - ``host``: the name or IP address of the machine hosting the database or + the database listener (default: None) - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening (default: 1521) - - protocol: one of the strings "tcp" or "tcps" indicating whether to use - unencrypted network traffic or encrypted network traffic (TLS) + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether to + use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - https_proxy: the name or IP address of a proxy host to use for tunneling - secure connections + - ``https_proxy``: the name or IP address of a proxy host to use for + tunneling secure connections (default: None) - - https_proxy_port: the port on which to communicate with the proxy host + - ``https_proxy_port``: the port on which to communicate with the proxy + host (default: 0) - - service_name: the service name of the database + - ``service_name``: the service name of the database (default: None) - - instance_name: the instance name of the database + - ``instance_name``: the instance name of the database (default: None) - - sid: the system identifier (SID) of the database. 
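A brief sketch of ConnectParams.set(), whose parameters are listed above: values passed to set() replace the corresponding settings while everything else on the object is left unchanged. The host, service name, user, and statement cache size below are illustrative assumptions only.

.. code-block:: python

    import getpass

    import oracledb

    params = oracledb.ConnectParams(host="dbhost.example.com", port=1521,
                                    service_name="orclpdb")

    # Only the named values change; other settings keep their previous values.
    params.set(user="hr", stmtcachesize=40)

    connection = oracledb.connect(password=getpass.getpass("Password: "),
                                  params=params)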
Note using a + - ``sid``: the system identifier (SID) of the database. Note using a service_name instead is recommended (default: None) - - server_type: the type of server connection that should be established. If - specified, it should be one of "dedicated", "shared" or "pooled" + - ``server_type``: the type of server connection that should be + established. If specified, it should be one of "dedicated", "shared" or + "pooled" (default: None) - - cclass: connection class to use for Database Resident Connection Pooling - (DRCP) + - ``cclass``: connection class to use for Database Resident Connection + Pooling (DRCP) (default: None) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - - expire_time: an integer indicating the number of minutes between the + - ``expire_time``: an integer indicating the number of minutes between the sending of keepalive probes. If this parameter is set to a value greater than zero it enables keepalive (default: 0) - - retry_count: the number of times that a connection attempt should be + - ``retry_count``: the number of times that a connection attempt should be retried before the attempt is terminated (default: 0) - - retry_delay: the number of seconds to wait before making a new connection - attempt + - ``retry_delay``: the number of seconds to wait before making a new + connection attempt (default: 1) - - tcp_connect_timeout: a float indicating the maximum number of seconds to - wait for establishing a connection to the database host + - ``tcp_connect_timeout``: a float indicating the maximum number of seconds + to wait for establishing a connection to the database host (default: 20.0) - - ssl_server_dn_match: boolean indicating whether the server certificate - distinguished name (DN) should be matched in addition to the regular - certificate verification that is performed. Note that if the + - ``ssl_server_dn_match``: boolean indicating whether the server + certificate distinguished name (DN) should be matched in addition to the + regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead (default: True) - - ssl_server_cert_dn: the distinguished name (DN) which should be matched - with the server. This value is ignored if the ssl_server_dn_match + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be + matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the hostname will be used (default: None) - - wallet_location: the directory where the wallet can be found. In thin + - ``wallet_location``: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. In thick mode this must be the directory containing the file cwallet.sso (default: None) - - events: boolean specifying whether events mode should be enabled. This - value is only used in thick mode and is needed for continuous query + - ``events``: boolean specifying whether events mode should be enabled. 
+ This value is only used in thick mode and is needed for continuous query notification and high availability event notifications (default: False) - - externalauth: a boolean indicating whether to use external authentication + - ``externalauth``: a boolean indicating whether to use external + authentication (default: False) - - mode: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA + - ``mode``: authorization mode to use. For example + oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) - - disable_oob: boolean indicating whether out-of-band breaks should be + - ``disable_oob``: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on Windows which does not support this functionality (default: False) - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - - edition: edition to use for the connection. This parameter cannot be used - simultaneously with the cclass parameter + - ``edition``: edition to use for the connection. This parameter cannot be + used simultaneously with the cclass parameter (default: None) - - tag: identifies the type of connection that should be returned from a + - ``tag``: identifies the type of connection that should be returned from a pool. This value is only used in thick mode (default: None) - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick mode (default: False) - - config_dir: directory in which the optional tnsnames.ora configuration - file is located. This value is only used in thin mode. For thick mode use - the config_dir parameter of init_oracle_client() + - ``config_dir``: directory in which the optional tnsnames.ora + configuration file is located. This value is only used in thin mode. For + thick mode use the config_dir parameter of init_oracle_client() (default: oracledb.defaults.config_dir) - - appcontext: application context used by the connection. It should be a - list of 3-tuples (namespace, name, value) and each entry in the tuple + - ``appcontext``: application context used by the connection. It should be + a list of 3-tuples (namespace, name, value) and each entry in the tuple should be a string (default: None) - - shardingkey: a list of strings, numbers, bytes or dates that identify the - database shard to connect to. This value is only used in thick mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that identify + the database shard to connect to. This value is only used in thick mode (default: None) - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode (default: None) - - debug_jdwp: a string with the format "host=;port=" that + - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. 
For thick mode set the ORA_DEBUG_JDWP environment variable (default: None) - - connection_id_prefix: an application specific prefix that is added to the - connection identifier used for tracing + - ``connection_id_prefix``: an application specific prefix that is added to + the connection identifier used for tracing (default: None) - - ssl_context: an SSLContext object used for connecting to the database + - ``ssl_context``: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key or any certificates found in a separately supplied wallet. This parameter should only be specified if the default SSLContext object cannot be used (default: None) - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. The SDU size that will @@ -1911,59 +1915,62 @@ def connect( database network SDU configuration value (default: 8192) - - pool_boundary: one of the values "statement" or "transaction" indicating - when pooled DRCP connections can be returned to the pool. This requires - the use of DRCP with Oracle Database 23.4 or higher + - ``pool_boundary``: one of the values "statement" or "transaction" + indicating when pooled DRCP connections can be returned to the pool. This + requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This - is an Oracle Autonomous Database Serverless (ADB-S) specific property for - clients connecting from within OCI Cloud network. Please refer to the - ADB-S documentation for more information + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + This is an Oracle Autonomous Database Serverless (ADB-S) specific + property for clients connecting from within OCI Cloud network. 
Please + refer to the ADB-S documentation for more information (default: False) - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - program: the name of the executable program or application connected to - the Oracle Database + - ``program``: the name of the executable program or application connected + to the Oracle Database (default: oracledb.defaults.program) - - machine: the machine name of the client connecting to the Oracle Database + - ``machine``: the machine name of the client connecting to the Oracle + Database (default: oracledb.defaults.machine) - - terminal: the terminal identifier from which the connection originates + - ``terminal``: the terminal identifier from which the connection + originates (default: oracledb.defaults.terminal) - - osuser: the operating system user that initiates the database connection + - ``osuser``: the operating system user that initiates the database + connection (default: oracledb.defaults.osuser) - - driver_name: the driver name used by the client to connect to the Oracle - Database + - ``driver_name``: the driver name used by the client to connect to the + Oracle Database (default: oracledb.defaults.driver_name) - - use_sni: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - thick_mode_dsn_passthrough: boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file (default: oracledb.defaults.thick_mode_dsn_passthrough) - - extra_auth_params: a dictionary containing configuration parameters + - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) - - pool_name: the name of the DRCP pool when using multi-pool DRCP with + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with Oracle Database 23.4 or higher (default: None) - - handle: an integer representing a pointer to a valid service context + - ``handle``: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with extreme caution (default: 0) @@ -2870,27 +2877,27 @@ def connect_async( The following parameters are all optional. A brief description of each parameter follows: - - user: the name of the user to connect to + - ``user``: the name of the user to connect to (default: None) - - proxy_user: the name of the proxy user to connect to. If this value is - not specified, it will be parsed out of user if user is in the form + - ``proxy_user``: the name of the proxy user to connect to. If this value + is not specified, it will be parsed out of user if user is in the form "user[proxy_user]" (default: None) - - password: the password for the user + - ``password``: the password for the user (default: None) - - newpassword: the new password for the user. 
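For context, here is a minimal standalone connect() call using a few of the keyword parameters listed above. The connect details and credentials are invented for illustration and are not part of this change.

.. code-block:: python

    import getpass

    import oracledb

    connection = oracledb.connect(
        user="hr",
        password=getpass.getpass("Password: "),
        host="dbhost.example.com",
        port=1521,
        service_name="orclpdb",
    )

    with connection.cursor() as cursor:
        cursor.execute("select sysdate from dual")
        print(cursor.fetchone())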
The new password will take - effect immediately upon a successful connection to the database + - ``newpassword``: the new password for the user. The new password will + take effect immediately upon a successful connection to the database (default: None) - - wallet_password: the password to use to decrypt the wallet, if it is + - ``wallet_password``: the password to use to decrypt the wallet, if it is encrypted. This value is only used in thin mode (default: None) - - access_token: expected to be a string or a 2-tuple or a callable. If it - is a string, it specifies an Azure AD OAuth2 token used for Open + - ``access_token``: expected to be a string or a 2-tuple or a callable. If + it is a string, it specifies an Azure AD OAuth2 token used for Open Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based @@ -2900,149 +2907,153 @@ def connect_async( current authentication token has expired (default: None) - - host: the name or IP address of the machine hosting the database or the - database listener + - ``host``: the name or IP address of the machine hosting the database or + the database listener (default: None) - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening (default: 1521) - - protocol: one of the strings "tcp" or "tcps" indicating whether to use - unencrypted network traffic or encrypted network traffic (TLS) + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether to + use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - https_proxy: the name or IP address of a proxy host to use for tunneling - secure connections + - ``https_proxy``: the name or IP address of a proxy host to use for + tunneling secure connections (default: None) - - https_proxy_port: the port on which to communicate with the proxy host + - ``https_proxy_port``: the port on which to communicate with the proxy + host (default: 0) - - service_name: the service name of the database + - ``service_name``: the service name of the database (default: None) - - instance_name: the instance name of the database + - ``instance_name``: the instance name of the database (default: None) - - sid: the system identifier (SID) of the database. Note using a + - ``sid``: the system identifier (SID) of the database. Note using a service_name instead is recommended (default: None) - - server_type: the type of server connection that should be established. If - specified, it should be one of "dedicated", "shared" or "pooled" + - ``server_type``: the type of server connection that should be + established. If specified, it should be one of "dedicated", "shared" or + "pooled" (default: None) - - cclass: connection class to use for Database Resident Connection Pooling - (DRCP) + - ``cclass``: connection class to use for Database Resident Connection + Pooling (DRCP) (default: None) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - - expire_time: an integer indicating the number of minutes between the + - ``expire_time``: an integer indicating the number of minutes between the sending of keepalive probes. 
If this parameter is set to a value greater than zero it enables keepalive (default: 0) - - retry_count: the number of times that a connection attempt should be + - ``retry_count``: the number of times that a connection attempt should be retried before the attempt is terminated (default: 0) - - retry_delay: the number of seconds to wait before making a new connection - attempt + - ``retry_delay``: the number of seconds to wait before making a new + connection attempt (default: 1) - - tcp_connect_timeout: a float indicating the maximum number of seconds to - wait for establishing a connection to the database host + - ``tcp_connect_timeout``: a float indicating the maximum number of seconds + to wait for establishing a connection to the database host (default: 20.0) - - ssl_server_dn_match: boolean indicating whether the server certificate - distinguished name (DN) should be matched in addition to the regular - certificate verification that is performed. Note that if the + - ``ssl_server_dn_match``: boolean indicating whether the server + certificate distinguished name (DN) should be matched in addition to the + regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead (default: True) - - ssl_server_cert_dn: the distinguished name (DN) which should be matched - with the server. This value is ignored if the ssl_server_dn_match + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be + matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the hostname will be used (default: None) - - wallet_location: the directory where the wallet can be found. In thin + - ``wallet_location``: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. In thick mode this must be the directory containing the file cwallet.sso (default: None) - - events: boolean specifying whether events mode should be enabled. This - value is only used in thick mode and is needed for continuous query + - ``events``: boolean specifying whether events mode should be enabled. + This value is only used in thick mode and is needed for continuous query notification and high availability event notifications (default: False) - - externalauth: a boolean indicating whether to use external authentication + - ``externalauth``: a boolean indicating whether to use external + authentication (default: False) - - mode: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA + - ``mode``: authorization mode to use. For example + oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) - - disable_oob: boolean indicating whether out-of-band breaks should be + - ``disable_oob``: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on Windows which does not support this functionality (default: False) - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - - edition: edition to use for the connection. This parameter cannot be used - simultaneously with the cclass parameter + - ``edition``: edition to use for the connection. 
This parameter cannot be + used simultaneously with the cclass parameter (default: None) - - tag: identifies the type of connection that should be returned from a + - ``tag``: identifies the type of connection that should be returned from a pool. This value is only used in thick mode (default: None) - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick mode (default: False) - - config_dir: directory in which the optional tnsnames.ora configuration - file is located. This value is only used in thin mode. For thick mode use - the config_dir parameter of init_oracle_client() + - ``config_dir``: directory in which the optional tnsnames.ora + configuration file is located. This value is only used in thin mode. For + thick mode use the config_dir parameter of init_oracle_client() (default: oracledb.defaults.config_dir) - - appcontext: application context used by the connection. It should be a - list of 3-tuples (namespace, name, value) and each entry in the tuple + - ``appcontext``: application context used by the connection. It should be + a list of 3-tuples (namespace, name, value) and each entry in the tuple should be a string (default: None) - - shardingkey: a list of strings, numbers, bytes or dates that identify the - database shard to connect to. This value is only used in thick mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that identify + the database shard to connect to. This value is only used in thick mode (default: None) - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode (default: None) - - debug_jdwp: a string with the format "host=;port=" that + - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment variable (default: None) - - connection_id_prefix: an application specific prefix that is added to the - connection identifier used for tracing + - ``connection_id_prefix``: an application specific prefix that is added to + the connection identifier used for tracing (default: None) - - ssl_context: an SSLContext object used for connecting to the database + - ``ssl_context``: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key or any certificates found in a separately supplied wallet. This parameter should only be specified if the default SSLContext object cannot be used (default: None) - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. The SDU size that will @@ -3050,59 +3061,62 @@ def connect_async( database network SDU configuration value (default: 8192) - - pool_boundary: one of the values "statement" or "transaction" indicating - when pooled DRCP connections can be returned to the pool. 
This requires - the use of DRCP with Oracle Database 23.4 or higher + - ``pool_boundary``: one of the values "statement" or "transaction" + indicating when pooled DRCP connections can be returned to the pool. This + requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This - is an Oracle Autonomous Database Serverless (ADB-S) specific property for - clients connecting from within OCI Cloud network. Please refer to the - ADB-S documentation for more information + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + This is an Oracle Autonomous Database Serverless (ADB-S) specific + property for clients connecting from within OCI Cloud network. Please + refer to the ADB-S documentation for more information (default: False) - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - program: the name of the executable program or application connected to - the Oracle Database + - ``program``: the name of the executable program or application connected + to the Oracle Database (default: oracledb.defaults.program) - - machine: the machine name of the client connecting to the Oracle Database + - ``machine``: the machine name of the client connecting to the Oracle + Database (default: oracledb.defaults.machine) - - terminal: the terminal identifier from which the connection originates + - ``terminal``: the terminal identifier from which the connection + originates (default: oracledb.defaults.terminal) - - osuser: the operating system user that initiates the database connection + - ``osuser``: the operating system user that initiates the database + connection (default: oracledb.defaults.osuser) - - driver_name: the driver name used by the client to connect to the Oracle - Database + - ``driver_name``: the driver name used by the client to connect to the + Oracle Database (default: oracledb.defaults.driver_name) - - use_sni: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - thick_mode_dsn_passthrough: boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file (default: oracledb.defaults.thick_mode_dsn_passthrough) - - extra_auth_params: a dictionary containing configuration parameters + - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) - - pool_name: the name of the DRCP pool when using multi-pool DRCP with + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with Oracle Database 23.4 or higher (default: None) - - handle: an integer representing a pointer to a valid service context + - ``handle``: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. 
It should be used with extreme caution (default: 0) diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index 1e04f0e7..013a6370 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -736,59 +736,59 @@ def create_pool( The following parameters are all optional. A brief description of each parameter follows: - - min: the minimum number of connections the pool should contain + - ``min``: the minimum number of connections the pool should contain (default: 1) - - max: the maximum number of connections the pool should contain + - ``max``: the maximum number of connections the pool should contain (default: 2) - - increment: the number of connections that should be added to the pool + - ``increment``: the number of connections that should be added to the pool whenever a new connection needs to be created (default: 1) - - connectiontype: the class of the connection that should be returned + - ``connectiontype``: the class of the connection that should be returned during calls to pool.acquire(). It must be oracledb.Connection or a subclass of oracledb.Connection (default: None) - - getmode: how pool.acquire() will behave. One of the constants + - ``getmode``: how pool.acquire() will behave. One of the constants oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT (default: oracledb.POOL_GETMODE_WAIT) - - homogeneous: a boolean indicating whether the connections are homogeneous - (same user) or heterogeneous (multiple users) + - ``homogeneous``: a boolean indicating whether the connections are + homogeneous (same user) or heterogeneous (multiple users) (default: True) - - timeout: length of time (in seconds) that a connection may remain idle in - the pool before it is terminated. If it is 0 then connections are never - terminated + - ``timeout``: length of time (in seconds) that a connection may remain + idle in the pool before it is terminated. If it is 0 then connections are + never terminated (default: 0) - - wait_timeout: length of time (in milliseconds) that a caller should wait - when acquiring a connection from the pool with getmode set to + - ``wait_timeout``: length of time (in milliseconds) that a caller should + wait when acquiring a connection from the pool with getmode set to oracledb.POOL_GETMODE_TIMEDWAIT (default: 0) - - max_lifetime_session: length of time (in seconds) that connections can - remain in the pool. If it is 0 then connections may remain in the pool - indefinitely + - ``max_lifetime_session``: length of time (in seconds) that connections + can remain in the pool. 
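To make the ``create_pool()`` sizing and acquisition parameters described above (``min``, ``max``, ``increment``, ``getmode``, ``timeout``) more concrete, here is a short sketch of creating and using a synchronous pool; the credentials and connect string are placeholders only::

    import oracledb

    pool = oracledb.create_pool(
        user="hr",
        password="hr_password",           # placeholder credentials
        dsn="dbhost.example.com/orclpdb",
        min=1,
        max=4,
        increment=1,
        getmode=oracledb.POOL_GETMODE_WAIT,
        timeout=60,                       # idle connections are candidates for closure after 60 seconds
    )

    with pool.acquire() as connection:
        with connection.cursor() as cursor:
            cursor.execute("select user from dual")
            print(cursor.fetchone())

    pool.close()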
If it is 0 then connections may remain in the + pool indefinitely (default: 0) - - session_callback: a callable that is invoked when a connection is + - ``session_callback``: a callable that is invoked when a connection is returned from the pool for the first time, or when the connection tag differs from the one requested (default: None) - - max_sessions_per_shard: the maximum number of connections that may be + - ``max_sessions_per_shard``: the maximum number of connections that may be associated with a particular shard (default: 0) - - soda_metadata_cache: boolean indicating whether or not the SODA metadata - cache should be enabled + - ``soda_metadata_cache``: boolean indicating whether or not the SODA + metadata cache should be enabled (default: False) - - ping_interval: length of time (in seconds) after which an unused + - ``ping_interval``: length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by @@ -796,32 +796,32 @@ def create_pool( functionality will be disabled (default: 60) - - ping_timeout: maximum length of time (in milliseconds) to wait for a + - ``ping_timeout``: maximum length of time (in milliseconds) to wait for a connection in the pool to respond to an internal ping to the database before being discarded and replaced during a call to acquire() (default: 5000) - - user: the name of the user to connect to + - ``user``: the name of the user to connect to (default: None) - - proxy_user: the name of the proxy user to connect to. If this value is - not specified, it will be parsed out of user if user is in the form + - ``proxy_user``: the name of the proxy user to connect to. If this value + is not specified, it will be parsed out of user if user is in the form "user[proxy_user]" (default: None) - - password: the password for the user + - ``password``: the password for the user (default: None) - - newpassword: the new password for the user. The new password will take - effect immediately upon a successful connection to the database + - ``newpassword``: the new password for the user. The new password will + take effect immediately upon a successful connection to the database (default: None) - - wallet_password: the password to use to decrypt the wallet, if it is + - ``wallet_password``: the password to use to decrypt the wallet, if it is encrypted. This value is only used in thin mode (default: None) - - access_token: expected to be a string or a 2-tuple or a callable. If it - is a string, it specifies an Azure AD OAuth2 token used for Open + - ``access_token``: expected to be a string or a 2-tuple or a callable. If + it is a string, it specifies an Azure AD OAuth2 token used for Open Authorization (OAuth 2.0) token based authentication. 
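For the ``session_callback`` parameter described a few items above, a minimal sketch of a callback that initializes a connection the first time it is returned from the pool might look as follows; the session setting used here is only an example::

    def init_session(connection, requested_tag):
        # runs when a pooled connection is first acquired (or its tag differs)
        with connection.cursor() as cursor:
            cursor.execute("alter session set time_zone = 'UTC'")

    pool = oracledb.create_pool(
        user="hr",
        password="hr_password",           # placeholder credentials
        dsn="dbhost.example.com/orclpdb",
        session_callback=init_session,
    )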
If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based @@ -831,149 +831,153 @@ def create_pool( current authentication token has expired (default: None) - - host: the name or IP address of the machine hosting the database or the - database listener + - ``host``: the name or IP address of the machine hosting the database or + the database listener (default: None) - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening (default: 1521) - - protocol: one of the strings "tcp" or "tcps" indicating whether to use - unencrypted network traffic or encrypted network traffic (TLS) + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether to + use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - https_proxy: the name or IP address of a proxy host to use for tunneling - secure connections + - ``https_proxy``: the name or IP address of a proxy host to use for + tunneling secure connections (default: None) - - https_proxy_port: the port on which to communicate with the proxy host + - ``https_proxy_port``: the port on which to communicate with the proxy + host (default: 0) - - service_name: the service name of the database + - ``service_name``: the service name of the database (default: None) - - instance_name: the instance name of the database + - ``instance_name``: the instance name of the database (default: None) - - sid: the system identifier (SID) of the database. Note using a + - ``sid``: the system identifier (SID) of the database. Note using a service_name instead is recommended (default: None) - - server_type: the type of server connection that should be established. If - specified, it should be one of "dedicated", "shared" or "pooled" + - ``server_type``: the type of server connection that should be + established. If specified, it should be one of "dedicated", "shared" or + "pooled" (default: None) - - cclass: connection class to use for Database Resident Connection Pooling - (DRCP) + - ``cclass``: connection class to use for Database Resident Connection + Pooling (DRCP) (default: None) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - - expire_time: an integer indicating the number of minutes between the + - ``expire_time``: an integer indicating the number of minutes between the sending of keepalive probes. 
If this parameter is set to a value greater than zero it enables keepalive (default: 0) - - retry_count: the number of times that a connection attempt should be + - ``retry_count``: the number of times that a connection attempt should be retried before the attempt is terminated (default: 0) - - retry_delay: the number of seconds to wait before making a new connection - attempt + - ``retry_delay``: the number of seconds to wait before making a new + connection attempt (default: 1) - - tcp_connect_timeout: a float indicating the maximum number of seconds to - wait for establishing a connection to the database host + - ``tcp_connect_timeout``: a float indicating the maximum number of seconds + to wait for establishing a connection to the database host (default: 20.0) - - ssl_server_dn_match: boolean indicating whether the server certificate - distinguished name (DN) should be matched in addition to the regular - certificate verification that is performed. Note that if the + - ``ssl_server_dn_match``: boolean indicating whether the server + certificate distinguished name (DN) should be matched in addition to the + regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead (default: True) - - ssl_server_cert_dn: the distinguished name (DN) which should be matched - with the server. This value is ignored if the ssl_server_dn_match + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be + matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the hostname will be used (default: None) - - wallet_location: the directory where the wallet can be found. In thin + - ``wallet_location``: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. In thick mode this must be the directory containing the file cwallet.sso (default: None) - - events: boolean specifying whether events mode should be enabled. This - value is only used in thick mode and is needed for continuous query + - ``events``: boolean specifying whether events mode should be enabled. + This value is only used in thick mode and is needed for continuous query notification and high availability event notifications (default: False) - - externalauth: a boolean indicating whether to use external authentication + - ``externalauth``: a boolean indicating whether to use external + authentication (default: False) - - mode: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA + - ``mode``: authorization mode to use. For example + oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) - - disable_oob: boolean indicating whether out-of-band breaks should be + - ``disable_oob``: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. It has no effect on Windows which does not support this functionality (default: False) - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - - edition: edition to use for the connection. This parameter cannot be used - simultaneously with the cclass parameter + - ``edition``: edition to use for the connection. 
This parameter cannot be + used simultaneously with the cclass parameter (default: None) - - tag: identifies the type of connection that should be returned from a + - ``tag``: identifies the type of connection that should be returned from a pool. This value is only used in thick mode (default: None) - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick mode (default: False) - - config_dir: directory in which the optional tnsnames.ora configuration - file is located. This value is only used in thin mode. For thick mode use - the config_dir parameter of init_oracle_client() + - ``config_dir``: directory in which the optional tnsnames.ora + configuration file is located. This value is only used in thin mode. For + thick mode use the config_dir parameter of init_oracle_client() (default: oracledb.defaults.config_dir) - - appcontext: application context used by the connection. It should be a - list of 3-tuples (namespace, name, value) and each entry in the tuple + - ``appcontext``: application context used by the connection. It should be + a list of 3-tuples (namespace, name, value) and each entry in the tuple should be a string (default: None) - - shardingkey: a list of strings, numbers, bytes or dates that identify the - database shard to connect to. This value is only used in thick mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that identify + the database shard to connect to. This value is only used in thick mode (default: None) - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode (default: None) - - debug_jdwp: a string with the format "host=;port=" that + - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment variable (default: None) - - connection_id_prefix: an application specific prefix that is added to the - connection identifier used for tracing + - ``connection_id_prefix``: an application specific prefix that is added to + the connection identifier used for tracing (default: None) - - ssl_context: an SSLContext object used for connecting to the database + - ``ssl_context``: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key or any certificates found in a separately supplied wallet. This parameter should only be specified if the default SSLContext object cannot be used (default: None) - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. The SDU size that will @@ -981,59 +985,62 @@ def create_pool( database network SDU configuration value (default: 8192) - - pool_boundary: one of the values "statement" or "transaction" indicating - when pooled DRCP connections can be returned to the pool. 
This requires - the use of DRCP with Oracle Database 23.4 or higher + - ``pool_boundary``: one of the values "statement" or "transaction" + indicating when pooled DRCP connections can be returned to the pool. This + requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This - is an Oracle Autonomous Database Serverless (ADB-S) specific property for - clients connecting from within OCI Cloud network. Please refer to the - ADB-S documentation for more information + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + This is an Oracle Autonomous Database Serverless (ADB-S) specific + property for clients connecting from within OCI Cloud network. Please + refer to the ADB-S documentation for more information (default: False) - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - program: the name of the executable program or application connected to - the Oracle Database + - ``program``: the name of the executable program or application connected + to the Oracle Database (default: oracledb.defaults.program) - - machine: the machine name of the client connecting to the Oracle Database + - ``machine``: the machine name of the client connecting to the Oracle + Database (default: oracledb.defaults.machine) - - terminal: the terminal identifier from which the connection originates + - ``terminal``: the terminal identifier from which the connection + originates (default: oracledb.defaults.terminal) - - osuser: the operating system user that initiates the database connection + - ``osuser``: the operating system user that initiates the database + connection (default: oracledb.defaults.osuser) - - driver_name: the driver name used by the client to connect to the Oracle - Database + - ``driver_name``: the driver name used by the client to connect to the + Oracle Database (default: oracledb.defaults.driver_name) - - use_sni: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - thick_mode_dsn_passthrough: boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file (default: oracledb.defaults.thick_mode_dsn_passthrough) - - extra_auth_params: a dictionary containing configuration parameters + - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the Azure and OCI cloud-native authentication plugins (default: None) - - pool_name: the name of the DRCP pool when using multi-pool DRCP with + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with Oracle Database 23.4 or higher (default: None) - - handle: an integer representing a pointer to a valid service context + - ``handle``: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. 
It should be used with extreme caution (default: 0) @@ -1298,262 +1305,314 @@ def create_pool_async( The following parameters are all optional. A brief description of each parameter follows: - - min: the minimum number of connections the pool should contain (default: - 1) + - ``min``: the minimum number of connections the pool should contain + (default: 1) - - max: the maximum number of connections the pool should contain (default: - 2) + - ``max``: the maximum number of connections the pool should contain + (default: 2) - - increment: the number of connections that should be added to the pool - whenever a new connection needs to be created (default: 1) + - ``increment``: the number of connections that should be added to the pool + whenever a new connection needs to be created + (default: 1) - - connectiontype: the class of the connection that should be returned + - ``connectiontype``: the class of the connection that should be returned during calls to pool.acquire(). It must be oracledb.AsyncConnection or a - subclass of oracledb.AsyncConnection (default: None) + subclass of oracledb.AsyncConnection + (default: None) - - getmode: how pool.acquire() will behave. One of the constants + - ``getmode``: how pool.acquire() will behave. One of the constants oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT (default: oracledb.POOL_GETMODE_WAIT) - - homogeneous: a boolean indicating whether the connections are homogeneous - (same user) or heterogeneous (multiple users) (default: True) + - ``homogeneous``: a boolean indicating whether the connections are + homogeneous (same user) or heterogeneous (multiple users) + (default: True) - - timeout: length of time (in seconds) that a connection may remain idle in - the pool before it is terminated. If it is 0 then connections are never - terminated (default: 0) + - ``timeout``: length of time (in seconds) that a connection may remain + idle in the pool before it is terminated. If it is 0 then connections are + never terminated + (default: 0) - - wait_timeout: length of time (in milliseconds) that a caller should wait - when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT (default: 0) + - ``wait_timeout``: length of time (in milliseconds) that a caller should + wait when acquiring a connection from the pool with getmode set to + oracledb.POOL_GETMODE_TIMEDWAIT + (default: 0) - - max_lifetime_session: length of time (in seconds) that connections can - remain in the pool. If it is 0 then connections may remain in the pool - indefinitely (default: 0) + - ``max_lifetime_session``: length of time (in seconds) that connections + can remain in the pool. 
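The asynchronous pool created by ``create_pool_async()`` is used in much the same way as the synchronous one; the sketch below, with placeholder credentials, shows the pool being used from an ``asyncio`` coroutine::

    import asyncio
    import oracledb

    async def main():
        pool = oracledb.create_pool_async(
            user="hr",
            password="hr_password",           # placeholder credentials
            dsn="dbhost.example.com/orclpdb",
            min=1,
            max=4,
            increment=1,
        )
        async with pool.acquire() as connection:
            cursor = connection.cursor()
            await cursor.execute("select sysdate from dual")
            print(await cursor.fetchone())
        await pool.close()

    asyncio.run(main())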
If it is 0 then connections may remain in the + pool indefinitely + (default: 0) - - session_callback: a callable that is invoked when a connection is + - ``session_callback``: a callable that is invoked when a connection is returned from the pool for the first time, or when the connection tag - differs from the one requested (default: None) + differs from the one requested + (default: None) - - max_sessions_per_shard: the maximum number of connections that may be - associated with a particular shard (default: 0) + - ``max_sessions_per_shard``: the maximum number of connections that may be + associated with a particular shard + (default: 0) - - soda_metadata_cache: boolean indicating whether or not the SODA metadata - cache should be enabled (default: False) + - ``soda_metadata_cache``: boolean indicating whether or not the SODA + metadata cache should be enabled + (default: False) - - ping_interval: length of time (in seconds) after which an unused + - ``ping_interval``: length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If ping_interval is a negative value the ping - functionality will be disabled (default: 60) + functionality will be disabled + (default: 60) - - ping_timeout: maximum length of time (in milliseconds) to wait for a + - ``ping_timeout``: maximum length of time (in milliseconds) to wait for a connection in the pool to respond to an internal ping to the database - before being discarded and replaced during a call to acquire() (default: - 5000) + before being discarded and replaced during a call to acquire() + (default: 5000) - - user: the name of the user to connect to (default: None) + - ``user``: the name of the user to connect to + (default: None) - - proxy_user: the name of the proxy user to connect to. If this value is - not specified, it will be parsed out of user if user is in the form - "user[proxy_user]" (default: None) + - ``proxy_user``: the name of the proxy user to connect to. If this value + is not specified, it will be parsed out of user if user is in the form + "user[proxy_user]" + (default: None) - - password: the password for the user (default: None) + - ``password``: the password for the user + (default: None) - - newpassword: the new password for the user. The new password will take - effect immediately upon a successful connection to the database (default: - None) + - ``newpassword``: the new password for the user. The new password will + take effect immediately upon a successful connection to the database + (default: None) - - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode (default: None) + - ``wallet_password``: the password to use to decrypt the wallet, if it is + encrypted. This value is only used in thin mode + (default: None) - - access_token: expected to be a string or a 2-tuple or a callable. If it - is a string, it specifies an Azure AD OAuth2 token used for Open + - ``access_token``: expected to be a string or a 2-tuple or a callable. If + it is a string, it specifies an Azure AD OAuth2 token used for Open Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. 
If it is a callable, it returns either a string or a 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is useful when the pool needs to expand and create new connections but the - current authentication token has expired (default: None) + current authentication token has expired + (default: None) - - host: the name or IP address of the machine hosting the database or the - database listener (default: None) + - ``host``: the name or IP address of the machine hosting the database or + the database listener + (default: None) - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening (default: 1521) - - protocol: one of the strings "tcp" or "tcps" indicating whether to use - unencrypted network traffic or encrypted network traffic (TLS) (default: - "tcp") + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether to + use unencrypted network traffic or encrypted network traffic (TLS) + (default: "tcp") - - https_proxy: the name or IP address of a proxy host to use for tunneling - secure connections (default: None) + - ``https_proxy``: the name or IP address of a proxy host to use for + tunneling secure connections + (default: None) - - https_proxy_port: the port on which to communicate with the proxy host + - ``https_proxy_port``: the port on which to communicate with the proxy + host (default: 0) - - service_name: the service name of the database (default: None) + - ``service_name``: the service name of the database + (default: None) - - instance_name: the instance name of the database (default: None) + - ``instance_name``: the instance name of the database + (default: None) - - sid: the system identifier (SID) of the database. Note using a - service_name instead is recommended (default: None) + - ``sid``: the system identifier (SID) of the database. Note using a + service_name instead is recommended + (default: None) - - server_type: the type of server connection that should be established. If - specified, it should be one of "dedicated", "shared" or "pooled" + - ``server_type``: the type of server connection that should be + established. If specified, it should be one of "dedicated", "shared" or + "pooled" (default: None) - - cclass: connection class to use for Database Resident Connection Pooling - (DRCP) (default: None) + - ``cclass``: connection class to use for Database Resident Connection + Pooling (DRCP) + (default: None) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) (default: oracledb.PURITY_DEFAULT) - - expire_time: an integer indicating the number of minutes between the + - ``expire_time``: an integer indicating the number of minutes between the sending of keepalive probes. 
If this parameter is set to a value greater - than zero it enables keepalive (default: 0) + than zero it enables keepalive + (default: 0) - - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated (default: 0) + - ``retry_count``: the number of times that a connection attempt should be + retried before the attempt is terminated + (default: 0) - - retry_delay: the number of seconds to wait before making a new connection - attempt (default: 1) + - ``retry_delay``: the number of seconds to wait before making a new + connection attempt + (default: 1) - - tcp_connect_timeout: a float indicating the maximum number of seconds to - wait for establishing a connection to the database host (default: 20.0) + - ``tcp_connect_timeout``: a float indicating the maximum number of seconds + to wait for establishing a connection to the database host + (default: 20.0) - - ssl_server_dn_match: boolean indicating whether the server certificate - distinguished name (DN) should be matched in addition to the regular - certificate verification that is performed. Note that if the + - ``ssl_server_dn_match``: boolean indicating whether the server + certificate distinguished name (DN) should be matched in addition to the + regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is - performed instead (default: True) + performed instead + (default: True) - - ssl_server_cert_dn: the distinguished name (DN) which should be matched - with the server. This value is ignored if the ssl_server_dn_match + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be + matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used - for any verfication. Otherwise the hostname will be used (default: None) + for any verfication. Otherwise the hostname will be used + (default: None) - - wallet_location: the directory where the wallet can be found. In thin + - ``wallet_location``: the directory where the wallet can be found. In thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. In thick mode this must be the directory containing the file - cwallet.sso (default: None) + cwallet.sso + (default: None) - - events: boolean specifying whether events mode should be enabled. This - value is only used in thick mode and is needed for continuous query - notification and high availability event notifications (default: False) + - ``events``: boolean specifying whether events mode should be enabled. + This value is only used in thick mode and is needed for continuous query + notification and high availability event notifications + (default: False) - - externalauth: a boolean indicating whether to use external authentication + - ``externalauth``: a boolean indicating whether to use external + authentication (default: False) - - mode: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA + - ``mode``: authorization mode to use. For example + oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) - - disable_oob: boolean indicating whether out-of-band breaks should be + - ``disable_oob``: boolean indicating whether out-of-band breaks should be disabled. This value is only used in thin mode. 
It has no effect on - Windows which does not support this functionality (default: False) + Windows which does not support this functionality + (default: False) - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - - edition: edition to use for the connection. This parameter cannot be used - simultaneously with the cclass parameter (default: None) + - ``edition``: edition to use for the connection. This parameter cannot be + used simultaneously with the cclass parameter + (default: None) - - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode (default: None) + - ``tag``: identifies the type of connection that should be returned from a + pool. This value is only used in thick mode + (default: None) - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick - mode (default: False) + mode + (default: False) - - config_dir: directory in which the optional tnsnames.ora configuration - file is located. This value is only used in thin mode. For thick mode use - the config_dir parameter of init_oracle_client() (default: - oracledb.defaults.config_dir) + - ``config_dir``: directory in which the optional tnsnames.ora + configuration file is located. This value is only used in thin mode. For + thick mode use the config_dir parameter of init_oracle_client() + (default: oracledb.defaults.config_dir) - - appcontext: application context used by the connection. It should be a - list of 3-tuples (namespace, name, value) and each entry in the tuple - should be a string (default: None) + - ``appcontext``: application context used by the connection. It should be + a list of 3-tuples (namespace, name, value) and each entry in the tuple + should be a string + (default: None) - - shardingkey: a list of strings, numbers, bytes or dates that identify the - database shard to connect to. This value is only used in thick mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that identify + the database shard to connect to. This value is only used in thick mode (default: None) - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode (default: None) + thick mode + (default: None) - - debug_jdwp: a string with the format "host=;port=" that + - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable (default: None) + variable + (default: None) - - connection_id_prefix: an application specific prefix that is added to the - connection identifier used for tracing (default: None) + - ``connection_id_prefix``: an application specific prefix that is added to + the connection identifier used for tracing + (default: None) - - ssl_context: an SSLContext object used for connecting to the database + - ``ssl_context``: an SSLContext object used for connecting to the database using TLS. This SSL context will be modified to include the private key or any certificates found in a separately supplied wallet. 
This parameter should only be specified if the default SSLContext object cannot be used (default: None) - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. The value tunes internal buffers used for communication to the database. Bigger values can increase throughput for large queries or bulk data loads, but at the cost of higher memory use. The SDU size that will actually be used is negotiated down to the lower of this value and the - database network SDU configuration value (default: 8192) + database network SDU configuration value + (default: 8192) - - pool_boundary: one of the values "statement" or "transaction" indicating - when pooled DRCP connections can be returned to the pool. This requires - the use of DRCP with Oracle Database 23.4 or higher (default: None) + - ``pool_boundary``: one of the values "statement" or "transaction" + indicating when pooled DRCP connections can be returned to the pool. This + requires the use of DRCP with Oracle Database 23.4 or higher + (default: None) - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. This - is an Oracle Autonomous Database Serverless (ADB-S) specific property for - clients connecting from within OCI Cloud network. Please refer to the - ADB-S documentation for more information (default: False) + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + This is an Oracle Autonomous Database Serverless (ADB-S) specific + property for clients connecting from within OCI Cloud network. Please + refer to the ADB-S documentation for more information + (default: False) - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or - ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: - None) + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or + ssl.TLSVersion.TLSv1_3 indicating which TLS version to use + (default: None) - - program: the name of the executable program or application connected to - the Oracle Database (default: oracledb.defaults.program) + - ``program``: the name of the executable program or application connected + to the Oracle Database + (default: oracledb.defaults.program) - - machine: the machine name of the client connecting to the Oracle Database + - ``machine``: the machine name of the client connecting to the Oracle + Database (default: oracledb.defaults.machine) - - terminal: the terminal identifier from which the connection originates + - ``terminal``: the terminal identifier from which the connection + originates (default: oracledb.defaults.terminal) - - osuser: the operating system user that initiates the database connection + - ``osuser``: the operating system user that initiates the database + connection (default: oracledb.defaults.osuser) - - driver_name: the driver name used by the client to connect to the Oracle - Database (default: oracledb.defaults.driver_name) + - ``driver_name``: the driver name used by the client to connect to the + Oracle Database + (default: oracledb.defaults.driver_name) - - use_sni: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - thick_mode_dsn_passthrough: boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing by the driver. 
Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file (default: oracledb.defaults.thick_mode_dsn_passthrough) - - extra_auth_params: a dictionary containing configuration parameters + - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the - Azure and OCI cloud-native authentication plugins (default: None) + Azure and OCI cloud-native authentication plugins + (default: None) - - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher (default: None) + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with + Oracle Database 23.4 or higher + (default: None) - - handle: an integer representing a pointer to a valid service context + - ``handle``: an integer representing a pointer to a valid service context handle. This value is only used in thick mode. It should be used with - extreme caution (default: 0) + extreme caution + (default: 0) """ pass diff --git a/src/oracledb/pool_params.py b/src/oracledb/pool_params.py index 349ecfb2..dc679111 100644 --- a/src/oracledb/pool_params.py +++ b/src/oracledb/pool_params.py @@ -127,59 +127,59 @@ def __init__( All parameters are optional. A brief description of each parameter follows: - - min: the minimum number of connections the pool should contain + - ``min``: the minimum number of connections the pool should contain (default: 1) - - max: the maximum number of connections the pool should contain + - ``max``: the maximum number of connections the pool should contain (default: 2) - - increment: the number of connections that should be added to the pool - whenever a new connection needs to be created + - ``increment``: the number of connections that should be added to the + pool whenever a new connection needs to be created (default: 1) - - connectiontype: the class of the connection that should be returned - during calls to pool.acquire(). It must be oracledb.Connection or a - subclass of oracledb.Connection + - ``connectiontype``: the class of the connection that should be + returned during calls to pool.acquire(). It must be + oracledb.Connection or a subclass of oracledb.Connection (default: None) - - getmode: how pool.acquire() will behave. One of the constants + - ``getmode``: how pool.acquire() will behave. One of the constants oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT (default: oracledb.POOL_GETMODE_WAIT) - - homogeneous: a boolean indicating whether the connections are + - ``homogeneous``: a boolean indicating whether the connections are homogeneous (same user) or heterogeneous (multiple users) (default: True) - - timeout: length of time (in seconds) that a connection may remain + - ``timeout``: length of time (in seconds) that a connection may remain idle in the pool before it is terminated. 
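As an illustration of the ``PoolParams`` constructor documented in this hunk, the following sketch builds a parameter object with a few of the values described above and passes it to ``create_pool()``; credentials and connect string are placeholders::

    import oracledb

    params = oracledb.PoolParams(
        min=1,
        max=4,
        increment=1,
        getmode=oracledb.POOL_GETMODE_WAIT,
        ping_interval=30,
    )

    pool = oracledb.create_pool(
        user="hr",
        password="hr_password",           # placeholder credentials
        dsn="dbhost.example.com/orclpdb",
        params=params,
    )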
If it is 0 then connections are never terminated (default: 0) - - wait_timeout: length of time (in milliseconds) that a caller should - wait when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT + - ``wait_timeout``: length of time (in milliseconds) that a caller + should wait when acquiring a connection from the pool with getmode + set to oracledb.POOL_GETMODE_TIMEDWAIT (default: 0) - - max_lifetime_session: length of time (in seconds) that connections - can remain in the pool. If it is 0 then connections may remain in the - pool indefinitely + - ``max_lifetime_session``: length of time (in seconds) that + connections can remain in the pool. If it is 0 then connections may + remain in the pool indefinitely (default: 0) - - session_callback: a callable that is invoked when a connection is + - ``session_callback``: a callable that is invoked when a connection is returned from the pool for the first time, or when the connection tag differs from the one requested (default: None) - - max_sessions_per_shard: the maximum number of connections that may be - associated with a particular shard + - ``max_sessions_per_shard``: the maximum number of connections that + may be associated with a particular shard (default: 0) - - soda_metadata_cache: boolean indicating whether or not the SODA + - ``soda_metadata_cache``: boolean indicating whether or not the SODA metadata cache should be enabled (default: False) - - ping_interval: length of time (in seconds) after which an unused + - ``ping_interval``: length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by @@ -187,33 +187,34 @@ def __init__( functionality will be disabled (default: 60) - - ping_timeout: maximum length of time (in milliseconds) to wait for a - connection in the pool to respond to an internal ping to the database - before being discarded and replaced during a call to acquire() + - ``ping_timeout``: maximum length of time (in milliseconds) to wait + for a connection in the pool to respond to an internal ping to the + database before being discarded and replaced during a call to + acquire() (default: 5000) - - user: the name of the user to connect to + - ``user``: the name of the user to connect to (default: None) - - proxy_user: the name of the proxy user to connect to. If this value - is not specified, it will be parsed out of user if user is in the - form "user[proxy_user]" + - ``proxy_user``: the name of the proxy user to connect to. If this + value is not specified, it will be parsed out of user if user is in + the form "user[proxy_user]" (default: None) - - password: the password for the user + - ``password``: the password for the user (default: None) - - newpassword: the new password for the user. The new password will + - ``newpassword``: the new password for the user. The new password will take effect immediately upon a successful connection to the database (default: None) - - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + - ``wallet_password``: the password to use to decrypt the wallet, if it + is encrypted. This value is only used in thin mode (default: None) - - access_token: expected to be a string or a 2-tuple or a callable. 
If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: expected to be a string or a 2-tuple or a callable. + If it is a string, it specifies an Azure AD OAuth2 token used for + Open Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. If it is a callable, it returns @@ -223,222 +224,224 @@ def __init__( expired (default: None) - - host: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the name or IP address of the machine hosting the database + or the database listener (default: None) - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening (default: 1521) - - protocol: one of the strings "tcp" or "tcps" indicating whether to - use unencrypted network traffic or encrypted network traffic (TLS) + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether + to use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - https_proxy: the name or IP address of a proxy host to use for + - ``https_proxy``: the name or IP address of a proxy host to use for tunneling secure connections (default: None) - - https_proxy_port: the port on which to communicate with the proxy + - ``https_proxy_port``: the port on which to communicate with the proxy host (default: 0) - - service_name: the service name of the database + - ``service_name``: the service name of the database (default: None) - - instance_name: the instance name of the database + - ``instance_name``: the instance name of the database (default: None) - - sid: the system identifier (SID) of the database. Note using a + - ``sid``: the system identifier (SID) of the database. Note using a service_name instead is recommended (default: None) - - server_type: the type of server connection that should be + - ``server_type``: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" or "pooled" (default: None) - - cclass: connection class to use for Database Resident Connection + - ``cclass``: connection class to use for Database Resident Connection Pooling (DRCP) (default: None) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling + (DRCP) (default: oracledb.PURITY_DEFAULT) - - expire_time: an integer indicating the number of minutes between the - sending of keepalive probes. If this parameter is set to a value + - ``expire_time``: an integer indicating the number of minutes between + the sending of keepalive probes. 
If this parameter is set to a value greater than zero it enables keepalive (default: 0) - - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that a connection attempt should + be retried before the attempt is terminated (default: 0) - - retry_delay: the number of seconds to wait before making a new + - ``retry_delay``: the number of seconds to wait before making a new connection attempt (default: 1) - - tcp_connect_timeout: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + - ``tcp_connect_timeout``: a float indicating the maximum number of + seconds to wait for establishing a connection to the database host (default: 20.0) - - ssl_server_dn_match: boolean indicating whether the server + - ``ssl_server_dn_match``: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead (default: True) - - ssl_server_cert_dn: the distinguished name (DN) which should be + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be matched with the server. This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the hostname will be used (default: None) - - wallet_location: the directory where the wallet can be found. In thin - mode this must be the directory containing the PEM-encoded wallet - file ewallet.pem. In thick mode this must be the directory containing - the file cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. In + thin mode this must be the directory containing the PEM-encoded + wallet file ewallet.pem. In thick mode this must be the directory + containing the file cwallet.sso (default: None) - - events: boolean specifying whether events mode should be enabled. + - ``events``: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous query notification and high availability event notifications (default: False) - - externalauth: a boolean indicating whether to use external + - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - mode: authorization mode to use. For example + - ``mode``: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA (default: oracledb.AUTH_MODE_DEFAULT) - - disable_oob: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``disable_oob``: boolean indicating whether out-of-band breaks should + be disabled. This value is only used in thin mode. It has no effect + on Windows which does not support this functionality (default: False) - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache (default: oracledb.defaults.stmtcachesize) - - edition: edition to use for the connection. This parameter cannot be - used simultaneously with the cclass parameter + - ``edition``: edition to use for the connection. 
This parameter cannot + be used simultaneously with the cclass parameter (default: None) - - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode + - ``tag``: identifies the type of connection that should be returned + from a pool. This value is only used in thick mode (default: None) - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick mode (default: False) - - config_dir: directory in which the optional tnsnames.ora + - ``config_dir``: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use the config_dir parameter of init_oracle_client() (default: oracledb.defaults.config_dir) - - appcontext: application context used by the connection. It should be - a list of 3-tuples (namespace, name, value) and each entry in the + - ``appcontext``: application context used by the connection. It should + be a list of 3-tuples (namespace, name, value) and each entry in the tuple should be a string (default: None) - - shardingkey: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. This value is only used in thick - mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that + identify the database shard to connect to. This value is only used in + thick mode (default: None) - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in thick mode (default: None) - - debug_jdwp: a string with the format "host=;port=" that - specifies the host and port of the PL/SQL debugger. This value is - only used in thin mode. For thick mode set the ORA_DEBUG_JDWP + - ``debug_jdwp``: a string with the format "host=;port=" + that specifies the host and port of the PL/SQL debugger. This value + is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment variable (default: None) - - connection_id_prefix: an application specific prefix that is added to - the connection identifier used for tracing + - ``connection_id_prefix``: an application specific prefix that is + added to the connection identifier used for tracing (default: None) - - ssl_context: an SSLContext object used for connecting to the database - using TLS. This SSL context will be modified to include the private - key or any certificates found in a separately supplied wallet. This - parameter should only be specified if the default SSLContext object - cannot be used + - ``ssl_context``: an SSLContext object used for connecting to the + database using TLS. This SSL context will be modified to include the + private key or any certificates found in a separately supplied + wallet. This parameter should only be specified if the default + SSLContext object cannot be used (default: None) - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The - value tunes internal buffers used for communication to the database. - Bigger values can increase throughput for large queries or bulk data - loads, but at the cost of higher memory use. 
The SDU size that will - actually be used is negotiated down to the lower of this value and - the database network SDU configuration value + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. + The value tunes internal buffers used for communication to the + database. Bigger values can increase throughput for large queries or + bulk data loads, but at the cost of higher memory use. The SDU size + that will actually be used is negotiated down to the lower of this + value and the database network SDU configuration value (default: 8192) - - pool_boundary: one of the values "statement" or "transaction" + - ``pool_boundary``: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. This requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. - This is an Oracle Autonomous Database Serverless (ADB-S) specific - property for clients connecting from within OCI Cloud network. Please - refer to the ADB-S documentation for more information + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + open. This is an Oracle Autonomous Database Serverless (ADB-S) + specific property for clients connecting from within OCI Cloud + network. Please refer to the ADB-S documentation for more information (default: False) - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - program: the name of the executable program or application connected - to the Oracle Database + - ``program``: the name of the executable program or application + connected to the Oracle Database (default: oracledb.defaults.program) - - machine: the machine name of the client connecting to the Oracle + - ``machine``: the machine name of the client connecting to the Oracle Database (default: oracledb.defaults.machine) - - terminal: the terminal identifier from which the connection + - ``terminal``: the terminal identifier from which the connection originates (default: oracledb.defaults.terminal) - - osuser: the operating system user that initiates the database + - ``osuser``: the operating system user that initiates the database connection (default: oracledb.defaults.osuser) - - driver_name: the driver name used by the client to connect to the + - ``driver_name``: the driver name used by the client to connect to the Oracle Database (default: oracledb.defaults.driver_name) - - use_sni: boolean indicating whether to use the TLS SNI extension to - bypass the second TLS neogiation that would otherwise be required + - ``use_sni``: boolean indicating whether to use the TLS SNI extension + to bypass the second TLS neogiation that would otherwise be required (default: False) - - thick_mode_dsn_passthrough: boolean indicating whether to pass the - connect string to the Oracle Client libraries unchanged without + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + the connect string to the Oracle Client libraries unchanged without parsing by the driver. 
Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file (default: oracledb.defaults.thick_mode_dsn_passthrough) - - extra_auth_params: a dictionary containing configuration parameters - necessary for Oracle Database authentication using plugins, such as - the Azure and OCI cloud-native authentication plugins + - ``extra_auth_params``: a dictionary containing configuration + parameters necessary for Oracle Database authentication using + plugins, such as the Azure and OCI cloud-native authentication + plugins (default: None) - - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP + with Oracle Database 23.4 or higher (default: None) - - handle: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. It should be used with - extreme caution + - ``handle``: an integer representing a pointer to a valid service + context handle. This value is only used in thick mode. It should be + used with extreme caution (default: 0) """ pass @@ -715,74 +718,75 @@ def set( All parameters are optional. A brief description of each parameter follows: - - min: the minimum number of connections the pool should contain + - ``min``: the minimum number of connections the pool should contain - - max: the maximum number of connections the pool should contain + - ``max``: the maximum number of connections the pool should contain - - increment: the number of connections that should be added to the pool - whenever a new connection needs to be created + - ``increment``: the number of connections that should be added to the + pool whenever a new connection needs to be created - - connectiontype: the class of the connection that should be returned - during calls to pool.acquire(). It must be oracledb.Connection or a - subclass of oracledb.Connection + - ``connectiontype``: the class of the connection that should be + returned during calls to pool.acquire(). It must be + oracledb.Connection or a subclass of oracledb.Connection - - getmode: how pool.acquire() will behave. One of the constants + - ``getmode``: how pool.acquire() will behave. One of the constants oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT - - homogeneous: a boolean indicating whether the connections are + - ``homogeneous``: a boolean indicating whether the connections are homogeneous (same user) or heterogeneous (multiple users) - - timeout: length of time (in seconds) that a connection may remain + - ``timeout``: length of time (in seconds) that a connection may remain idle in the pool before it is terminated. If it is 0 then connections are never terminated - - wait_timeout: length of time (in milliseconds) that a caller should - wait when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT + - ``wait_timeout``: length of time (in milliseconds) that a caller + should wait when acquiring a connection from the pool with getmode + set to oracledb.POOL_GETMODE_TIMEDWAIT - - max_lifetime_session: length of time (in seconds) that connections - can remain in the pool. If it is 0 then connections may remain in the - pool indefinitely + - ``max_lifetime_session``: length of time (in seconds) that + connections can remain in the pool. 
If it is 0 then connections may + remain in the pool indefinitely - - session_callback: a callable that is invoked when a connection is + - ``session_callback``: a callable that is invoked when a connection is returned from the pool for the first time, or when the connection tag differs from the one requested - - max_sessions_per_shard: the maximum number of connections that may be - associated with a particular shard + - ``max_sessions_per_shard``: the maximum number of connections that + may be associated with a particular shard - - soda_metadata_cache: boolean indicating whether or not the SODA + - ``soda_metadata_cache``: boolean indicating whether or not the SODA metadata cache should be enabled - - ping_interval: length of time (in seconds) after which an unused + - ``ping_interval``: length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If ping_interval is a negative value the ping functionality will be disabled - - ping_timeout: maximum length of time (in milliseconds) to wait for a - connection in the pool to respond to an internal ping to the database - before being discarded and replaced during a call to acquire() + - ``ping_timeout``: maximum length of time (in milliseconds) to wait + for a connection in the pool to respond to an internal ping to the + database before being discarded and replaced during a call to + acquire() - - user: the name of the user to connect to + - ``user``: the name of the user to connect to - - proxy_user: the name of the proxy user to connect to. If this value - is not specified, it will be parsed out of user if user is in the - form "user[proxy_user]" + - ``proxy_user``: the name of the proxy user to connect to. If this + value is not specified, it will be parsed out of user if user is in + the form "user[proxy_user]" - - password: the password for the user + - ``password``: the password for the user - - newpassword: the new password for the user. The new password will + - ``newpassword``: the new password for the user. The new password will take effect immediately upon a successful connection to the database - - wallet_password: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + - ``wallet_password``: the password to use to decrypt the wallet, if it + is encrypted. This value is only used in thin mode - - access_token: expected to be a string or a 2-tuple or a callable. If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: expected to be a string or a 2-tuple or a callable. + If it is a string, it specifies an Azure AD OAuth2 token used for + Open Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. 
If it is a callable, it returns @@ -791,175 +795,177 @@ def set( create new connections but the current authentication token has expired - - host: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the name or IP address of the machine hosting the database + or the database listener - - port: the port number on which the database listener is listening + - ``port``: the port number on which the database listener is listening - - protocol: one of the strings "tcp" or "tcps" indicating whether to - use unencrypted network traffic or encrypted network traffic (TLS) + - ``protocol``: one of the strings "tcp" or "tcps" indicating whether + to use unencrypted network traffic or encrypted network traffic (TLS) - - https_proxy: the name or IP address of a proxy host to use for + - ``https_proxy``: the name or IP address of a proxy host to use for tunneling secure connections - - https_proxy_port: the port on which to communicate with the proxy + - ``https_proxy_port``: the port on which to communicate with the proxy host - - service_name: the service name of the database + - ``service_name``: the service name of the database - - instance_name: the instance name of the database + - ``instance_name``: the instance name of the database - - sid: the system identifier (SID) of the database. Note using a + - ``sid``: the system identifier (SID) of the database. Note using a service_name instead is recommended - - server_type: the type of server connection that should be + - ``server_type``: the type of server connection that should be established. If specified, it should be one of "dedicated", "shared" or "pooled" - - cclass: connection class to use for Database Resident Connection + - ``cclass``: connection class to use for Database Resident Connection Pooling (DRCP) - - purity: purity to use for Database Resident Connection Pooling (DRCP) + - ``purity``: purity to use for Database Resident Connection Pooling + (DRCP) - - expire_time: an integer indicating the number of minutes between the - sending of keepalive probes. If this parameter is set to a value + - ``expire_time``: an integer indicating the number of minutes between + the sending of keepalive probes. If this parameter is set to a value greater than zero it enables keepalive - - retry_count: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that a connection attempt should + be retried before the attempt is terminated - - retry_delay: the number of seconds to wait before making a new + - ``retry_delay``: the number of seconds to wait before making a new connection attempt - - tcp_connect_timeout: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + - ``tcp_connect_timeout``: a float indicating the maximum number of + seconds to wait for establishing a connection to the database host - - ssl_server_dn_match: boolean indicating whether the server + - ``ssl_server_dn_match``: boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead - - ssl_server_cert_dn: the distinguished name (DN) which should be + - ``ssl_server_cert_dn``: the distinguished name (DN) which should be matched with the server. 
This value is ignored if the ssl_server_dn_match parameter is not set to the value True. If specified this value is used for any verfication. Otherwise the hostname will be used - - wallet_location: the directory where the wallet can be found. In thin - mode this must be the directory containing the PEM-encoded wallet - file ewallet.pem. In thick mode this must be the directory containing - the file cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. In + thin mode this must be the directory containing the PEM-encoded + wallet file ewallet.pem. In thick mode this must be the directory + containing the file cwallet.sso - - events: boolean specifying whether events mode should be enabled. + - ``events``: boolean specifying whether events mode should be enabled. This value is only used in thick mode and is needed for continuous query notification and high availability event notifications - - externalauth: a boolean indicating whether to use external + - ``externalauth``: a boolean indicating whether to use external authentication - - mode: authorization mode to use. For example + - ``mode``: authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA - - disable_oob: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``disable_oob``: boolean indicating whether out-of-band breaks should + be disabled. This value is only used in thin mode. It has no effect + on Windows which does not support this functionality - - stmtcachesize: identifies the initial size of the statement cache + - ``stmtcachesize``: identifies the initial size of the statement cache - - edition: edition to use for the connection. This parameter cannot be - used simultaneously with the cclass parameter + - ``edition``: edition to use for the connection. This parameter cannot + be used simultaneously with the cclass parameter - - tag: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode + - ``tag``: identifies the type of connection that should be returned + from a pool. This value is only used in thick mode - - matchanytag: boolean specifying whether any tag can be used when + - ``matchanytag``: boolean specifying whether any tag can be used when acquiring a connection from the pool. This value is only used in thick mode - - config_dir: directory in which the optional tnsnames.ora + - ``config_dir``: directory in which the optional tnsnames.ora configuration file is located. This value is only used in thin mode. For thick mode use the config_dir parameter of init_oracle_client() - - appcontext: application context used by the connection. It should be - a list of 3-tuples (namespace, name, value) and each entry in the + - ``appcontext``: application context used by the connection. It should + be a list of 3-tuples (namespace, name, value) and each entry in the tuple should be a string - - shardingkey: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. This value is only used in thick - mode + - ``shardingkey``: a list of strings, numbers, bytes or dates that + identify the database shard to connect to. This value is only used in + thick mode - - supershardingkey: a list of strings, numbers, bytes or dates that + - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. 
This value is only used in thick mode - - debug_jdwp: a string with the format "host=;port=" that - specifies the host and port of the PL/SQL debugger. This value is - only used in thin mode. For thick mode set the ORA_DEBUG_JDWP + - ``debug_jdwp``: a string with the format "host=;port=" + that specifies the host and port of the PL/SQL debugger. This value + is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment variable - - connection_id_prefix: an application specific prefix that is added to - the connection identifier used for tracing + - ``connection_id_prefix``: an application specific prefix that is + added to the connection identifier used for tracing - - ssl_context: an SSLContext object used for connecting to the database - using TLS. This SSL context will be modified to include the private - key or any certificates found in a separately supplied wallet. This - parameter should only be specified if the default SSLContext object - cannot be used + - ``ssl_context``: an SSLContext object used for connecting to the + database using TLS. This SSL context will be modified to include the + private key or any certificates found in a separately supplied + wallet. This parameter should only be specified if the default + SSLContext object cannot be used - - sdu: the requested size of the Session Data Unit (SDU), in bytes. The - value tunes internal buffers used for communication to the database. - Bigger values can increase throughput for large queries or bulk data - loads, but at the cost of higher memory use. The SDU size that will - actually be used is negotiated down to the lower of this value and - the database network SDU configuration value + - ``sdu``: the requested size of the Session Data Unit (SDU), in bytes. + The value tunes internal buffers used for communication to the + database. Bigger values can increase throughput for large queries or + bulk data loads, but at the cost of higher memory use. The SDU size + that will actually be used is negotiated down to the lower of this + value and the database network SDU configuration value - - pool_boundary: one of the values "statement" or "transaction" + - ``pool_boundary``: one of the values "statement" or "transaction" indicating when pooled DRCP connections can be returned to the pool. This requires the use of DRCP with Oracle Database 23.4 or higher - - use_tcp_fast_open: boolean indicating whether to use TCP fast open. - This is an Oracle Autonomous Database Serverless (ADB-S) specific - property for clients connecting from within OCI Cloud network. Please - refer to the ADB-S documentation for more information + - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + open. This is an Oracle Autonomous Database Serverless (ADB-S) + specific property for clients connecting from within OCI Cloud + network. 
Please refer to the ADB-S documentation for more information - - ssl_version: one of the values ssl.TLSVersion.TLSv1_2 or + - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use - - program: the name of the executable program or application connected - to the Oracle Database + - ``program``: the name of the executable program or application + connected to the Oracle Database - - machine: the machine name of the client connecting to the Oracle + - ``machine``: the machine name of the client connecting to the Oracle Database - - terminal: the terminal identifier from which the connection + - ``terminal``: the terminal identifier from which the connection originates - - osuser: the operating system user that initiates the database + - ``osuser``: the operating system user that initiates the database connection - - driver_name: the driver name used by the client to connect to the + - ``driver_name``: the driver name used by the client to connect to the Oracle Database - - use_sni: boolean indicating whether to use the TLS SNI extension to - bypass the second TLS neogiation that would otherwise be required + - ``use_sni``: boolean indicating whether to use the TLS SNI extension + to bypass the second TLS neogiation that would otherwise be required - - thick_mode_dsn_passthrough: boolean indicating whether to pass the - connect string to the Oracle Client libraries unchanged without + - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + the connect string to the Oracle Client libraries unchanged without parsing by the driver. Setting this to False makes thick and thin mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file - - extra_auth_params: a dictionary containing configuration parameters - necessary for Oracle Database authentication using plugins, such as - the Azure and OCI cloud-native authentication plugins + - ``extra_auth_params``: a dictionary containing configuration + parameters necessary for Oracle Database authentication using + plugins, such as the Azure and OCI cloud-native authentication + plugins - - pool_name: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP + with Oracle Database 23.4 or higher - - handle: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. It should be used with - extreme caution + - ``handle``: an integer representing a pointer to a valid service + context handle. This value is only used in thick mode. It should be + used with extreme caution """ pass diff --git a/utils/Makefile b/utils/Makefile index 5fcc0884..420358ab 100644 --- a/utils/Makefile +++ b/utils/Makefile @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2022, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,14 +33,16 @@ TEMPLATE_DIR = templates all: $(SOURCE_DIR)/connect_params.py $(SOURCE_DIR)/pool_params.py \ $(SOURCE_DIR)/connection.py $(SOURCE_DIR)/pool.py -$(SOURCE_DIR)/connection.py: fields.cfg $(TEMPLATE_DIR)/connection.py +BASE_DEPS = fields.cfg build_from_template.py + +$(SOURCE_DIR)/connection.py: $(BASE_DEPS) $(TEMPLATE_DIR)/connection.py python build_from_template.py connection -$(SOURCE_DIR)/connect_params.py: fields.cfg $(TEMPLATE_DIR)/connect_params.py +$(SOURCE_DIR)/connect_params.py: $(BASE_DEPS) $(TEMPLATE_DIR)/connect_params.py python build_from_template.py connect_params -$(SOURCE_DIR)/pool.py: fields.cfg $(TEMPLATE_DIR)/pool.py +$(SOURCE_DIR)/pool.py: $(BASE_DEPS) $(TEMPLATE_DIR)/pool.py python build_from_template.py pool -$(SOURCE_DIR)/pool_params.py: fields.cfg $(TEMPLATE_DIR)/pool_params.py +$(SOURCE_DIR)/pool_params.py: $(BASE_DEPS) $(TEMPLATE_DIR)/pool_params.py python build_from_template.py pool_params diff --git a/utils/build_from_template.py b/utils/build_from_template.py index 2e141386..e9e61ee9 100644 --- a/utils/build_from_template.py +++ b/utils/build_from_template.py @@ -77,17 +77,40 @@ class Field: description: str = "" source: str = None - @property - def async_description(self): - return self.description.replace( - "oracledb.Connection", "oracledb.AsyncConnection" - ) - - @property - def async_typ(self): - return self.typ.replace( - "oracledb.Connection", "oracledb.AsyncConnection" + def get_arg_string(self, with_async: bool = False) -> str: + """ + Returns the string defining the argument when used in a function + definition. + """ + typ = self.typ + if with_async: + typ = typ.replace( + "oracledb.Connection", "oracledb.AsyncConnection" + ) + return f"{self.name}: Optional[{typ}] = None," + + def get_help_string( + self, indent: str, with_default: bool = False, with_async: bool = False + ) -> str: + """ + Returns the help string to use for the field with the given + indentation. + """ + description = self.description + if with_async: + description = description.replace( + "oracledb.Connection", "oracledb.AsyncConnection" + ) + raw_help_string = f"- ``{self.name}``: {description}" + help_string = textwrap.fill( + raw_help_string, + initial_indent=indent, + subsequent_indent=indent + " ", + width=TEXT_WIDTH, ) + if with_default: + help_string += f"\n{indent} (default: {self.default})" + return help_string # parse command line @@ -146,41 +169,17 @@ def args_help_with_defaults_content(indent): """ Generates the content for the args_help_with_defaults template tag. """ - raw_descriptions = [] - for f in fields: - if not f.description: - continue - raw_descriptions.append(f"- {f.name}: {f.description}") - raw_descriptions.append(f" (default: {f.default})") - raw_descriptions.append("") descriptions = [ - textwrap.fill( - d, - initial_indent=indent, - subsequent_indent=indent + " ", - width=TEXT_WIDTH, - ) - for d in raw_descriptions[:-1] + f.get_help_string(indent, with_default=True) for f in fields ] - return "\n".join(descriptions).strip() + return "\n\n".join(descriptions).strip() def args_help_without_defaults_content(indent): """ Generates the content for the args_help_without_defaults template tag. 
""" - raw_descriptions = [ - f"- {f.name}: {f.description}" for f in fields if f.description - ] - descriptions = [ - textwrap.fill( - d, - initial_indent=indent, - subsequent_indent=indent + " ", - width=TEXT_WIDTH, - ) - for d in raw_descriptions - ] + descriptions = [f.get_help_string(indent) for f in fields] return "\n\n".join(descriptions).strip() @@ -189,7 +188,7 @@ def args_with_defaults_content(indent): Generates the content for the args_with_defaults template tag. """ args_joiner = "\n" + indent - args = [f"{f.name}: Optional[{f.typ}] = None," for f in fields] + args = [f.get_arg_string() for f in fields] return args_joiner.join(args) @@ -197,19 +196,9 @@ def async_args_help_with_defaults_content(indent): """ Generates the content for the async_args_help_with_defaults template tag. """ - raw_descriptions = [ - f"- {f.name}: {f.async_description} (default: {f.default})" - for f in fields - if f.description - ] descriptions = [ - textwrap.fill( - d, - initial_indent=indent, - subsequent_indent=indent + " ", - width=TEXT_WIDTH, - ) - for d in raw_descriptions + f.get_help_string(indent, with_default=True, with_async=True) + for f in fields ] return "\n\n".join(descriptions).strip() @@ -219,7 +208,7 @@ def async_args_with_defaults_content(indent): Generates the content for the async_args_with_defaults template tag. """ args_joiner = "\n" + indent - args = [f"{f.name}: Optional[{f.async_typ}] = None," for f in fields] + args = [f.get_arg_string(with_async=True) for f in fields] return args_joiner.join(args) diff --git a/utils/templates/connection.py b/utils/templates/connection.py index 34e259dd..2aafc959 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -2529,6 +2529,6 @@ def connect_async( The following parameters are all optional. A brief description of each parameter follows: - # {{ args_help_with_defaults }} + # {{ async_args_help_with_defaults }} """ pass From 0489c7ea1b3438ef2764595d71d4762b116e5d82 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 8 Aug 2025 10:20:54 -0600 Subject: [PATCH 179/239] Added support for explicitly specifying whether to fetch LOBs or decimal values when performing queries. 
--- doc/src/api_manual/async_connection.rst | 20 +++ doc/src/api_manual/async_cursor.rst | 4 + doc/src/api_manual/connection.rst | 8 ++ doc/src/api_manual/cursor.rst | 4 + doc/src/api_manual/module.rst | 12 +- doc/src/api_manual/pipeline.rst | 20 +++ doc/src/release_notes.rst | 7 + doc/src/user_guide/json_data_type.rst | 5 +- doc/src/user_guide/lob_data.rst | 18 ++- samples/json_blob.py | 3 +- samples/json_blob_async.py | 3 +- samples/json_direct.py | 5 +- samples/json_direct_async.py | 5 +- samples/return_lobs_as_strings.py | 6 +- samples/return_lobs_as_strings_async.py | 6 +- samples/return_numbers_as_decimals.py | 5 +- samples/return_numbers_as_decimals_async.py | 5 +- samples/spatial_to_geopandas.py | 19 ++- ...le-Database-The-New-Wave-of-Scripting.html | 9 +- src/oracledb/base_impl.pxd | 4 + src/oracledb/connection.py | 86 +++++++++++- src/oracledb/cursor.py | 37 ++++- src/oracledb/impl/base/cursor.pyx | 8 +- src/oracledb/impl/base/pipeline.pyx | 12 +- src/oracledb/impl/thin/connection.pyx | 6 + src/oracledb/pipeline.py | 54 +++++++ tests/test_1900_lob_var.py | 5 +- tests/test_4300_cursor_other.py | 9 ++ tests/test_6300_cursor_other_async.py | 11 ++ ..._7000_connection_async_shortcut_methods.py | 50 +++++++ tests/test_7600_pipelining_async.py | 132 +++++++++++++++--- tests/test_8700_sessionless_transaction.py | 9 +- ...test_8800_sessionless_transaction_async.py | 9 +- tests/test_8900_dataframe_ingestion.py | 48 +++---- tests/test_9000_dataframe_ingestion_async.py | 48 +++---- utils/templates/connection.py | 86 +++++++++++- 36 files changed, 629 insertions(+), 149 deletions(-) diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index b4f1cb3e..ada4e9fb 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -87,6 +87,10 @@ AsyncConnection Methods .. automethod:: AsyncConnection.fetchall + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. automethod:: AsyncConnection.fetch_df_all See :ref:`dataframeformat` for the supported data types and examples. @@ -96,6 +100,10 @@ AsyncConnection Methods The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. + .. versionchanged:: 3.4.0 + + The ``fetch_decimals`` parameter was added. + .. versionadded:: 3.0.0 .. automethod:: AsyncConnection.fetch_df_batches @@ -107,12 +115,24 @@ AsyncConnection Methods The data frame support in python-oracledb 3.3 is a pre-release and may change in a future version. + .. versionchanged:: 3.4.0 + + The ``fetch_decimals`` parameter was added. + .. versionadded:: 3.0.0 .. automethod:: AsyncConnection.fetchmany + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. automethod:: AsyncConnection.fetchone + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. automethod:: AsyncConnection.gettype .. automethod:: AsyncConnection.is_healthy diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 73a78131..a4d92488 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -68,6 +68,10 @@ AsyncCursor Methods .. automethod:: AsyncCursor.execute + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. versionchanged:: 3.3.0 The ``suspend_on_success`` parameter was added. 
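For context on the ``fetch_lobs`` and ``fetch_decimals`` parameters documented above, the following is a minimal asyncio sketch of per-call usage; the connection credentials, table, and column names are placeholders and are not part of this patch:

    import asyncio
    import oracledb

    async def main():
        # Placeholder connection details - adjust for your environment.
        conn = await oracledb.connect_async(
            user="hr", password="hr", dsn="localhost/orclpdb1"
        )

        # Fetch LOB columns as str/bytes and NUMBER columns as
        # decimal.Decimal for this call only; oracledb.defaults is unchanged.
        row = await conn.fetchone(
            "select notes, salary from employees where employee_id = :1",
            [100],
            fetch_lobs=False,
            fetch_decimals=True,
        )
        print(row)

        await conn.close()

    asyncio.run(main())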
diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 7439540a..720a8740 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -90,6 +90,10 @@ Connection Methods .. dbapimethodextension:: + .. versionchanged:: 3.4.0 + + The ``fetch_decimals`` parameter was added. + .. versionadded:: 3.0.0 .. automethod:: Connection.fetch_df_batches @@ -103,6 +107,10 @@ Connection Methods .. dbapimethodextension:: + .. versionchanged:: 3.4.0 + + The ``fetch_decimals`` parameter was added. + .. versionadded:: 3.0.0 .. automethod:: Connection.getSodaDatabase diff --git a/doc/src/api_manual/cursor.rst b/doc/src/api_manual/cursor.rst index 8f0fe0ba..68f64bd0 100644 --- a/doc/src/api_manual/cursor.rst +++ b/doc/src/api_manual/cursor.rst @@ -69,6 +69,10 @@ Cursor Methods .. automethod:: Cursor.execute + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. versionchanged:: 3.3.0 The ``suspend_on_success`` parameter was added. diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index c5889842..7b1a5b8a 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -1405,12 +1405,12 @@ Also see the table :ref:`supporteddbtypes`. .. data:: DB_TYPE_LONG_NVARCHAR This constant can be used in output type handlers when fetching NCLOB - columns as a string. (Note a type handler is not needed if - :ref:`oracledb.defaults.fetch_lobs ` is set to False). For IN - binds, this constant can be used to create a bind variable in - :meth:`Cursor.var()` or via :meth:`Cursor.setinputsizes()`. The - ``DB_TYPE_LONG_NVARCHAR`` value won't be shown in query metadata since it - is not a database type. + columns as a string. (Note a type handler is not needed if + :ref:`oracledb.defaults.fetch_lobs `, or the equivalent execution + parameter, is set to *False*). For IN binds, this constant can be used to + create a bind variable in :meth:`Cursor.var()` or via + :meth:`Cursor.setinputsizes()`. The ``DB_TYPE_LONG_NVARCHAR`` value won't + be shown in query metadata since it is not a database type. It will compare equal to the DB API type :data:`STRING`. diff --git a/doc/src/api_manual/pipeline.rst b/doc/src/api_manual/pipeline.rst index 04fe6ec4..7bceaedb 100644 --- a/doc/src/api_manual/pipeline.rst +++ b/doc/src/api_manual/pipeline.rst @@ -48,14 +48,26 @@ Pipeline Methods .. automethod:: Pipeline.add_fetchall + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. automethod:: Pipeline.add_fetchmany + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. seealso:: :ref:`roundtrips`, and :ref:`rowlimit` .. automethod:: Pipeline.add_fetchone + .. versionchanged:: 3.4.0 + + The ``fetch_lobs`` and ``fetch_decimals`` parameters were added. + .. seealso:: :ref:`rowlimit` @@ -86,6 +98,14 @@ PipelineOp Attributes :ref:`tuningfetch` +.. autoproperty:: PipelineOp.fetch_decimals + + .. versionadded:: 3.4.0 + +.. autoproperty:: PipelineOp.fetch_lobs + + .. versionadded:: 3.4.0 + .. autoproperty:: PipelineOp.keyword_parameters .. 
autoproperty:: PipelineOp.name diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 32ed8de5..42f93729 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -25,6 +25,13 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Added ``fetch_lobs`` and ``fetch_decimals`` parameters where applicable to + the methods used for fetching rows or dataframes from the database. Note + that for the creation of pipeline operations, if these parameters are not + specified then the values of + :attr:`oracledb.defaults.fetch_lobs ` and + :attr:`oracledb.defaults.fetch_decimals ` are now + stored with the operation and used during pipeline execution. #) API documentation is now generated from the source code. diff --git a/doc/src/user_guide/json_data_type.rst b/doc/src/user_guide/json_data_type.rst index c95e6dbf..a6e760f8 100644 --- a/doc/src/user_guide/json_data_type.rst +++ b/doc/src/user_guide/json_data_type.rst @@ -412,8 +412,6 @@ for example: .. code-block:: python - oracledb.defaults.fetch_lobs = False - cursor.execute(""" select json_arrayagg( @@ -423,7 +421,8 @@ for example: departments d where department_id < :did""", - [50]); + [50], + fetch_lobs=False) j, = cursor.fetchone() print(j) diff --git a/doc/src/user_guide/lob_data.rst b/doc/src/user_guide/lob_data.rst index 4ac6dc71..34ba4782 100644 --- a/doc/src/user_guide/lob_data.rst +++ b/doc/src/user_guide/lob_data.rst @@ -74,16 +74,14 @@ Fetching LOBs as Strings and Bytes CLOBs and BLOBs smaller than 1 GB can queried from the database directly as strings and bytes. This can be much faster than streaming a :ref:`LOB Object -`. Support is enabled by setting the :ref:`Defaults Object -`. +`. Support is enabled by setting :attr:`oracledb.defaults.fetch_lobs +`, or by setting the ``fetch_lobs`` parameter at statement +execution: .. code-block:: python import oracledb - # returns strings or bytes instead of a locator - oracledb.defaults.fetch_lobs = False - . . . id_val = 1 @@ -92,7 +90,7 @@ strings and bytes. This can be much faster than streaming a :ref:`LOB Object cursor.execute("insert into lob_tbl (id, c, b) values (:1, :2, :3)", [id_val, text_data, binary_data]) - cursor.execute("select c, b from lob_tbl where id = :1", [id_val]) + cursor.execute("select c, b from lob_tbl where id = :1", [id_val], fetch_lobs=False) clob_data, blob_data = cursor.fetchone() print("CLOB length:", len(clob_data)) print("CLOB data:", clob_data) @@ -106,8 +104,7 @@ This displays:: BLOB length: 16 BLOB data: b'Some binary data' -An older alternative to using ``oracledb.defaults.fetch_lobs`` is to use a type -handler: +An older alternative to using ``fetch_lobs`` is to use a type handler: .. code-block:: python @@ -124,8 +121,9 @@ handler: Streaming LOBs (Read) ===================== -Without setting ``oracledb.defaults.fetch_lobs`` to False, or without using an -output type handler, the CLOB and BLOB values are fetched as :ref:`LOB +Without setting :attr:`oracledb.defaults.fetch_lobs ` or +equivalent execution parameter to *False*, or without using an output type +handler, then the CLOB and BLOB values are fetched as :ref:`LOB objects`. 
The size of the LOB object can be obtained by calling :meth:`LOB.size()` and the data can be read by calling :meth:`LOB.read()`: diff --git a/samples/json_blob.py b/samples/json_blob.py index 433c1b31..a5189158 100644 --- a/samples/json_blob.py +++ b/samples/json_blob.py @@ -105,11 +105,10 @@ # Using JSON_ARRAYAGG to extract a whole relational table as JSON - oracledb.defaults.fetch_lobs = False sql = """select json_arrayagg( json_object('key' is c.id, 'name' is c.json_data) returning clob) from CustomersAsBlob c""" - for r in cursor.execute(sql): + for r in cursor.execute(sql, fetch_lobs=False): print(r) diff --git a/samples/json_blob_async.py b/samples/json_blob_async.py index 8f89fc58..845063a4 100644 --- a/samples/json_blob_async.py +++ b/samples/json_blob_async.py @@ -108,13 +108,12 @@ async def main(): # Using JSON_ARRAYAGG to extract a whole relational table as JSON - oracledb.defaults.fetch_lobs = False sql = """select json_arrayagg( json_object('key' is c.id, 'name' is c.json_data) returning clob) from CustomersAsBlob c""" - await cursor.execute(sql) + await cursor.execute(sql, fetch_lobs=False) async for r in cursor: print(r) diff --git a/samples/json_direct.py b/samples/json_direct.py index 3b2219fb..a6302307 100644 --- a/samples/json_direct.py +++ b/samples/json_direct.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -112,11 +112,10 @@ # Using JSON_ARRAYAGG to extract a whole relational table as JSON - oracledb.defaults.fetch_lobs = False sql = """select json_arrayagg( json_object('key' is c.id, 'name' is c.json_data) returning clob) from CustomersAsJson c""" - for r in cursor.execute(sql): + for r in cursor.execute(sql, fetch_lobs=False): print(r) diff --git a/samples/json_direct_async.py b/samples/json_direct_async.py index 7272c842..5821f935 100644 --- a/samples/json_direct_async.py +++ b/samples/json_direct_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -100,13 +100,12 @@ async def main(): # Using JSON_ARRAYAGG to extract a whole relational table as JSON - oracledb.defaults.fetch_lobs = False sql = """select json_arrayagg( json_object('key' is c.id, 'name' is c.json_data) returning clob) from CustomersAsJson c""" - await cursor.execute(sql) + await cursor.execute(sql, fetch_lobs=False) async for r in cursor: print(r) diff --git a/samples/return_lobs_as_strings.py b/samples/return_lobs_as_strings.py index 4e07a57e..e50cb7b4 100644 --- a/samples/return_lobs_as_strings.py +++ b/samples/return_lobs_as_strings.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. 
# @@ -44,7 +44,9 @@ if not sample_env.get_is_thin(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) -# indicate that LOBS should not be fetched +# A global indicating that LOB columns should be fetched as str or bytes, not +# as python-oracledb LOB objects. You can do the same in execute() calls by +# passing a fetch_lobs parameter. oracledb.defaults.fetch_lobs = False connection = oracledb.connect( diff --git a/samples/return_lobs_as_strings_async.py b/samples/return_lobs_as_strings_async.py index 686d5a46..e5d27f37 100644 --- a/samples/return_lobs_as_strings_async.py +++ b/samples/return_lobs_as_strings_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -39,7 +39,9 @@ import oracledb import sample_env -# indicate that LOBS should not be fetched +# A global indicating that LOB columns should be fetched as str or bytes, not +# as python-oracledb LOB objects. You can do the same in execute() calls by +# passing a fetch_lobs parameter. oracledb.defaults.fetch_lobs = False diff --git a/samples/return_numbers_as_decimals.py b/samples/return_numbers_as_decimals.py index aef2bf26..6b8708a1 100644 --- a/samples/return_numbers_as_decimals.py +++ b/samples/return_numbers_as_decimals.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2024, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,7 +35,8 @@ import oracledb import sample_env -# indicate that numbers should be fetched as decimals +# A global indicating that NUMBER columns should be fetched as decimal.Decimal. +# You can do the same in execute() calls by passing a fetch_decimals parameter. oracledb.defaults.fetch_decimals = True # determine whether to use python-oracledb thin mode or thick mode diff --git a/samples/return_numbers_as_decimals_async.py b/samples/return_numbers_as_decimals_async.py index d3a063a6..df206fc7 100644 --- a/samples/return_numbers_as_decimals_async.py +++ b/samples/return_numbers_as_decimals_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -39,7 +39,8 @@ import oracledb import sample_env -# indicate that numbers should be fetched as decimals +# A global indicating that NUMBER columns should be fetched as decimal.Decimal. +# You can do the same in execute() calls by passing a fetch_decimals parameter. 
oracledb.defaults.fetch_decimals = True diff --git a/samples/spatial_to_geopandas.py b/samples/spatial_to_geopandas.py index 5fe8f496..8d9e3efa 100644 --- a/samples/spatial_to_geopandas.py +++ b/samples/spatial_to_geopandas.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2018, 2024, Oracle and/or its affiliates. +# Copyright (c) 2018, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -68,10 +68,6 @@ # executed for a single transaction connection.autocommit = True -# do not fetch LOBs, avoiding the second round trip to the database to read the -# LOB contents -oracledb.defaults.fetch_lobs = False - # drop and create table print("Dropping and creating table...") cursor.execute( @@ -88,8 +84,8 @@ cursor.execute( """ create table TestStates ( - state VARCHAR2(30) not null, - geometry SDO_GEOMETRY not null + state varchar2(30) not null, + geometry sdo_geometry not null ) """ ) @@ -531,7 +527,8 @@ def create_geometry_obj(*ordinates): # functions were introduced in Oracle 10g. We use WKB here; however the same # process applies for WKT. cursor.execute( - "SELECT state, sdo_util.to_wkbgeometry(geometry) FROM TestStates" + "select state, sdo_util.to_wkbgeometry(geometry) from TestStates", + fetch_lobs=False, ) gdf = gpd.GeoDataFrame(cursor.fetchall(), columns=["state", "wkbgeometry"]) @@ -543,8 +540,8 @@ def create_geometry_obj(*ordinates): print() print(gdf) -# perform a basic GeoPandas operation (unary_union) -# to combine the 3 adjacent states into 1 geometry +# perform a basic GeoPandas operation to combine the three adjacent states into +# one geometry print() print("GeoPandas combining the 3 geometries into a single geometry...") -print(gdf.unary_union) +print(gdf.union_all()) diff --git a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html index 5243700e..ff437319 100644 --- a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html +++ b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html @@ -1770,6 +1770,9 @@

      6.1 Basic output type handler

      oracledb.defaults.fetch_decimals = True
+      You can also set fetch_decimals as an execute()
+      parameter.
+
     •
@@ -1976,8 +1979,10 @@

      7.2 Fetching a CLOB as a string

      Setting oracledb.defaults.fetch_lobs to False causes
-      python-oracledb to fetch the CLOB as a string. Standard Python string
-      functions such as len() can be used on the result.
+      python-oracledb to fetch the CLOB as a string. You can alternatively set
+      fetch_lobs as an execute() parameter. Standard
+      Python string functions such as len() can be used on the
+      result.

      The output is the same as for clob.py. To check, run the script:

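To complement the tutorial text above, here is a minimal synchronous sketch of passing the same options directly to ``execute()``; the connection details are placeholders, ``lob_tbl`` follows the earlier ``lob_data.rst`` example, and the second query's table is illustrative only:

    import oracledb

    # Placeholder connection details - adjust for your environment.
    connection = oracledb.connect(
        user="hr", password="hr", dsn="localhost/orclpdb1"
    )

    with connection.cursor() as cursor:
        # Fetch the CLOB column as a str for this query only.
        cursor.execute(
            "select c from lob_tbl where id = :1", [1], fetch_lobs=False
        )
        print(cursor.fetchone())

        # Fetch NUMBER columns as decimal.Decimal for this query only.
        cursor.execute("select salary from employees", fetch_decimals=True)
        print(cursor.fetchall())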
      diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index cba7475c..642ef129 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -675,6 +675,8 @@ cdef class BaseCursorImpl: public object warning public bint fetching_arrow public bint suspend_on_success + public bint fetch_lobs + public bint fetch_decimals uint32_t _buffer_rowcount uint32_t _buffer_index uint32_t _fetch_array_size @@ -909,6 +911,8 @@ cdef class PipelineOpImpl: readonly uint32_t arraysize readonly uint32_t num_rows readonly uint8_t op_type + readonly bint fetch_lobs + readonly bint fetch_decimals uint32_t num_execs diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index dd082a8e..bad2faa1 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1071,6 +1071,8 @@ def fetch_df_all( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> DataFrame: """ Fetches all rows of the SQL query ``statement``, returning them in a @@ -1088,6 +1090,12 @@ def fetch_df_all( the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set to the value of the explicit or default ``arraysize`` parameter value. + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() @@ -1095,7 +1103,11 @@ def fetch_df_all( if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize - cursor.execute(statement, parameters) + cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) return cursor._impl.fetch_df_all(cursor) def fetch_df_batches( @@ -1103,6 +1115,8 @@ def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> Iterator[DataFrame]: """ This returns an iterator yielding the next ``size`` rows of the SQL @@ -1122,6 +1136,12 @@ def fetch_df_batches( :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() @@ -1129,7 +1149,11 @@ def fetch_df_batches( if size is not None: cursor.arraysize = size cursor.prefetchrows = cursor.arraysize - cursor.execute(statement, parameters) + cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) if size is None: yield cursor._impl.fetch_df_all(cursor) else: @@ -2285,6 +2309,9 @@ async def fetchall( parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, rowfactory: Optional[Callable] = None, + *, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> list: """ Executes a query and returns all of the rows. 
@@ -2302,7 +2329,12 @@ async def fetchall( if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, + ) cursor.rowfactory = rowfactory return await cursor.fetchall() @@ -2311,6 +2343,8 @@ async def fetch_df_all( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> DataFrame: """ Fetches all rows of the SQL query ``statement``, returning them in a @@ -2327,13 +2361,23 @@ async def fetch_df_all( :attr:`oracledb.defaults.arraysize `. Internally, the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set to the value of the explicit or default ``arraysize`` parameter value. + + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. """ cursor = self.cursor() cursor._impl.fetching_arrow = True if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) return await cursor._impl.fetch_df_all(cursor) async def fetch_df_batches( @@ -2341,6 +2385,8 @@ async def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> Iterator[DataFrame]: """ This returns an iterator yielding the next ``size`` rows of the SQL @@ -2359,13 +2405,23 @@ async def fetch_df_batches( the ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. + + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. """ cursor = self.cursor() cursor._impl.fetching_arrow = True if size is not None: cursor.arraysize = size cursor.prefetchrows = cursor.arraysize - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) if size is None: yield await cursor._impl.fetch_df_all(cursor) else: @@ -2378,6 +2434,9 @@ async def fetchmany( parameters: Optional[Union[list, tuple, dict]] = None, num_rows: Optional[int] = None, rowfactory: Optional[Callable] = None, + *, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> list: """ Executes a query and returns up to the specified number of rows. 
@@ -2402,7 +2461,12 @@ async def fetchmany( elif num_rows <= 0: return [] cursor.arraysize = cursor.prefetchrows = num_rows - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, + ) cursor.rowfactory = rowfactory return await cursor.fetchmany(num_rows) @@ -2411,6 +2475,9 @@ async def fetchone( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, rowfactory: Optional[Callable] = None, + *, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> Any: """ Executes a query and returns the first row of the result set if one @@ -2429,7 +2496,12 @@ async def fetchone( """ with self.cursor() as cursor: cursor.prefetchrows = cursor.arraysize = 1 - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, + ) cursor.rowfactory = rowfactory return await cursor.fetchone() diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index 3b4008d9..2e18a1dc 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -764,7 +764,10 @@ def execute( self, statement: Optional[str], parameters: Optional[Union[list, tuple, dict]] = None, + *, suspend_on_success: bool = False, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, **keyword_parameters: Any, ) -> Any: """ @@ -797,6 +800,15 @@ def execute( sessionless transaction will be suspended when ``execute()`` completes successfully. See :ref:`suspendtxns`. + The ``fetch_lobs`` parameter specifies whether to return LOB locators + or ``str``/``bytes`` values when fetching LOB columns. The default + value is :data:`oracledb.defaults.fetch_lobs `. + + The ``fetch_decimals`` parameter specifies whether to return + ``decimal.Decimal`` values when fetching columns of type ``NUMBER``. + The default value is :data:`oracledb.defaults.fetch_decimals + `. + For maximum efficiency when reusing a statement, it is best to use the :meth:`Cursor.setinputsizes()` method to specify the parameter types and sizes ahead of time; in particular, *None* is assumed to be a @@ -808,8 +820,12 @@ def execute( over the rows in the cursor); otherwise, *None* is returned. """ self._prepare_for_execute(statement, parameters, keyword_parameters) - self._impl.suspend_on_success = suspend_on_success impl = self._impl + if fetch_lobs is not None: + impl.fetch_lobs = fetch_lobs + if fetch_decimals is not None: + impl.fetch_decimals = fetch_decimals + impl.suspend_on_success = suspend_on_success impl.execute(self) if impl.fetch_vars is not None: return self @@ -1087,7 +1103,10 @@ async def execute( self, statement: Optional[str], parameters: Optional[Union[list, tuple, dict]] = None, + *, suspend_on_success: bool = False, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, **keyword_parameters: Any, ) -> None: """ @@ -1120,6 +1139,15 @@ async def execute( sessionless transaction will be suspended when ``execute()`` completes successfully. See :ref:`suspendtxns`. + The ``fetch_lobs`` parameter specifies whether to return LOB locators + or ``str``/``bytes`` values when fetching LOB columns. The default + value is :data:`oracledb.defaults.fetch_lobs `. + + The ``fetch_decimals`` parameter specifies whether to return + ``decimal.Decimal`` values when fetching columns of type ``NUMBER``. + The default value is :data:`oracledb.defaults.fetch_decimals + `. 
+ For maximum efficiency when reusing a statement, it is best to use the :meth:`setinputsizes()` method to specify the parameter types and sizes ahead of time; in particular, *None* is assumed to be a string of @@ -1131,7 +1159,12 @@ async def execute( over the rows in the cursor); otherwise, *None* is returned. """ self._prepare_for_execute(statement, parameters, keyword_parameters) - self._impl.suspend_on_success = suspend_on_success + impl = self._impl + impl.suspend_on_success = suspend_on_success + if fetch_lobs is not None: + impl.fetch_lobs = fetch_lobs + if fetch_decimals is not None: + impl.fetch_decimals = fetch_decimals await self._impl.execute(self) async def executemany( diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index 5805fdae..3c512070 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -212,7 +212,7 @@ cdef class BaseCursorImpl: # applicable db_type_num = metadata.dbtype.num if db_type_num == DB_TYPE_NUM_NUMBER: - if C_DEFAULTS.fetch_decimals: + if self.fetch_decimals: var_impl.metadata._py_type_num = PY_TYPE_NUM_DECIMAL elif metadata.is_oson and db_type_num != DB_TYPE_NUM_JSON: conn_impl = self._get_conn_impl() @@ -221,7 +221,7 @@ cdef class BaseCursorImpl: var_impl.outconverter = conn_impl.decode_oson elif metadata.is_json and db_type_num != DB_TYPE_NUM_JSON: var_impl.outconverter = self._build_json_converter_fn() - elif not C_DEFAULTS.fetch_lobs or self.fetching_arrow: + elif not self.fetch_lobs or self.fetching_arrow: if db_type_num == DB_TYPE_NUM_BLOB: var_impl.metadata.dbtype = DB_TYPE_LONG_RAW var_impl._fetch_metadata.dbtype = DB_TYPE_LONG_RAW @@ -409,6 +409,10 @@ cdef class BaseCursorImpl: finally: self.set_input_sizes = False + # set default values of fetch options + self.fetch_lobs = C_DEFAULTS.fetch_lobs + self.fetch_decimals = C_DEFAULTS.fetch_decimals + # perform bind if parameters is not None: self.bind_one(cursor, parameters) diff --git a/src/oracledb/impl/base/pipeline.pyx b/src/oracledb/impl/base/pipeline.pyx index e01c36d7..35244e03 100644 --- a/src/oracledb/impl/base/pipeline.pyx +++ b/src/oracledb/impl/base/pipeline.pyx @@ -1,5 +1,5 @@ #------------------------------------------------------------------------------ -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -48,6 +48,8 @@ cdef class PipelineOpImpl: object rowfactory = None, uint32_t arraysize = 0, uint32_t num_rows = 0, + object fetch_lobs = None, + object fetch_decimals = None, ): self.op_type = op_type self.statement = statement @@ -58,6 +60,14 @@ cdef class PipelineOpImpl: self.rowfactory = rowfactory self.arraysize = arraysize self.num_rows = num_rows + if fetch_lobs is None: + self.fetch_lobs = C_DEFAULTS.fetch_lobs + else: + self.fetch_lobs = fetch_lobs + if fetch_decimals is None: + self.fetch_decimals = C_DEFAULTS.fetch_decimals + else: + self.fetch_decimals = fetch_decimals cdef class PipelineOpResultImpl: diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index 25634b4e..b7b3975f 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -882,16 +882,22 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): cursor_impl.prefetchrows = 1 cursor_impl.arraysize = 1 cursor_impl.rowfactory = op_impl.rowfactory + cursor_impl.fetch_lobs = op_impl.fetch_lobs + cursor_impl.fetch_decimals = op_impl.fetch_decimals elif op_impl.op_type == PIPELINE_OP_TYPE_FETCH_MANY: cursor._prepare_for_execute(op_impl.statement, op_impl.parameters) cursor_impl.prefetchrows = op_impl.num_rows cursor_impl.arraysize = op_impl.num_rows cursor_impl.rowfactory = op_impl.rowfactory + cursor_impl.fetch_lobs = op_impl.fetch_lobs + cursor_impl.fetch_decimals = op_impl.fetch_decimals elif op_impl.op_type == PIPELINE_OP_TYPE_FETCH_ALL: cursor._prepare_for_execute(op_impl.statement, op_impl.parameters) cursor_impl.prefetchrows = op_impl.arraysize cursor_impl.arraysize = op_impl.arraysize cursor_impl.rowfactory = op_impl.rowfactory + cursor_impl.fetch_lobs = op_impl.fetch_lobs + cursor_impl.fetch_decimals = op_impl.fetch_decimals else: errors._raise_err(errors.ERR_UNSUPPORTED_PIPELINE_OPERATION, op_type=op_impl.op_type) diff --git a/src/oracledb/pipeline.py b/src/oracledb/pipeline.py index 03f71a59..29fca658 100644 --- a/src/oracledb/pipeline.py +++ b/src/oracledb/pipeline.py @@ -67,6 +67,21 @@ def arraysize(self) -> int: """ return self._impl.arraysize + @property + def fetch_decimals(self) -> bool: + """ + Returns whether or not to fetch columns of type ``NUMBER`` as + ``decimal.Decimal`` values for a query. + """ + return self._impl.fetch_decimals + + @property + def fetch_lobs(self) -> bool: + """ + Returns whether or not to fetch LOB locators for a query. + """ + return self._impl.fetch_lobs + @property def keyword_parameters(self) -> Any: """ @@ -324,6 +339,8 @@ def add_fetchall( parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, rowfactory: Optional[Callable] = None, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> PipelineOp: """ Adds an operation that executes a query and returns all of the rows @@ -340,6 +357,15 @@ def add_fetchall( Internally, this operation's :attr:`Cursor.prefetchrows` size is set to the value of the explicit or default ``arraysize`` parameter value. + + The ``fetch_lobs`` parameter specifies whether to return LOB locators + or ``str``/``bytes`` values when fetching LOB columns. The default + value is :data:`oracledb.defaults.fetch_lobs `. + + The ``fetch_decimals`` parameter specifies whether to return + ``decimal.Decimal`` values when fetching columns of type ``NUMBER``. 
+ The default value is + :data:`oracledb.defaults.fetch_decimals `. """ if arraysize is None: arraysize = defaults.arraysize @@ -349,6 +375,8 @@ def add_fetchall( parameters=parameters, arraysize=arraysize, rowfactory=rowfactory, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, ) return self._add_op(op_impl) @@ -358,6 +386,8 @@ def add_fetchmany( parameters: Optional[Union[list, tuple, dict]] = None, num_rows: Optional[int] = None, rowfactory: Optional[Callable] = None, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> PipelineOp: """ Adds an operation that executes a query and returns up to the specified @@ -379,6 +409,15 @@ def add_fetchmany( Since only one fetch is performed for a query operation, consider adding a ``FETCH NEXT`` clause to the statement to prevent the database processing rows that will never be fetched. + + The ``fetch_lobs`` parameter specifies whether to return LOB locators + or ``str``/``bytes`` values when fetching LOB columns. The default + value is :data:`oracledb.defaults.fetch_lobs `. + + The ``fetch_decimals`` parameter specifies whether to return + ``decimal.Decimal`` values when fetching columns of type ``NUMBER``. + The default value is + :data:`oracledb.defaults.fetch_decimals `. """ if num_rows is None: num_rows = defaults.arraysize @@ -388,6 +427,8 @@ def add_fetchmany( parameters=parameters, num_rows=num_rows, rowfactory=rowfactory, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, ) return self._add_op(op_impl) @@ -396,6 +437,8 @@ def add_fetchone( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, rowfactory: Optional[Callable] = None, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> PipelineOp: """ Adds an operation that executes a query and returns the first row of @@ -414,12 +457,23 @@ def add_fetchone( adding a ``WHERE`` condition or using a ``FETCH NEXT`` clause in the statement to prevent the database processing rows that will never be fetched. + + The ``fetch_lobs`` parameter specifies whether to return LOB locators + or ``str``/``bytes`` values when fetching LOB columns. The default + value is :data:`oracledb.defaults.fetch_lobs `. + + The ``fetch_decimals`` parameter specifies whether to return + ``decimal.Decimal`` values when fetching columns of type ``NUMBER``. + The default value is + :data:`oracledb.defaults.fetch_decimals `. 
""" op_impl = PipelineOpImpl( op_type=PipelineOpType.FETCH_ONE, statement=statement, parameters=parameters, rowfactory=rowfactory, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, ) return self._add_op(op_impl) diff --git a/tests/test_1900_lob_var.py b/tests/test_1900_lob_var.py index 79f5df1b..ed8e6731 100644 --- a/tests/test_1900_lob_var.py +++ b/tests/test_1900_lob_var.py @@ -104,9 +104,8 @@ def __test_bind_ordering(self, lob_type): """, data, ) - with test_env.DefaultsContextManager("fetch_lobs", False): - self.cursor.execute(f"select * from Test{lob_type}s") - self.assertEqual(self.cursor.fetchone(), data) + self.cursor.execute(f"select * from Test{lob_type}s", fetch_lobs=False) + self.assertEqual(self.cursor.fetchone(), data) def __test_fetch_lobs_direct(self, lob_type): self.cursor.execute(f"delete from Test{lob_type}s") diff --git a/tests/test_4300_cursor_other.py b/tests/test_4300_cursor_other.py index 0091fbe3..57a70e06 100644 --- a/tests/test_4300_cursor_other.py +++ b/tests/test_4300_cursor_other.py @@ -1010,6 +1010,15 @@ def test_4370(self): self.cursor.execute("begin :1 := 4370; end;", [var]) self.assertEqual(var.getvalue(), 4370) + def test_4371(self): + "4371 - test cursor with fetch_decimals=True specified" + value = 4371 + self.cursor.execute( + "select :1 from dual", [value], fetch_decimals=True + ) + rows = self.cursor.fetchall() + self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_6300_cursor_other_async.py b/tests/test_6300_cursor_other_async.py index fb887931..6180b6c3 100644 --- a/tests/test_6300_cursor_other_async.py +++ b/tests/test_6300_cursor_other_async.py @@ -26,6 +26,8 @@ 6300 - Module for testing other cursor methods and attributes with asyncio. 
""" +import decimal + import oracledb import test_env @@ -928,6 +930,15 @@ async def test_6354(self): await self.cursor.execute("begin :1 := 4370; end;", [var]) self.assertEqual(var.getvalue(), 4370) + async def test_6355(self): + "6355 - test cursor with fetch_decimals=True specified" + value = 4371 + await self.cursor.execute( + "select :1 from dual", [value], fetch_decimals=True + ) + rows = await self.cursor.fetchall() + self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_7000_connection_async_shortcut_methods.py b/tests/test_7000_connection_async_shortcut_methods.py index b8c7ce33..6581bfbc 100644 --- a/tests/test_7000_connection_async_shortcut_methods.py +++ b/tests/test_7000_connection_async_shortcut_methods.py @@ -26,6 +26,8 @@ 7000 - Module for testing async connections shortcut methods """ +import decimal + import oracledb import test_env @@ -299,6 +301,54 @@ async def test_7014(self): await self.conn.commit() self.assertFalse(self.conn.transaction_in_progress) + async def test_7015(self): + "7015 - test fetchone() with fetch_lobs=False" + value = "test_7015" + (result,) = await self.conn.fetchone( + "select to_clob(:1) from dual", [value], fetch_lobs=False + ) + self.assertEqual(result, value) + + async def test_7016(self): + "7016 - test fetchmany() with fetch_lobs=False" + value = "test_7016" + rows = await self.conn.fetchmany( + "select to_clob(:1) from dual", [value], fetch_lobs=False + ) + self.assertEqual(rows, [(value,)]) + + async def test_7017(self): + "7017 - test fetchall() with fetch_lobs=False" + value = "test_7017" + rows = await self.conn.fetchall( + "select to_clob(:1) from dual", [value], fetch_lobs=False + ) + self.assertEqual(rows, [(value,)]) + + async def test_7018(self): + "7018 - test fetchone() with fetch_decimals=True" + value = 7018 + (result,) = await self.conn.fetchone( + "select :1 from dual", [value], fetch_decimals=True + ) + self.assertTrue(isinstance(result, decimal.Decimal)) + + async def test_7019(self): + "7019 - test fetchmany() with fetch_decimals=True" + value = 7019 + rows = await self.conn.fetchmany( + "select :1 from dual", [value], fetch_decimals=True + ) + self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) + + async def test_7020(self): + "7020 - test fetchall() with fetch_decimals=True" + value = 7020 + rows = await self.conn.fetchall( + "select :1 from dual", [value], fetch_decimals=True + ) + self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_7600_pipelining_async.py b/tests/test_7600_pipelining_async.py index ddddffc1..40017749 100644 --- a/tests/test_7600_pipelining_async.py +++ b/tests/test_7600_pipelining_async.py @@ -618,27 +618,24 @@ async def test_7629(self): self.assertEqual(out_bind.values, values) async def test_7630(self): - "7630 - test oracledb.defaults.fetch_lobs" - clob_1_value = "CLOB Data One" - clob_2_value = "CLOB Data Two" + "7630 - test fetch_lobs with add_fetchone()" + clob_value = "CLOB Data 7630" pipeline = oracledb.create_pipeline() pipeline.add_execute("delete from TestCLOBs") pipeline.add_execute( "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", - [clob_1_value], - ) - clob = await self.conn.createlob(oracledb.DB_TYPE_CLOB, clob_2_value) - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] - ) - pipeline.add_fetchall( - "select CLOBCol from TestCLOBs order by IntCol", + [clob_value], ) 
with test_env.DefaultsContextManager("fetch_lobs", False): - res = await self.conn.run_pipeline(pipeline) - self.assertEqual( - [res[-1].rows], [[(clob_1_value,), (clob_2_value,)]] + pipeline.add_fetchone( + "select CLOBCol from TestCLOBs order by IntCol", ) + pipeline.add_fetchone( + "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False + ) + res = await self.conn.run_pipeline(pipeline) + self.assertEqual([res[-2].rows], [[(clob_value,)]]) + self.assertEqual([res[-1].rows], [[(clob_value,)]]) async def test_7631(self): "7631 - test pipeline with lobs > 32K" @@ -747,14 +744,21 @@ def input_type_handler(cursor, value, num_elements): self.assertEqual(results[-1].rows, [(12,)]) async def test_7637(self): - "7637 - test oracledb.defaults.fetch_decimals" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute("insert into TestTempTable (IntCol) values (1)") - pipeline.add_fetchall("select IntCol from TestTempTable") + "7637 - test fetch_decimals with add_fetchone()" + value = 7637 with test_env.DefaultsContextManager("fetch_decimals", True): - res = await self.conn.run_pipeline(pipeline) - self.assertEqual([res[-1].rows], [[(decimal.Decimal("1"),)]]) + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + "insert into TestTempTable (IntCol) values (:1)", [value] + ) + pipeline.add_fetchone("select IntCol from TestTempTable") + pipeline.add_fetchone( + "select IntCol from TestTempTable", fetch_decimals=False + ) + res = await self.conn.run_pipeline(pipeline) + self.assertTrue(isinstance(res[-2].rows[0][0], decimal.Decimal)) + self.assertTrue(isinstance(res[-1].rows[0][0], int)) async def test_7638(self): "7638 - test oracledb.defaults.arraysize" @@ -917,6 +921,92 @@ async def test_7644(self): (fetched_value,) = await self.cursor.fetchone() self.assertEqual(fetched_value, test_env.get_main_user().upper()) + async def test_7645(self): + "7645 - test fetch_decimals with add_fetchmany()" + value = 7645 + with test_env.DefaultsContextManager("fetch_decimals", True): + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + "insert into TestTempTable (IntCol) values (:1)", [value] + ) + pipeline.add_fetchmany("select IntCol from TestTempTable") + pipeline.add_fetchmany( + "select IntCol from TestTempTable", fetch_decimals=False + ) + res = await self.conn.run_pipeline(pipeline) + self.assertTrue(isinstance(res[-2].rows[0][0], decimal.Decimal)) + self.assertTrue(isinstance(res[-1].rows[0][0], int)) + + async def test_7646(self): + "7646 - test fetch_decimals with add_fetchall()" + value = 7646 + with test_env.DefaultsContextManager("fetch_decimals", True): + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + "insert into TestTempTable (IntCol) values (:1)", [value] + ) + pipeline.add_fetchall("select IntCol from TestTempTable") + pipeline.add_fetchall( + "select IntCol from TestTempTable", fetch_decimals=False + ) + res = await self.conn.run_pipeline(pipeline) + self.assertTrue(isinstance(res[-2].rows[0][0], decimal.Decimal)) + self.assertTrue(isinstance(res[-1].rows[0][0], int)) + + async def test_7647(self): + "7647 - test fetch_lobs with add_fetchmany()" + clob_1_value = "CLOB Data 7647 - One" + clob_2_value = "CLOB Data 7647 - Two" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestCLOBs") + 
pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", + [clob_1_value], + ) + clob = await self.conn.createlob(oracledb.DB_TYPE_CLOB, clob_2_value) + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] + ) + with test_env.DefaultsContextManager("fetch_lobs", False): + pipeline.add_fetchmany( + "select CLOBCol from TestCLOBs order by IntCol", + ) + pipeline.add_fetchmany( + "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False + ) + + res = await self.conn.run_pipeline(pipeline) + self.assertEqual([res[-1].rows], [[(clob_1_value,), (clob_2_value,)]]) + self.assertEqual([res[-2].rows], [[(clob_1_value,), (clob_2_value,)]]) + + async def test_7648(self): + "7648 - test fetch_lobs with add_fetchall()" + clob_1_value = "CLOB Data 7648 - One" + clob_2_value = "CLOB Data 7648 - Two" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestCLOBs") + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", + [clob_1_value], + ) + clob = await self.conn.createlob(oracledb.DB_TYPE_CLOB, clob_2_value) + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] + ) + with test_env.DefaultsContextManager("fetch_lobs", False): + pipeline.add_fetchall( + "select CLOBCol from TestCLOBs order by IntCol", + ) + pipeline.add_fetchall( + "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False + ) + + res = await self.conn.run_pipeline(pipeline) + self.assertEqual([res[-1].rows], [[(clob_1_value,), (clob_2_value,)]]) + self.assertEqual([res[-2].rows], [[(clob_1_value,), (clob_2_value,)]]) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8700_sessionless_transaction.py b/tests/test_8700_sessionless_transaction.py index 8ea9703d..28a84de9 100644 --- a/tests/test_8700_sessionless_transaction.py +++ b/tests/test_8700_sessionless_transaction.py @@ -375,10 +375,11 @@ def test_8706(self): cursor = conn.cursor() conn.resume_sessionless_transaction(transaction_id) conn.commit() - with test_env.DefaultsContextManager("fetch_lobs", False): - cursor.execute("select ClobValue from TestAllTypes") - (result,) = cursor.fetchone() - self.assertEqual(result, large_string) + cursor.execute( + "select ClobValue from TestAllTypes", fetch_lobs=False + ) + (result,) = cursor.fetchone() + self.assertEqual(result, large_string) def test_8707(self): "8707 - test sessionless transaction with multiple suspends/resumes" diff --git a/tests/test_8800_sessionless_transaction_async.py b/tests/test_8800_sessionless_transaction_async.py index 2fa3a843..b88c4dec 100644 --- a/tests/test_8800_sessionless_transaction_async.py +++ b/tests/test_8800_sessionless_transaction_async.py @@ -396,10 +396,11 @@ async def test_8806(self): cursor = conn.cursor() await conn.resume_sessionless_transaction(transaction_id) await conn.commit() - with test_env.DefaultsContextManager("fetch_lobs", False): - await cursor.execute("select ClobValue from TestAllTypes") - (result,) = await cursor.fetchone() - self.assertEqual(result, large_string) + await cursor.execute( + "select ClobValue from TestAllTypes", fetch_lobs=False + ) + (result,) = await cursor.fetchone() + self.assertEqual(result, large_string) async def test_8807(self): "8807 - test sessionless transaction with multiple suspends/resumes" diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py index 9c4b99aa..95428cb3 100644 --- a/tests/test_8900_dataframe_ingestion.py +++ 
b/tests/test_8900_dataframe_ingestion.py @@ -221,18 +221,18 @@ def test_8904(self): df, ) self.conn.commit() - with test_env.DefaultsContextManager("fetch_decimals", True): - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) @test_env.skip_unless_native_boolean_supported() def test_8905(self): @@ -543,18 +543,18 @@ def test_8914(self): df, ) self.conn.commit() - with test_env.DefaultsContextManager("fetch_decimals", True): - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) + odf = self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) def test_8915(self): "8915 - test various timestamp values" diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py index f0170e7d..5179dd74 100644 --- a/tests/test_9000_dataframe_ingestion_async.py +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -222,18 +222,18 @@ async def test_9004(self): df, ) await self.conn.commit() - with test_env.DefaultsContextManager("fetch_decimals", True): - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) @test_env.skip_unless_native_boolean_supported() async def test_9005(self): @@ -544,18 +544,18 @@ async def test_9014(self): df, ) await self.conn.commit() - with test_env.DefaultsContextManager("fetch_decimals", True): - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) + odf = await self.conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + self.assertTrue(fetched_df.equals(df)) async def test_9015(self): "9015 - test various timestamp values" diff --git a/utils/templates/connection.py b/utils/templates/connection.py index 2aafc959..b8485f8f 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -1069,6 +1069,8 @@ def fetch_df_all( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> DataFrame: """ Fetches all rows of the SQL query ``statement``, returning them in a @@ -1086,6 +1088,12 @@ def fetch_df_all( the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set to the value of the explicit or default ``arraysize`` parameter 
value. + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() @@ -1093,7 +1101,11 @@ def fetch_df_all( if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize - cursor.execute(statement, parameters) + cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) return cursor._impl.fetch_df_all(cursor) def fetch_df_batches( @@ -1101,6 +1113,8 @@ def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> Iterator[DataFrame]: """ This returns an iterator yielding the next ``size`` rows of the SQL @@ -1120,6 +1134,12 @@ def fetch_df_batches( :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() @@ -1127,7 +1147,11 @@ def fetch_df_batches( if size is not None: cursor.arraysize = size cursor.prefetchrows = cursor.arraysize - cursor.execute(statement, parameters) + cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) if size is None: yield cursor._impl.fetch_df_all(cursor) else: @@ -1989,6 +2013,9 @@ async def fetchall( parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, rowfactory: Optional[Callable] = None, + *, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> list: """ Executes a query and returns all of the rows. @@ -2006,7 +2033,12 @@ async def fetchall( if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, + ) cursor.rowfactory = rowfactory return await cursor.fetchall() @@ -2015,6 +2047,8 @@ async def fetch_df_all( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, arraysize: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> DataFrame: """ Fetches all rows of the SQL query ``statement``, returning them in a @@ -2031,13 +2065,23 @@ async def fetch_df_all( :attr:`oracledb.defaults.arraysize `. Internally, the ``fetch_df_all()``'s :attr:`Cursor.prefetchrows` size is always set to the value of the explicit or default ``arraysize`` parameter value. + + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. 
""" cursor = self.cursor() cursor._impl.fetching_arrow = True if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) return await cursor._impl.fetch_df_all(cursor) async def fetch_df_batches( @@ -2045,6 +2089,8 @@ async def fetch_df_batches( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, size: Optional[int] = None, + *, + fetch_decimals: Optional[bool] = None, ) -> Iterator[DataFrame]: """ This returns an iterator yielding the next ``size`` rows of the SQL @@ -2063,13 +2109,23 @@ async def fetch_df_batches( the ``fetch_df_batches()``'s :attr:`Cursor.arraysize` and :attr:`Cursor.prefetchrows` sizes are always set to the value of the explicit or default ``size`` parameter value. + + The ``fetch_decimals`` parameter specifies whether to return + decimal values when fetching columns of type ``NUMBER`` that are + capable of being represented in Arrow Decimal128 format. The default + value is + :data:`oracledb.defaults.fetch_decimals `. """ cursor = self.cursor() cursor._impl.fetching_arrow = True if size is not None: cursor.arraysize = size cursor.prefetchrows = cursor.arraysize - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_decimals=fetch_decimals, + ) if size is None: yield await cursor._impl.fetch_df_all(cursor) else: @@ -2082,6 +2138,9 @@ async def fetchmany( parameters: Optional[Union[list, tuple, dict]] = None, num_rows: Optional[int] = None, rowfactory: Optional[Callable] = None, + *, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> list: """ Executes a query and returns up to the specified number of rows. @@ -2106,7 +2165,12 @@ async def fetchmany( elif num_rows <= 0: return [] cursor.arraysize = cursor.prefetchrows = num_rows - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, + ) cursor.rowfactory = rowfactory return await cursor.fetchmany(num_rows) @@ -2115,6 +2179,9 @@ async def fetchone( statement: str, parameters: Optional[Union[list, tuple, dict]] = None, rowfactory: Optional[Callable] = None, + *, + fetch_lobs: Optional[bool] = None, + fetch_decimals: Optional[bool] = None, ) -> Any: """ Executes a query and returns the first row of the result set if one @@ -2133,7 +2200,12 @@ async def fetchone( """ with self.cursor() as cursor: cursor.prefetchrows = cursor.arraysize = 1 - await cursor.execute(statement, parameters) + await cursor.execute( + statement, + parameters, + fetch_lobs=fetch_lobs, + fetch_decimals=fetch_decimals, + ) cursor.rowfactory = rowfactory return await cursor.fetchone() From 3b10860568e794c43a44e07dd68bedb94475559c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 8 Aug 2025 20:40:55 -0600 Subject: [PATCH 180/239] Fixed bug when attempting to execute an empty statement (#525). 
--- doc/src/release_notes.rst | 5 +++++ src/oracledb/cursor.py | 20 +++++++++++++++++--- src/oracledb/errors.py | 2 ++ src/oracledb/impl/thin/cursor.pyx | 2 +- tests/test_3900_cursor_execute.py | 7 +++++++ tests/test_4000_cursor_executemany.py | 7 +++++++ tests/test_5400_cursor_execute_async.py | 7 +++++++ tests/test_6100_cursor_executemany_async.py | 7 +++++++ 8 files changed, 53 insertions(+), 4 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 42f93729..9bdffc62 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -22,6 +22,9 @@ Thin Mode Changes Thick Mode Changes ++++++++++++++++++ +#) Executed statements are normalized by removing leading and trailing spaces + before being sent to Oracle Database. + Common Changes ++++++++++++++ @@ -32,6 +35,8 @@ Common Changes :attr:`oracledb.defaults.fetch_lobs ` and :attr:`oracledb.defaults.fetch_decimals ` are now stored with the operation and used during pipeline execution. +#) Fixed bug when attempting to execute an empty statement + (`issue 525 `__). #) API documentation is now generated from the source code. diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index 2e18a1dc..f1d9aa9d 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -126,6 +126,17 @@ def _call_get_execute_args( statement = "".join(statement_parts) return (statement, bind_values) + def _normalize_statement(self, statement: Optional[str]) -> Optional[str]: + """ + Normalizes a statement by stripping leading and trailing spaces. If the + result is an empty string, an error is raised immediately. + """ + if statement is not None: + statement = statement.strip() + if not statement: + errors._raise_err(errors.ERR_EMPTY_STATEMENT) + return statement + def _prepare( self, statement: str, tag: str = None, cache_statement: bool = True ) -> None: @@ -142,7 +153,10 @@ def _prepare_for_execute( """ self._verify_open() self._impl._prepare_for_execute( - self, statement, parameters, keyword_parameters + self, + self._normalize_statement(statement), + parameters, + keyword_parameters, ) def _verify_fetch(self) -> None: @@ -891,7 +905,7 @@ def executemany( """ self._verify_open() num_execs = self._impl._prepare_for_executemany( - self, statement, parameters + self, self._normalize_statement(statement), parameters ) self._impl.suspend_on_success = suspend_on_success if num_execs > 0: @@ -1227,7 +1241,7 @@ async def executemany( """ self._verify_open() num_execs = self._impl._prepare_for_executemany( - self, statement, parameters + self, self._normalize_statement(statement), parameters ) self._impl.suspend_on_success = suspend_on_success if num_execs > 0: diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 16ee37dc..404b9cee 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -289,6 +289,7 @@ def _raise_not_supported(feature: str) -> None: ERR_SCROLL_OUT_OF_RESULT_SET = 2063 ERR_POOL_MAX_LESS_THAN_MIN = 2064 ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED = 2065 +ERR_EMPTY_STATEMENT = 2066 # error numbers that result in NotSupportedError ERR_TIME_NOT_SUPPORTED = 3000 @@ -587,6 +588,7 @@ def _raise_not_supported(feature: str) -> None: ERR_DUPLICATED_PARAMETER: ( '"{deprecated_name}" and "{new_name}" cannot be specified together' ), + ERR_EMPTY_STATEMENT: ("an empty statement cannot be executed"), ERR_EXCEEDED_IDLE_TIME: ( "the database closed the connection because the connection's idle " "time has been exceeded" diff --git a/src/oracledb/impl/thin/cursor.pyx b/src/oracledb/impl/thin/cursor.pyx 
index 205a8dab..73697c9c 100644 --- a/src/oracledb/impl/thin/cursor.pyx +++ b/src/oracledb/impl/thin/cursor.pyx @@ -169,7 +169,7 @@ cdef class BaseThinCursorImpl(BaseCursorImpl): if self._statement is not None: self._conn_impl._return_statement(self._statement) self._statement = None - self._statement = self._conn_impl._get_statement(statement.strip(), + self._statement = self._conn_impl._get_statement(statement, cache_statement) self.fetch_metadata = self._statement._fetch_metadata self.fetch_vars = self._statement._fetch_vars diff --git a/tests/test_3900_cursor_execute.py b/tests/test_3900_cursor_execute.py index 494aecd6..0d18c4c9 100644 --- a/tests/test_3900_cursor_execute.py +++ b/tests/test_3900_cursor_execute.py @@ -562,6 +562,13 @@ def test_3935(self): with self.assertRaisesFullCode("ORA-01403"): self.cursor.execute("begin raise no_data_found; end;") + def test_3936(self): + "3936 - test executing an empty statement" + with self.assertRaisesFullCode("DPY-2066"): + self.cursor.execute("") + with self.assertRaisesFullCode("DPY-2066"): + self.cursor.execute(" ") + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_4000_cursor_executemany.py b/tests/test_4000_cursor_executemany.py index dba50e41..f9c1a69a 100644 --- a/tests/test_4000_cursor_executemany.py +++ b/tests/test_4000_cursor_executemany.py @@ -441,6 +441,13 @@ def test_4028(self): "4028 - test executemany with empty parameter set" self.cursor.executemany("insert into TestTempTable values (:1)", []) + def test_4029(self): + "4029 - test executemany with an empty statement" + with self.assertRaisesFullCode("DPY-2066"): + self.cursor.executemany("", 5) + with self.assertRaisesFullCode("DPY-2066"): + self.cursor.executemany(" ", 5) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_5400_cursor_execute_async.py b/tests/test_5400_cursor_execute_async.py index 556ed5bf..68dd18ac 100644 --- a/tests/test_5400_cursor_execute_async.py +++ b/tests/test_5400_cursor_execute_async.py @@ -593,6 +593,13 @@ async def test_5436(self): with self.assertRaisesFullCode("ORA-01403"): await self.cursor.execute("begin raise no_data_found; end;") + async def test_5437(self): + "5437 - test executing an empty statement" + with self.assertRaisesFullCode("DPY-2066"): + await self.cursor.execute("") + with self.assertRaisesFullCode("DPY-2066"): + await self.cursor.execute(" ") + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_6100_cursor_executemany_async.py b/tests/test_6100_cursor_executemany_async.py index 1fbea095..f55c8785 100644 --- a/tests/test_6100_cursor_executemany_async.py +++ b/tests/test_6100_cursor_executemany_async.py @@ -380,6 +380,13 @@ async def test_6124(self): sql = "insert into TestTempTable values (:1)" await self.cursor.executemany(sql, []) + async def test_6125(self): + "6125 - test executemany with an empty statement" + with self.assertRaisesFullCode("DPY-2066"): + await self.cursor.executemany("", 5) + with self.assertRaisesFullCode("DPY-2066"): + await self.cursor.executemany(" ", 5) + if __name__ == "__main__": test_env.run_test_cases() From f1b35e8d930b85ee6594386a521bc7c55e3a66f1 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 12 Aug 2025 20:56:49 -0600 Subject: [PATCH 181/239] Add typing_extensions as a dependency. 
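
The new dependency is used to type the fluent SODA operation methods with ``Self`` so
that chained calls keep the subclass type rather than the literal base-class name. A
rough, self-contained illustration of the pattern; the class names here are hypothetical
and only show the typing benefit, they are not the library's API::

    from typing_extensions import Self

    class FluentOp:
        def limit(self, value: int) -> Self:
            # returning Self lets type checkers track the concrete subclass
            self._limit = value
            return self

    # hypothetical subclass: with "-> Self", the chained result is inferred
    # as TracingOp; with the old '-> "FluentOp"' style it was the base class
    class TracingOp(FluentOp):
        pass

    op = TracingOp().limit(10)  # inferred type: TracingOp
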
--- doc/src/release_notes.rst | 1 + pyproject.toml | 5 ++++- src/oracledb/soda.py | 19 ++++++++++--------- 3 files changed, 15 insertions(+), 10 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 9bdffc62..02d2c2ce 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -38,6 +38,7 @@ Common Changes #) Fixed bug when attempting to execute an empty statement (`issue 525 `__). #) API documentation is now generated from the source code. +#) Internal change: typing_extensions is now a dependency. oracledb `3.3.0 `__ (July 2025) diff --git a/pyproject.toml b/pyproject.toml index f7196a35..974792e9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -34,7 +34,10 @@ classifiers = [ "Topic :: Database", ] requires-python = ">=3.9" -dependencies = ["cryptography>=3.2.1"] +dependencies = [ + "cryptography>=3.2.1", + "typing_extensions>=4.14.0", +] dynamic = ["version"] [project.readme] diff --git a/src/oracledb/soda.py b/src/oracledb/soda.py index eaa735d1..602bc3dc 100644 --- a/src/oracledb/soda.py +++ b/src/oracledb/soda.py @@ -30,6 +30,7 @@ # ----------------------------------------------------------------------------- from typing import Any, Optional, Union +from typing_extensions import Self import json from . import errors @@ -519,7 +520,7 @@ def count(self) -> int: """ return self._collection._impl.get_count(self) - def fetchArraySize(self, value: int) -> "SodaOperation": + def fetchArraySize(self, value: int) -> Self: """ This is a tuning method to specify the number of documents that are internally fetched in batches by calls to @@ -543,7 +544,7 @@ def fetchArraySize(self, value: int) -> "SodaOperation": self._fetch_array_size = value return self - def filter(self, value: Union[dict, str]) -> "SodaOperation": + def filter(self, value: Union[dict, str]) -> Self: """ Sets a filter specification for complex document queries and ordering of JSON documents. Filter specifications must be provided as a @@ -585,7 +586,7 @@ def getOne(self) -> Union["SodaDocument", None]: if doc_impl is not None: return SodaDocument._from_impl(doc_impl) - def hint(self, value: str) -> "SodaOperation": + def hint(self, value: str) -> Self: """ Specifies a hint that will be provided to the SODA operation when it is performed. This is expected to be a string in the same format as SQL @@ -603,7 +604,7 @@ def hint(self, value: str) -> "SodaOperation": self._hint = value return self - def lock(self) -> "SodaOperation": + def lock(self) -> Self: """ Specifies whether the documents fetched from the collection should be locked (equivalent to SQL "select for update"). Use of this method @@ -628,7 +629,7 @@ def lock(self) -> "SodaOperation": self._lock = True return self - def key(self, value: str) -> "SodaOperation": + def key(self, value: str) -> Self: """ Specifies that the document with the specified key should be returned. This causes any previous calls made to this method and @@ -643,7 +644,7 @@ def key(self, value: str) -> "SodaOperation": self._keys = None return self - def keys(self, value: list) -> "SodaOperation": + def keys(self, value: list) -> Self: """ Specifies that documents that match the keys found in the supplied sequence should be returned. This causes any previous calls made to @@ -660,7 +661,7 @@ def keys(self, value: list) -> "SodaOperation": self._key = None return self - def limit(self, value: int) -> "SodaOperation": + def limit(self, value: int) -> Self: """ Specifies that only the specified number of documents should be returned. 
This method is only usable for read operations such as @@ -711,7 +712,7 @@ def replaceOneAndGet(self, doc: Any) -> "SodaDocument": ) return SodaDocument._from_impl(return_doc_impl) - def skip(self, value: int) -> "SodaOperation": + def skip(self, value: int) -> Self: """ Specifies the number of documents that match the other criteria that will be skipped. This method is only usable for read operations such as @@ -727,7 +728,7 @@ def skip(self, value: int) -> "SodaOperation": self._skip = value return self - def version(self, value: str) -> "SodaOperation": + def version(self, value: str) -> Self: """ Specifies that documents with the specified version should be returned. Typically this is used with :meth:`~SodaOperation.key()` to implement From 2475d9b9ef89613d57d48d31bfb739e6aa6bc869 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 12 Aug 2025 20:57:11 -0600 Subject: [PATCH 182/239] Clarify helper function returnes desired mode and not current mode. --- samples/app_context.py | 4 +- samples/array_dml_rowcounts.py | 4 +- samples/batch_errors.py | 4 +- samples/bind_insert.py | 2 +- samples/bind_query.py | 4 +- samples/bulk_aq.py | 2 +- samples/call_timeout.py | 4 +- samples/connection_pool.py | 2 +- samples/dataframe_insert.py | 2 +- samples/dataframe_numpy.py | 2 +- samples/dataframe_pandas.py | 2 +- samples/dataframe_parquet_write.py | 2 +- samples/dataframe_polars.py | 2 +- samples/dataframe_pyarrow.py | 2 +- samples/dataframe_torch.py | 2 +- samples/dbms_output.py | 4 +- samples/dml_returning_multiple_rows.py | 4 +- samples/drcp_pool.py | 4 +- samples/editioning.py | 4 +- samples/generic_row_factory.py | 4 +- samples/implicit_results.py | 4 +- samples/insert_geometry.py | 4 +- samples/json_blob.py | 2 +- samples/json_direct.py | 2 +- samples/json_duality.py | 4 +- samples/last_rowid.py | 4 +- samples/load_csv.py | 4 +- samples/multi_consumer_aq.py | 2 +- samples/object_aq.py | 2 +- samples/object_dump.py | 4 +- samples/plsql_batch.py | 4 +- samples/plsql_collection.py | 4 +- samples/plsql_function.py | 4 +- samples/plsql_procedure.py | 4 +- samples/plsql_record.py | 4 +- samples/plsql_rowtype.py | 4 +- samples/query.py | 4 +- samples/query_arraysize.py | 4 +- samples/query_strings_as_bytes.py | 4 +- samples/raw_aq.py | 2 +- samples/ref_cursor.py | 4 +- samples/return_lobs_as_strings.py | 2 +- samples/return_numbers_as_decimals.py | 2 +- samples/rows_as_instance.py | 4 +- samples/sample_env.py | 14 +-- samples/scrollable_cursors.py | 2 +- samples/session_callback.py | 4 +- samples/sessionless_transactions.py | 2 +- samples/spatial_to_geopandas.py | 2 +- samples/subclassing.py | 4 +- samples/transaction_guard.py | 2 +- samples/type_handlers_json_strings.py | 4 +- samples/type_handlers_objects.py | 4 +- samples/universal_rowids.py | 4 +- samples/vector.py | 4 +- samples/vector_numpy.py | 4 +- samples/write_csv.py | 4 +- tests/ext/test_ext_1700_warnings_async.py | 7 +- tests/ext/test_ext_1800_inband_notif_async.py | 8 +- tests/ext/test_ext_2400_tg_async.py | 6 +- .../test_ext_2600_sessionless_transaction.py | 4 +- tests/test_1000_module.py | 2 +- tests/test_1100_connection.py | 4 +- tests/test_1600_dml_returning.py | 2 +- tests/test_2000_long_var.py | 6 +- tests/test_2400_pool.py | 12 +-- tests/test_2700_aq_dbobject.py | 13 +-- tests/test_2800_aq_bulk.py | 9 +- tests/test_2900_rowid.py | 7 +- tests/test_3600_outputtypehandler.py | 12 +-- tests/test_4600_type_changes.py | 88 ++++--------------- tests/test_5300_connection_async.py | 2 +- tests/test_8700_sessionless_transaction.py | 
4 +- tests/test_env.py | 44 ++++------ 74 files changed, 167 insertions(+), 259 deletions(-) diff --git a/samples/app_context.py b/samples/app_context.py index 2f733bae..2a4f3883 100644 --- a/samples/app_context.py +++ b/samples/app_context.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. # @@ -39,7 +39,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # client context attributes to be set diff --git a/samples/array_dml_rowcounts.py b/samples/array_dml_rowcounts.py index 7a497f01..bdfee97c 100644 --- a/samples/array_dml_rowcounts.py +++ b/samples/array_dml_rowcounts.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,7 +35,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/batch_errors.py b/samples/batch_errors.py index 60e5a4bd..50155752 100644 --- a/samples/batch_errors.py +++ b/samples/batch_errors.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -37,7 +37,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/bind_insert.py b/samples/bind_insert.py index d3fea7b1..a6094125 100644 --- a/samples/bind_insert.py +++ b/samples/bind_insert.py @@ -32,7 +32,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/bind_query.py b/samples/bind_query.py index 9a114cab..a7e5e5bb 100644 --- a/samples/bind_query.py +++ b/samples/bind_query.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -37,7 +37,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/bulk_aq.py b/samples/bulk_aq.py index 8ea983b1..cbfffa90 100644 --- a/samples/bulk_aq.py +++ b/samples/bulk_aq.py @@ -54,7 +54,7 @@ ] # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # connect to database diff --git a/samples/call_timeout.py b/samples/call_timeout.py index 5fc59b1e..1bed24a9 100644 --- a/samples/call_timeout.py +++ b/samples/call_timeout.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2019, 2024, Oracle and/or its affiliates. +# Copyright (c) 2019, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -36,7 +36,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/connection_pool.py b/samples/connection_pool.py index 6a7647ea..e14a5a24 100644 --- a/samples/connection_pool.py +++ b/samples/connection_pool.py @@ -82,7 +82,7 @@ POOL_ALIAS_NAME = "mypool" # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) diff --git a/samples/dataframe_insert.py b/samples/dataframe_insert.py index 0a1e89d6..80c91ed4 100644 --- a/samples/dataframe_insert.py +++ b/samples/dataframe_insert.py @@ -37,7 +37,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) diff --git a/samples/dataframe_numpy.py b/samples/dataframe_numpy.py index ad5f9bad..5ed8ed35 100644 --- a/samples/dataframe_numpy.py +++ b/samples/dataframe_numpy.py @@ -38,7 +38,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/dataframe_pandas.py b/samples/dataframe_pandas.py index 49c13be0..f86ea06f 100644 --- a/samples/dataframe_pandas.py +++ b/samples/dataframe_pandas.py @@ -40,7 +40,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/dataframe_parquet_write.py b/samples/dataframe_parquet_write.py index 7a023859..fcb65784 100644 --- a/samples/dataframe_parquet_write.py +++ b/samples/dataframe_parquet_write.py @@ -38,7 +38,7 @@ import 
sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/dataframe_polars.py b/samples/dataframe_polars.py index 5d1aed22..52d76610 100644 --- a/samples/dataframe_polars.py +++ b/samples/dataframe_polars.py @@ -36,7 +36,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/dataframe_pyarrow.py b/samples/dataframe_pyarrow.py index 1cc56dab..dc01531a 100644 --- a/samples/dataframe_pyarrow.py +++ b/samples/dataframe_pyarrow.py @@ -35,7 +35,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/dataframe_torch.py b/samples/dataframe_torch.py index de2d0113..ed006c0e 100644 --- a/samples/dataframe_torch.py +++ b/samples/dataframe_torch.py @@ -36,7 +36,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/dbms_output.py b/samples/dbms_output.py index 0366af84..3c0e8098 100644 --- a/samples/dbms_output.py +++ b/samples/dbms_output.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,7 +33,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/dml_returning_multiple_rows.py b/samples/dml_returning_multiple_rows.py index 7f7765f4..cc19884e 100644 --- a/samples/dml_returning_multiple_rows.py +++ b/samples/dml_returning_multiple_rows.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2024, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. # @@ -38,7 +38,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/drcp_pool.py b/samples/drcp_pool.py index 2401021e..eb7c89fc 100644 --- a/samples/drcp_pool.py +++ b/samples/drcp_pool.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -118,7 +118,7 @@ port = int(os.environ.get("PORT", "8080")) # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # ----------------------------------------------------------------------------- diff --git a/samples/editioning.py b/samples/editioning.py index c6d579d2..8b75e79f 100644 --- a/samples/editioning.py +++ b/samples/editioning.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. # @@ -42,7 +42,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # connect to the editions user and create a procedure diff --git a/samples/generic_row_factory.py b/samples/generic_row_factory.py index b7821828..8b60f509 100644 --- a/samples/generic_row_factory.py +++ b/samples/generic_row_factory.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,7 +35,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) diff --git a/samples/implicit_results.py b/samples/implicit_results.py index 99e7ca6b..ff688377 100644 --- a/samples/implicit_results.py +++ b/samples/implicit_results.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. # @@ -39,7 +39,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/insert_geometry.py b/samples/insert_geometry.py index bf6f5699..6946b42f 100644 --- a/samples/insert_geometry.py +++ b/samples/insert_geometry.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. 
# @@ -38,7 +38,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/json_blob.py b/samples/json_blob.py index a5189158..12694e18 100644 --- a/samples/json_blob.py +++ b/samples/json_blob.py @@ -43,7 +43,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/json_direct.py b/samples/json_direct.py index a6302307..9df9eb16 100644 --- a/samples/json_direct.py +++ b/samples/json_direct.py @@ -40,7 +40,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/json_duality.py b/samples/json_duality.py index cc0829da..95b4314c 100644 --- a/samples/json_duality.py +++ b/samples/json_duality.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -39,7 +39,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/last_rowid.py b/samples/last_rowid.py index bbe3583b..f536a795 100644 --- a/samples/last_rowid.py +++ b/samples/last_rowid.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2019, 2024, Oracle and/or its affiliates. +# Copyright (c) 2019, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,7 +32,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/load_csv.py b/samples/load_csv.py index 49d28e10..397f278f 100644 --- a/samples/load_csv.py +++ b/samples/load_csv.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,7 +35,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # CSV file. 
This sample file has both valid rows and some rows with data too diff --git a/samples/multi_consumer_aq.py b/samples/multi_consumer_aq.py index 4fe62fa6..b4e91289 100644 --- a/samples/multi_consumer_aq.py +++ b/samples/multi_consumer_aq.py @@ -38,7 +38,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) QUEUE_NAME = "DEMO_RAW_QUEUE_MULTI" diff --git a/samples/object_aq.py b/samples/object_aq.py index 233ba012..1fb8adcf 100644 --- a/samples/object_aq.py +++ b/samples/object_aq.py @@ -40,7 +40,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) BOOK_TYPE_NAME = "UDT_BOOK" diff --git a/samples/object_dump.py b/samples/object_dump.py index b6ce6412..786823e6 100644 --- a/samples/object_dump.py +++ b/samples/object_dump.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,7 +33,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # Create Oracle connection and cursor objects diff --git a/samples/plsql_batch.py b/samples/plsql_batch.py index 6cb0e8de..b5381ca3 100644 --- a/samples/plsql_batch.py +++ b/samples/plsql_batch.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -36,7 +36,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/plsql_collection.py b/samples/plsql_collection.py index 009f581c..2049277f 100644 --- a/samples/plsql_collection.py +++ b/samples/plsql_collection.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,7 +35,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/plsql_function.py b/samples/plsql_function.py index 5d75a743..fde6c2f2 100644 --- a/samples/plsql_function.py +++ b/samples/plsql_function.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,7 +32,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/plsql_procedure.py b/samples/plsql_procedure.py index 3a30879f..8f812c47 100644 --- a/samples/plsql_procedure.py +++ b/samples/plsql_procedure.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -33,7 +33,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/plsql_record.py b/samples/plsql_record.py index e6962230..8aff6aea 100644 --- a/samples/plsql_record.py +++ b/samples/plsql_record.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -36,7 +36,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/plsql_rowtype.py b/samples/plsql_rowtype.py index d7d38538..4b046907 100644 --- a/samples/plsql_rowtype.py +++ b/samples/plsql_rowtype.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,7 +32,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/query.py b/samples/query.py index 283e0b1b..5048b0ca 100644 --- a/samples/query.py +++ b/samples/query.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -32,7 +32,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/query_arraysize.py b/samples/query_arraysize.py index 9ef713bb..0b25b85a 100644 --- a/samples/query_arraysize.py +++ b/samples/query_arraysize.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -41,7 +41,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/query_strings_as_bytes.py b/samples/query_strings_as_bytes.py index fa2115bd..b1c47b7d 100644 --- a/samples/query_strings_as_bytes.py +++ b/samples/query_strings_as_bytes.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,7 +34,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) STRING_VAL = "I bought a cafetière on the Champs-Élysées" diff --git a/samples/raw_aq.py b/samples/raw_aq.py index 67c96557..674557be 100644 --- a/samples/raw_aq.py +++ b/samples/raw_aq.py @@ -38,7 +38,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) QUEUE_NAME = "DEMO_RAW_QUEUE" diff --git a/samples/ref_cursor.py b/samples/ref_cursor.py index 537fae6b..e92f47fc 100644 --- a/samples/ref_cursor.py +++ b/samples/ref_cursor.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2018, 2024, Oracle and/or its affiliates. +# Copyright (c) 2018, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,7 +34,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/return_lobs_as_strings.py b/samples/return_lobs_as_strings.py index e50cb7b4..ac71d42e 100644 --- a/samples/return_lobs_as_strings.py +++ b/samples/return_lobs_as_strings.py @@ -41,7 +41,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # A global indicating that LOB columns should be fetched as str or bytes, not diff --git a/samples/return_numbers_as_decimals.py b/samples/return_numbers_as_decimals.py index 6b8708a1..3eed1afb 100644 --- a/samples/return_numbers_as_decimals.py +++ b/samples/return_numbers_as_decimals.py @@ -40,7 +40,7 @@ oracledb.defaults.fetch_decimals = True # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/rows_as_instance.py b/samples/rows_as_instance.py index c17150c7..f595f85f 100644 --- a/samples/rows_as_instance.py +++ b/samples/rows_as_instance.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. 
# @@ -39,7 +39,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) diff --git a/samples/sample_env.py b/samples/sample_env.py index 062f0dd7..6d757eba 100644 --- a/samples/sample_env.py +++ b/samples/sample_env.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2024, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -193,10 +193,6 @@ def get_driver_mode(): ) -def get_is_thin(): - return get_driver_mode() == "thin" - - def get_edition_connect_string(): return "%s/%s@%s" % ( get_edition_user(), @@ -247,17 +243,21 @@ def get_server_version(): def get_wallet_location(): - if get_is_thin(): + if not run_in_thick_mode(): return get_value("PYO_SAMPLES_WALLET_LOCATION", "Wallet Location") def get_wallet_password(): - if get_is_thin() and get_wallet_location(): + if not run_in_thick_mode() and get_wallet_location(): return get_value( "PYO_SAMPLES_WALLET_PASSWORD", "Wallet Password", password=True ) +def run_in_thick_mode(): + return get_driver_mode() != "thin" + + def run_sql_script(conn, script_name, **kwargs): statement_parts = [] cursor = conn.cursor() diff --git a/samples/scrollable_cursors.py b/samples/scrollable_cursors.py index a1341dd3..59447b0a 100644 --- a/samples/scrollable_cursors.py +++ b/samples/scrollable_cursors.py @@ -39,7 +39,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/session_callback.py b/samples/session_callback.py index 26110592..4d7dfb3b 100644 --- a/samples/session_callback.py +++ b/samples/session_callback.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2019, 2024, Oracle and/or its affiliates. +# Copyright (c) 2019, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -68,7 +68,7 @@ PORT = int(os.environ.get("PORT", "8080")) # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # ----------------------------------------------------------------------------- diff --git a/samples/sessionless_transactions.py b/samples/sessionless_transactions.py index c0b31b07..c2d55f0e 100644 --- a/samples/sessionless_transactions.py +++ b/samples/sessionless_transactions.py @@ -34,7 +34,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # this script only works with Oracle Database 23.6 or later diff --git a/samples/spatial_to_geopandas.py b/samples/spatial_to_geopandas.py index 8d9e3efa..c3cf17e0 100644 --- a/samples/spatial_to_geopandas.py +++ b/samples/spatial_to_geopandas.py @@ -50,7 +50,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # create Oracle connection and cursor objects diff --git a/samples/subclassing.py b/samples/subclassing.py index a1ca8aca..809eec31 100644 --- a/samples/subclassing.py +++ b/samples/subclassing.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,7 +34,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) diff --git a/samples/transaction_guard.py b/samples/transaction_guard.py index 5a9c286a..99eac1a5 100644 --- a/samples/transaction_guard.py +++ b/samples/transaction_guard.py @@ -58,7 +58,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # constants diff --git a/samples/type_handlers_json_strings.py b/samples/type_handlers_json_strings.py index a523cd2e..f4d7e25d 100644 --- a/samples/type_handlers_json_strings.py +++ b/samples/type_handlers_json_strings.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -40,7 +40,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) diff --git a/samples/type_handlers_objects.py b/samples/type_handlers_objects.py index 903e011e..a3321f62 100644 --- a/samples/type_handlers_objects.py +++ b/samples/type_handlers_objects.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2016, 2024, Oracle and/or its affiliates. +# Copyright (c) 2016, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. # @@ -42,7 +42,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/universal_rowids.py b/samples/universal_rowids.py index 822b0076..1f36dc04 100644 --- a/samples/universal_rowids.py +++ b/samples/universal_rowids.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2017, 2024, Oracle and/or its affiliates. +# Copyright (c) 2017, 2025, Oracle and/or its affiliates. # # Portions Copyright 2007-2015, Anthony Tuininga. All rights reserved. # @@ -41,7 +41,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) DATA = [ diff --git a/samples/vector.py b/samples/vector.py index eb38af06..f5066acc 100644 --- a/samples/vector.py +++ b/samples/vector.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024 Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,7 +35,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/vector_numpy.py b/samples/vector_numpy.py index e16a2f2d..f74c6a34 100644 --- a/samples/vector_numpy.py +++ b/samples/vector_numpy.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -36,7 +36,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) connection = oracledb.connect( diff --git a/samples/write_csv.py b/samples/write_csv.py index dcefcb1d..c74412c4 100644 --- a/samples/write_csv.py +++ b/samples/write_csv.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -34,7 +34,7 @@ import sample_env # determine whether to use python-oracledb thin mode or thick mode -if not sample_env.get_is_thin(): +if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) # CSV file to create diff --git a/tests/ext/test_ext_1700_warnings_async.py b/tests/ext/test_ext_1700_warnings_async.py index 921da72b..66a728e9 100644 --- a/tests/ext/test_ext_1700_warnings_async.py +++ b/tests/ext/test_ext_1700_warnings_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,15 +29,12 @@ """ import asyncio -import unittest import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): profile_name = "profile_priv_test_1700" user_name = "user_priv_test_1700" diff --git a/tests/ext/test_ext_1800_inband_notif_async.py b/tests/ext/test_ext_1800_inband_notif_async.py index 096d6607..ecc7b1dd 100644 --- a/tests/ext/test_ext_1800_inband_notif_async.py +++ b/tests/ext/test_ext_1800_inband_notif_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -35,14 +35,10 @@ # is required. # ----------------------------------------------------------------------------- -import unittest - import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): requires_connection = False setup_completed = False diff --git a/tests/ext/test_ext_2400_tg_async.py b/tests/ext/test_ext_2400_tg_async.py index 3a3e25a9..5ec39454 100644 --- a/tests/ext/test_ext_2400_tg_async.py +++ b/tests/ext/test_ext_2400_tg_async.py @@ -28,15 +28,11 @@ not intended for normal use. It also creates and drops a service. 
""" -import unittest - import oracledb import test_env -@unittest.skipUnless( - test_env.get_is_thin(), "asyncio not supported in thick mode" -) +@test_env.skip_unless_thin_mode() class TestCase(test_env.BaseAsyncTestCase): service_name = "oracledb-test-tg-async" requires_connection = False diff --git a/tests/ext/test_ext_2600_sessionless_transaction.py b/tests/ext/test_ext_2600_sessionless_transaction.py index 33d64c21..b1aaa9a0 100644 --- a/tests/ext/test_ext_2600_sessionless_transaction.py +++ b/tests/ext/test_ext_2600_sessionless_transaction.py @@ -47,7 +47,7 @@ def test_ext_2600(self): # suspending a non-existent transaction will fail only in thin # mode - if test_env.get_is_thin(): + if conn.thin: with self.assertRaisesFullCode("DPY-3036"): conn.suspend_sessionless_transaction() @@ -58,7 +58,7 @@ def test_ext_2600(self): # starting another sessionless transaction will fail only in thin # mode - if test_env.get_is_thin(): + if conn.thin: with self.assertRaisesFullCode("DPY-3035"): conn.begin_sessionless_transaction( transaction_id=other_transaction_id, timeout=5 diff --git a/tests/test_1000_module.py b/tests/test_1000_module.py index 7ab55473..dfa8596d 100644 --- a/tests/test_1000_module.py +++ b/tests/test_1000_module.py @@ -251,7 +251,7 @@ def test_1008(self): def test_1009(self): "1009 - test enable_thin_mode()" - if test_env.get_is_thin(): + if not test_env.run_in_thick_mode(): oracledb.enable_thin_mode() with self.assertRaisesFullCode("DPY-2019"): oracledb.init_oracle_client() diff --git a/tests/test_1100_connection.py b/tests/test_1100_connection.py index acf13bb7..8924eeea 100644 --- a/tests/test_1100_connection.py +++ b/tests/test_1100_connection.py @@ -81,7 +81,7 @@ def test_1100(self): self.assertEqual( conn.dsn, test_env.get_connect_string(), "dsn differs" ) - self.assertEqual(conn.thin, test_env.get_is_thin()) + self.assertEqual(conn.thin, not test_env.run_in_thick_mode()) @test_env.skip_if_drcp() def test_1101(self): @@ -132,7 +132,7 @@ def test_1103(self): conn, "client_identifier", "oracledb_cid", sql ) self.__verify_attributes(conn, "client_identifier", None, sql) - if not test_env.get_is_thin(): + if not conn.thin: sql = """select ecid from v$session where sid = sys_context('userenv', 'sid')""" self.__verify_attributes(conn, "econtext_id", "oracledb_ecid", sql) diff --git a/tests/test_1600_dml_returning.py b/tests/test_1600_dml_returning.py index f9bd20c2..0a1e2c6d 100644 --- a/tests/test_1600_dml_returning.py +++ b/tests/test_1600_dml_returning.py @@ -582,7 +582,7 @@ def test_1625(self): ret_lob_data.sort() self.assertEqual(ret_lob_data, lob_data) - @unittest.skipUnless(test_env.get_is_thin(), "blocked by bug 37741324") + @unittest.skipIf(test_env.run_in_thick_mode(), "blocked by bug 37741324") def test_1626(self): "1626 - test DML returning with multiple DbObjects returned" arrays = [ diff --git a/tests/test_2000_long_var.py b/tests/test_2000_long_var.py index 1eab87b1..612c64f5 100644 --- a/tests/test_2000_long_var.py +++ b/tests/test_2000_long_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,8 +26,6 @@ 2000 - Module for testing long and long raw variables """ -import unittest - import oracledb import test_env @@ -121,7 +119,7 @@ def test_2004(self): ] self.assertEqual(self.cursor.description, expected_value) - @unittest.skipIf(test_env.get_is_thin(), "not relevant for thin mode") + @test_env.skip_unless_thick_mode() def test_2005(self): "2005 - test array size too large generates an exception" self.cursor.arraysize = 268435456 diff --git a/tests/test_2400_pool.py b/tests/test_2400_pool.py index 2496fd95..c2244def 100644 --- a/tests/test_2400_pool.py +++ b/tests/test_2400_pool.py @@ -181,18 +181,18 @@ def test_2400(self): self.assertEqual(pool.max, 2) if test_env.has_client_version(12, 1): self.assertEqual(pool.max_lifetime_session, 0) - if not test_env.get_is_thin() and test_env.has_client_version(18, 3): + if not pool.thin and test_env.has_client_version(18, 3): self.assertEqual(pool.max_sessions_per_shard, 0) self.assertEqual(pool.min, 1) - if test_env.get_is_thin(): + if pool.thin: self.assertIsNone(pool.name) else: self.assertRegex(pool.name, "^OCI:SP:.+") self.assertEqual(pool.ping_interval, 60) self.assertEqual(pool.stmtcachesize, oracledb.defaults.stmtcachesize) - if not test_env.get_is_thin() and test_env.has_client_version(19, 11): + if not pool.thin and test_env.has_client_version(19, 11): self.assertFalse(pool.soda_metadata_cache) - self.assertEqual(pool.thin, test_env.get_is_thin()) + self.assertEqual(pool.thin, not test_env.run_in_thick_mode()) self.assertEqual(pool.timeout, 0) self.assertEqual(pool.username, test_env.get_main_user()) @@ -241,13 +241,13 @@ def test_2402(self): TypeError, setattr, pool, attr_name, "invalid value" ) - if not test_env.get_is_thin() and test_env.has_client_version(18, 3): + if not pool.thin and test_env.has_client_version(18, 3): self.assertEqual(pool.max_sessions_per_shard, 0) self.assertRaises( TypeError, setattr, pool, "max_sessions_per_shard", "bad_val" ) - if not test_env.get_is_thin() and test_env.has_client_version(19, 11): + if not pool.thin and test_env.has_client_version(19, 11): pool.soda_metadata_cache = True self.assertTrue(pool.soda_metadata_cache) self.assertRaises( diff --git a/tests/test_2700_aq_dbobject.py b/tests/test_2700_aq_dbobject.py index f2899327..e35a69b4 100644 --- a/tests/test_2700_aq_dbobject.py +++ b/tests/test_2700_aq_dbobject.py @@ -28,7 +28,6 @@ import decimal import threading -import unittest import oracledb import test_env @@ -319,9 +318,7 @@ def test_2713(self): props = queue.deqone() self.assertIsNone(props) - @unittest.skipIf( - test_env.get_is_thin(), "Thin mode doesn't support transformation yet" - ) + @test_env.skip_unless_thick_mode() def test_2714(self): "2714 - test dequeue transformation" queue = self.get_and_clear_queue( @@ -346,9 +343,7 @@ def test_2714(self): props = queue.deqone() self.assertEqual(props.payload.PRICE, expected_price) - @unittest.skipIf( - test_env.get_is_thin(), "Thin mode doesn't support transformation yet" - ) + @test_env.skip_unless_thick_mode() def test_2715(self): "2715 - test enqueue transformation" queue = self.get_and_clear_queue( @@ -436,9 +431,7 @@ def test_2719(self): props1 = queue.deqone() self.assertIsNone(props1) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support notification yet" - ) + @test_env.skip_unless_thick_mode() def test_2720(self): "2720 - verify attributes of 
AQ message which spawned notification" if self.is_on_oracle_cloud(self.conn): diff --git a/tests/test_2800_aq_bulk.py b/tests/test_2800_aq_bulk.py index a5bdaced..643f2778 100644 --- a/tests/test_2800_aq_bulk.py +++ b/tests/test_2800_aq_bulk.py @@ -28,7 +28,6 @@ import datetime import threading -import unittest import oracledb import test_env @@ -97,9 +96,7 @@ def test_2801(self): self.conn.commit() self.assertEqual(messages, []) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" - ) + @test_env.skip_unless_thick_mode() def test_2802(self): "2802 - test bulk dequeue with wait" queue = self.get_and_clear_queue(RAW_QUEUE_NAME) @@ -133,9 +130,7 @@ def test_2803(self): self.conn.commit() self.assertEqual(all_data, RAW_PAYLOAD_DATA) - @unittest.skipIf( - test_env.get_is_thin(), "thin mode doesn't support enq immediate yet" - ) + @test_env.skip_unless_thick_mode() def test_2804(self): "2804 - test visibility option for enqueue and dequeue" queue = self.get_and_clear_queue(RAW_QUEUE_NAME) diff --git a/tests/test_2900_rowid.py b/tests/test_2900_rowid.py index d8cd44a5..676251ef 100644 --- a/tests/test_2900_rowid.py +++ b/tests/test_2900_rowid.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,6 @@ """ import datetime -import unittest import oracledb import test_env @@ -161,9 +160,7 @@ def test_2905(self): ) self.assertEqual(self.cursor.fetchone(), (3, "String #3")) - @unittest.skipIf( - not test_env.get_is_thin(), "thick mode doesn't support DB_TYPE_UROWID" - ) + @test_env.skip_unless_thin_mode() def test_2906(self): "2906 - binding and inserting a rowid as urowid" self.cursor.execute("truncate table TestRowids") diff --git a/tests/test_3600_outputtypehandler.py b/tests/test_3600_outputtypehandler.py index 2622c30c..127575d7 100644 --- a/tests/test_3600_outputtypehandler.py +++ b/tests/test_3600_outputtypehandler.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -207,7 +207,7 @@ def test_3616(self): in_val = datetime.timedelta( days=-1, seconds=86314, microseconds=431152 ) - if test_env.get_is_thin(): + if self.conn.thin: out_val = str(in_val) else: out_val = "-000000001 23:58:34.431152000" @@ -223,7 +223,7 @@ def test_3617(self): in_val = datetime.timedelta( days=-1, seconds=86314, microseconds=431152 ) - if test_env.get_is_thin(): + if self.conn.thin: out_val = str(in_val) else: out_val = "-000000001 23:58:34.431152000" @@ -239,7 +239,7 @@ def test_3618(self): in_val = datetime.timedelta( days=-1, seconds=86314, microseconds=431152 ) - if test_env.get_is_thin(): + if self.conn.thin: out_val = str(in_val) else: out_val = "-000000001 23:58:34.431152000" @@ -709,7 +709,7 @@ def type_handler_2(cursor, metadata): def test_3677(self): "3677 - output type handler: from BINARY_DOUBLE to VARCHAR" - str_value = "36.75" if test_env.get_is_thin() else "3.675E+001" + str_value = "36.75" if self.conn.thin else "3.675E+001" self.__test_type_handler( oracledb.DB_TYPE_BINARY_DOUBLE, oracledb.DB_TYPE_VARCHAR, @@ -719,7 +719,7 @@ def test_3677(self): def test_3678(self): "3678 - output type handler: from BINARY_FLOAT to VARCHAR" - str_value = "16.25" if test_env.get_is_thin() else "1.625E+001" + str_value = "16.25" if self.conn.thin else "1.625E+001" self.__test_type_handler( oracledb.DB_TYPE_BINARY_FLOAT, oracledb.DB_TYPE_VARCHAR, diff --git a/tests/test_4600_type_changes.py b/tests/test_4600_type_changes.py index a11882c1..8f86cfe6 100644 --- a/tests/test_4600_type_changes.py +++ b/tests/test_4600_type_changes.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,7 +27,6 @@ """ import datetime -import unittest import oracledb import test_env @@ -69,10 +68,7 @@ def __test_type_change( finally: self.conn.outputtypehandler = orig_type_handler - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4600(self): "4600 - test data type changing from VARCHAR to CLOB" self.__test_type_change( @@ -82,10 +78,7 @@ def test_4600(self): "clob_4600", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4601(self): "4601 - test data type changing from CHAR to CLOB" self.__test_type_change( @@ -95,10 +88,7 @@ def test_4601(self): "clob_4601", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4602(self): "4602 - test data type changing from LONG to CLOB" self.cursor.execute("truncate table TestLongs") @@ -111,10 +101,7 @@ def test_4602(self): "TestLongs", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4603(self): "4603 - test data type changing from NVARCHAR to CLOB" self.__test_type_change( @@ -124,10 +111,7 @@ def test_4603(self): "clob_4603", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4604(self): "4604 - test data type changing from NCHAR to CLOB" self.__test_type_change( @@ -137,10 +121,7 @@ def test_4604(self): "clob_4604", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4605(self): "4605 - test data type changing from RAW to BLOB" self.__test_type_change( @@ -150,10 +131,7 @@ def test_4605(self): b"blob_4605", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4606(self): "4606 - test data type changing from LONGRAW to BLOB" self.cursor.execute("truncate table TestLongRaws") @@ -167,10 +145,7 @@ def test_4606(self): "TestLongRaws", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4607(self): "4607 - test data type changing from VARCHAR to NCLOB" self.__test_type_change( @@ -180,10 +155,7 @@ def test_4607(self): "nclob_4607", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4608(self): "4608 - test data type changing from CHAR to NCLOB" self.__test_type_change( @@ -193,10 +165,7 @@ def test_4608(self): "nclob_4608", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4609(self): "4609 - test data type changing from LONG to NCLOB" self.cursor.execute("truncate table TestLongs") @@ -209,10 +178,7 @@ def test_4609(self): "TestLongs", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4610(self): "4610 - test data type 
changing from NVARCHAR to NCLOB" self.__test_type_change( @@ -222,10 +188,7 @@ def test_4610(self): "nclob_4610", ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4611(self): "4611 - test data type changing from NCHAR to NCLOB" self.__test_type_change( @@ -280,10 +243,7 @@ def test_4616(self): datetime.datetime(2022, 1, 5, 0, 0), ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4617(self): "4617 - test data type changing from CLOB to VARCHAR" @@ -303,10 +263,7 @@ def type_handler(cursor, metadata): type_handler=type_handler, ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4618(self): "4618 - test data type changing from NCLOB to NVARCHAR" @@ -326,10 +283,7 @@ def type_handler(cursor, metadata): type_handler=type_handler, ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4619(self): "4619 - test data type changing from CLOB to NVARCHAR" @@ -349,10 +303,7 @@ def type_handler(cursor, metadata): type_handler=type_handler, ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4620(self): "4620 - test data type changing from BLOB to RAW" @@ -372,10 +323,7 @@ def type_handler(cursor, metadata): type_handler=type_handler, ) - @unittest.skipIf( - not test_env.get_is_thin(), - "thick mode doesn't support this type change", - ) + @test_env.skip_unless_thin_mode() def test_4621(self): "4621 - test data type changing from NVARCHAR to CLOB" self.__test_type_change( diff --git a/tests/test_5300_connection_async.py b/tests/test_5300_connection_async.py index c8703400..d781b31d 100644 --- a/tests/test_5300_connection_async.py +++ b/tests/test_5300_connection_async.py @@ -83,7 +83,7 @@ async def test_5300(self): self.assertEqual( conn.dsn, test_env.get_connect_string(), "dsn differs" ) - self.assertEqual(conn.thin, test_env.get_is_thin()) + self.assertTrue(conn.thin) async def test_5303(self): "5303 - test connection end-to-end tracing attributes" diff --git a/tests/test_8700_sessionless_transaction.py b/tests/test_8700_sessionless_transaction.py index 28a84de9..4759efa1 100644 --- a/tests/test_8700_sessionless_transaction.py +++ b/tests/test_8700_sessionless_transaction.py @@ -447,12 +447,12 @@ def test_8708(self): transaction_id = self.conn.begin_sessionless_transaction() # try to resume with the wrong transaction id - if test_env.get_is_thin(): + if self.conn.thin: with self.assertRaisesFullCode("DPY-3035"): self.conn.resume_sessionless_transaction("wrong_id") # try to resume before suspend - if test_env.get_is_thin(): + if self.conn.thin: with self.assertRaisesFullCode("DPY-3035"): self.conn.resume_sessionless_transaction(transaction_id) diff --git a/tests/test_env.py b/tests/test_env.py index 6d610110..51193f93 100644 --- a/tests/test_env.py +++ b/tests/test_env.py @@ -101,7 +101,7 @@ def _initialize(): """ if PARAMETERS.get("INITIALIZED"): return - if not get_is_thin() and oracledb.is_thin_mode(): + if run_in_thick_mode() and oracledb.is_thin_mode(): oracledb.init_oracle_client(lib_dir=get_oracle_client()) oracledb.defaults.thick_mode_dsn_passthrough = False plugin_names = os.environ.get("PYO_TEST_PLUGINS") @@ 
-311,11 +311,6 @@ def get_is_implicit_pooling(): return value -def get_is_thin(): - driver_mode = get_value("DRIVER_MODE", "Driver mode (thin|thick)", "thin") - return driver_mode == "thin" - - def get_main_password(): return get_value( "MAIN_PASSWORD", f"Password for {get_main_user()}", password=True @@ -384,17 +379,17 @@ async def get_server_version_async(): def get_wallet_location(): - if get_is_thin(): + if not run_in_thick_mode(): return get_value("WALLET_LOCATION", "Wallet Location") def get_wallet_password(): - if get_is_thin(): + if not run_in_thick_mode(): return get_value("WALLET_PASSWORD", "Wallet Password", password=True) def get_external_user(): - if not get_is_thin(): + if run_in_thick_mode(): return get_value("EXTERNAL_USER", "External User") @@ -403,7 +398,7 @@ def get_random_string(length=10): def has_client_version(major_version, minor_version=0): - if get_is_thin(): + if not run_in_thick_mode(): return True return get_client_version() >= (major_version, minor_version) @@ -445,6 +440,11 @@ async def is_on_oracle_cloud_async(connection): return service_name is not None +def run_in_thick_mode(): + driver_mode = get_value("DRIVER_MODE", "Driver mode (thin|thick)", "thin") + return driver_mode != "thin" + + def run_sql_script(conn, script_name, **kwargs): statement_parts = [] cursor = conn.cursor() @@ -486,12 +486,12 @@ def run_sql_script(conn, script_name, **kwargs): def run_test_cases(): - get_is_thin() + run_in_thick_mode() unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) def skip_soda_tests(): - if get_is_thin(): + if not run_in_thick_mode(): return True if not has_client_version(18, 3): return True @@ -577,11 +577,11 @@ def skip_unless_sparse_vectors_supported(): def skip_unless_thick_mode(): - return unittest.skipIf(get_is_thin(), "requires thick mode") + return unittest.skipUnless(run_in_thick_mode(), "requires thick mode") def skip_unless_thin_mode(): - return unittest.skipUnless(get_is_thin(), "requires thin mode") + return unittest.skipIf(run_in_thick_mode(), "requires thin mode") def skip_unless_vectors_supported(): @@ -746,7 +746,7 @@ def get_sid_serial(self, conn=None): """ if conn is None: conn = self.conn - if get_is_thin(): + if not run_in_thick_mode(): return (conn.session_id, conn.serial_num) else: with conn.cursor() as cursor: @@ -876,19 +876,7 @@ async def get_sid_serial(self, conn=None): """ if conn is None: conn = self.conn - if get_is_thin(): - return (conn.session_id, conn.serial_num) - else: - with conn.cursor() as cursor: - await cursor.execute( - """ - select - dbms_debug_jdwp.current_session_id, - dbms_debug_jdwp.current_session_serial - from dual - """ - ) - return await cursor.fetchone() + return (conn.session_id, conn.serial_num) async def is_on_oracle_cloud(self, connection=None): if connection is None: From 50b32ac256571051e96575049605cae78002e551 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Tue, 12 Aug 2025 20:57:47 -0600 Subject: [PATCH 183/239] Documentation improvements. 
--- doc/src/api_manual/defaults.rst | 67 +-- doc/src/api_manual/module.rst | 105 +++-- doc/src/api_manual/variable.rst | 2 +- doc/src/user_guide/connection_handling.rst | 8 +- doc/src/user_guide/initialization.rst | 20 + src/oracledb/connect_params.py | 467 ++++++++++--------- src/oracledb/connection.py | 462 ++++++++++--------- src/oracledb/defaults.py | 90 +++- src/oracledb/pool.py | 499 +++++++++++---------- src/oracledb/pool_params.py | 462 ++++++++++--------- utils/build_from_template.py | 11 +- utils/fields.cfg | 181 ++++---- utils/templates/connection.py | 92 ++-- utils/templates/pool.py | 55 +-- 14 files changed, 1387 insertions(+), 1134 deletions(-) diff --git a/doc/src/api_manual/defaults.rst b/doc/src/api_manual/defaults.rst index 04043ec7..20e56603 100644 --- a/doc/src/api_manual/defaults.rst +++ b/doc/src/api_manual/defaults.rst @@ -11,37 +11,19 @@ Defaults Class .. autoclass:: Defaults - A Defaults object contains attributes that can be used to adjust the - behavior of the python-oracledb driver. + See :ref:`settingdefaults`. -An example of changing a default value is: - -.. code-block:: python - - import oracledb - - oracledb.defaults.fetch_lobs = False # return LOBs directly as strings or bytes +.. _defaultsattributes: Defaults Attributes =================== .. autoproperty:: Defaults.arraysize - This is an attribute for tuning the performance of fetching rows from - Oracle Database. It does not affect data insertion. See :ref:`Tuning Fetch - Performance `. + See :ref:`Tuning Fetch Performance `. .. autoproperty:: Defaults.config_dir - At time of ``import oracledb`` the value of - ``oracledb.defaults.config_dir`` will be set to (first one wins): - - - the value of ``$TNS_ADMIN``, if ``TNS_ADMIN`` is set. - - - ``$ORACLE_HOME/network/admin``, if ``$ORACLE_HOME`` is set. - - Otherwise, ``oracledb.defaults.config_dir`` will not be set. - See :ref:`optnetfiles`. .. versionchanged:: 3.0.0 @@ -49,69 +31,60 @@ Defaults Attributes The directory ``$ORACLE_HOME/network/admin`` was added to the heuristic. - At completion of a call to :meth:`oracledb.init_oracle_client()` in - Thick mode, the value of :attr:`Defaults.config_dir` may get changed - by python-oracledb. - .. autoproperty:: Defaults.driver_name - See :ref:`otherinit`. + See :ref:`otherinit` and :ref:`dbviews`. .. versionadded:: 2.5.0 .. autoproperty:: Defaults.fetch_decimals - An output type handler such as previously required in the obsolete - cx_Oracle driver can alternatively be used to adjust the returned type. If - a type handler exists and returns a variable (that is, - ``cursor.var(...)``), then that return variable is used. If the type - handler returns *None*, then the value of - ``oracledb.defaults.fetch_decimals`` is used to determine whether to return - ``decimal.Decimal`` values. + See `decimal.Decimal `__. .. autoproperty:: Defaults.fetch_lobs See :ref:`lobdata`. - An output type handler such as the one previously required in the obsolete - cx_Oracle driver can alternatively be used to adjust the returned type. If - a type handler exists and returns a variable (that is, `cursor.var(...)`), - then that return variable is used. If the type handler returns *None*, then - the value of ``oracledb.defaults.fetch_lobs`` is used. - .. autoproperty:: Defaults.machine + See :ref:`dbviews`. + .. versionadded:: 2.5.0 .. autoproperty:: Defaults.osuser + See :ref:`dbviews`. + .. versionadded:: 2.5.0 .. 
autoproperty:: Defaults.prefetchrows - This is an attribute for tuning the performance of fetching rows from - Oracle Database. It does not affect data insertion. See :ref:`Tuning Fetch - Performance `. + See :ref:`tuningfetch`. .. autoproperty:: Defaults.program + See :ref:`dbviews`. + .. versionadded:: 2.5.0 .. autoproperty:: Defaults.stmtcachesize - This is a tuning attribute, see :ref:`stmtcache`. + See :ref:`stmtcache`. .. autoproperty:: Defaults.terminal + See :ref:`dbviews`. + .. versionadded:: 2.5.0 .. autoproperty:: Defaults.thick_mode_dsn_passthrough When ``thick_mode_dsn_passthrough`` is the default value `True`, the - behavior of python-oracledb 2.5 and earlier versions occurs: Thick mode - passes connect strings unchanged to the Oracle Client libraries to - handle. Those libraries have their own heuristics for locating the optional - :ref:`tnsnames.ora `, if used. + behavior of python-oracledb 2.5 and earlier versions occurs: + python-oracledb Thick mode passes connect strings unchanged to the Oracle + Client libraries to handle. Those libraries have their own heuristics for + locating the optional :ref:`tnsnames.ora `, if used. When ``thick_mode_dsn_passthrough`` is `False`, python-oracledb Thick mode behaves similarly to Thin mode, which can be helpful for applications that diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 7b1a5b8a..a99369e2 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -267,77 +267,51 @@ Oracledb Methods .. versionadded:: 3.0.0 -.. _interval_ym: - -Oracledb IntervalYM Class -========================= +.. _moduleattributes: -Objects of this class are returned for columns of type INTERVAL YEAR TO MONTH -and can be passed to variables of type :data:`oracledb.DB_TYPE_INTERVAL_YM` -The class is a `collections.namedtuple() -`__ -class with two integer attributes, ``years`` and ``months``. - -.. versionadded:: 2.2.0 - - -.. _jsonid: - -Oracledb JsonId Class -===================== - -Objects of this class are returned by :ref:`SODA ` in the ``_id`` -attribute of documents stored in native collections when using Oracle Database -23.4 (and later). It is a subclass of the `bytes `__ class. - -.. versionadded:: 2.1.0 - - -.. _futureobj: - -Oracledb __future__ Object -========================== - -Special object that contains attributes which control the behavior of -python-oracledb, allowing for opting in for new features. - -.. dbapimethodextension:: - -.. _constants: +Oracledb Attributes +=================== -Oracledb Constants -================== +.. data:: apilevel -General -------- + A string constant stating the Python DB API level supported by + python-oracledb. Currently "2.0". -.. data:: apilevel +.. data:: defaults - String constant stating the supported DB API level. Currently '2.0'. + The :ref:`Defaults ` object for setting default behaviors of + python-oracledb. + See :ref:`settingdefaults`. .. data:: paramstyle - String constant stating the type of parameter marker formatting expected by - the interface. Currently 'named' as in 'where name = :name'. + A string constant stating the type of parameter marker formatting expected + by the interface. Currently 'named' as in 'where name = :name'. .. data:: threadsafety - Integer constant stating the level of thread safety that the interface - supports. Currently 2, which means that threads may share the module and + An integer constant stating the level of thread safety that python-oracledb + supports. 
Currently 2, which means that threads may share the module and connections, but not cursors. Sharing means that a thread may use a resource without wrapping it using a mutex semaphore to implement resource locking. .. data:: version + + A string constant stating the version of the module. Currently '|release|'. + .. data:: __version__ - String constant stating the version of the module. Currently '|release|'. + A string constant stating the version of the module. Currently '|release|'. .. dbapiattributeextension:: +.. _constants: + +Oracledb Constants +================== Advanced Queuing: Delivery Modes -------------------------------- @@ -1869,6 +1843,14 @@ See :ref:`exception` for usage information. See :ref:`tg` for more information. +.. _futureobj: + +Oracledb __future__ Object +========================== + +A special object that contains attributes which control the behavior of +python-oracledb, allowing for opting in for new features. + .. _oracledbplugins: Oracledb Plugins @@ -1979,3 +1961,30 @@ Python-oracledb then uses these tokens to connect to Oracle Database. See :ref:`cloudnativeauthoauth` for more information. .. versionadded:: 3.0.0 + +.. _interval_ym: + +Oracledb IntervalYM Class +========================= + +Objects of this class are returned for columns of type INTERVAL YEAR TO MONTH +and can be passed to variables of type :data:`oracledb.DB_TYPE_INTERVAL_YM` +The class is a `collections.namedtuple() +`__ +class with two integer attributes, ``years`` and ``months``. + +.. versionadded:: 2.2.0 + +.. _jsonid: + +Oracledb JsonId Class +===================== + +Objects of this class are returned by :ref:`SODA ` in the ``_id`` +attribute of documents stored in native collections when using Oracle Database +23.4 (and later). It is a subclass of the `bytes `__ class. + +.. versionadded:: 2.1.0 + +.. dbapimethodextension:: diff --git a/doc/src/api_manual/variable.rst b/doc/src/api_manual/variable.rst index e9857e01..dec5f024 100644 --- a/doc/src/api_manual/variable.rst +++ b/doc/src/api_manual/variable.rst @@ -11,7 +11,7 @@ Variable Class .. autoclass:: Var - An Var object should be created with :meth:`Cursor.var()` or + A Var object should be created with :meth:`Cursor.var()` or :meth:`Cursor.arrayvar()`. .. dbapiobjectextension:: diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index 3de344fe..cb5e8537 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -2468,10 +2468,10 @@ immediately return an available connection. Some users set larger ``increment`` values even for fixed-size pools because it can help a pool re-establish itself if all connections become invalid, for example after a network dropout. In the common case of Thin mode with the default ``getmode`` -of ``POOL_GETMODE_WAIT``, any :meth:`~ConnectionPool.acquire()` call that -initiates pool growth will return after the first new connection is created, -regardless of how big ``increment`` is. The pool will then continue to -re-establish connections in a background thread. +of :data:`oracledb.POOL_GETMODE_WAIT`, any :meth:`~ConnectionPool.acquire()` +call that initiates pool growth will return after the first new connection is +created, regardless of how big ``increment`` is. The pool will then continue +to re-establish connections in a background thread. A connection pool can shrink back to its minimum size ``min`` when connections opened by the pool are not used by the application. 
This frees up database diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index c6b303ea..975c6ca2 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -704,3 +704,23 @@ V$SESSION_CONNECT_INFO and verifying if the value of the column begins with the text ``python-oracledb thn``. See :ref:`vsessconinfo`. Note all connections in a python-oracledb application must use the same mode. + +.. _settingdefaults: + +Changing python-oracledb Default Settings +========================================= + +Python-oracledb has a singleton :ref:`Defaults ` object with +attributes that set default behaviors of the driver. The object is accessed +using the :data:`defaults` attribute of the imported driver. + +For example, to return queried LOB columns directly as strings or bytes: + +.. code-block:: python + + import oracledb + + oracledb.defaults.fetch_lobs = False + + +See :ref:`defaultsattributes` for the attributes that can be set. diff --git a/src/oracledb/connect_params.py b/src/oracledb/connect_params.py index e9eb4039..dca162b4 100644 --- a/src/oracledb/connect_params.py +++ b/src/oracledb/connect_params.py @@ -114,7 +114,7 @@ def __init__( All parameters are optional. A brief description of each parameter follows: - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to (default: None) - ``proxy_user``: the name of the proxy user to connect to. If this @@ -122,20 +122,21 @@ def __init__( the form "user[proxy_user]" (default: None) - - ``password``: the password for the user + - ``password``: the password for the database user (default: None) - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new + password will take effect immediately upon a successful connection to + the database (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. This value is only used in thin mode + is encrypted. This value is only used in python-oracledb Thin mode (default: None) - - ``access_token``: expected to be a string or a 2-tuple or a callable. - If it is a string, it specifies an Azure AD OAuth2 token used for - Open Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: a string, or a 2-tuple, or a callable. If it is a + string, it specifies an Entra ID OAuth2 token used for Open + Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. 
If it is a callable, it returns @@ -145,8 +146,8 @@ def __init__( expired (default: None) - - ``host``: the name or IP address of the machine hosting the database - or the database listener + - ``host``: the hostname or IP address of the machine hosting the + database or the database listener (default: None) - ``port``: the port number on which the database listener is listening @@ -156,8 +157,8 @@ def __init__( to use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - ``https_proxy``: the name or IP address of a proxy host to use for - tunneling secure connections + - ``https_proxy``: the hostname or IP address of a proxy host to use + for tunneling secure connections (default: None) - ``https_proxy_port``: the port on which to communicate with the proxy @@ -175,36 +176,37 @@ def __init__( (default: None) - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" - or "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" (default: None) - - ``cclass``: connection class to use for Database Resident Connection - Pooling (DRCP) + - ``cclass``: the connection class to use for Database Resident + Connection Pooling (DRCP) (default: None) - - ``purity``: purity to use for Database Resident Connection Pooling - (DRCP) - (default: oracledb.PURITY_DEFAULT) + - ``purity``: the connection purity to use for Database Resident + Connection Pooling (DRCP) + (default: :attr:`oracledb.PURITY_DEFAULT`) - - ``expire_time``: an integer indicating the number of minutes between - the sending of keepalive probes. If this parameter is set to a value - greater than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of + keepalive probes. If this parameter is set to a value greater than + zero it enables keepalive (default: 0) - - ``retry_count``: the number of times that a connection attempt should - be retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated (default: 0) - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection (default: 1) - ``tcp_connect_timeout``: a float indicating the maximum number of - seconds to wait for establishing a connection to the database host + seconds to wait when establishing a connection to the database host (default: 20.0) - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching @@ -219,49 +221,62 @@ def __init__( (default: None) - ``wallet_location``: the directory where the wallet can be found. In - thin mode this must be the directory containing the PEM-encoded - wallet file ewallet.pem. In thick mode this must be the directory - containing the file cwallet.sso + python-oracledb Thin mode this must be the directory containing the + PEM-encoded wallet file ewallet.pem. In python-oracledb Thick mode + this must be the directory containing the file cwallet.sso (default: None) - - ``events``: boolean specifying whether events mode should be enabled. 
- This value is only used in thick mode and is needed for continuous - query notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be + enabled. This value is only used in python-oracledb Thick mode and is + needed for continuous query notification and high availability event + notifications (default: False) - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA - (default: oracledb.AUTH_MODE_DEFAULT) - - - ``disable_oob``: boolean indicating whether out-of-band breaks should - be disabled. This value is only used in thin mode. It has no effect - on Windows which does not support this functionality + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, + :data:`oracledb.AUTH_MODE_PRELIM`, :data:`oracledb.AUTH_MODE_SYSASM`, + :data:`oracledb.AUTH_MODE_SYSBKP`, :data:`oracledb.AUTH_MODE_SYSDBA`, + :data:`oracledb.AUTH_MODE_SYSDGD`, :data:`oracledb.AUTH_MODE_SYSKMT`, + :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` + (default: :attr:`oracledb.AUTH_MODE_DEFAULT`) + + - ``disable_oob``: a boolean indicating whether out-of-band breaks + should be disabled. This value is only used in python-oracledb Thin + mode. It has no effect on Windows which does not support this + functionality (default: False) - - ``stmtcachesize``: identifies the initial size of the statement cache - (default: oracledb.defaults.stmtcachesize) + - ``stmtcachesize``: the size of the statement cache + (default: :attr:`oracledb.defaults.stmtcachesize + `) - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter (default: None) - ``tag``: identifies the type of connection that should be returned - from a pool. This value is only used in thick mode + from a pool. This value is only used in python-oracledb Thick mode (default: None) - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in - thick mode + - ``matchanytag``: a boolean specifying whether any tag can be used + when acquiring a connection from the pool. This value is only used in + python-oracledb Thick mode (default: False) - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. - For thick mode use the config_dir parameter of init_oracle_client() - (default: oracledb.defaults.config_dir) + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python- + oracledb Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` + (default: :attr:`oracledb.defaults.config_dir + `) - ``appcontext``: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the @@ -270,18 +285,18 @@ def __init__( - ``shardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. 
This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value - is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP - environment variable + is only used in python-oracledb Thin mode. For python-oracledb Thick + mode set the ORA_DEBUG_JDWP environment variable (default: None) - ``connection_id_prefix``: an application specific prefix that is @@ -308,7 +323,7 @@ def __init__( This requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -318,37 +333,44 @@ def __init__( ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - ``program``: the name of the executable program or application - connected to the Oracle Database - (default: oracledb.defaults.program) - - - ``machine``: the machine name of the client connecting to the Oracle - Database - (default: oracledb.defaults.machine) - - - ``terminal``: the terminal identifier from which the connection - originates - (default: oracledb.defaults.terminal) - - - ``osuser``: the operating system user that initiates the database - connection - (default: oracledb.defaults.osuser) - - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database - (default: oracledb.defaults.driver_name) - - - ``use_sni``: boolean indicating whether to use the TLS SNI extension - to bypass the second TLS neogiation that would otherwise be required + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates + (default: :attr:`oracledb.defaults.program + `) + + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates + (default: :attr:`oracledb.defaults.machine + `) + + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates + (default: :attr:`oracledb.defaults.terminal + `) + + - ``osuser``: a string recorded by Oracle Database as the operating + system user who originated the connection + (default: :attr:`oracledb.defaults.osuser + `) + + - ``driver_name``: a string recorded by Oracle Database as the name of + the driver which originated the connection + (default: :attr:`oracledb.defaults.driver_name + `) + + - ``use_sni``: a boolean indicating whether to use the TLS SNI + extension to bypass the second TLS neogiation that would otherwise be + required (default: False) - - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without - parsing by the driver. Setting this to False makes thick and thin - mode applications behave similarly regarding connection string - parameter handling and locating any optional tnsnames.ora - configuration file - (default: oracledb.defaults.thick_mode_dsn_passthrough) + parsing by the driver. 
Setting this to False makes python-oracledb + Thick and Thin mode applications behave similarly regarding + connection string parameter handling and locating any optional + tnsnames.ora configuration file + (default: :attr:`oracledb.defaults.thick_mode_dsn_passthrough + `) - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using @@ -357,12 +379,12 @@ def __init__( (default: None) - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP - with Oracle Database 23.4 or higher + with Oracle Database 23.4, or higher (default: None) - ``handle``: an integer representing a pointer to a valid service - context handle. This value is only used in thick mode. It should be - used with extreme caution + context handle. This value is only used in python-oracledb Thick + mode. It should be used with extreme caution (default: 0) """ pass @@ -447,7 +469,7 @@ def appcontext(self) -> list: @_flatten_value def cclass(self) -> Union[list, str]: """ - Connection class to use for Database Resident Connection Pooling + The connection class to use for Database Resident Connection Pooling (DRCP). """ return [d.cclass for d in self._impl.description_list.children] @@ -455,9 +477,13 @@ def cclass(self) -> Union[list, str]: @property def config_dir(self) -> str: """ - Directory in which the optional tnsnames.ora configuration file is - located. This value is only used in thin mode. For thick mode use the - config_dir parameter of init_oracle_client(). + A directory in which the optional tnsnames.ora configuration file is + located. This value is only used in python-oracledb Thin mode. For + python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()`. """ return self._impl.config_dir @@ -477,24 +503,26 @@ def connection_id_prefix(self) -> Union[list, str]: def debug_jdwp(self) -> str: """ A string with the format "host=;port=" that specifies the - host and port of the PL/SQL debugger. This value is only used in thin - mode. For thick mode set the ORA_DEBUG_JDWP environment variable. + host and port of the PL/SQL debugger. This value is only used in + python-oracledb Thin mode. For python-oracledb Thick mode set the + ORA_DEBUG_JDWP environment variable. """ return self._impl.debug_jdwp @property def disable_oob(self) -> bool: """ - Boolean indicating whether out-of-band breaks should be disabled. This - value is only used in thin mode. It has no effect on Windows which does - not support this functionality. + A boolean indicating whether out-of-band breaks should be disabled. + This value is only used in python-oracledb Thin mode. It has no effect + on Windows which does not support this functionality. """ return self._impl.disable_oob @property def driver_name(self) -> str: """ - The driver name used by the client to connect to the Oracle Database. + A string recorded by Oracle Database as the name of the driver which + originated the connection. """ return self._impl.driver_name @@ -509,9 +537,9 @@ def edition(self) -> str: @property def events(self) -> bool: """ - Boolean specifying whether events mode should be enabled. This value is - only used in thick mode and is needed for continuous query notification - and high availability event notifications. + A boolean specifying whether events mode should be enabled. 
This value + is only used in python-oracledb Thick mode and is needed for continuous + query notification and high availability event notifications. """ return self._impl.events @@ -519,9 +547,8 @@ def events(self) -> bool: @_flatten_value def expire_time(self) -> Union[list, int]: """ - An integer indicating the number of minutes between the sending of - keepalive probes. If this parameter is set to a value greater than zero - it enables keepalive. + The number of minutes between the sending of keepalive probes. If this + parameter is set to a value greater than zero it enables keepalive. """ return [d.expire_time for d in self._impl.description_list.children] @@ -545,7 +572,7 @@ def extra_auth_params(self) -> dict: @_flatten_value def host(self) -> Union[list, str]: """ - The name or IP address of the machine hosting the database or the + The hostname or IP address of the machine hosting the database or the database listener. """ return [a.host for a in self._impl._get_addresses()] @@ -554,7 +581,7 @@ def host(self) -> Union[list, str]: @_flatten_value def https_proxy(self) -> Union[list, str]: """ - The name or IP address of a proxy host to use for tunneling secure + The hostname or IP address of a proxy host to use for tunneling secure connections. """ return [a.https_proxy for a in self._impl._get_addresses()] @@ -578,29 +605,37 @@ def instance_name(self) -> Union[list, str]: @property def machine(self) -> str: """ - The machine name of the client connecting to the Oracle Database. + A string recorded by Oracle Database as the name of the machine from + which the connection originates. """ return self._impl.machine @property def matchanytag(self) -> bool: """ - Boolean specifying whether any tag can be used when acquiring a - connection from the pool. This value is only used in thick mode. + A boolean specifying whether any tag can be used when acquiring a + connection from the pool. This value is only used in python-oracledb + Thick mode. """ return self._impl.matchanytag @property def mode(self) -> oracledb.AuthMode: """ - Authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA. + The authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, :data:`oracledb.AUTH_MODE_PRELIM`, + :data:`oracledb.AUTH_MODE_SYSASM`, :data:`oracledb.AUTH_MODE_SYSBKP`, + :data:`oracledb.AUTH_MODE_SYSDBA`, :data:`oracledb.AUTH_MODE_SYSDGD`, + :data:`oracledb.AUTH_MODE_SYSKMT`, :data:`oracledb.AUTH_MODE_SYSOPER`, + or :data:`oracledb.AUTH_MODE_SYSRAC`. """ return oracledb.AuthMode(self._impl.mode) @property def osuser(self) -> str: """ - The operating system user that initiates the database connection. + A string recorded by Oracle Database as the operating system user who + originated the connection. """ return self._impl.osuser @@ -619,7 +654,7 @@ def pool_boundary(self) -> Union[list, str]: def pool_name(self) -> Union[list, str]: """ The name of the DRCP pool when using multi-pool DRCP with Oracle - Database 23.4 or higher. + Database 23.4, or higher. """ return [d.pool_name for d in self._impl.description_list.children] @@ -634,8 +669,8 @@ def port(self) -> Union[list, int]: @property def program(self) -> str: """ - The name of the executable program or application connected to the - Oracle Database. + A string recorded by Oracle Database as the program from which the + connection originates. 
""" return self._impl.program @@ -661,7 +696,8 @@ def proxy_user(self) -> str: @_flatten_value def purity(self) -> Union[list, oracledb.Purity]: """ - Purity to use for Database Resident Connection Pooling (DRCP). + The connection purity to use for Database Resident Connection Pooling + (DRCP). """ return [ oracledb.Purity(d.purity) @@ -672,8 +708,8 @@ def purity(self) -> Union[list, oracledb.Purity]: @_flatten_value def retry_count(self) -> Union[list, int]: """ - The number of times that a connection attempt should be retried before - the attempt is terminated. + The number of times that initial connection establishment should be + retried before the connection attempt is terminated. """ return [d.retry_count for d in self._impl.description_list.children] @@ -681,7 +717,8 @@ def retry_count(self) -> Union[list, int]: @_flatten_value def retry_delay(self) -> Union[list, int]: """ - The number of seconds to wait before making a new connection attempt. + The number of seconds to wait before retrying to establish a + connection. """ return [d.retry_delay for d in self._impl.description_list.children] @@ -703,7 +740,7 @@ def sdu(self) -> Union[list, int]: def server_type(self) -> Union[list, str]: """ The type of server connection that should be established. If specified, - it should be one of "dedicated", "shared" or "pooled". + it should be one of strings "dedicated", "shared" or "pooled". """ return [d.server_type for d in self._impl.description_list.children] @@ -719,7 +756,8 @@ def service_name(self) -> Union[list, str]: def shardingkey(self) -> list: """ A list of strings, numbers, bytes or dates that identify the database - shard to connect to. This value is only used in thick mode. + shard to connect to. This value is only used in python-oracledb Thick + mode. """ return self._impl.shardingkey @@ -760,7 +798,7 @@ def ssl_server_cert_dn(self) -> Union[list, str]: @_flatten_value def ssl_server_dn_match(self) -> Union[list, bool]: """ - Boolean indicating whether the server certificate distinguished name + A boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead. @@ -781,7 +819,7 @@ def ssl_version(self) -> Union[list, ssl.TLSVersion]: @property def stmtcachesize(self) -> int: """ - Identifies the initial size of the statement cache. + The size of the statement cache. """ return self._impl.stmtcachesize @@ -789,7 +827,8 @@ def stmtcachesize(self) -> int: def supershardingkey(self) -> list: """ A list of strings, numbers, bytes or dates that identify the database - shard to connect to. This value is only used in thick mode. + shard to connect to. This value is only used in python-oracledb Thick + mode. """ return self._impl.supershardingkey @@ -797,7 +836,7 @@ def supershardingkey(self) -> list: def tag(self) -> str: """ Identifies the type of connection that should be returned from a pool. - This value is only used in thick mode. + This value is only used in python-oracledb Thick mode. """ return self._impl.tag @@ -805,7 +844,7 @@ def tag(self) -> str: @_flatten_value def tcp_connect_timeout(self) -> Union[list, float]: """ - A float indicating the maximum number of seconds to wait for + A float indicating the maximum number of seconds to wait when establishing a connection to the database host. 
""" return [ @@ -815,25 +854,26 @@ def tcp_connect_timeout(self) -> Union[list, float]: @property def terminal(self) -> str: """ - The terminal identifier from which the connection originates. + A string recorded by Oracle Database as the terminal identifier from + which the connection originates. """ return self._impl.terminal @property def thick_mode_dsn_passthrough(self) -> bool: """ - Boolean indicating whether to pass the connect string to the Oracle + A boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing by the driver. Setting this - to False makes thick and thin mode applications behave similarly - regarding connection string parameter handling and locating any - optional tnsnames.ora configuration file. + to False makes python-oracledb Thick and Thin mode applications behave + similarly regarding connection string parameter handling and locating + any optional tnsnames.ora configuration file. """ return self._impl.thick_mode_dsn_passthrough @property def user(self) -> str: """ - The name of the user to connect to. + The name of the database user to connect to. """ return self._impl.user @@ -841,7 +881,7 @@ def user(self) -> str: @_flatten_value def use_sni(self) -> Union[list, bool]: """ - Boolean indicating whether to use the TLS SNI extension to bypass the + A boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required. """ return [d.use_sni for d in self._impl.description_list.children] @@ -850,7 +890,7 @@ def use_sni(self) -> Union[list, bool]: @_flatten_value def use_tcp_fast_open(self) -> Union[list, bool]: """ - Boolean indicating whether to use TCP fast open. This is an Oracle + A boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information. @@ -863,9 +903,10 @@ def use_tcp_fast_open(self) -> Union[list, bool]: @_flatten_value def wallet_location(self) -> Union[list, str]: """ - The directory where the wallet can be found. In thin mode this must be - the directory containing the PEM-encoded wallet file ewallet.pem. In - thick mode this must be the directory containing the file cwallet.sso. + The directory where the wallet can be found. In python-oracledb Thin + mode this must be the directory containing the PEM-encoded wallet file + ewallet.pem. In python-oracledb Thick mode this must be the directory + containing the file cwallet.sso. """ return [ d.wallet_location for d in self._impl.description_list.children @@ -979,23 +1020,24 @@ def set( object. All parameters are optional. A brief description of each parameter follows: - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to - ``proxy_user``: the name of the proxy user to connect to. If this value is not specified, it will be parsed out of user if user is in the form "user[proxy_user]" - - ``password``: the password for the user + - ``password``: the password for the database user - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new + password will take effect immediately upon a successful connection to + the database - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. 
This value is only used in thin mode + is encrypted. This value is only used in python-oracledb Thin mode - - ``access_token``: expected to be a string or a 2-tuple or a callable. - If it is a string, it specifies an Azure AD OAuth2 token used for - Open Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: a string, or a 2-tuple, or a callable. If it is a + string, it specifies an Entra ID OAuth2 token used for Open + Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. If it is a callable, it returns @@ -1004,16 +1046,16 @@ def set( create new connections but the current authentication token has expired - - ``host``: the name or IP address of the machine hosting the database - or the database listener + - ``host``: the hostname or IP address of the machine hosting the + database or the database listener - ``port``: the port number on which the database listener is listening - ``protocol``: one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic or encrypted network traffic (TLS) - - ``https_proxy``: the name or IP address of a proxy host to use for - tunneling secure connections + - ``https_proxy``: the hostname or IP address of a proxy host to use + for tunneling secure connections - ``https_proxy_port``: the port on which to communicate with the proxy host @@ -1026,29 +1068,30 @@ def set( service_name instead is recommended - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" - or "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" - - ``cclass``: connection class to use for Database Resident Connection - Pooling (DRCP) + - ``cclass``: the connection class to use for Database Resident + Connection Pooling (DRCP) - - ``purity``: purity to use for Database Resident Connection Pooling - (DRCP) + - ``purity``: the connection purity to use for Database Resident + Connection Pooling (DRCP) - - ``expire_time``: an integer indicating the number of minutes between - the sending of keepalive probes. If this parameter is set to a value - greater than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of + keepalive probes. If this parameter is set to a value greater than + zero it enables keepalive - - ``retry_count``: the number of times that a connection attempt should - be retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection - ``tcp_connect_timeout``: a float indicating the maximum number of - seconds to wait for establishing a connection to the database host + seconds to wait when establishing a connection to the database host - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. 
Note that if the ssl_server_cert_dn parameter is not privided, host name matching @@ -1061,39 +1104,50 @@ def set( hostname will be used - ``wallet_location``: the directory where the wallet can be found. In - thin mode this must be the directory containing the PEM-encoded - wallet file ewallet.pem. In thick mode this must be the directory - containing the file cwallet.sso + python-oracledb Thin mode this must be the directory containing the + PEM-encoded wallet file ewallet.pem. In python-oracledb Thick mode + this must be the directory containing the file cwallet.sso - - ``events``: boolean specifying whether events mode should be enabled. - This value is only used in thick mode and is needed for continuous - query notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be + enabled. This value is only used in python-oracledb Thick mode and is + needed for continuous query notification and high availability event + notifications - ``externalauth``: a boolean indicating whether to use external authentication - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, + :data:`oracledb.AUTH_MODE_PRELIM`, :data:`oracledb.AUTH_MODE_SYSASM`, + :data:`oracledb.AUTH_MODE_SYSBKP`, :data:`oracledb.AUTH_MODE_SYSDBA`, + :data:`oracledb.AUTH_MODE_SYSDGD`, :data:`oracledb.AUTH_MODE_SYSKMT`, + :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` - - ``disable_oob``: boolean indicating whether out-of-band breaks should - be disabled. This value is only used in thin mode. It has no effect - on Windows which does not support this functionality + - ``disable_oob``: a boolean indicating whether out-of-band breaks + should be disabled. This value is only used in python-oracledb Thin + mode. It has no effect on Windows which does not support this + functionality - - ``stmtcachesize``: identifies the initial size of the statement cache + - ``stmtcachesize``: the size of the statement cache - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter - ``tag``: identifies the type of connection that should be returned - from a pool. This value is only used in thick mode + from a pool. This value is only used in python-oracledb Thick mode - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in - thick mode + - ``matchanytag``: a boolean specifying whether any tag can be used + when acquiring a connection from the pool. This value is only used in + python-oracledb Thick mode - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. - For thick mode use the config_dir parameter of init_oracle_client() + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python- + oracledb Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` - ``appcontext``: application context used by the connection. 
It should be a list of 3-tuples (namespace, name, value) and each entry in the @@ -1101,16 +1155,16 @@ def set( - ``shardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value - is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP - environment variable + is only used in python-oracledb Thin mode. For python-oracledb Thick + mode set the ORA_DEBUG_JDWP environment variable - ``connection_id_prefix``: an application specific prefix that is added to the connection identifier used for tracing @@ -1132,7 +1186,7 @@ def set( indicating when pooled DRCP connections can be returned to the pool. This requires the use of DRCP with Oracle Database 23.4 or higher - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -1140,30 +1194,31 @@ def set( - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use - - ``program``: the name of the executable program or application - connected to the Oracle Database + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates - - ``machine``: the machine name of the client connecting to the Oracle - Database + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates - - ``terminal``: the terminal identifier from which the connection - originates + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates - - ``osuser``: the operating system user that initiates the database - connection + - ``osuser``: a string recorded by Oracle Database as the operating + system user who originated the connection - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database + - ``driver_name``: a string recorded by Oracle Database as the name of + the driver which originated the connection - - ``use_sni``: boolean indicating whether to use the TLS SNI extension - to bypass the second TLS neogiation that would otherwise be required + - ``use_sni``: a boolean indicating whether to use the TLS SNI + extension to bypass the second TLS neogiation that would otherwise be + required - - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without - parsing by the driver. Setting this to False makes thick and thin - mode applications behave similarly regarding connection string - parameter handling and locating any optional tnsnames.ora - configuration file + parsing by the driver. 
Setting this to False makes python-oracledb + Thick and Thin mode applications behave similarly regarding + connection string parameter handling and locating any optional + tnsnames.ora configuration file - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using @@ -1171,11 +1226,11 @@ def set( plugins - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP - with Oracle Database 23.4 or higher + with Oracle Database 23.4, or higher - ``handle``: an integer representing a pointer to a valid service - context handle. This value is only used in thick mode. It should be - used with extreme caution + context handle. This value is only used in python-oracledb Thick + mode. It should be used with extreme caution """ pass diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index bad2faa1..6fe43814 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1038,8 +1038,8 @@ def createlob( self, lob_type: DbType, data: Optional[Union[str, bytes]] = None ) -> LOB: """ - Creates and returns a new temporary :ref:`LOB object ` of the - specified type. The ``lob_type`` parameter should be one of + Creates and returns a new temporary LOB object of the specified type. + The ``lob_type`` parameter should be one of :data:`oracledb.DB_TYPE_CLOB`, :data:`oracledb.DB_TYPE_BLOB`, or :data:`oracledb.DB_TYPE_NCLOB`. @@ -1161,11 +1161,10 @@ def fetch_df_batches( def getSodaDatabase(self) -> SodaDatabase: """ - Returns a :ref:`SodaDatabase ` object for Simple Oracle - Document Access (SODA). All SODA operations are performed either on the - returned SodaDatabase object or from objects created by the returned - SodaDatabase object. See `here - `__ for additional information on SODA. """ @@ -1175,9 +1174,8 @@ def getSodaDatabase(self) -> SodaDatabase: def gettype(self, name: str) -> DbObjectType: """ - Returns a :ref:`type object ` given its name. This can - then be used to create objects which can be bound to cursors created by - this connection. + Returns a type object given its name. This can then be used to create + objects which can be bound to cursors created by this connection. """ self._verify_connected() obj_type_impl = self._impl.get_type(self, name) @@ -1287,9 +1285,8 @@ def subscribe( clientInitiated: bool = False, ) -> Subscription: """ - Returns a new :ref:`subscription object ` that receives - notifications for events that take place in the database that match the - given parameters. + Returns a new subscription object that receives notifications for + events that take place in the database that match the given parameters. The ``namespace`` parameter specifies the namespace the subscription uses. It can be one of :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` or @@ -1728,34 +1725,36 @@ def connect( """ Factory function which creates a connection to the database and returns it. - The dsn parameter (data source name) can be a string in the format + The ``dsn`` parameter (data source name) can be a string in the format user/password@connect_string or can simply be the connect string (in which case authentication credentials such as the username and password need to be specified separately). See the documentation on connection strings for more information. - The pool parameter is expected to be a pool object and the use of this - parameter is the equivalent of calling pool.acquire(). + The ``pool`` parameter is expected to be a pool object. 
This parameter was + deprecated in python-oracledb 3.0.0. Use :meth:`ConnectionPool.acquire()` + instead since the use of this parameter is the equivalent of calling this + method. - The conn_class parameter is expected to be Connection or a subclass of + The ``conn_class`` parameter is expected to be Connection or a subclass of Connection. - The params parameter is expected to be of type ConnectParams and contains - connection parameters that will be used when establishing the connection. - See the documentation on ConnectParams for more information. If this - parameter is not specified, the additional keyword parameters will be used - to create an instance of ConnectParams. If both the params parameter and - additional keyword parameters are specified, the values in the keyword - parameters have precedence. Note that if a dsn is also supplied, - then in the python-oracledb Thin mode, the values of the parameters - specified (if any) within the dsn will override the values passed as + The ``params`` parameter is expected to be of type ConnectParams and + contains connection parameters that will be used when establishing the + connection. See the documentation on ConnectParams for more information. + If this parameter is not specified, the additional keyword parameters will + be used to create an instance of ConnectParams. If both the ``params`` + parameter and additional keyword parameters are specified, the values in + the keyword parameters have precedence. Note that if a ``dsn`` is also + supplied, then in python-oracledb Thin mode, the values of the parameters + specified (if any) within the ``dsn`` will override the values passed as additional keyword parameters, which themselves override the values set in - the params parameter object. + the ``params`` parameter object. The following parameters are all optional. A brief description of each parameter follows: - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to (default: None) - ``proxy_user``: the name of the proxy user to connect to. If this value @@ -1763,30 +1762,30 @@ def connect( "user[proxy_user]" (default: None) - - ``password``: the password for the user + - ``password``: the password for the database user (default: None) - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new password + will take effect immediately upon a successful connection to the database (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + encrypted. This value is only used in python-oracledb Thin mode (default: None) - - ``access_token``: expected to be a string or a 2-tuple or a callable. If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, - it specifies the token and private key strings used for Oracle Cloud - Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired + - ``access_token``: a string, or a 2-tuple, or a callable. 
If it is a + string, it specifies an Entra ID OAuth2 token used for Open Authorization + (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies + the token and private key strings used for Oracle Cloud Infrastructure + (OCI) Identity and Access Management (IAM) token based authentication. If + it is a callable, it returns either a string or a 2-tuple used for OAuth + 2.0 or OCI IAM token based authentication and is useful when the pool + needs to expand and create new connections but the current authentication + token has expired (default: None) - - ``host``: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the hostname or IP address of the machine hosting the database + or the database listener (default: None) - ``port``: the port number on which the database listener is listening @@ -1796,7 +1795,7 @@ def connect( use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - ``https_proxy``: the name or IP address of a proxy host to use for + - ``https_proxy``: the hostname or IP address of a proxy host to use for tunneling secure connections (default: None) @@ -1815,35 +1814,37 @@ def connect( (default: None) - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" or - "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" (default: None) - - ``cclass``: connection class to use for Database Resident Connection + - ``cclass``: the connection class to use for Database Resident Connection Pooling (DRCP) (default: None) - - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) - (default: oracledb.PURITY_DEFAULT) + - ``purity``: the connection purity to use for Database Resident Connection + Pooling (DRCP) + (default: :attr:`oracledb.PURITY_DEFAULT`) - - ``expire_time``: an integer indicating the number of minutes between the - sending of keepalive probes. If this parameter is set to a value greater - than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of keepalive + probes. If this parameter is set to a value greater than zero it enables + keepalive (default: 0) - - ``retry_count``: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated (default: 0) - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection (default: 1) - ``tcp_connect_timeout``: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + to wait when establishing a connection to the database host (default: 20.0) - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is @@ -1856,50 +1857,60 @@ def connect( for any verfication. Otherwise the hostname will be used (default: None) - - ``wallet_location``: the directory where the wallet can be found. 
In thin - mode this must be the directory containing the PEM-encoded wallet file - ewallet.pem. In thick mode this must be the directory containing the file - cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. In + python-oracledb Thin mode this must be the directory containing the PEM- + encoded wallet file ewallet.pem. In python-oracledb Thick mode this must + be the directory containing the file cwallet.sso (default: None) - - ``events``: boolean specifying whether events mode should be enabled. - This value is only used in thick mode and is needed for continuous query - notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be enabled. + This value is only used in python-oracledb Thick mode and is needed for + continuous query notification and high availability event notifications (default: False) - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA - (default: oracledb.AUTH_MODE_DEFAULT) - - - ``disable_oob``: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, :data:`oracledb.AUTH_MODE_PRELIM`, + :data:`oracledb.AUTH_MODE_SYSASM`, :data:`oracledb.AUTH_MODE_SYSBKP`, + :data:`oracledb.AUTH_MODE_SYSDBA`, :data:`oracledb.AUTH_MODE_SYSDGD`, + :data:`oracledb.AUTH_MODE_SYSKMT`, :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` + (default: :attr:`oracledb.AUTH_MODE_DEFAULT`) + + - ``disable_oob``: a boolean indicating whether out-of-band breaks should + be disabled. This value is only used in python-oracledb Thin mode. It has + no effect on Windows which does not support this functionality (default: False) - - ``stmtcachesize``: identifies the initial size of the statement cache - (default: oracledb.defaults.stmtcachesize) + - ``stmtcachesize``: the size of the statement cache + (default: :attr:`oracledb.defaults.stmtcachesize + `) - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter (default: None) - ``tag``: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode + pool. This value is only used in python-oracledb Thick mode (default: None) - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in thick - mode + - ``matchanytag``: a boolean specifying whether any tag can be used when + acquiring a connection from the pool. This value is only used in python- + oracledb Thick mode (default: False) - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. For - thick mode use the config_dir parameter of init_oracle_client() - (default: oracledb.defaults.config_dir) + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python-oracledb + Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. 
Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` + (default: :attr:`oracledb.defaults.config_dir + `) - ``appcontext``: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple @@ -1907,18 +1918,19 @@ def connect( (default: None) - ``shardingkey``: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. This value is only used in thick mode + the database shard to connect to. This value is only used in python- + oracledb Thick mode (default: None) - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only - used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable + used in python-oracledb Thin mode. For python-oracledb Thick mode set + the ORA_DEBUG_JDWP environment variable (default: None) - ``connection_id_prefix``: an application specific prefix that is added to @@ -1944,7 +1956,7 @@ def connect( requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -1954,36 +1966,42 @@ def connect( ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - ``program``: the name of the executable program or application connected - to the Oracle Database - (default: oracledb.defaults.program) + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates + (default: :attr:`oracledb.defaults.program + `) - - ``machine``: the machine name of the client connecting to the Oracle - Database - (default: oracledb.defaults.machine) + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates + (default: :attr:`oracledb.defaults.machine + `) - - ``terminal``: the terminal identifier from which the connection - originates - (default: oracledb.defaults.terminal) + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates + (default: :attr:`oracledb.defaults.terminal + `) - - ``osuser``: the operating system user that initiates the database - connection - (default: oracledb.defaults.osuser) + - ``osuser``: a string recorded by Oracle Database as the operating system + user who originated the connection + (default: :attr:`oracledb.defaults.osuser + `) - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database - (default: oracledb.defaults.driver_name) + - ``driver_name``: a string recorded by Oracle Database as the name of the + driver which originated the connection + (default: :attr:`oracledb.defaults.driver_name + `) - - ``use_sni``: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: a boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - ``thick_mode_dsn_passthrough``: 
boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing - by the driver. Setting this to False makes thick and thin mode - applications behave similarly regarding connection string parameter + by the driver. Setting this to False makes python-oracledb Thick and Thin + mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file - (default: oracledb.defaults.thick_mode_dsn_passthrough) + (default: :attr:`oracledb.defaults.thick_mode_dsn_passthrough + `) - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the @@ -1991,12 +2009,12 @@ def connect( (default: None) - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + Oracle Database 23.4, or higher (default: None) - ``handle``: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. It should be used with - extreme caution + handle. This value is only used in python-oracledb Thick mode. It should + be used with extreme caution (default: 0) """ pass @@ -2507,9 +2525,8 @@ async def fetchone( async def gettype(self, name: str) -> DbObjectType: """ - Returns a :ref:`type object ` given its name. This can - then be used to create objects which can be bound to cursors created by - this connection. + Returns a type object given its name. This can then be used to create + objects which can be bound to cursors created by this connection. """ self._verify_connected() obj_type_impl = await self._impl.get_type(self, name) @@ -2922,34 +2939,36 @@ def connect_async( """ Factory function which creates a connection to the database and returns it. - The dsn parameter (data source name) can be a string in the format + The ``dsn`` parameter (data source name) can be a string in the format user/password@connect_string or can simply be the connect string (in which case authentication credentials such as the username and password need to be specified separately). See the documentation on connection strings for more information. - The pool parameter is expected to be a pool object and the use of this - parameter is the equivalent of calling pool.acquire(). - - The conn_class parameter is expected to be AsyncConnection or a subclass of - AsyncConnection. - - The params parameter is expected to be of type ConnectParams and contains - connection parameters that will be used when establishing the connection. - See the documentation on ConnectParams for more information. If this - parameter is not specified, the additional keyword parameters will be used - to create an instance of ConnectParams. If both the params parameter and - additional keyword parameters are specified, the values in the keyword - parameters have precedence. Note that if a dsn is also supplied, - then in the python-oracledb Thin mode, the values of the parameters + The ``pool`` parameter is expected to be a pool object. This parameter was + deprecated in python-oracledb 3.0.0. Use :meth:`ConnectionPool.acquire()` + instead since the use of this parameter is the equivalent of calling this + method. + + The ``conn_class`` parameter is expected to be AsyncConnection or a + subclass of AsyncConnection. 
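
As a point of reference while reviewing these parameter descriptions, a minimal
sketch of calling ``connect_async()`` follows. It is illustrative only: the
user, password, DSN, and query shown are placeholders chosen for this example
and do not come from this patch.

.. code-block:: python

    import asyncio

    import oracledb


    async def main():
        # Placeholder credentials and connect string; substitute your own.
        connection = await oracledb.connect_async(
            user="hr",
            password="hr_password",
            dsn="dbhost.example.com/orclpdb1",
        )
        try:
            with connection.cursor() as cursor:
                await cursor.execute("select sysdate from dual")
                (now,) = await cursor.fetchone()
                print(now)
        finally:
            await connection.close()


    asyncio.run(main())

The same keyword parameters documented below (``host``, ``port``,
``service_name``, and so on) can be passed instead of a ``dsn`` string, or
collected into a ``ConnectParams`` object supplied via ``params``.
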
+ + The ``params`` parameter is expected to be of type ConnectParams and + contains connection parameters that will be used when establishing the + connection. See the documentation on ConnectParams for more information. If + this parameter is not specified, the additional keyword parameters will be + used to create an instance of ConnectParams. If both the ``params`` + parameter and additional keyword parameters are specified, the values in + the keyword parameters have precedence. Note that if a ``dsn`` is also + supplied, then in python-oracledb Thin mode, the values of the parameters specified (if any) within the dsn will override the values passed as additional keyword parameters, which themselves override the values set in - the params parameter object. + the ``params`` parameter object. The following parameters are all optional. A brief description of each parameter follows: - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to (default: None) - ``proxy_user``: the name of the proxy user to connect to. If this value @@ -2957,30 +2976,30 @@ def connect_async( "user[proxy_user]" (default: None) - - ``password``: the password for the user + - ``password``: the password for the database user (default: None) - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new password + will take effect immediately upon a successful connection to the database (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + encrypted. This value is only used in python-oracledb Thin mode (default: None) - - ``access_token``: expected to be a string or a 2-tuple or a callable. If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, - it specifies the token and private key strings used for Oracle Cloud - Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired + - ``access_token``: a string, or a 2-tuple, or a callable. If it is a + string, it specifies an Entra ID OAuth2 token used for Open Authorization + (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies + the token and private key strings used for Oracle Cloud Infrastructure + (OCI) Identity and Access Management (IAM) token based authentication. 
If + it is a callable, it returns either a string or a 2-tuple used for OAuth + 2.0 or OCI IAM token based authentication and is useful when the pool + needs to expand and create new connections but the current authentication + token has expired (default: None) - - ``host``: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the hostname or IP address of the machine hosting the database + or the database listener (default: None) - ``port``: the port number on which the database listener is listening @@ -2990,7 +3009,7 @@ def connect_async( use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - ``https_proxy``: the name or IP address of a proxy host to use for + - ``https_proxy``: the hostname or IP address of a proxy host to use for tunneling secure connections (default: None) @@ -3009,35 +3028,37 @@ def connect_async( (default: None) - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" or - "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" (default: None) - - ``cclass``: connection class to use for Database Resident Connection + - ``cclass``: the connection class to use for Database Resident Connection Pooling (DRCP) (default: None) - - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) - (default: oracledb.PURITY_DEFAULT) + - ``purity``: the connection purity to use for Database Resident Connection + Pooling (DRCP) + (default: :attr:`oracledb.PURITY_DEFAULT`) - - ``expire_time``: an integer indicating the number of minutes between the - sending of keepalive probes. If this parameter is set to a value greater - than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of keepalive + probes. If this parameter is set to a value greater than zero it enables + keepalive (default: 0) - - ``retry_count``: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated (default: 0) - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection (default: 1) - ``tcp_connect_timeout``: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + to wait when establishing a connection to the database host (default: 20.0) - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is @@ -3050,50 +3071,60 @@ def connect_async( for any verfication. Otherwise the hostname will be used (default: None) - - ``wallet_location``: the directory where the wallet can be found. In thin - mode this must be the directory containing the PEM-encoded wallet file - ewallet.pem. In thick mode this must be the directory containing the file - cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. 
In + python-oracledb Thin mode this must be the directory containing the PEM- + encoded wallet file ewallet.pem. In python-oracledb Thick mode this must + be the directory containing the file cwallet.sso (default: None) - - ``events``: boolean specifying whether events mode should be enabled. - This value is only used in thick mode and is needed for continuous query - notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be enabled. + This value is only used in python-oracledb Thick mode and is needed for + continuous query notification and high availability event notifications (default: False) - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA - (default: oracledb.AUTH_MODE_DEFAULT) - - - ``disable_oob``: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, :data:`oracledb.AUTH_MODE_PRELIM`, + :data:`oracledb.AUTH_MODE_SYSASM`, :data:`oracledb.AUTH_MODE_SYSBKP`, + :data:`oracledb.AUTH_MODE_SYSDBA`, :data:`oracledb.AUTH_MODE_SYSDGD`, + :data:`oracledb.AUTH_MODE_SYSKMT`, :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` + (default: :attr:`oracledb.AUTH_MODE_DEFAULT`) + + - ``disable_oob``: a boolean indicating whether out-of-band breaks should + be disabled. This value is only used in python-oracledb Thin mode. It has + no effect on Windows which does not support this functionality (default: False) - - ``stmtcachesize``: identifies the initial size of the statement cache - (default: oracledb.defaults.stmtcachesize) + - ``stmtcachesize``: the size of the statement cache + (default: :attr:`oracledb.defaults.stmtcachesize + `) - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter (default: None) - ``tag``: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode + pool. This value is only used in python-oracledb Thick mode (default: None) - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in thick - mode + - ``matchanytag``: a boolean specifying whether any tag can be used when + acquiring a connection from the pool. This value is only used in python- + oracledb Thick mode (default: False) - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. For - thick mode use the config_dir parameter of init_oracle_client() - (default: oracledb.defaults.config_dir) + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python-oracledb + Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` + (default: :attr:`oracledb.defaults.config_dir + `) - ``appcontext``: application context used by the connection. 
It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple @@ -3101,18 +3132,19 @@ def connect_async( (default: None) - ``shardingkey``: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. This value is only used in thick mode + the database shard to connect to. This value is only used in python- + oracledb Thick mode (default: None) - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only - used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable + used in python-oracledb Thin mode. For python-oracledb Thick mode set + the ORA_DEBUG_JDWP environment variable (default: None) - ``connection_id_prefix``: an application specific prefix that is added to @@ -3138,7 +3170,7 @@ def connect_async( requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -3148,36 +3180,42 @@ def connect_async( ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - ``program``: the name of the executable program or application connected - to the Oracle Database - (default: oracledb.defaults.program) + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates + (default: :attr:`oracledb.defaults.program + `) - - ``machine``: the machine name of the client connecting to the Oracle - Database - (default: oracledb.defaults.machine) + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates + (default: :attr:`oracledb.defaults.machine + `) - - ``terminal``: the terminal identifier from which the connection - originates - (default: oracledb.defaults.terminal) + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates + (default: :attr:`oracledb.defaults.terminal + `) - - ``osuser``: the operating system user that initiates the database - connection - (default: oracledb.defaults.osuser) + - ``osuser``: a string recorded by Oracle Database as the operating system + user who originated the connection + (default: :attr:`oracledb.defaults.osuser + `) - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database - (default: oracledb.defaults.driver_name) + - ``driver_name``: a string recorded by Oracle Database as the name of the + driver which originated the connection + (default: :attr:`oracledb.defaults.driver_name + `) - - ``use_sni``: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: a boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing - by the 
driver. Setting this to False makes thick and thin mode - applications behave similarly regarding connection string parameter + by the driver. Setting this to False makes python-oracledb Thick and Thin + mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file - (default: oracledb.defaults.thick_mode_dsn_passthrough) + (default: :attr:`oracledb.defaults.thick_mode_dsn_passthrough + `) - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the @@ -3185,12 +3223,12 @@ def connect_async( (default: None) - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + Oracle Database 23.4, or higher (default: None) - ``handle``: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. It should be used with - extreme caution + handle. This value is only used in python-oracledb Thick mode. It should + be used with extreme caution (default: 0) """ pass diff --git a/src/oracledb/defaults.py b/src/oracledb/defaults.py index 9fdd9182..24ecff1b 100644 --- a/src/oracledb/defaults.py +++ b/src/oracledb/defaults.py @@ -36,7 +36,9 @@ class Defaults: """ - Identifies the default values used by the driver. + A singleton Defaults object contains attributes to adjust default + behaviors of python-oracledb. It is accessed using the :data:`defaults` + attribute of the imported module. """ __module__ = MODULE_NAME @@ -50,6 +52,9 @@ def arraysize(self) -> int: This read-write attribute specifies the default arraysize to use when cursors are created. + It is an attribute for tuning the performance of fetching rows from + Oracle Database. It does not affect data insertion. + This value is the default for :attr:`Cursor.arraysize` and :attr:`AsyncCursor.arraysize`. @@ -66,10 +71,21 @@ def config_dir(self) -> str: """ This read-write attribute specifies the directory in which the optional configuration file ``tnsnames.ora`` will be read in python-oracledb - Thin mode. + Thin mode. It is also used in Thick mode if + :attr:`Defaults.thick_mode_dsn_passthrough` is *False*. + + At time of ``import oracledb`` the value of + ``oracledb.defaults.config_dir`` will be set to (first one wins): + + - the value of ``$TNS_ADMIN``, if ``TNS_ADMIN`` is set. + + - ``$ORACLE_HOME/network/admin``, if ``$ORACLE_HOME`` is set. - This attribute is used in python-oracledb Thin mode. It is also used in - Thick mode if :attr:`Defaults.thick_mode_dsn_passthrough` is *False*. + Otherwise, ``oracledb.defaults.config_dir`` will not be set. + + At completion of a call to :meth:`oracledb.init_oracle_client()` in + python-oracledb Thick mode, the value of ``config_dir`` may get + changed. """ return self._impl.config_dir @@ -92,6 +108,16 @@ def fetch_lobs(self) -> bool: The value of ``oracledb.defaults.fetch_lobs`` does not affect LOBs returned as OUT binds. + The value of ``fetch_lobs`` can be overridden at statement execution by + passing an equivalent parameter. + + An output type handler such as the one previously required in the + obsolete cx_Oracle driver can alternatively be used to adjust the + returned type. If a type handler exists and returns a variable (that + is, `cursor.var(...)`), then that return variable is used. If the type + handler returns *None*, then the value of + ``oracledb.defaults.fetch_lobs`` is used. + This attribute has an initial value of *True*. 
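        As a brief sketch of this precedence (assuming an already opened
        connection in the variable ``connection``; the handler name is
        arbitrary)::

            import oracledb

            # Global default: fetch LOB columns directly as str/bytes
            # instead of LOB locator objects.
            oracledb.defaults.fetch_lobs = False

            def output_type_handler(cursor, metadata):
                # Returning a variable here takes precedence for CLOB
                # columns; returning None (the implicit default) lets the
                # value of oracledb.defaults.fetch_lobs apply instead.
                if metadata.type_code is oracledb.DB_TYPE_CLOB:
                    return cursor.var(
                        oracledb.DB_TYPE_LONG, arraysize=cursor.arraysize
                    )

            connection.outputtypehandler = output_type_handler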
""" return self._impl.fetch_lobs @@ -104,10 +130,20 @@ def fetch_lobs(self, value: bool): def fetch_decimals(self) -> bool: """ This read-write attribute specifies whether queries that contain - numbers should be fetched as `decimal.Decimal `__ objects or floating point - numbers. This can help avoid issues with converting numbers from Oracle - Database's decimal format to Python's binary format. + numbers should be fetched as Python decimal.Decimal objects or floating + point numbers. This can help avoid issues with converting numbers from + Oracle Database's decimal format to Python's binary format. + + The value of ``fetch_decimals`` can be overridden at statement + execution by passing an equivalent parameter. + + An output type handler such as previously required in the obsolete + cx_Oracle driver can alternatively be used to adjust the returned type. + If a type handler exists and returns a variable (that is, + ``cursor.var(...)``), then that return variable is used. If the type + handler returns *None*, then the value of + ``oracledb.defaults.fetch_decimals`` is used to determine whether to + return ``decimal.Decimal`` values. This attribute has an initial value of *False*. """ @@ -123,6 +159,9 @@ def prefetchrows(self) -> int: This read-write attribute specifies the default number of rows to prefetch when cursors are executed. + This is an attribute for tuning the performance of fetching rows from + Oracle Database. It does not affect data insertion. + This value is the default for :attr:`Cursor.prefetchrows` and :attr:`AsyncCursor.prefetchrows`. @@ -145,6 +184,9 @@ def stmtcachesize(self) -> int: This read-write attribute specifies the default size of the statement cache. + This is an attribute for tuning statement execution performance when a + statement is executed more than once. + This value is the default for :attr:`Connection.stmtcachesize`, :attr:`ConnectionPool.stmtcachesize`, :attr:`AsyncConnection.stmtcachesize`, and @@ -161,9 +203,9 @@ def stmtcachesize(self, value: int): @property def program(self) -> str: """ - This read-write attribute specifies the program name connected to - Oracle Database. This is the value used in the PROGRAM column of the - V$SESSION view. + This read-write attribute is a string recorded by Oracle Database + as the program from which the connection originates. This is the value + used in the PROGRAM column of the V$SESSION view. This attribute has an initial value that is populated by `sys.executable str: """ - This read-write attribute specifies the machine name of the client - connecting to Oracle Database. This is the value used in the MACHINE - column of the V$SESSION view. + This read-write attribute is a string recorded by Oracle Database as + the name of machine from which the connection originates. This is the + value used in the MACHINE column of the V$SESSION view. This attribute takes the host name where the application is running as its initial value. @@ -206,7 +248,7 @@ def terminal(self) -> str: the connection originates. This is the value used in the TERMINAL column of the V$SESSION view. - This attribute has an initial value of *unknown*. + This attribute has an initial value of "unknown". This attribute is only used in python-oracledb Thin mode. """ @@ -219,9 +261,9 @@ def terminal(self, value: str): @property def osuser(self) -> str: """ - This read-write attribute specifies the operating system user that - initiates the database connection. This is the value used in the OSUSER - column of the V$SESSION view. 
+ This read-write attribute is a string recorded by Oracle Database + as the operating system user who originated the connection. This is the + value used in the OSUSER column of the V$SESSION view. This attribute takes the login name of the user as its initial value. @@ -238,9 +280,10 @@ def osuser(self, value: str): @property def driver_name(self) -> str: """ - This read-write attribute specifies the driver used by the client to - connect to Oracle Database. This is the value used in the CLIENT_DRIVER - column of the V$SESSION_CONNECT_INFO view. + This read-write attribute is a string recorded by Oracle Database + as the name of the driver which originated the connection. This is the + value used in the CLIENT_DRIVER column of the V$SESSION_CONNECT_INFO + view. This attribute has an initial value of *None*. It is used as required in python-oracledb Thick and Thin mode. @@ -266,7 +309,7 @@ def driver_name(self, value: str): def thick_mode_dsn_passthrough(self) -> bool: """ This read-write attribute determines whether - :ref:`connection strings ` passed as the ``dsn`` parameter to + connection strings passed as the ``dsn`` parameter to :meth:`oracledb.connect()`, :meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, and :meth:`oracledb.create_pool_async()` in python-oracledb Thick mode will @@ -274,8 +317,7 @@ def thick_mode_dsn_passthrough(self) -> bool: The value of ``thick_mode_dsn_passthrough`` is ignored in python-oracledb Thin mode, which always parses all connect strings - (including reading a :ref:`tnsnames.ora ` file, if - required). + (including reading a tnsnames.ora file, if required). This attribute has an initial value of *True*. """ diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index 013a6370..e16d4f95 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -707,31 +707,33 @@ def create_pool( """ Creates a connection pool with the supplied parameters and returns it. - The dsn parameter (data source name) can be a string in the format + The ``dsn`` parameter (data source name) can be a string in the format user/password@connect_string or can simply be the connect string (in which case authentication credentials such as the username and password need to be specified separately). See the documentation on connection strings for more information. - The pool_class parameter is expected to be ConnectionPool or a subclass of - ConnectionPool. + The ``pool_class`` parameter is expected to be ConnectionPool or a subclass + of ConnectionPool. - The pool_alias parameter is expected to be a string representing the name - used to store and reference the pool in the python-oracledb connection + The ``pool_alias`` parameter is expected to be a string representing the + name used to store and reference the pool in the python-oracledb connection pool cache. If this parameter is not specified, then the pool will not be added to the cache. The value of this parameter can be used with the - oracledb.get_pool() and oracledb.connect() methods to access the pool. + :meth:`oracledb.get_pool()` and :meth:`oracledb.connect()` methods to + access the pool. - The params parameter is expected to be of type PoolParams and contains + The ``params`` parameter is expected to be of type PoolParams and contains parameters that are used to create the pool. See the documentation on PoolParams for more information. If this parameter is not specified, the additional keyword parameters will be used to create an instance of - PoolParams. 
If both the params parameter and additional keyword parameters - are specified, the values in the keyword parameters have precedence. - Note that if a dsn is also supplied, then in the python-oracledb Thin mode, - the values of the parameters specified (if any) within the dsn will - override the values passed as additional keyword parameters, which - themselves override the values set in the params parameter object. + PoolParams. If both the ``params`` parameter and additional keyword + parameters are specified, the values in the keyword parameters have + precedence. Note that if a ``dsn`` is also supplied, then in + python-oracledb Thin mode, the values of the parameters specified (if any) + within the dsn will override the values passed as additional keyword + parameters, which themselves override the values set in the ``params`` + parameter object. The following parameters are all optional. A brief description of each parameter follows: @@ -743,7 +745,7 @@ def create_pool( (default: 2) - ``increment``: the number of connections that should be added to the pool - whenever a new connection needs to be created + whenever the pool needs to grow (default: 1) - ``connectiontype``: the class of the connection that should be returned @@ -752,27 +754,28 @@ def create_pool( (default: None) - ``getmode``: how pool.acquire() will behave. One of the constants - oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, - oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT - (default: oracledb.POOL_GETMODE_WAIT) + :data:`oracledb.POOL_GETMODE_WAIT`, :data:`oracledb.POOL_GETMODE_NOWAIT`, + :data:`oracledb.POOL_GETMODE_FORCEGET`, or + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` + (default: :attr:`oracledb.POOL_GETMODE_WAIT`) - - ``homogeneous``: a boolean indicating whether the connections are - homogeneous (same user) or heterogeneous (multiple users) + - ``homogeneous``: a boolean indicating whether the connections in the pool + are homogeneous (same user) or heterogeneous (multiple users) (default: True) - - ``timeout``: length of time (in seconds) that a connection may remain + - ``timeout``: the length of time (in seconds) that a connection may remain idle in the pool before it is terminated. If it is 0 then connections are never terminated (default: 0) - - ``wait_timeout``: length of time (in milliseconds) that a caller should - wait when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT + - ``wait_timeout``: the length of time (in milliseconds) that a caller + should wait when acquiring a connection from the pool with getmode set to + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` (default: 0) - - ``max_lifetime_session``: length of time (in seconds) that connections - can remain in the pool. If it is 0 then connections may remain in the - pool indefinitely + - ``max_lifetime_session``: the length of time (in seconds) that + connections can remain in the pool. 
If it is 0 then connections may + remain in the pool indefinitely (default: 0) - ``session_callback``: a callable that is invoked when a connection is @@ -784,11 +787,11 @@ def create_pool( associated with a particular shard (default: 0) - - ``soda_metadata_cache``: boolean indicating whether or not the SODA + - ``soda_metadata_cache``: a boolean indicating whether or not the SODA metadata cache should be enabled (default: False) - - ``ping_interval``: length of time (in seconds) after which an unused + - ``ping_interval``: the length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by @@ -796,12 +799,12 @@ def create_pool( functionality will be disabled (default: 60) - - ``ping_timeout``: maximum length of time (in milliseconds) to wait for a - connection in the pool to respond to an internal ping to the database - before being discarded and replaced during a call to acquire() + - ``ping_timeout``: the maximum length of time (in milliseconds) to wait + for a connection in the pool to respond to an internal ping to the + database before being discarded and replaced during a call to acquire() (default: 5000) - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to (default: None) - ``proxy_user``: the name of the proxy user to connect to. If this value @@ -809,30 +812,30 @@ def create_pool( "user[proxy_user]" (default: None) - - ``password``: the password for the user + - ``password``: the password for the database user (default: None) - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new password + will take effect immediately upon a successful connection to the database (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + encrypted. This value is only used in python-oracledb Thin mode (default: None) - - ``access_token``: expected to be a string or a 2-tuple or a callable. If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, - it specifies the token and private key strings used for Oracle Cloud - Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired + - ``access_token``: a string, or a 2-tuple, or a callable. If it is a + string, it specifies an Entra ID OAuth2 token used for Open Authorization + (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies + the token and private key strings used for Oracle Cloud Infrastructure + (OCI) Identity and Access Management (IAM) token based authentication. 
If + it is a callable, it returns either a string or a 2-tuple used for OAuth + 2.0 or OCI IAM token based authentication and is useful when the pool + needs to expand and create new connections but the current authentication + token has expired (default: None) - - ``host``: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the hostname or IP address of the machine hosting the database + or the database listener (default: None) - ``port``: the port number on which the database listener is listening @@ -842,7 +845,7 @@ def create_pool( use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - ``https_proxy``: the name or IP address of a proxy host to use for + - ``https_proxy``: the hostname or IP address of a proxy host to use for tunneling secure connections (default: None) @@ -861,35 +864,37 @@ def create_pool( (default: None) - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" or - "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" (default: None) - - ``cclass``: connection class to use for Database Resident Connection + - ``cclass``: the connection class to use for Database Resident Connection Pooling (DRCP) (default: None) - - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) - (default: oracledb.PURITY_DEFAULT) + - ``purity``: the connection purity to use for Database Resident Connection + Pooling (DRCP) + (default: :attr:`oracledb.PURITY_DEFAULT`) - - ``expire_time``: an integer indicating the number of minutes between the - sending of keepalive probes. If this parameter is set to a value greater - than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of keepalive + probes. If this parameter is set to a value greater than zero it enables + keepalive (default: 0) - - ``retry_count``: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated (default: 0) - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection (default: 1) - ``tcp_connect_timeout``: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + to wait when establishing a connection to the database host (default: 20.0) - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is @@ -902,50 +907,60 @@ def create_pool( for any verfication. Otherwise the hostname will be used (default: None) - - ``wallet_location``: the directory where the wallet can be found. In thin - mode this must be the directory containing the PEM-encoded wallet file - ewallet.pem. In thick mode this must be the directory containing the file - cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. In + python-oracledb Thin mode this must be the directory containing the PEM- + encoded wallet file ewallet.pem. 
In python-oracledb Thick mode this must + be the directory containing the file cwallet.sso (default: None) - - ``events``: boolean specifying whether events mode should be enabled. - This value is only used in thick mode and is needed for continuous query - notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be enabled. + This value is only used in python-oracledb Thick mode and is needed for + continuous query notification and high availability event notifications (default: False) - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA - (default: oracledb.AUTH_MODE_DEFAULT) - - - ``disable_oob``: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, :data:`oracledb.AUTH_MODE_PRELIM`, + :data:`oracledb.AUTH_MODE_SYSASM`, :data:`oracledb.AUTH_MODE_SYSBKP`, + :data:`oracledb.AUTH_MODE_SYSDBA`, :data:`oracledb.AUTH_MODE_SYSDGD`, + :data:`oracledb.AUTH_MODE_SYSKMT`, :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` + (default: :attr:`oracledb.AUTH_MODE_DEFAULT`) + + - ``disable_oob``: a boolean indicating whether out-of-band breaks should + be disabled. This value is only used in python-oracledb Thin mode. It has + no effect on Windows which does not support this functionality (default: False) - - ``stmtcachesize``: identifies the initial size of the statement cache - (default: oracledb.defaults.stmtcachesize) + - ``stmtcachesize``: the size of the statement cache + (default: :attr:`oracledb.defaults.stmtcachesize + `) - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter (default: None) - ``tag``: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode + pool. This value is only used in python-oracledb Thick mode (default: None) - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in thick - mode + - ``matchanytag``: a boolean specifying whether any tag can be used when + acquiring a connection from the pool. This value is only used in python- + oracledb Thick mode (default: False) - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. For - thick mode use the config_dir parameter of init_oracle_client() - (default: oracledb.defaults.config_dir) + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python-oracledb + Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` + (default: :attr:`oracledb.defaults.config_dir + `) - ``appcontext``: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple @@ -953,18 +968,19 @@ def create_pool( (default: None) - ``shardingkey``: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. 
This value is only used in thick mode + the database shard to connect to. This value is only used in python- + oracledb Thick mode (default: None) - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only - used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable + used in python-oracledb Thin mode. For python-oracledb Thick mode set + the ORA_DEBUG_JDWP environment variable (default: None) - ``connection_id_prefix``: an application specific prefix that is added to @@ -990,7 +1006,7 @@ def create_pool( requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -1000,36 +1016,42 @@ def create_pool( ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - ``program``: the name of the executable program or application connected - to the Oracle Database - (default: oracledb.defaults.program) + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates + (default: :attr:`oracledb.defaults.program + `) - - ``machine``: the machine name of the client connecting to the Oracle - Database - (default: oracledb.defaults.machine) + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates + (default: :attr:`oracledb.defaults.machine + `) - - ``terminal``: the terminal identifier from which the connection - originates - (default: oracledb.defaults.terminal) + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates + (default: :attr:`oracledb.defaults.terminal + `) - - ``osuser``: the operating system user that initiates the database - connection - (default: oracledb.defaults.osuser) + - ``osuser``: a string recorded by Oracle Database as the operating system + user who originated the connection + (default: :attr:`oracledb.defaults.osuser + `) - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database - (default: oracledb.defaults.driver_name) + - ``driver_name``: a string recorded by Oracle Database as the name of the + driver which originated the connection + (default: :attr:`oracledb.defaults.driver_name + `) - - ``use_sni``: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: a boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing - by the driver. Setting this to False makes thick and thin mode - applications behave similarly regarding connection string parameter + by the driver. 
Setting this to False makes python-oracledb Thick and Thin + mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file - (default: oracledb.defaults.thick_mode_dsn_passthrough) + (default: :attr:`oracledb.defaults.thick_mode_dsn_passthrough + `) - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the @@ -1037,12 +1059,12 @@ def create_pool( (default: None) - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + Oracle Database 23.4, or higher (default: None) - ``handle``: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. It should be used with - extreme caution + handle. This value is only used in python-oracledb Thick mode. It should + be used with extreme caution (default: 0) """ pass @@ -1275,32 +1297,33 @@ def create_pool_async( """ Creates a connection pool with the supplied parameters and returns it. - The dsn parameter (data source name) can be a string in the format + The ``dsn`` parameter (data source name) can be a string in the format user/password@connect_string or can simply be the connect string (in which case authentication credentials such as the username and password need to be specified separately). See the documentation on connection strings for more information. - The pool_class parameter is expected to be AsyncConnectionPool or a + The ``pool_class`` parameter is expected to be AsyncConnectionPool or a subclass of AsyncConnectionPool. - The pool_alias parameter is expected to be a string representing the name - used to store and reference the pool in the python-oracledb connection + The ``pool_alias`` parameter is expected to be a string representing the + name used to store and reference the pool in the python-oracledb connection pool cache. If this parameter is not specified, then the pool will not be added to the cache. The value of this parameter can be used with the - oracledb.get_pool() and oracledb.connect_async() methods to access the - pool. + :meth:`oracledb.get_pool()` and :meth:o`racledb.connect_async()` methods to + access the pool. - The params parameter is expected to be of type PoolParams and contains + The ``params`` parameter is expected to be of type PoolParams and contains parameters that are used to create the pool. See the documentation on PoolParams for more information. If this parameter is not specified, the additional keyword parameters will be used to create an instance of - PoolParams. If both the params parameter and additional keyword parameters - are specified, the values in the keyword parameters have precedence. - Note that if a dsn is also supplied, then in the python-oracledb Thin mode, - the values of the parameters specified (if any) within the dsn will - override the values passed as additional keyword parameters, which - themselves override the values set in the params parameter object. + PoolParams. If both the ``params`` parameter and additional keyword + parameters are specified, the values in the keyword parameters have + precedence. Note that if a ``dsn`` is also supplied, then in + python-oracledb Thin mode, the values of the parameters specified (if any) + within the ``dsn`` will override the values passed as additional keyword + parameters, which themselves override the values set in the ``params`` + parameter object. 
The following parameters are all optional. A brief description of each parameter follows: @@ -1312,7 +1335,7 @@ def create_pool_async( (default: 2) - ``increment``: the number of connections that should be added to the pool - whenever a new connection needs to be created + whenever the pool needs to grow (default: 1) - ``connectiontype``: the class of the connection that should be returned @@ -1321,27 +1344,28 @@ def create_pool_async( (default: None) - ``getmode``: how pool.acquire() will behave. One of the constants - oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, - oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT - (default: oracledb.POOL_GETMODE_WAIT) + :data:`oracledb.POOL_GETMODE_WAIT`, :data:`oracledb.POOL_GETMODE_NOWAIT`, + :data:`oracledb.POOL_GETMODE_FORCEGET`, or + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` + (default: :attr:`oracledb.POOL_GETMODE_WAIT`) - - ``homogeneous``: a boolean indicating whether the connections are - homogeneous (same user) or heterogeneous (multiple users) + - ``homogeneous``: a boolean indicating whether the connections in the pool + are homogeneous (same user) or heterogeneous (multiple users) (default: True) - - ``timeout``: length of time (in seconds) that a connection may remain + - ``timeout``: the length of time (in seconds) that a connection may remain idle in the pool before it is terminated. If it is 0 then connections are never terminated (default: 0) - - ``wait_timeout``: length of time (in milliseconds) that a caller should - wait when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT + - ``wait_timeout``: the length of time (in milliseconds) that a caller + should wait when acquiring a connection from the pool with getmode set to + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` (default: 0) - - ``max_lifetime_session``: length of time (in seconds) that connections - can remain in the pool. If it is 0 then connections may remain in the - pool indefinitely + - ``max_lifetime_session``: the length of time (in seconds) that + connections can remain in the pool. If it is 0 then connections may + remain in the pool indefinitely (default: 0) - ``session_callback``: a callable that is invoked when a connection is @@ -1353,11 +1377,11 @@ def create_pool_async( associated with a particular shard (default: 0) - - ``soda_metadata_cache``: boolean indicating whether or not the SODA + - ``soda_metadata_cache``: a boolean indicating whether or not the SODA metadata cache should be enabled (default: False) - - ``ping_interval``: length of time (in seconds) after which an unused + - ``ping_interval``: the length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. 
If the ping to the database indicates the connection is not alive a replacement connection will be returned by @@ -1365,12 +1389,12 @@ def create_pool_async( functionality will be disabled (default: 60) - - ``ping_timeout``: maximum length of time (in milliseconds) to wait for a - connection in the pool to respond to an internal ping to the database - before being discarded and replaced during a call to acquire() + - ``ping_timeout``: the maximum length of time (in milliseconds) to wait + for a connection in the pool to respond to an internal ping to the + database before being discarded and replaced during a call to acquire() (default: 5000) - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to (default: None) - ``proxy_user``: the name of the proxy user to connect to. If this value @@ -1378,30 +1402,30 @@ def create_pool_async( "user[proxy_user]" (default: None) - - ``password``: the password for the user + - ``password``: the password for the database user (default: None) - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new password + will take effect immediately upon a successful connection to the database (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in thin mode + encrypted. This value is only used in python-oracledb Thin mode (default: None) - - ``access_token``: expected to be a string or a 2-tuple or a callable. If - it is a string, it specifies an Azure AD OAuth2 token used for Open - Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, - it specifies the token and private key strings used for Oracle Cloud - Infrastructure (OCI) Identity and Access Management (IAM) token based - authentication. If it is a callable, it returns either a string or a - 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is - useful when the pool needs to expand and create new connections but the - current authentication token has expired + - ``access_token``: a string, or a 2-tuple, or a callable. If it is a + string, it specifies an Entra ID OAuth2 token used for Open Authorization + (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies + the token and private key strings used for Oracle Cloud Infrastructure + (OCI) Identity and Access Management (IAM) token based authentication. 
If + it is a callable, it returns either a string or a 2-tuple used for OAuth + 2.0 or OCI IAM token based authentication and is useful when the pool + needs to expand and create new connections but the current authentication + token has expired (default: None) - - ``host``: the name or IP address of the machine hosting the database or - the database listener + - ``host``: the hostname or IP address of the machine hosting the database + or the database listener (default: None) - ``port``: the port number on which the database listener is listening @@ -1411,7 +1435,7 @@ def create_pool_async( use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - ``https_proxy``: the name or IP address of a proxy host to use for + - ``https_proxy``: the hostname or IP address of a proxy host to use for tunneling secure connections (default: None) @@ -1430,35 +1454,37 @@ def create_pool_async( (default: None) - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" or - "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" (default: None) - - ``cclass``: connection class to use for Database Resident Connection + - ``cclass``: the connection class to use for Database Resident Connection Pooling (DRCP) (default: None) - - ``purity``: purity to use for Database Resident Connection Pooling (DRCP) - (default: oracledb.PURITY_DEFAULT) + - ``purity``: the connection purity to use for Database Resident Connection + Pooling (DRCP) + (default: :attr:`oracledb.PURITY_DEFAULT`) - - ``expire_time``: an integer indicating the number of minutes between the - sending of keepalive probes. If this parameter is set to a value greater - than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of keepalive + probes. If this parameter is set to a value greater than zero it enables + keepalive (default: 0) - - ``retry_count``: the number of times that a connection attempt should be - retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated (default: 0) - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection (default: 1) - ``tcp_connect_timeout``: a float indicating the maximum number of seconds - to wait for establishing a connection to the database host + to wait when establishing a connection to the database host (default: 20.0) - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is @@ -1471,50 +1497,60 @@ def create_pool_async( for any verfication. Otherwise the hostname will be used (default: None) - - ``wallet_location``: the directory where the wallet can be found. In thin - mode this must be the directory containing the PEM-encoded wallet file - ewallet.pem. In thick mode this must be the directory containing the file - cwallet.sso + - ``wallet_location``: the directory where the wallet can be found. 
In + python-oracledb Thin mode this must be the directory containing the PEM- + encoded wallet file ewallet.pem. In python-oracledb Thick mode this must + be the directory containing the file cwallet.sso (default: None) - - ``events``: boolean specifying whether events mode should be enabled. - This value is only used in thick mode and is needed for continuous query - notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be enabled. + This value is only used in python-oracledb Thick mode and is needed for + continuous query notification and high availability event notifications (default: False) - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA - (default: oracledb.AUTH_MODE_DEFAULT) - - - ``disable_oob``: boolean indicating whether out-of-band breaks should be - disabled. This value is only used in thin mode. It has no effect on - Windows which does not support this functionality + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, :data:`oracledb.AUTH_MODE_PRELIM`, + :data:`oracledb.AUTH_MODE_SYSASM`, :data:`oracledb.AUTH_MODE_SYSBKP`, + :data:`oracledb.AUTH_MODE_SYSDBA`, :data:`oracledb.AUTH_MODE_SYSDGD`, + :data:`oracledb.AUTH_MODE_SYSKMT`, :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` + (default: :attr:`oracledb.AUTH_MODE_DEFAULT`) + + - ``disable_oob``: a boolean indicating whether out-of-band breaks should + be disabled. This value is only used in python-oracledb Thin mode. It has + no effect on Windows which does not support this functionality (default: False) - - ``stmtcachesize``: identifies the initial size of the statement cache - (default: oracledb.defaults.stmtcachesize) + - ``stmtcachesize``: the size of the statement cache + (default: :attr:`oracledb.defaults.stmtcachesize + `) - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter (default: None) - ``tag``: identifies the type of connection that should be returned from a - pool. This value is only used in thick mode + pool. This value is only used in python-oracledb Thick mode (default: None) - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in thick - mode + - ``matchanytag``: a boolean specifying whether any tag can be used when + acquiring a connection from the pool. This value is only used in python- + oracledb Thick mode (default: False) - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. For - thick mode use the config_dir parameter of init_oracle_client() - (default: oracledb.defaults.config_dir) + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python-oracledb + Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` + (default: :attr:`oracledb.defaults.config_dir + `) - ``appcontext``: application context used by the connection. 
It should be a list of 3-tuples (namespace, name, value) and each entry in the tuple @@ -1522,18 +1558,19 @@ def create_pool_async( (default: None) - ``shardingkey``: a list of strings, numbers, bytes or dates that identify - the database shard to connect to. This value is only used in thick mode + the database shard to connect to. This value is only used in python- + oracledb Thick mode (default: None) - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value is only - used in thin mode. For thick mode set the ORA_DEBUG_JDWP environment - variable + used in python-oracledb Thin mode. For python-oracledb Thick mode set + the ORA_DEBUG_JDWP environment variable (default: None) - ``connection_id_prefix``: an application specific prefix that is added to @@ -1559,7 +1596,7 @@ def create_pool_async( requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast open. + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -1569,36 +1606,42 @@ def create_pool_async( ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - ``program``: the name of the executable program or application connected - to the Oracle Database - (default: oracledb.defaults.program) + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates + (default: :attr:`oracledb.defaults.program + `) - - ``machine``: the machine name of the client connecting to the Oracle - Database - (default: oracledb.defaults.machine) + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates + (default: :attr:`oracledb.defaults.machine + `) - - ``terminal``: the terminal identifier from which the connection - originates - (default: oracledb.defaults.terminal) + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates + (default: :attr:`oracledb.defaults.terminal + `) - - ``osuser``: the operating system user that initiates the database - connection - (default: oracledb.defaults.osuser) + - ``osuser``: a string recorded by Oracle Database as the operating system + user who originated the connection + (default: :attr:`oracledb.defaults.osuser + `) - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database - (default: oracledb.defaults.driver_name) + - ``driver_name``: a string recorded by Oracle Database as the name of the + driver which originated the connection + (default: :attr:`oracledb.defaults.driver_name + `) - - ``use_sni``: boolean indicating whether to use the TLS SNI extension to + - ``use_sni``: a boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required (default: False) - - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass the + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without parsing - 
by the driver. Setting this to False makes thick and thin mode - applications behave similarly regarding connection string parameter + by the driver. Setting this to False makes python-oracledb Thick and Thin + mode applications behave similarly regarding connection string parameter handling and locating any optional tnsnames.ora configuration file - (default: oracledb.defaults.thick_mode_dsn_passthrough) + (default: :attr:`oracledb.defaults.thick_mode_dsn_passthrough + `) - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using plugins, such as the @@ -1606,12 +1649,12 @@ def create_pool_async( (default: None) - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP with - Oracle Database 23.4 or higher + Oracle Database 23.4, or higher (default: None) - ``handle``: an integer representing a pointer to a valid service context - handle. This value is only used in thick mode. It should be used with - extreme caution + handle. This value is only used in python-oracledb Thick mode. It should + be used with extreme caution (default: 0) """ pass diff --git a/src/oracledb/pool_params.py b/src/oracledb/pool_params.py index dc679111..a9f7a680 100644 --- a/src/oracledb/pool_params.py +++ b/src/oracledb/pool_params.py @@ -134,7 +134,7 @@ def __init__( (default: 2) - ``increment``: the number of connections that should be added to the - pool whenever a new connection needs to be created + pool whenever the pool needs to grow (default: 1) - ``connectiontype``: the class of the connection that should be @@ -143,25 +143,27 @@ def __init__( (default: None) - ``getmode``: how pool.acquire() will behave. One of the constants - oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, - oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT - (default: oracledb.POOL_GETMODE_WAIT) - - - ``homogeneous``: a boolean indicating whether the connections are - homogeneous (same user) or heterogeneous (multiple users) + :data:`oracledb.POOL_GETMODE_WAIT`, + :data:`oracledb.POOL_GETMODE_NOWAIT`, + :data:`oracledb.POOL_GETMODE_FORCEGET`, or + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` + (default: :attr:`oracledb.POOL_GETMODE_WAIT`) + + - ``homogeneous``: a boolean indicating whether the connections in the + pool are homogeneous (same user) or heterogeneous (multiple users) (default: True) - - ``timeout``: length of time (in seconds) that a connection may remain - idle in the pool before it is terminated. If it is 0 then connections - are never terminated + - ``timeout``: the length of time (in seconds) that a connection may + remain idle in the pool before it is terminated. If it is 0 then + connections are never terminated (default: 0) - - ``wait_timeout``: length of time (in milliseconds) that a caller + - ``wait_timeout``: the length of time (in milliseconds) that a caller should wait when acquiring a connection from the pool with getmode - set to oracledb.POOL_GETMODE_TIMEDWAIT + set to :data:`oracledb.POOL_GETMODE_TIMEDWAIT` (default: 0) - - ``max_lifetime_session``: length of time (in seconds) that + - ``max_lifetime_session``: the length of time (in seconds) that connections can remain in the pool. 
If it is 0 then connections may remain in the pool indefinitely (default: 0) @@ -175,25 +177,25 @@ def __init__( may be associated with a particular shard (default: 0) - - ``soda_metadata_cache``: boolean indicating whether or not the SODA + - ``soda_metadata_cache``: a boolean indicating whether or not the SODA metadata cache should be enabled (default: False) - - ``ping_interval``: length of time (in seconds) after which an unused - connection in the pool will be a candidate for pinging when + - ``ping_interval``: the length of time (in seconds) after which an + unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If ping_interval is a negative value the ping functionality will be disabled (default: 60) - - ``ping_timeout``: maximum length of time (in milliseconds) to wait - for a connection in the pool to respond to an internal ping to the - database before being discarded and replaced during a call to + - ``ping_timeout``: the maximum length of time (in milliseconds) to + wait for a connection in the pool to respond to an internal ping to + the database before being discarded and replaced during a call to acquire() (default: 5000) - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to (default: None) - ``proxy_user``: the name of the proxy user to connect to. If this @@ -201,20 +203,21 @@ def __init__( the form "user[proxy_user]" (default: None) - - ``password``: the password for the user + - ``password``: the password for the database user (default: None) - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new + password will take effect immediately upon a successful connection to + the database (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. This value is only used in thin mode + is encrypted. This value is only used in python-oracledb Thin mode (default: None) - - ``access_token``: expected to be a string or a 2-tuple or a callable. - If it is a string, it specifies an Azure AD OAuth2 token used for - Open Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: a string, or a 2-tuple, or a callable. If it is a + string, it specifies an Entra ID OAuth2 token used for Open + Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. 
If it is a callable, it returns @@ -224,8 +227,8 @@ def __init__( expired (default: None) - - ``host``: the name or IP address of the machine hosting the database - or the database listener + - ``host``: the hostname or IP address of the machine hosting the + database or the database listener (default: None) - ``port``: the port number on which the database listener is listening @@ -235,8 +238,8 @@ def __init__( to use unencrypted network traffic or encrypted network traffic (TLS) (default: "tcp") - - ``https_proxy``: the name or IP address of a proxy host to use for - tunneling secure connections + - ``https_proxy``: the hostname or IP address of a proxy host to use + for tunneling secure connections (default: None) - ``https_proxy_port``: the port on which to communicate with the proxy @@ -254,36 +257,37 @@ def __init__( (default: None) - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" - or "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" (default: None) - - ``cclass``: connection class to use for Database Resident Connection - Pooling (DRCP) + - ``cclass``: the connection class to use for Database Resident + Connection Pooling (DRCP) (default: None) - - ``purity``: purity to use for Database Resident Connection Pooling - (DRCP) - (default: oracledb.PURITY_DEFAULT) + - ``purity``: the connection purity to use for Database Resident + Connection Pooling (DRCP) + (default: :attr:`oracledb.PURITY_DEFAULT`) - - ``expire_time``: an integer indicating the number of minutes between - the sending of keepalive probes. If this parameter is set to a value - greater than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of + keepalive probes. If this parameter is set to a value greater than + zero it enables keepalive (default: 0) - - ``retry_count``: the number of times that a connection attempt should - be retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated (default: 0) - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection (default: 1) - ``tcp_connect_timeout``: a float indicating the maximum number of - seconds to wait for establishing a connection to the database host + seconds to wait when establishing a connection to the database host (default: 20.0) - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching @@ -298,49 +302,62 @@ def __init__( (default: None) - ``wallet_location``: the directory where the wallet can be found. In - thin mode this must be the directory containing the PEM-encoded - wallet file ewallet.pem. In thick mode this must be the directory - containing the file cwallet.sso + python-oracledb Thin mode this must be the directory containing the + PEM-encoded wallet file ewallet.pem. In python-oracledb Thick mode + this must be the directory containing the file cwallet.sso (default: None) - - ``events``: boolean specifying whether events mode should be enabled. 
- This value is only used in thick mode and is needed for continuous - query notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be + enabled. This value is only used in python-oracledb Thick mode and is + needed for continuous query notification and high availability event + notifications (default: False) - ``externalauth``: a boolean indicating whether to use external authentication (default: False) - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA - (default: oracledb.AUTH_MODE_DEFAULT) - - - ``disable_oob``: boolean indicating whether out-of-band breaks should - be disabled. This value is only used in thin mode. It has no effect - on Windows which does not support this functionality + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, + :data:`oracledb.AUTH_MODE_PRELIM`, :data:`oracledb.AUTH_MODE_SYSASM`, + :data:`oracledb.AUTH_MODE_SYSBKP`, :data:`oracledb.AUTH_MODE_SYSDBA`, + :data:`oracledb.AUTH_MODE_SYSDGD`, :data:`oracledb.AUTH_MODE_SYSKMT`, + :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` + (default: :attr:`oracledb.AUTH_MODE_DEFAULT`) + + - ``disable_oob``: a boolean indicating whether out-of-band breaks + should be disabled. This value is only used in python-oracledb Thin + mode. It has no effect on Windows which does not support this + functionality (default: False) - - ``stmtcachesize``: identifies the initial size of the statement cache - (default: oracledb.defaults.stmtcachesize) + - ``stmtcachesize``: the size of the statement cache + (default: :attr:`oracledb.defaults.stmtcachesize + `) - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter (default: None) - ``tag``: identifies the type of connection that should be returned - from a pool. This value is only used in thick mode + from a pool. This value is only used in python-oracledb Thick mode (default: None) - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in - thick mode + - ``matchanytag``: a boolean specifying whether any tag can be used + when acquiring a connection from the pool. This value is only used in + python-oracledb Thick mode (default: False) - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. - For thick mode use the config_dir parameter of init_oracle_client() - (default: oracledb.defaults.config_dir) + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python- + oracledb Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` + (default: :attr:`oracledb.defaults.config_dir + `) - ``appcontext``: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the @@ -349,18 +366,18 @@ def __init__( - ``shardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. 
This value is only used in - thick mode + python-oracledb Thick mode (default: None) - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value - is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP - environment variable + is only used in python-oracledb Thin mode. For python-oracledb Thick + mode set the ORA_DEBUG_JDWP environment variable (default: None) - ``connection_id_prefix``: an application specific prefix that is added to the connection identifier used for tracing (default: None) @@ -387,7 +404,7 @@ def __init__( This requires the use of DRCP with Oracle Database 23.4 or higher (default: None) - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -397,37 +414,44 @@ def __init__( ssl.TLSVersion.TLSv1_3 indicating which TLS version to use (default: None) - - ``program``: the name of the executable program or application - connected to the Oracle Database - (default: oracledb.defaults.program) - - - ``machine``: the machine name of the client connecting to the Oracle - Database - (default: oracledb.defaults.machine) - - - ``terminal``: the terminal identifier from which the connection - originates - (default: oracledb.defaults.terminal) - - - ``osuser``: the operating system user that initiates the database - connection - (default: oracledb.defaults.osuser) - - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database - (default: oracledb.defaults.driver_name) - - - ``use_sni``: boolean indicating whether to use the TLS SNI extension - to bypass the second TLS neogiation that would otherwise be required + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates + (default: :attr:`oracledb.defaults.program + `) + + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates + (default: :attr:`oracledb.defaults.machine + `) + + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates + (default: :attr:`oracledb.defaults.terminal + `) + + - ``osuser``: a string recorded by Oracle Database as the operating + system user who originated the connection + (default: :attr:`oracledb.defaults.osuser + `) + + - ``driver_name``: a string recorded by Oracle Database as the name of + the driver which originated the connection + (default: :attr:`oracledb.defaults.driver_name + `) + + - ``use_sni``: a boolean indicating whether to use the TLS SNI + extension to bypass the second TLS negotiation that would otherwise be + required (default: False) - - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without - parsing by the driver. Setting this to False makes thick and thin - mode applications behave similarly regarding connection string - parameter handling and locating any optional tnsnames.ora - configuration file - (default: oracledb.defaults.thick_mode_dsn_passthrough) + parsing by the driver. 
Setting this to False makes python-oracledb + Thick and Thin mode applications behave similarly regarding + connection string parameter handling and locating any optional + tnsnames.ora configuration file + (default: :attr:`oracledb.defaults.thick_mode_dsn_passthrough + `) - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using @@ -436,12 +460,12 @@ def __init__( (default: None) - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP - with Oracle Database 23.4 or higher + with Oracle Database 23.4, or higher (default: None) - ``handle``: an integer representing a pointer to a valid service - context handle. This value is only used in thick mode. It should be - used with extreme caution + context handle. This value is only used in python-oracledb Thick + mode. It should be used with extreme caution (default: 0) """ pass @@ -527,24 +551,26 @@ def connectiontype(self) -> Type["oracledb.Connection"]: def getmode(self) -> oracledb.PoolGetMode: """ How pool.acquire() will behave. One of the constants - oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, - oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT. + :data:`oracledb.POOL_GETMODE_WAIT`, + :data:`oracledb.POOL_GETMODE_NOWAIT`, + :data:`oracledb.POOL_GETMODE_FORCEGET`, or + :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. """ return oracledb.PoolGetMode(self._impl.getmode) @property def homogeneous(self) -> bool: """ - A boolean indicating whether the connections are homogeneous (same - user) or heterogeneous (multiple users). + A boolean indicating whether the connections in the pool are + homogeneous (same user) or heterogeneous (multiple users). """ return self._impl.homogeneous @property def increment(self) -> int: """ - The number of connections that should be added to the pool whenever a - new connection needs to be created. + The number of connections that should be added to the pool whenever the + pool needs to grow. """ return self._impl.increment @@ -558,8 +584,8 @@ def max(self) -> int: @property def max_lifetime_session(self) -> int: """ - Length of time (in seconds) that connections can remain in the pool. If - it is 0 then connections may remain in the pool indefinitely. + The length of time (in seconds) that connections can remain in the + pool. If it is 0 then connections may remain in the pool indefinitely. """ return self._impl.max_lifetime_session @@ -581,7 +607,7 @@ def min(self) -> int: @property def ping_interval(self) -> int: """ - Length of time (in seconds) after which an unused connection in the + The length of time (in seconds) after which an unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If @@ -593,8 +619,8 @@ def ping_interval(self) -> int: @property def ping_timeout(self) -> int: """ - Maximum length of time (in milliseconds) to wait for a connection in - the pool to respond to an internal ping to the database before being + The maximum length of time (in milliseconds) to wait for a connection + in the pool to respond to an internal ping to the database before being discarded and replaced during a call to acquire(). 
""" return self._impl.ping_timeout @@ -611,7 +637,7 @@ def session_callback(self) -> Callable: @property def soda_metadata_cache(self) -> bool: """ - Boolean indicating whether or not the SODA metadata cache should be + A boolean indicating whether or not the SODA metadata cache should be enabled. """ return self._impl.soda_metadata_cache @@ -619,8 +645,8 @@ def soda_metadata_cache(self) -> bool: @property def timeout(self) -> int: """ - Length of time (in seconds) that a connection may remain idle in the - pool before it is terminated. If it is 0 then connections are never + The length of time (in seconds) that a connection may remain idle in + the pool before it is terminated. If it is 0 then connections are never terminated. """ return self._impl.timeout @@ -628,9 +654,9 @@ def timeout(self) -> int: @property def wait_timeout(self) -> int: """ - Length of time (in milliseconds) that a caller should wait when + The length of time (in milliseconds) that a caller should wait when acquiring a connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT. + :data:`oracledb.POOL_GETMODE_TIMEDWAIT`. """ return self._impl.wait_timeout @@ -723,28 +749,30 @@ def set( - ``max``: the maximum number of connections the pool should contain - ``increment``: the number of connections that should be added to the - pool whenever a new connection needs to be created + pool whenever the pool needs to grow - ``connectiontype``: the class of the connection that should be returned during calls to pool.acquire(). It must be oracledb.Connection or a subclass of oracledb.Connection - ``getmode``: how pool.acquire() will behave. One of the constants - oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, - oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT + :data:`oracledb.POOL_GETMODE_WAIT`, + :data:`oracledb.POOL_GETMODE_NOWAIT`, + :data:`oracledb.POOL_GETMODE_FORCEGET`, or + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` - - ``homogeneous``: a boolean indicating whether the connections are - homogeneous (same user) or heterogeneous (multiple users) + - ``homogeneous``: a boolean indicating whether the connections in the + pool are homogeneous (same user) or heterogeneous (multiple users) - - ``timeout``: length of time (in seconds) that a connection may remain - idle in the pool before it is terminated. If it is 0 then connections - are never terminated + - ``timeout``: the length of time (in seconds) that a connection may + remain idle in the pool before it is terminated. If it is 0 then + connections are never terminated - - ``wait_timeout``: length of time (in milliseconds) that a caller + - ``wait_timeout``: the length of time (in milliseconds) that a caller should wait when acquiring a connection from the pool with getmode - set to oracledb.POOL_GETMODE_TIMEDWAIT + set to :data:`oracledb.POOL_GETMODE_TIMEDWAIT` - - ``max_lifetime_session``: length of time (in seconds) that + - ``max_lifetime_session``: the length of time (in seconds) that connections can remain in the pool. 
If it is 0 then connections may remain in the pool indefinitely @@ -755,38 +783,39 @@ def set( - ``max_sessions_per_shard``: the maximum number of connections that may be associated with a particular shard - - ``soda_metadata_cache``: boolean indicating whether or not the SODA + - ``soda_metadata_cache``: a boolean indicating whether or not the SODA metadata cache should be enabled - - ``ping_interval``: length of time (in seconds) after which an unused - connection in the pool will be a candidate for pinging when + - ``ping_interval``: the length of time (in seconds) after which an + unused connection in the pool will be a candidate for pinging when pool.acquire() is called. If the ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If ping_interval is a negative value the ping functionality will be disabled - - ``ping_timeout``: maximum length of time (in milliseconds) to wait - for a connection in the pool to respond to an internal ping to the - database before being discarded and replaced during a call to + - ``ping_timeout``: the maximum length of time (in milliseconds) to + wait for a connection in the pool to respond to an internal ping to + the database before being discarded and replaced during a call to acquire() - - ``user``: the name of the user to connect to + - ``user``: the name of the database user to connect to - ``proxy_user``: the name of the proxy user to connect to. If this value is not specified, it will be parsed out of user if user is in the form "user[proxy_user]" - - ``password``: the password for the user + - ``password``: the password for the database user - - ``newpassword``: the new password for the user. The new password will - take effect immediately upon a successful connection to the database + - ``newpassword``: a new password for the database user. The new + password will take effect immediately upon a successful connection to + the database - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. This value is only used in thin mode + is encrypted. This value is only used in python-oracledb Thin mode - - ``access_token``: expected to be a string or a 2-tuple or a callable. - If it is a string, it specifies an Azure AD OAuth2 token used for - Open Authorization (OAuth 2.0) token based authentication. If it is a + - ``access_token``: a string, or a 2-tuple, or a callable. If it is a + string, it specifies an Entra ID OAuth2 token used for Open + Authorization (OAuth 2.0) token based authentication. If it is a 2-tuple, it specifies the token and private key strings used for Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) token based authentication. 
If it is a callable, it returns @@ -795,16 +824,16 @@ def set( create new connections but the current authentication token has expired - - ``host``: the name or IP address of the machine hosting the database - or the database listener + - ``host``: the hostname or IP address of the machine hosting the + database or the database listener - ``port``: the port number on which the database listener is listening - ``protocol``: one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic or encrypted network traffic (TLS) - - ``https_proxy``: the name or IP address of a proxy host to use for - tunneling secure connections + - ``https_proxy``: the hostname or IP address of a proxy host to use + for tunneling secure connections - ``https_proxy_port``: the port on which to communicate with the proxy host @@ -817,29 +846,30 @@ def set( service_name instead is recommended - ``server_type``: the type of server connection that should be - established. If specified, it should be one of "dedicated", "shared" - or "pooled" + established. If specified, it should be one of strings "dedicated", + "shared" or "pooled" - - ``cclass``: connection class to use for Database Resident Connection - Pooling (DRCP) + - ``cclass``: the connection class to use for Database Resident + Connection Pooling (DRCP) - - ``purity``: purity to use for Database Resident Connection Pooling - (DRCP) + - ``purity``: the connection purity to use for Database Resident + Connection Pooling (DRCP) - - ``expire_time``: an integer indicating the number of minutes between - the sending of keepalive probes. If this parameter is set to a value - greater than zero it enables keepalive + - ``expire_time``: the number of minutes between the sending of + keepalive probes. If this parameter is set to a value greater than + zero it enables keepalive - - ``retry_count``: the number of times that a connection attempt should - be retried before the attempt is terminated + - ``retry_count``: the number of times that initial connection + establishment should be retried before the connection attempt is + terminated - - ``retry_delay``: the number of seconds to wait before making a new - connection attempt + - ``retry_delay``: the number of seconds to wait before retrying to + establish a connection - ``tcp_connect_timeout``: a float indicating the maximum number of - seconds to wait for establishing a connection to the database host + seconds to wait when establishing a connection to the database host - - ``ssl_server_dn_match``: boolean indicating whether the server + - ``ssl_server_dn_match``: a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching @@ -852,39 +882,50 @@ def set( hostname will be used - ``wallet_location``: the directory where the wallet can be found. In - thin mode this must be the directory containing the PEM-encoded - wallet file ewallet.pem. In thick mode this must be the directory - containing the file cwallet.sso + python-oracledb Thin mode this must be the directory containing the + PEM-encoded wallet file ewallet.pem. In python-oracledb Thick mode + this must be the directory containing the file cwallet.sso - - ``events``: boolean specifying whether events mode should be enabled. 
- This value is only used in thick mode and is needed for continuous - query notification and high availability event notifications + - ``events``: a boolean specifying whether events mode should be + enabled. This value is only used in python-oracledb Thick mode and is + needed for continuous query notification and high availability event + notifications - ``externalauth``: a boolean indicating whether to use external authentication - - ``mode``: authorization mode to use. For example - oracledb.AUTH_MODE_SYSDBA + - ``mode``: the authorization mode to use. One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, + :data:`oracledb.AUTH_MODE_PRELIM`, :data:`oracledb.AUTH_MODE_SYSASM`, + :data:`oracledb.AUTH_MODE_SYSBKP`, :data:`oracledb.AUTH_MODE_SYSDBA`, + :data:`oracledb.AUTH_MODE_SYSDGD`, :data:`oracledb.AUTH_MODE_SYSKMT`, + :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` - - ``disable_oob``: boolean indicating whether out-of-band breaks should - be disabled. This value is only used in thin mode. It has no effect - on Windows which does not support this functionality + - ``disable_oob``: a boolean indicating whether out-of-band breaks + should be disabled. This value is only used in python-oracledb Thin + mode. It has no effect on Windows which does not support this + functionality - - ``stmtcachesize``: identifies the initial size of the statement cache + - ``stmtcachesize``: the size of the statement cache - ``edition``: edition to use for the connection. This parameter cannot be used simultaneously with the cclass parameter - ``tag``: identifies the type of connection that should be returned - from a pool. This value is only used in thick mode + from a pool. This value is only used in python-oracledb Thick mode - - ``matchanytag``: boolean specifying whether any tag can be used when - acquiring a connection from the pool. This value is only used in - thick mode + - ``matchanytag``: a boolean specifying whether any tag can be used + when acquiring a connection from the pool. This value is only used in + python-oracledb Thick mode - - ``config_dir``: directory in which the optional tnsnames.ora - configuration file is located. This value is only used in thin mode. - For thick mode use the config_dir parameter of init_oracle_client() + - ``config_dir``: a directory in which the optional tnsnames.ora + configuration file is located. This value is only used in python- + oracledb Thin mode. For python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick + mode use the ``config_dir`` parameter of + :meth:`oracledb.init_oracle_client()` - ``appcontext``: application context used by the connection. It should be a list of 3-tuples (namespace, name, value) and each entry in the @@ -892,16 +933,16 @@ def set( - ``shardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode - ``supershardingkey``: a list of strings, numbers, bytes or dates that identify the database shard to connect to. This value is only used in - thick mode + python-oracledb Thick mode - ``debug_jdwp``: a string with the format "host=;port=" that specifies the host and port of the PL/SQL debugger. This value - is only used in thin mode. For thick mode set the ORA_DEBUG_JDWP - environment variable + is only used in python-oracledb Thin mode. 
For python-oracledb Thick + mode set the ORA_DEBUG_JDWP environment variable - ``connection_id_prefix``: an application specific prefix that is added to the connection identifier used for tracing @@ -923,7 +964,7 @@ def set( indicating when pooled DRCP connections can be returned to the pool. This requires the use of DRCP with Oracle Database 23.4 or higher - - ``use_tcp_fast_open``: boolean indicating whether to use TCP fast + - ``use_tcp_fast_open``: a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. Please refer to the ADB-S documentation for more information @@ -931,30 +972,31 @@ def set( - ``ssl_version``: one of the values ssl.TLSVersion.TLSv1_2 or ssl.TLSVersion.TLSv1_3 indicating which TLS version to use - - ``program``: the name of the executable program or application - connected to the Oracle Database + - ``program``: a string recorded by Oracle Database as the program from + which the connection originates - - ``machine``: the machine name of the client connecting to the Oracle - Database + - ``machine``: a string recorded by Oracle Database as the name of the + machine from which the connection originates - - ``terminal``: the terminal identifier from which the connection - originates + - ``terminal``: a string recorded by Oracle Database as the terminal + identifier from which the connection originates - - ``osuser``: the operating system user that initiates the database - connection + - ``osuser``: a string recorded by Oracle Database as the operating + system user who originated the connection - - ``driver_name``: the driver name used by the client to connect to the - Oracle Database + - ``driver_name``: a string recorded by Oracle Database as the name of + the driver which originated the connection - - ``use_sni``: boolean indicating whether to use the TLS SNI extension - to bypass the second TLS neogiation that would otherwise be required + - ``use_sni``: a boolean indicating whether to use the TLS SNI + extension to bypass the second TLS negotiation that would otherwise be + required - - ``thick_mode_dsn_passthrough``: boolean indicating whether to pass + - ``thick_mode_dsn_passthrough``: a boolean indicating whether to pass the connect string to the Oracle Client libraries unchanged without - parsing by the driver. Setting this to False makes thick and thin - mode applications behave similarly regarding connection string - parameter handling and locating any optional tnsnames.ora - configuration file + parsing by the driver. Setting this to False makes python-oracledb + Thick and Thin mode applications behave similarly regarding + connection string parameter handling and locating any optional + tnsnames.ora configuration file - ``extra_auth_params``: a dictionary containing configuration parameters necessary for Oracle Database authentication using @@ -962,10 +1004,10 @@ def set( plugins - ``pool_name``: the name of the DRCP pool when using multi-pool DRCP - with Oracle Database 23.4 or higher + with Oracle Database 23.4, or higher - ``handle``: an integer representing a pointer to a valid service - context handle. This value is only used in thick mode. It should be - used with extreme caution + context handle. This value is only used in python-oracledb Thick + mode. 
It should be used with extreme caution """ pass diff --git a/utils/build_from_template.py b/utils/build_from_template.py index e9e61ee9..a0603c59 100644 --- a/utils/build_from_template.py +++ b/utils/build_from_template.py @@ -109,7 +109,16 @@ def get_help_string( width=TEXT_WIDTH, ) if with_default: - help_string += f"\n{indent} (default: {self.default})" + if self.default.startswith("oracledb.defaults"): + default_attr_name = self.default.split(".")[-1] + help_string += ( + f"\n{indent} (default: :attr:`{self.default}" + f"\n{indent} `)" + ) + elif self.default.startswith("oracledb."): + help_string += f"\n{indent} (default: :attr:`{self.default}`)" + else: + help_string += f"\n{indent} (default: {self.default})" return help_string diff --git a/utils/fields.cfg b/utils/fields.cfg index 5f03c191..18d5abd9 100644 --- a/utils/fields.cfg +++ b/utils/fields.cfg @@ -49,8 +49,8 @@ type = int default: 1 pool_only: True description = - the number of connections that should be added to the pool whenever a new - connection needs to be created + the number of connections that should be added to the pool whenever the + pool needs to grow [connectiontype] type = Type["oracledb.Connection"] @@ -66,41 +66,43 @@ default = oracledb.POOL_GETMODE_WAIT pool_only: True description = how pool.acquire() will behave. One of the constants - oracledb.POOL_GETMODE_WAIT, oracledb.POOL_GETMODE_NOWAIT, - oracledb.POOL_GETMODE_FORCEGET, or oracledb.POOL_GETMODE_TIMEDWAIT + :data:`oracledb.POOL_GETMODE_WAIT`, :data:`oracledb.POOL_GETMODE_NOWAIT`, + :data:`oracledb.POOL_GETMODE_FORCEGET`, or + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` [homogeneous] type = bool default = True pool_only: True description = - a boolean indicating whether the connections are homogeneous (same user) or - heterogeneous (multiple users) + a boolean indicating whether the connections in the pool are homogeneous + (same user) or heterogeneous (multiple users) [timeout] type = int default = 0 pool_only: True description = - length of time (in seconds) that a connection may remain idle in the pool - before it is terminated. If it is 0 then connections are never terminated + the length of time (in seconds) that a connection may remain idle in the + pool before it is terminated. If it is 0 then connections are never + terminated [wait_timeout] type = int default = 0 pool_only: True description = - length of time (in milliseconds) that a caller should wait when acquiring a - connection from the pool with getmode set to - oracledb.POOL_GETMODE_TIMEDWAIT + the length of time (in milliseconds) that a caller should wait when + acquiring a connection from the pool with getmode set to + :data:`oracledb.POOL_GETMODE_TIMEDWAIT` [max_lifetime_session] type = int default = 0 pool_only: True description = - length of time (in seconds) that connections can remain in the pool. If it - is 0 then connections may remain in the pool indefinitely + the length of time (in seconds) that connections can remain in the pool. If + it is 0 then connections may remain in the pool indefinitely [session_callback] type = Callable @@ -122,16 +124,16 @@ type = bool default = False pool_only: True description = - boolean indicating whether or not the SODA metadata cache should be enabled + a boolean indicating whether or not the SODA metadata cache should be enabled [ping_interval] type = int default = 60 pool_only: True description = - length of time (in seconds) after which an unused connection in the pool - will be a candidate for pinging when pool.acquire() is called. 
If the ping - to the database indicates the connection is not alive a replacement + the length of time (in seconds) after which an unused connection in the + pool will be a candidate for pinging when pool.acquire() is called. If the + ping to the database indicates the connection is not alive a replacement connection will be returned by pool.acquire(). If ping_interval is a negative value the ping functionality will be disabled @@ -140,9 +142,9 @@ type = int default = 5000 pool_only: True description = - maximum length of time (in milliseconds) to wait for a connection in the - pool to respond to an internal ping to the database before being discarded - and replaced during a call to acquire() + the maximum length of time (in milliseconds) to wait for a connection in + the pool to respond to an internal ping to the database before being + discarded and replaced during a call to acquire() # common parameters @@ -150,7 +152,7 @@ description = [user] type = str description = - the name of the user to connect to + the name of the database user to connect to [proxy_user] type = str @@ -162,13 +164,13 @@ description = type = str hidden = True description = - the password for the user + the password for the database user [newpassword] type = str hidden = True description = - the new password for the user. The new password will take effect + a new password for the database user. The new password will take effect immediately upon a successful connection to the database [wallet_password] @@ -176,17 +178,17 @@ type = str hidden = True description = the password to use to decrypt the wallet, if it is encrypted. This value - is only used in thin mode + is only used in python-oracledb Thin mode [access_token] type = Union[str, tuple, Callable] hidden = True description = - expected to be a string or a 2-tuple or a callable. If it is a string, it - specifies an Azure AD OAuth2 token used for Open Authorization (OAuth 2.0) - token based authentication. If it is a 2-tuple, it specifies the token and - private key strings used for Oracle Cloud Infrastructure (OCI) Identity and - Access Management (IAM) token based authentication. If it is a callable, it + a string, or a 2-tuple, or a callable. If it is a string, it specifies an + Entra ID OAuth2 token used for Open Authorization (OAuth 2.0) token based + authentication. If it is a 2-tuple, it specifies the token and private key + strings used for Oracle Cloud Infrastructure (OCI) Identity and Access + Management (IAM) token based authentication. If it is a callable, it returns either a string or a 2-tuple used for OAuth 2.0 or OCI IAM token based authentication and is useful when the pool needs to expand and create new connections but the current authentication token has expired @@ -195,8 +197,8 @@ description = type = str source = address description = - the name or IP address of the machine hosting the database or the database - listener + the hostname or IP address of the machine hosting the database or the + database listener [port] type = int @@ -217,7 +219,7 @@ description = type = str source = address description = - the name or IP address of a proxy host to use for tunneling secure + the hostname or IP address of a proxy host to use for tunneling secure connections [https_proxy_port] @@ -251,59 +253,59 @@ type = str source = description description = the type of server connection that should be established. 
If specified, it - should be one of "dedicated", "shared" or "pooled" + should be one of strings "dedicated", "shared" or "pooled" [cclass] type = str source = description description = - connection class to use for Database Resident Connection Pooling (DRCP) + the connection class to use for Database Resident Connection Pooling (DRCP) [purity] type = oracledb.Purity default = oracledb.PURITY_DEFAULT source = description description = - purity to use for Database Resident Connection Pooling (DRCP) + the connection purity to use for Database Resident Connection Pooling + (DRCP) [expire_time] type = int default = 0 source = description description = - an integer indicating the number of minutes between the sending of - keepalive probes. If this parameter is set to a value greater than zero it - enables keepalive + the number of minutes between the sending of keepalive probes. If this + parameter is set to a value greater than zero it enables keepalive [retry_count] type = int default = 0 source = description description = - the number of times that a connection attempt should be retried before the - attempt is terminated + the number of times that initial connection establishment should be retried + before the connection attempt is terminated [retry_delay] type = int default = 1 source = description description = - the number of seconds to wait before making a new connection attempt + the number of seconds to wait before retrying to establish a connection [tcp_connect_timeout] type = float default = 20.0 source = description description = - a float indicating the maximum number of seconds to wait for establishing a - connection to the database host + a float indicating the maximum number of seconds to wait when establishing + a connection to the database host [ssl_server_dn_match] type = bool default = True source = description description = - boolean indicating whether the server certificate distinguished name (DN) + a boolean indicating whether the server certificate distinguished name (DN) should be matched in addition to the regular certificate verification that is performed. Note that if the ssl_server_cert_dn parameter is not privided, host name matching is performed instead @@ -321,17 +323,19 @@ description = type = str source = description description = - the directory where the wallet can be found. In thin mode this must be the - directory containing the PEM-encoded wallet file ewallet.pem. In thick mode - this must be the directory containing the file cwallet.sso + + the directory where the wallet can be found. In python-oracledb Thin mode + this must be the directory containing the PEM-encoded wallet file + ewallet.pem. In python-oracledb Thick mode this must be the directory + containing the file cwallet.sso [events] type = bool default = False description = - boolean specifying whether events mode should be enabled. This value is - only used in thick mode and is needed for continuous query notification and - high availability event notifications + a boolean specifying whether events mode should be enabled. This value is + only used in python-oracledb Thick mode and is needed for continuous query + notification and high availability event notifications [externalauth] type = bool @@ -343,21 +347,26 @@ description = type = oracledb.AuthMode default = oracledb.AUTH_MODE_DEFAULT description = - authorization mode to use. For example oracledb.AUTH_MODE_SYSDBA + the authorization mode to use. 
One of the constants + :data:`oracledb.AUTH_MODE_DEFAULT`, :data:`oracledb.AUTH_MODE_PRELIM`, + :data:`oracledb.AUTH_MODE_SYSASM`, :data:`oracledb.AUTH_MODE_SYSBKP`, + :data:`oracledb.AUTH_MODE_SYSDBA`, :data:`oracledb.AUTH_MODE_SYSDGD`, + :data:`oracledb.AUTH_MODE_SYSKMT`, :data:`oracledb.AUTH_MODE_SYSOPER`, or + :data:`oracledb.AUTH_MODE_SYSRAC` [disable_oob] type = bool default = False description = - boolean indicating whether out-of-band breaks should be disabled. This - value is only used in thin mode. It has no effect on Windows which does - not support this functionality + a boolean indicating whether out-of-band breaks should be disabled. This + value is only used in python-oracledb Thin mode. It has no effect on + Windows which does not support this functionality [stmtcachesize] type = int default = oracledb.defaults.stmtcachesize description = - identifies the initial size of the statement cache + the size of the statement cache [edition] type = str @@ -369,22 +378,26 @@ description = type = str description = identifies the type of connection that should be returned from a pool. - This value is only used in thick mode + This value is only used in python-oracledb Thick mode [matchanytag] type = bool default: False description = - boolean specifying whether any tag can be used when acquiring a connection - from the pool. This value is only used in thick mode + a boolean specifying whether any tag can be used when acquiring a + connection from the pool. This value is only used in python-oracledb Thick + mode [config_dir] type = str default: oracledb.defaults.config_dir description = - directory in which the optional tnsnames.ora configuration file is located. - This value is only used in thin mode. For thick mode use the config_dir - parameter of init_oracle_client() + a directory in which the optional tnsnames.ora configuration file is + located. This value is only used in python-oracledb Thin mode. For + python-oracledb Thick mode, it is used if + :attr:`oracledb.defaults.thick_mode_dsn_passthrough + ` is *False*. Otherwise in Thick mode + use the ``config_dir`` parameter of :meth:`oracledb.init_oracle_client()` [appcontext] type = list @@ -397,20 +410,21 @@ description = type = list description = a list of strings, numbers, bytes or dates that identify the database shard - to connect to. This value is only used in thick mode + to connect to. This value is only used in python-oracledb Thick mode [supershardingkey] type = list description = a list of strings, numbers, bytes or dates that identify the database - shard to connect to. This value is only used in thick mode + shard to connect to. This value is only used in python-oracledb Thick mode [debug_jdwp] type = str description = a string with the format "host=;port=" that specifies the host - and port of the PL/SQL debugger. This value is only used in thin mode. - For thick mode set the ORA_DEBUG_JDWP environment variable + and port of the PL/SQL debugger. This value is only used in python-oracledb + Thin mode. For python-oracledb Thick mode set the ORA_DEBUG_JDWP + environment variable [connection_id_prefix] type = str @@ -452,7 +466,7 @@ type = bool default = False source = description description = - boolean indicating whether to use TCP fast open. This is an Oracle + a boolean indicating whether to use TCP fast open. This is an Oracle Autonomous Database Serverless (ADB-S) specific property for clients connecting from within OCI Cloud network. 
Please refer to the ADB-S documentation for more information @@ -468,49 +482,54 @@ description = type = str default: oracledb.defaults.program description = - the name of the executable program or application connected to the Oracle Database + a string recorded by Oracle Database as the program from which the + connection originates [machine] type = str default: oracledb.defaults.machine description = - the machine name of the client connecting to the Oracle Database + a string recorded by Oracle Database as the name of the machine from which + the connection originates [terminal] type = str default: oracledb.defaults.terminal description = - the terminal identifier from which the connection originates + a string recorded by Oracle Database as the terminal identifier from which + the connection originates [osuser] type = str default: oracledb.defaults.osuser description = - the operating system user that initiates the database connection + a string recorded by Oracle Database as the operating system user who + originated the connection [driver_name] type = str default: oracledb.defaults.driver_name description = - the driver name used by the client to connect to the Oracle Database + a string recorded by Oracle Database as the name of the driver which + originated the connection [use_sni] type = bool default = False source = description description = - boolean indicating whether to use the TLS SNI extension to bypass the + a boolean indicating whether to use the TLS SNI extension to bypass the second TLS neogiation that would otherwise be required [thick_mode_dsn_passthrough] type = bool default = oracledb.defaults.thick_mode_dsn_passthrough description = - boolean indicating whether to pass the connect string to the Oracle Client - libraries unchanged without parsing by the driver. Setting this to False - makes thick and thin mode applications behave similarly regarding - connection string parameter handling and locating any optional tnsnames.ora - configuration file + a boolean indicating whether to pass the connect string to the Oracle + Client libraries unchanged without parsing by the driver. Setting this to + False makes python-oracledb Thick and Thin mode applications behave + similarly regarding connection string parameter handling and locating any + optional tnsnames.ora configuration file [extra_auth_params] type = dict @@ -524,13 +543,13 @@ type = str source = description description = the name of the DRCP pool when using multi-pool DRCP with Oracle Database - 23.4 or higher + 23.4, or higher [handle] type = int default = 0 hidden = True description = - an integer representing a pointer to a valid service context handle. - This value is only used in thick mode. It should be used with extreme - caution + an integer representing a pointer to a valid service context handle. This + value is only used in python-oracledb Thick mode. It should be used with + extreme caution diff --git a/utils/templates/connection.py b/utils/templates/connection.py index b8485f8f..83f83044 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -1036,8 +1036,8 @@ def createlob( self, lob_type: DbType, data: Optional[Union[str, bytes]] = None ) -> LOB: """ - Creates and returns a new temporary :ref:`LOB object ` of the - specified type. The ``lob_type`` parameter should be one of + Creates and returns a new temporary LOB object of the specified type. 
+ The ``lob_type`` parameter should be one of :data:`oracledb.DB_TYPE_CLOB`, :data:`oracledb.DB_TYPE_BLOB`, or :data:`oracledb.DB_TYPE_NCLOB`. @@ -1159,11 +1159,10 @@ def fetch_df_batches( def getSodaDatabase(self) -> SodaDatabase: """ - Returns a :ref:`SodaDatabase ` object for Simple Oracle - Document Access (SODA). All SODA operations are performed either on the - returned SodaDatabase object or from objects created by the returned - SodaDatabase object. See `here - `__ for additional information on SODA. """ @@ -1173,9 +1172,8 @@ def getSodaDatabase(self) -> SodaDatabase: def gettype(self, name: str) -> DbObjectType: """ - Returns a :ref:`type object ` given its name. This can - then be used to create objects which can be bound to cursors created by - this connection. + Returns a type object given its name. This can then be used to create + objects which can be bound to cursors created by this connection. """ self._verify_connected() obj_type_impl = self._impl.get_type(self, name) @@ -1285,9 +1283,8 @@ def subscribe( clientInitiated: bool = False, ) -> Subscription: """ - Returns a new :ref:`subscription object ` that receives - notifications for events that take place in the database that match the - given parameters. + Returns a new subscription object that receives notifications for + events that take place in the database that match the given parameters. The ``namespace`` parameter specifies the namespace the subscription uses. It can be one of :data:`oracledb.SUBSCR_NAMESPACE_DBCHANGE` or @@ -1674,29 +1671,31 @@ def connect( """ Factory function which creates a connection to the database and returns it. - The dsn parameter (data source name) can be a string in the format + The ``dsn`` parameter (data source name) can be a string in the format user/password@connect_string or can simply be the connect string (in which case authentication credentials such as the username and password need to be specified separately). See the documentation on connection strings for more information. - The pool parameter is expected to be a pool object and the use of this - parameter is the equivalent of calling pool.acquire(). + The ``pool`` parameter is expected to be a pool object. This parameter was + deprecated in python-oracledb 3.0.0. Use :meth:`ConnectionPool.acquire()` + instead since the use of this parameter is the equivalent of calling this + method. - The conn_class parameter is expected to be Connection or a subclass of + The ``conn_class`` parameter is expected to be Connection or a subclass of Connection. - The params parameter is expected to be of type ConnectParams and contains - connection parameters that will be used when establishing the connection. - See the documentation on ConnectParams for more information. If this - parameter is not specified, the additional keyword parameters will be used - to create an instance of ConnectParams. If both the params parameter and - additional keyword parameters are specified, the values in the keyword - parameters have precedence. Note that if a dsn is also supplied, - then in the python-oracledb Thin mode, the values of the parameters - specified (if any) within the dsn will override the values passed as + The ``params`` parameter is expected to be of type ConnectParams and + contains connection parameters that will be used when establishing the + connection. See the documentation on ConnectParams for more information. 
+ If this parameter is not specified, the additional keyword parameters will + be used to create an instance of ConnectParams. If both the ``params`` + parameter and additional keyword parameters are specified, the values in + the keyword parameters have precedence. Note that if a ``dsn`` is also + supplied, then in python-oracledb Thin mode, the values of the parameters + specified (if any) within the ``dsn`` will override the values passed as additional keyword parameters, which themselves override the values set in - the params parameter object. + the ``params`` parameter object. The following parameters are all optional. A brief description of each parameter follows: @@ -2211,9 +2210,8 @@ async def fetchone( async def gettype(self, name: str) -> DbObjectType: """ - Returns a :ref:`type object ` given its name. This can - then be used to create objects which can be bound to cursors created by - this connection. + Returns a type object given its name. This can then be used to create + objects which can be bound to cursors created by this connection. """ self._verify_connected() obj_type_impl = await self._impl.get_type(self, name) @@ -2574,29 +2572,31 @@ def connect_async( """ Factory function which creates a connection to the database and returns it. - The dsn parameter (data source name) can be a string in the format + The ``dsn`` parameter (data source name) can be a string in the format user/password@connect_string or can simply be the connect string (in which case authentication credentials such as the username and password need to be specified separately). See the documentation on connection strings for more information. - The pool parameter is expected to be a pool object and the use of this - parameter is the equivalent of calling pool.acquire(). - - The conn_class parameter is expected to be AsyncConnection or a subclass of - AsyncConnection. - - The params parameter is expected to be of type ConnectParams and contains - connection parameters that will be used when establishing the connection. - See the documentation on ConnectParams for more information. If this - parameter is not specified, the additional keyword parameters will be used - to create an instance of ConnectParams. If both the params parameter and - additional keyword parameters are specified, the values in the keyword - parameters have precedence. Note that if a dsn is also supplied, - then in the python-oracledb Thin mode, the values of the parameters + The ``pool`` parameter is expected to be a pool object. This parameter was + deprecated in python-oracledb 3.0.0. Use :meth:`ConnectionPool.acquire()` + instead since the use of this parameter is the equivalent of calling this + method. + + The ``conn_class`` parameter is expected to be AsyncConnection or a + subclass of AsyncConnection. + + The ``params`` parameter is expected to be of type ConnectParams and + contains connection parameters that will be used when establishing the + connection. See the documentation on ConnectParams for more information. If + this parameter is not specified, the additional keyword parameters will be + used to create an instance of ConnectParams. If both the ``params`` + parameter and additional keyword parameters are specified, the values in + the keyword parameters have precedence. 
Note that if a ``dsn`` is also + supplied, then in python-oracledb Thin mode, the values of the parameters specified (if any) within the dsn will override the values passed as additional keyword parameters, which themselves override the values set in - the params parameter object. + the ``params`` parameter object. The following parameters are all optional. A brief description of each parameter follows: diff --git a/utils/templates/pool.py b/utils/templates/pool.py index 7bb2b7ae..6e89751f 100644 --- a/utils/templates/pool.py +++ b/utils/templates/pool.py @@ -639,31 +639,33 @@ def create_pool( """ Creates a connection pool with the supplied parameters and returns it. - The dsn parameter (data source name) can be a string in the format + The ``dsn`` parameter (data source name) can be a string in the format user/password@connect_string or can simply be the connect string (in which case authentication credentials such as the username and password need to be specified separately). See the documentation on connection strings for more information. - The pool_class parameter is expected to be ConnectionPool or a subclass of - ConnectionPool. + The ``pool_class`` parameter is expected to be ConnectionPool or a subclass + of ConnectionPool. - The pool_alias parameter is expected to be a string representing the name - used to store and reference the pool in the python-oracledb connection + The ``pool_alias`` parameter is expected to be a string representing the + name used to store and reference the pool in the python-oracledb connection pool cache. If this parameter is not specified, then the pool will not be added to the cache. The value of this parameter can be used with the - oracledb.get_pool() and oracledb.connect() methods to access the pool. + :meth:`oracledb.get_pool()` and :meth:`oracledb.connect()` methods to + access the pool. - The params parameter is expected to be of type PoolParams and contains + The ``params`` parameter is expected to be of type PoolParams and contains parameters that are used to create the pool. See the documentation on PoolParams for more information. If this parameter is not specified, the additional keyword parameters will be used to create an instance of - PoolParams. If both the params parameter and additional keyword parameters - are specified, the values in the keyword parameters have precedence. - Note that if a dsn is also supplied, then in the python-oracledb Thin mode, - the values of the parameters specified (if any) within the dsn will - override the values passed as additional keyword parameters, which - themselves override the values set in the params parameter object. + PoolParams. If both the ``params`` parameter and additional keyword + parameters are specified, the values in the keyword parameters have + precedence. Note that if a ``dsn`` is also supplied, then in + python-oracledb Thin mode, the values of the parameters specified (if any) + within the dsn will override the values passed as additional keyword + parameters, which themselves override the values set in the ``params`` + parameter object. The following parameters are all optional. A brief description of each parameter follows: @@ -834,32 +836,33 @@ def create_pool_async( """ Creates a connection pool with the supplied parameters and returns it. 
-    The dsn parameter (data source name) can be a string in the format
+    The ``dsn`` parameter (data source name) can be a string in the format
     user/password@connect_string or can simply be the connect string (in
     which case authentication credentials such as the username and password
     need to be specified separately). See the documentation on connection
     strings for more information.
 
-    The pool_class parameter is expected to be AsyncConnectionPool or a
+    The ``pool_class`` parameter is expected to be AsyncConnectionPool or a
     subclass of AsyncConnectionPool.
 
-    The pool_alias parameter is expected to be a string representing the name
-    used to store and reference the pool in the python-oracledb connection
+    The ``pool_alias`` parameter is expected to be a string representing the
+    name used to store and reference the pool in the python-oracledb connection
     pool cache. If this parameter is not specified, then the pool will not be
     added to the cache. The value of this parameter can be used with the
-    oracledb.get_pool() and oracledb.connect_async() methods to access the
-    pool.
+    :meth:`oracledb.get_pool()` and :meth:`oracledb.connect_async()` methods to
+    access the pool.
 
-    The params parameter is expected to be of type PoolParams and contains
+    The ``params`` parameter is expected to be of type PoolParams and contains
     parameters that are used to create the pool. See the documentation on
     PoolParams for more information. If this parameter is not specified, the
     additional keyword parameters will be used to create an instance of
-    PoolParams. If both the params parameter and additional keyword parameters
-    are specified, the values in the keyword parameters have precedence.
-    Note that if a dsn is also supplied, then in the python-oracledb Thin mode,
-    the values of the parameters specified (if any) within the dsn will
-    override the values passed as additional keyword parameters, which
-    themselves override the values set in the params parameter object.
+    PoolParams. If both the ``params`` parameter and additional keyword
+    parameters are specified, the values in the keyword parameters have
+    precedence. Note that if a ``dsn`` is also supplied, then in
+    python-oracledb Thin mode, the values of the parameters specified (if any)
+    within the ``dsn`` will override the values passed as additional keyword
+    parameters, which themselves override the values set in the ``params``
+    parameter object.
 
     The following parameters are all optional. A brief description of each
     parameter follows:

From 23ac3207fce206c8bca8c6d45739941c4ab24f6d Mon Sep 17 00:00:00 2001
From: Anthony Tuininga
Date: Fri, 15 Aug 2025 13:45:27 -0600
Subject: [PATCH 184/239] Constants and other module attributes now use
 autodoc as well.

---
 doc/src/api_manual/module.rst | 1121 +++++----------------------
 src/oracledb/__init__.py      | 1303 +++++++++++++++++++++++++++------
 src/oracledb/constants.py     |    5 -
 src/oracledb/enums.py         |   33 +-
 src/oracledb/exceptions.py    |   76 +-
 src/oracledb/future.py        |    4 +-
 6 files changed, 1327 insertions(+), 1215 deletions(-)

diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst
index a99369e2..9bfc9278 100644
--- a/doc/src/api_manual/module.rst
+++ b/doc/src/api_manual/module.rst
@@ -272,39 +272,20 @@ Oracledb Methods
 Oracledb Attributes
 ===================
 
-.. data:: apilevel
+.. autodata:: apilevel
 
-   A string constant stating the Python DB API level supported by
-   python-oracledb. Currently "2.0".
+.. autodata:: defaults
+   :no-value:
 
-.. data:: defaults
+   See :ref:`settingdefaults`.
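
The module attributes converted to ``autodata`` above keep their documented
values; a short sketch of how they and the ``defaults`` object are typically
used (the printed values come from the docstrings added later in this patch;
``fetch_lobs`` and ``arraysize`` are existing ``Defaults`` attributes)::

    import oracledb

    # DB API compliance attributes.
    print(oracledb.apilevel)      # "2.0"
    print(oracledb.paramstyle)    # "named"
    print(oracledb.threadsafety)  # 2

    # Adjust library-wide behavior before opening connections.
    oracledb.defaults.fetch_lobs = False  # fetch CLOB/BLOB as str/bytes
    oracledb.defaults.arraysize = 500
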
- The :ref:`Defaults ` object for setting default behaviors of - python-oracledb. - - See :ref:`settingdefaults`. - -.. data:: paramstyle - - A string constant stating the type of parameter marker formatting expected - by the interface. Currently 'named' as in 'where name = :name'. - - -.. data:: threadsafety - - An integer constant stating the level of thread safety that python-oracledb - supports. Currently 2, which means that threads may share the module and - connections, but not cursors. Sharing means that a thread may use a - resource without wrapping it using a mutex semaphore to implement resource - locking. - -.. data:: version + .. dbapiattributeextension:: - A string constant stating the version of the module. Currently '|release|'. +.. autodata:: paramstyle -.. data:: __version__ +.. autodata:: threadsafety - A string constant stating the version of the module. Currently '|release|'. +.. autodata:: __version__ .. dbapiattributeextension:: @@ -335,27 +316,11 @@ of the :ref:`message properties object ` passed as the .. dbapiconstantextension:: -.. data:: MSG_BUFFERED - - This constant is used to specify that enqueue or dequeue operations should - enqueue or dequeue buffered messages, respectively. For multi-consumer - queues, a `subscriber `__ with buffered delivery - mode needs to be created prior to enqueuing buffered messages. - - This mode is not supported for bulk array operations in python-oracledb - Thick mode. +.. autodata:: MSG_BUFFERED -.. data:: MSG_PERSISTENT +.. autodata:: MSG_PERSISTENT - This constant is used to specify that enqueue/dequeue operations should - enqueue or dequeue persistent messages. This is the default value. - - -.. data:: MSG_PERSISTENT_OR_BUFFERED - - This constant is used to specify that dequeue operations should dequeue - either persistent or buffered messages. +.. autodata:: MSG_PERSISTENT_OR_BUFFERED Advanced Queuing: Dequeue Modes @@ -369,30 +334,13 @@ parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, .. dbapiconstantextension:: -.. data:: DEQ_BROWSE - - This constant is used to specify that dequeue should read the message - without acquiring any lock on the message (equivalent to a select - statement). - - -.. data:: DEQ_LOCKED - - This constant is used to specify that dequeue should read and obtain a - write lock on the message for the duration of the transaction (equivalent - to a select for update statement). - - -.. data:: DEQ_REMOVE +.. autodata:: DEQ_BROWSE - This constant is used to specify that dequeue should read the message and - update or delete it. This is the default value. +.. autodata:: DEQ_LOCKED +.. autodata:: DEQ_REMOVE -.. data:: DEQ_REMOVE_NODATA - - This constant is used to specify that dequeue should confirm receipt of the - message but not deliver the actual message content. +.. autodata:: DEQ_REMOVE_NODATA Advanced Queuing: Dequeue Navigation Modes @@ -406,28 +354,11 @@ parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, .. dbapiconstantextension:: -.. data:: DEQ_FIRST_MSG - - This constant is used to specify that dequeue should retrieve the first - available message that matches the search criteria. This resets the - position to the beginning of the queue. - - -.. data:: DEQ_NEXT_MSG - - This constant is used to specify that dequeue should retrieve the next - available message that matches the search criteria. If the previous message - belongs to a message group, AQ retrieves the next available message that - matches the search criteria and belongs to the message group. 
This is the - default. +.. autodata:: DEQ_FIRST_MSG +.. autodata:: DEQ_NEXT_MSG -.. data:: DEQ_NEXT_TRANSACTION - - This constant is used to specify that dequeue should skip the remainder of - the transaction group and retrieve the first message of the next - transaction group. This option can only be used if message grouping is - enabled for the current queue. +.. autodata:: DEQ_NEXT_TRANSACTION Advanced Queuing: Dequeue Visibility Modes @@ -441,16 +372,9 @@ parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, .. dbapiconstantextension:: -.. data:: DEQ_IMMEDIATE - - This constant is used to specify that dequeue should perform its work as - part of an independent transaction. - - -.. data:: DEQ_ON_COMMIT +.. autodata:: DEQ_IMMEDIATE - This constant is used to specify that dequeue should be part of the current - transaction. This is the default value. +.. autodata:: DEQ_ON_COMMIT Advanced Queuing: Dequeue Wait Modes @@ -464,17 +388,9 @@ parameter for the :meth:`Queue.deqone()`, :meth:`Queue.deqmany()`, .. dbapiconstantextension:: -.. data:: DEQ_NO_WAIT - - This constant is used to specify that dequeue not wait for messages to be - available for dequeuing. - - -.. data:: DEQ_WAIT_FOREVER - - This constant is used to specify that dequeue should wait forever for - messages to be available for dequeuing. This is the default value. +.. autodata:: DEQ_NO_WAIT +.. autodata:: DEQ_WAIT_FOREVER Advanced Queuing: Enqueue Visibility Modes ------------------------------------------ @@ -487,20 +403,13 @@ parameter for the :meth:`Queue.enqone()`, :meth:`Queue.enqmany()`, .. dbapiconstantextension:: -.. data:: ENQ_IMMEDIATE - - This constant is used to specify that enqueue should perform its work as - part of an independent transaction. +.. autodata:: ENQ_IMMEDIATE - The use of this constant with :ref:`bulk enqueuing ` is only - supported in python-oracledb :ref:`Thick mode `. + .. seealso:: + :ref:`Bulk Enqueuing `. -.. data:: ENQ_ON_COMMIT - - This constant is used to specify that enqueue should be part of the current - transaction. This is the default value. - +.. autodata:: ENQ_ON_COMMIT Advanced Queuing: Message States -------------------------------- @@ -516,28 +425,13 @@ The AQ Message state constants are possible values for the .. dbapiconstantextension:: -.. data:: MSG_EXPIRED - - This constant is used to specify that the message has been moved to the - exception queue. - +.. autodata:: MSG_EXPIRED -.. data:: MSG_PROCESSED +.. autodata:: MSG_PROCESSED - This constant is used to specify that the message has been processed and - has been retained. - - -.. data:: MSG_READY - - This constant is used to specify that the message is ready to be processed. - - -.. data:: MSG_WAITING - - This constant is used to specify that the message delay has not yet been - reached. +.. autodata:: MSG_READY +.. autodata:: MSG_WAITING Advanced Queuing: Other Constants --------------------------------- @@ -546,27 +440,9 @@ This section contains other constants that are used for Advanced Queueing. .. dbapiconstantextension:: -.. data:: MSG_NO_DELAY - - This constant is a possible value for the :attr:`~MessageProperties.delay` - attribute of the :ref:`message properties object ` passed - as the ``msgproperties`` parameter to the :meth:`Queue.deqone()` or - :meth:`Queue.deqmany()` and :meth:`Queue.enqone()` or - :meth:`Queue.enqmany()` methods. It specifies that no delay should be - imposed and the message should be immediately available for dequeuing. This - is also the default value. - - -.. 
data:: MSG_NO_EXPIRATION - - This constant is a possible value for the - :attr:`~MessageProperties.expiration` attribute of the - :ref:`message properties object ` passed as the - ``msgproperties`` parameter to the :meth:`Queue.deqone()` or - :meth:`Queue.deqmany()` and :meth:`Queue.enqone()` or - :meth:`Queue.enqmany()` methods. It specifies that the message never - expires. This is also the default value. +.. autodata:: MSG_NO_DELAY +.. autodata:: MSG_NO_EXPIRATION .. _connection-authorization-modes: @@ -587,124 +463,24 @@ cx_Oracle driver. The integer constants for the connection authorization modes were replaced with the enumeration ``AuthMode``. -.. data:: AUTH_MODE_DEFAULT - - This constant is used to specify that default authentication is to take - place. This is the default value if no mode is passed at all. - - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. - - Its enumerated value can also be identified by - ``oracledb.AuthMode.DEFAULT``. - - This constant deprecates the ``DEFAULT_AUTH`` constant that was used in the - obsolete cx_Oracle driver, and was the default ``mode`` value. - -.. data:: AUTH_MODE_PRELIM - - This constant is used to specify that preliminary authentication is to be - used. This is needed for performing database startup and shutdown. - - It can only be used in python-oracledb Thick mode for standalone - connections. - - Its enumerated value can also be identified by - ``oracledb.AuthMode.PRELIM``. - - This constant deprecates the ``PRELIM_AUTH`` constant that was used in the - obsolete cx_Oracle driver. - -.. data:: AUTH_MODE_SYSASM - - This constant is used to specify that SYSASM access is to be acquired. - - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. - - Its enumerated value can also be identified by - ``oracledb.AuthMode.SYSASM``. - - This constant deprecates the ``SYSASM`` constant that was used in the - obsolete cx_Oracle driver. - -.. data:: AUTH_MODE_SYSBKP - - This constant is used to specify that SYSBACKUP access is to be acquired. - - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. - - Its enumerated value can also be identified by - ``oracledb.AuthMode.SYSBKP``. - - This constant deprecates the ``SYSBKP`` constant that was used in the - obsolete cx_Oracle driver. - -.. data:: AUTH_MODE_SYSDBA - - This constant is used to specify that SYSDBA access is to be acquired. - - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. - - Its enumerated value can also be identified by - ``oracledb.AuthMode.SYSDBA``. - - This constant deprecates the ``SYSDBA`` constant that was used in the - obsolete cx_Oracle driver. - -.. data:: AUTH_MODE_SYSDGD - - This constant is used to specify that SYSDG access is to be acquired. - - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. +.. autodata:: AUTH_MODE_DEFAULT - Its enumerated value can also be identified by - ``oracledb.AuthMode.SYSDGD``. +.. autodata:: AUTH_MODE_PRELIM - This constant deprecates the ``SYSDGD`` constant that was used in the - obsolete cx_Oracle driver. +.. autodata:: AUTH_MODE_SYSASM -.. data:: AUTH_MODE_SYSKMT +.. 
autodata:: AUTH_MODE_SYSBKP - This constant is used to specify that SYSKM access is to be acquired. +.. autodata:: AUTH_MODE_SYSDBA - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. +.. autodata:: AUTH_MODE_SYSDGD - Its enumerated value can also be identified by - ``oracledb.AuthMode.SYSKMT``. +.. autodata:: AUTH_MODE_SYSKMT - This constant deprecates the ``SYSKMT`` constant that was used in the - obsolete cx_Oracle driver. +.. autodata:: AUTH_MODE_SYSOPER -.. data:: AUTH_MODE_SYSOPER +.. autodata:: AUTH_MODE_SYSRAC - This constant is used to specify that SYSOPER access is to be acquired. - - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. - - Its enumerated value can also be identified by - ``oracledb.AuthMode.SYSOPER``. - - This constant deprecates the ``SYSOPER`` constant that was used in the - obsolete cx_Oracle driver. - -.. data:: AUTH_MODE_SYSRAC - - This constant is used to specify that SYSRAC access is to be acquired. - - It can be used for standalone and pooled connections in python-oracledb - Thin mode, and for standalone connections in Thick mode. - - Its enumerated value can also be identified by - ``oracledb.AuthMode.SYSRAC``. - - This constant deprecates the ``SYSRAC`` constant that was used in the - obsolete cx_Oracle driver. .. _pipeline-operation-types: @@ -718,70 +494,22 @@ the type of operation added. They are possible values for the .. versionadded:: 2.4.0 -.. data:: oracledb.PIPELINE_OP_TYPE_CALL_FUNC - - This constant identifies the type of operation as the calling of a stored - function. - - This enumerated value can also be identified by - ``oracledb.PipelineOpType.CALL_FUNC``. - -.. data:: oracledb.PIPELINE_OP_TYPE_CALL_PROC - - This constant identifies the type of operation as the calling of a stored - procedure. - - This enumerated value can also be identified by - ``oracledb.PipelineOpType.CALL_PROC``. - -.. data:: oracledb.PIPELINE_OP_TYPE_COMMIT - - This constant identifies the type of operation as the performing of a - commit. - - This enumerated value can also be identified by - ``oracledb.PipelineOpType.COMMIT``. - -.. data:: oracledb.PIPELINE_OP_TYPE_EXECUTE - - This constant identifies the type of operation as the executing of a - statement. - - This enumerated value can also be identified by - ``oracledb.PipelineOpType.EXECUTE``. +.. autodata:: oracledb.PIPELINE_OP_TYPE_CALL_FUNC -.. data:: oracledb.PIPELINE_OP_TYPE_EXECUTE_MANY +.. autodata:: oracledb.PIPELINE_OP_TYPE_CALL_PROC - This constant identifies the type of operations as the executing of a - statement multiple times. +.. autodata:: oracledb.PIPELINE_OP_TYPE_COMMIT - This enumerated value can also be identified by - ``oracledb.PipelineOpType.EXECUTE_MANY``. +.. autodata:: oracledb.PIPELINE_OP_TYPE_EXECUTE -.. data:: oracledb.PIPELINE_OP_TYPE_FETCH_ALL +.. autodata:: oracledb.PIPELINE_OP_TYPE_EXECUTE_MANY - This constant identifies the type of operation as the executing of a - query and returning all of the rows from the result set. +.. autodata:: oracledb.PIPELINE_OP_TYPE_FETCH_ALL - This enumerated value can also be identified by - ``oracledb.PipelineOpType.FETCH_ALL``. +.. autodata:: oracledb.PIPELINE_OP_TYPE_FETCH_MANY -.. data:: oracledb.PIPELINE_OP_TYPE_FETCH_MANY +.. 
autodata:: oracledb.PIPELINE_OP_TYPE_FETCH_ONE - This constant identifies the type of operation as the executing of a - query and returning up to the specified number of rows from the result - set. - - This enumerated value can also be identified by - ``oracledb.PipelineOpType.FETCH_MANY``. - -.. data:: oracledb.PIPELINE_OP_TYPE_FETCH_ONE - - This constant identifies the type of operation as the executing of a query - and returning the first row of the result set. - - This enumerated value can also be identified by - ``oracledb.PipelineOpType.FETCH_ONE``. Database Shutdown Modes ----------------------- @@ -791,40 +519,15 @@ parameter of the :meth:`Connection.shutdown()` method. .. dbapiconstantextension:: -.. data:: DBSHUTDOWN_ABORT - - This constant is used to specify that the caller should not wait for - current processing to complete or for users to disconnect from the - database. This should only be used in unusual circumstances since database - recovery may be necessary upon next startup. - - -.. data:: DBSHUTDOWN_FINAL - - This constant is used to specify that the instance can be truly halted. - This should only be done after the database has been shutdown with one of - the other modes (except abort) and the database has been closed and - dismounted using the appropriate SQL commands. - +.. autodata:: DBSHUTDOWN_ABORT -.. data:: DBSHUTDOWN_IMMEDIATE +.. autodata:: DBSHUTDOWN_FINAL - This constant is used to specify that all uncommitted transactions should - be rolled back and any connected users should be disconnected. +.. autodata:: DBSHUTDOWN_IMMEDIATE +.. autodata:: DBSHUTDOWN_TRANSACTIONAL -.. data:: DBSHUTDOWN_TRANSACTIONAL - - This constant is used to specify that further connections to the database - should be prohibited and no new transactions should be allowed. It then - waits for all active transactions to complete. - - -.. data:: DBSHUTDOWN_TRANSACTIONAL_LOCAL - - This constant is used to specify that further connections to the database - should be prohibited and no new transactions should be allowed. It then - waits for only local active transactions to complete. +.. autodata:: DBSHUTDOWN_TRANSACTIONAL_LOCAL .. _eventtypes: @@ -837,53 +540,21 @@ attribute of the messages that are sent for subscriptions created by the .. dbapiconstantextension:: -.. data:: EVENT_AQ - - This constant is used to specify that one or more messages are available - for dequeuing on the queue specified when the subscription was created. - - -.. data:: EVENT_DEREG - - This constant is used to specify that the subscription has been - deregistered and no further notifications will be sent. - +.. autodata:: EVENT_AQ -.. data:: EVENT_NONE +.. autodata:: EVENT_DEREG - This constant is used to specify no information is available about the - event. +.. autodata:: EVENT_NONE +.. autodata:: EVENT_OBJCHANGE -.. data:: EVENT_OBJCHANGE +.. autodata:: EVENT_QUERYCHANGE - This constant is used to specify that a database change has taken place on - a table registered with the :meth:`Subscription.registerquery()` method. +.. autodata:: EVENT_SHUTDOWN +.. autodata:: EVENT_SHUTDOWN_ANY -.. data:: EVENT_QUERYCHANGE - - This constant is used to specify that the result set of a query registered - with the :meth:`Subscription.registerquery()` method has been changed. - - -.. data:: EVENT_SHUTDOWN - - This constant is used to specify that the instance is in the process of - being shut down. - - -.. 
data:: EVENT_SHUTDOWN_ANY - - This constant is used to specify that any instance (when running RAC) is in - the process of being shut down. - - -.. data:: EVENT_STARTUP - - This constant is used to specify that the instance is in the process of - being started up. - +.. autodata:: EVENT_STARTUP .. _cqn-operation-codes: @@ -898,48 +569,19 @@ the messages that are sent. .. dbapiconstantextension:: -.. data:: OPCODE_ALLOPS - - This constant is used to specify that messages should be sent for all - operations. - - -.. data:: OPCODE_ALLROWS - - This constant is used to specify that the table or query has been - completely invalidated. - - -.. data:: OPCODE_ALTER - - This constant is used to specify that messages should be sent when a - registered table has been altered in some fashion by DDL, or that the - message identifies a table that has been altered. - +.. autodata:: OPCODE_ALLOPS -.. data:: OPCODE_DELETE +.. autodata:: OPCODE_ALLROWS - This constant is used to specify that messages should be sent when data is - deleted, or that the message identifies a row that has been deleted. +.. autodata:: OPCODE_ALTER +.. autodata:: OPCODE_DELETE -.. data:: OPCODE_DROP +.. autodata:: OPCODE_DROP - This constant is used to specify that messages should be sent when a - registered table has been dropped, or that the message identifies a table - that has been dropped. +.. autodata:: OPCODE_INSERT - -.. data:: OPCODE_INSERT - - This constant is used to specify that messages should be sent when data is - inserted, or that the message identifies a row that has been inserted. - - -.. data:: OPCODE_UPDATE - - This constant is used to specify that messages should be sent when data is - updated, or that the message identifies a row that has been updated. +.. autodata:: OPCODE_UPDATE .. _connpoolmodes: @@ -961,65 +603,14 @@ cx_Oracle driver. and acquisition ``getmode`` parameters were replaced with the enumeration ``PoolGetMode``. -.. data:: POOL_GETMODE_FORCEGET - - This constant is used to specify that a new connection should be created - and returned by :meth:`ConnectionPool.acquire()` if there are no free - connections available in the pool and the pool is already at its maximum - size. - - When a connection acquired in this mode is eventually released back to the - pool, it will be dropped and not added to the pool if the pool is still at - its maximum size. - - This enumerated value can also be identified by - ``oracledb.PoolGetMode.FORCEGET``. - - This constant deprecates the ``SPOOL_ATTRVAL_FORCEGET`` constant that was - used in the obsolete cx_Oracle driver. - - -.. data:: POOL_GETMODE_NOWAIT - - This constant is used to specify that an exception should be raised by - :meth:`ConnectionPool.acquire()` when all currently created connections are - already in use and so :meth:`~ConnectionPool.acquire()` cannot immediately - return a connection. Note the exception may occur even if the pool is - smaller than its maximum size. - - This enumerated value can also be identified by - ``oracledb.PoolGetMode.NOWAIT``. - - This constant deprecates the ``SPOOL_ATTRVAL_NOWAIT`` constant that was - used in the obsolete cx_Oracle driver, and was the default ``getmode`` - value. - - -.. data:: POOL_GETMODE_WAIT - - This constant is used to specify that :meth:`ConnectionPool.acquire()` - should wait until a connection is available if there are currently no free - connections available in the pool. This is the default value. - - This enumerated value can also be identified by - ``oracledb.PoolGetMode.WAIT``. +.. 
autodata:: POOL_GETMODE_FORCEGET - This constant deprecates the ``SPOOL_ATTRVAL_WAIT`` constant that was used - in the obsolete cx_Oracle driver. +.. autodata:: POOL_GETMODE_NOWAIT +.. autodata:: POOL_GETMODE_TIMEDWAIT -.. data:: POOL_GETMODE_TIMEDWAIT +.. autodata:: POOL_GETMODE_WAIT - This constant is used to specify that :meth:`ConnectionPool.acquire()` - should wait for a period of time (defined by the ``wait_timeout`` - parameter) for a connection to become available before returning with an - error. - - This enumerated value can also be identified by - ``oracledb.PoolGetMode.TIMEDWAIT``. - - This constant deprecates the ``SPOOL_ATTRVAL_TIMEDWAIT`` constant that was - used in the obsolete cx_Oracle driver. .. _drcppurityconsts: @@ -1040,38 +631,12 @@ Pool purity constants that were used in the obsolete cx_Oracle driver. The integer constants for the connection pool get modes were replaced with the enumeration ``Purity``. -.. data:: PURITY_DEFAULT - - This constant is used to specify that the purity of the session is the - default value identified by Oracle (see Oracle's documentation for more - information). This is the default value. - - This enumerated value can also be identified by - ``oracledb.Purity.DEFAULT``. - - This constant deprecates the ``ATTR_PURITY_DEFAULT`` constant that was used - in the obsolete cx_Oracle driver, and was the default ``purity`` value. - -.. data:: PURITY_NEW - - This constant is used to specify that the session acquired from the pool - should be new and not have any prior session state. - - This enumerated value can also be identified by ``oracledb.Purity.NEW``. - - This constant deprecates the ``ATTR_PURITY_NEW`` constant that was used in - the obsolete cx_Oracle driver. - +.. autodata:: PURITY_DEFAULT -.. data:: PURITY_SELF +.. autodata:: PURITY_NEW - This constant is used to specify that the session acquired from the pool - need not be new and may have prior session state. +.. autodata:: PURITY_SELF - This enumerated value can also be identified by ``oracledb.Purity.SELF``. - - This constant deprecates the ``ATTR_PURITY_SELF`` constant that was used in - the obsolete cx_Oracle driver. Subscription Grouping Classes ----------------------------- @@ -1081,11 +646,9 @@ The Subscription Grouping Class constants are possible values for the .. dbapiconstantextension:: -.. data:: SUBSCR_GROUPING_CLASS_TIME - - This constant is used to specify that events are to be grouped by the - period of time in which they are received. +.. autodata:: SUBSCR_GROUPING_CLASS_NONE +.. autodata:: SUBSCR_GROUPING_CLASS_TIME Subscription Grouping Types --------------------------- @@ -1095,18 +658,9 @@ The Subscription Grouping Type constants are possible values for the .. dbapiconstantextension:: -.. data:: SUBSCR_GROUPING_TYPE_SUMMARY - - This constant is used to specify that when events are grouped a summary of - the events should be sent instead of the individual events. This is the - default value. - -.. data:: SUBSCR_GROUPING_TYPE_LAST - - This constant is used to specify that when events are grouped the last - event that makes up the group should be sent instead of the individual - events. +.. autodata:: SUBSCR_GROUPING_TYPE_SUMMARY +.. autodata:: SUBSCR_GROUPING_TYPE_LAST .. _subscr-namespaces: @@ -1118,16 +672,9 @@ parameter of the :meth:`Connection.subscribe()` method. .. dbapiconstantextension:: -.. data:: SUBSCR_NAMESPACE_AQ - - This constant is used to specify that notifications should be sent when a - queue has messages available to dequeue. - -.. 
data:: SUBSCR_NAMESPACE_DBCHANGE - - This constant is used to specify that database change notification or query - change notification messages are to be sent. This is the default value. +.. autodata:: SUBSCR_NAMESPACE_AQ +.. autodata:: SUBSCR_NAMESPACE_DBCHANGE .. _subscr-protocols: @@ -1139,44 +686,13 @@ parameter of the :meth:`Connection.subscribe()` method. .. dbapiconstantextension:: -.. data:: SUBSCR_PROTO_CALLBACK - - This constant is used to specify that notifications will be sent to the - callback routine identified when the subscription was created. It is the - default value and the only value currently supported. +.. autodata:: SUBSCR_PROTO_CALLBACK +.. autodata:: SUBSCR_PROTO_HTTP -.. data:: SUBSCR_PROTO_HTTP - - This constant is used to specify that notifications will be sent to an - HTTP URL when a message is generated. This value is currently not - supported. - - -.. data:: SUBSCR_PROTO_MAIL - - This constant is used to specify that notifications will be sent to an - e-mail address when a message is generated. This value is currently not - supported. - - -.. data:: SUBSCR_PROTO_OCI - - This constant is used to specify that notifications will be sent to the - callback routine identified when the subscription was created. It is the - default value and the only value currently supported. - - .. deprecated:: python-oracledb 1.0 - - Use :data:`~oracledb.SUBSCR_PROTO_CALLBACK` instead. - - -.. data:: SUBSCR_PROTO_SERVER - - This constant is used to specify that notifications will be sent to a - PL/SQL procedure when a message is generated. This value is currently not - supported. +.. autodata:: SUBSCR_PROTO_MAIL +.. autodata:: SUBSCR_PROTO_SERVER .. _subscr-qos: @@ -1189,36 +705,17 @@ these values can be OR'ed together. .. dbapiconstantextension:: -.. data:: SUBSCR_QOS_BEST_EFFORT - - This constant is used to specify that best effort filtering for query - result set changes is acceptable. False positive notifications may be - received. This behaviour may be suitable for caching applications. - - -.. data:: SUBSCR_QOS_DEREG_NFY - - This constant is used to specify that the subscription should be - automatically unregistered after the first notification is received. - +.. autodata:: SUBSCR_QOS_BEST_EFFORT -.. data:: SUBSCR_QOS_QUERY +.. autodata:: SUBSCR_QOS_DEFAULT - This constant is used to specify that notifications should be sent if the - result set of the registered query changes. By default, no false positive - notifications will be generated. +.. autodata:: SUBSCR_QOS_DEREG_NFY +.. autodata:: SUBSCR_QOS_QUERY -.. data:: SUBSCR_QOS_RELIABLE +.. autodata:: SUBSCR_QOS_RELIABLE - This constant is used to specify that notifications should not be lost in - the event of database failure. - - -.. data:: SUBSCR_QOS_ROWIDS - - This constant is used to specify that the rowids of the inserted, updated - or deleted rows should be included in the message objects that are sent. +.. autodata:: SUBSCR_QOS_ROWIDS .. _types: @@ -1226,48 +723,24 @@ these values can be OR'ed together. DB API Types ------------ -.. data:: BINARY - - This type object is used to describe columns in a database that contain - binary data. The database types :data:`DB_TYPE_RAW` and - :data:`DB_TYPE_LONG_RAW` will compare equal to this value. If a variable is - created with this type, the database type :data:`DB_TYPE_RAW` will be used. - - -.. data:: DATETIME - - This type object is used to describe columns in a database that are dates. 
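
The type entries converted in this hunk all rely on the "compares equal"
relationship between database types and DB API types; a brief sketch of what
that relationship means in code (only equivalences stated in these
descriptions are used)::

    import oracledb

    # Each database type compares equal to its DB API grouping type.
    assert oracledb.DB_TYPE_VARCHAR == oracledb.STRING
    assert oracledb.DB_TYPE_TIMESTAMP == oracledb.DATETIME
    assert oracledb.DB_TYPE_LONG_RAW == oracledb.BINARY
    assert oracledb.DB_TYPE_BINARY_DOUBLE == oracledb.NUMBER

    # The same type objects appear in query metadata, so either side of the
    # equivalence can be used when inspecting cursor.description entries
    # (connection and cursor setup omitted here).
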
- The database types :data:`DB_TYPE_DATE`, :data:`DB_TYPE_TIMESTAMP`, - :data:`DB_TYPE_TIMESTAMP_LTZ` and :data:`DB_TYPE_TIMESTAMP_TZ` will all - compare equal to this value. If a variable is created with this - type, the database type :data:`DB_TYPE_DATE` will be used. - - -.. data:: NUMBER - - This type object is used to describe columns in a database that are - numbers. The database types :data:`DB_TYPE_BINARY_DOUBLE`, - :data:`DB_TYPE_BINARY_FLOAT`, :data:`DB_TYPE_BINARY_INTEGER` and - :data:`DB_TYPE_NUMBER` will all compare equal to this value. If a variable - is created with this type, the database type :data:`DB_TYPE_NUMBER` will be - used. +.. autoclass:: ApiType + This type object is the Python type of the database API type constants. -.. data:: ROWID +.. autodata:: BINARY + :no-value: - This type object is used to describe the pseudo column "rowid". The - database types :data:`DB_TYPE_ROWID` and :data:`DB_TYPE_UROWID` will - compare equal to this value. If a variable is created with this type, the - database type :data:`DB_TYPE_VARCHAR` will be used. +.. autodata:: DATETIME + :no-value: +.. autodata:: NUMBER + :no-value: -.. data:: STRING +.. autodata:: ROWID + :no-value: - This type object is used to describe columns in a database that are - strings. The database types :data:`DB_TYPE_CHAR`, :data:`DB_TYPE_LONG`, - :data:`DB_TYPE_NCHAR`, :data:`DB_TYPE_NVARCHAR` and :data:`DB_TYPE_VARCHAR` - will all compare equal to this value. If a variable is created with this - type, the database type :data:`DB_TYPE_VARCHAR` will be used. +.. autodata:: STRING + :no-value: .. _dbtypes: @@ -1275,195 +748,98 @@ DB API Types Database Types -------------- -All of these types are extensions to the DB API definition. They are found in -query and object metadata. They can also be used to specify the database type -when binding data. - -Also see the table :ref:`supporteddbtypes`. - -.. data:: DB_TYPE_BFILE - - Describes columns, attributes or array elements in a database that are of - type BFILE. It will compare equal to the DB API type :data:`BINARY`. - - -.. data:: DB_TYPE_BINARY_DOUBLE - - Describes columns, attributes or array elements in a database that are of - type BINARY_DOUBLE. It will compare equal to the DB API type - :data:`NUMBER`. - - -.. data:: DB_TYPE_BINARY_FLOAT - - Describes columns, attributes or array elements in a database that are - of type BINARY_FLOAT. It will compare equal to the DB API type - :data:`NUMBER`. - - -.. data:: DB_TYPE_BINARY_INTEGER - - Describes attributes or array elements in a database that are of type - BINARY_INTEGER. It will compare equal to the DB API type :data:`NUMBER`. - - -.. data:: DB_TYPE_BLOB - - Describes columns, attributes or array elements in a database that are of - type BLOB. It will compare equal to the DB API type :data:`BINARY`. - - -.. data:: DB_TYPE_BOOLEAN - - Describes attributes or array elements in a database that are of type - BOOLEAN. It is only available in Oracle 12.1 and higher and only within - PL/SQL. - - -.. data:: DB_TYPE_CHAR - - Describes columns, attributes or array elements in a database that are of - type CHAR. It will compare equal to the DB API type :data:`STRING`. - - Note that these are fixed length string values and behave differently from - VARCHAR2. - - -.. data:: DB_TYPE_CLOB - - Describes columns, attributes or array elements in a database that are of - type CLOB. It will compare equal to the DB API type :data:`STRING`. - - -.. data:: DB_TYPE_CURSOR - - Describes columns in a database that are of type CURSOR. 
In PL/SQL, these - are known as REF CURSOR. - - -.. data:: DB_TYPE_DATE - - Describes columns, attributes or array elements in a database that are of - type DATE. It will compare equal to the DB API type :data:`DATETIME`. - - -.. data:: DB_TYPE_INTERVAL_DS - - Describes columns, attributes or array elements in a database that are of - type INTERVAL DAY TO SECOND. - - -.. data:: DB_TYPE_INTERVAL_YM - - Describes columns, attributes or array elements in a database that are of - type INTERVAL YEAR TO MONTH. - - -.. data:: DB_TYPE_JSON - - Describes columns in a database that are of type JSON (with Oracle Database - 21 or later). - -.. data:: DB_TYPE_LONG - - Describes columns, attributes or array elements in a database that are of - type LONG. It will compare equal to the DB API type :data:`STRING`. - - -.. data:: DB_TYPE_LONG_RAW - - Describes columns, attributes or array elements in a database that are of - type LONG RAW. It will compare equal to the DB API type :data:`BINARY`. - - -.. data:: DB_TYPE_LONG_NVARCHAR - - This constant can be used in output type handlers when fetching NCLOB - columns as a string. (Note a type handler is not needed if - :ref:`oracledb.defaults.fetch_lobs `, or the equivalent execution - parameter, is set to *False*). For IN binds, this constant can be used to - create a bind variable in :meth:`Cursor.var()` or via - :meth:`Cursor.setinputsizes()`. The ``DB_TYPE_LONG_NVARCHAR`` value won't - be shown in query metadata since it is not a database type. - - It will compare equal to the DB API type :data:`STRING`. - -.. data:: DB_TYPE_NCHAR - - Describes columns, attributes or array elements in a database that are of - type NCHAR. It will compare equal to the DB API type :data:`STRING`. - - Note that these are fixed length string values and behave differently from - NVARCHAR2. - - -.. data:: DB_TYPE_NCLOB - - Describes columns, attributes or array elements in a database that are of - type NCLOB. It will compare equal to the DB API type :data:`STRING`. +.. autoclass:: DbType + This type object is the Python type of the database type constants. All of + these types are extensions to the DB API definition. They are found in + query and object metadata. They can also be used to specify the database + type when binding data. -.. data:: DB_TYPE_NUMBER + Also see the table :ref:`supporteddbtypes`. - Describes columns, attributes or array elements in a database that are of - type NUMBER. It will compare equal to the DB API type :data:`NUMBER`. +.. autodata:: DB_TYPE_BFILE + :no-value: +.. autodata:: DB_TYPE_BINARY_DOUBLE + :no-value: -.. data:: DB_TYPE_NVARCHAR +.. autodata:: DB_TYPE_BINARY_FLOAT + :no-value: - Describes columns, attributes or array elements in a database that are of - type NVARCHAR2. It will compare equal to the DB API type :data:`STRING`. +.. autodata:: DB_TYPE_BINARY_INTEGER + :no-value: +.. autodata:: DB_TYPE_BLOB + :no-value: -.. data:: DB_TYPE_OBJECT +.. autodata:: DB_TYPE_BOOLEAN + :no-value: - Describes columns, attributes or array elements in a database that are an - instance of a named SQL or PL/SQL type. +.. autodata:: DB_TYPE_CHAR + :no-value: +.. autodata:: DB_TYPE_CLOB + :no-value: -.. data:: DB_TYPE_RAW +.. autodata:: DB_TYPE_CURSOR + :no-value: - Describes columns, attributes or array elements in a database that are of - type RAW. It will compare equal to the DB API type :data:`BINARY`. +.. autodata:: DB_TYPE_DATE + :no-value: +.. autodata:: DB_TYPE_INTERVAL_DS + :no-value: -.. data:: DB_TYPE_ROWID +.. 
autodata:: DB_TYPE_INTERVAL_YM + :no-value: - Describes columns, attributes or array elements in a database that are of - type ROWID or UROWID. It will compare equal to the DB API type - :data:`ROWID`. +.. autodata:: DB_TYPE_JSON + :no-value: +.. autodata:: DB_TYPE_LONG + :no-value: -.. data:: DB_TYPE_TIMESTAMP +.. autodata:: DB_TYPE_LONG_NVARCHAR + :no-value: - Describes columns, attributes or array elements in a database that are of - type TIMESTAMP. It will compare equal to the DB API type :data:`DATETIME`. +.. autodata:: DB_TYPE_LONG_RAW + :no-value: +.. autodata:: DB_TYPE_NCHAR + :no-value: -.. data:: DB_TYPE_TIMESTAMP_LTZ +.. autodata:: DB_TYPE_NCLOB + :no-value: - Describes columns, attributes or array elements in a database that are of - type TIMESTAMP WITH LOCAL TIME ZONE. It will compare equal to the DB API - type :data:`DATETIME`. +.. autodata:: DB_TYPE_NUMBER + :no-value: +.. autodata:: DB_TYPE_NVARCHAR + :no-value: -.. data:: DB_TYPE_TIMESTAMP_TZ +.. autodata:: DB_TYPE_OBJECT + :no-value: - Describes columns, attributes or array elements in a database that are of - type TIMESTAMP WITH TIME ZONE. It will compare equal to the DB API type - :data:`DATETIME`. +.. autodata:: DB_TYPE_RAW + :no-value: +.. autodata:: DB_TYPE_ROWID + :no-value: -.. data:: DB_TYPE_UNKNOWN +.. autodata:: DB_TYPE_TIMESTAMP + :no-value: - Describes columns, attributes or array elements in a database that are - of an unknown type. +.. autodata:: DB_TYPE_TIMESTAMP_LTZ + :no-value: +.. autodata:: DB_TYPE_TIMESTAMP_TZ + :no-value: -.. data:: DB_TYPE_UROWID +.. autodata:: DB_TYPE_UNKNOWN + :no-value: - Describes columns, attributes or array elements in a database that are of - type UROWID. It will compare equal to the DB API type :data:`ROWID`. +.. autodata:: DB_TYPE_UROWID + :no-value: .. note:: @@ -1471,24 +847,17 @@ Also see the table :ref:`supporteddbtypes`. See :ref:`querymetadatadiff`. -.. data:: DB_TYPE_VARCHAR - - Describes columns, attributes or array elements in a database that are of - type VARCHAR2. It will compare equal to the DB API type :data:`STRING`. - - -.. data:: DB_TYPE_VECTOR +.. autodata:: DB_TYPE_VARCHAR + :no-value: - Describes columns, attributes or array elements in a database that are of - type VECTOR. +.. autodata:: DB_TYPE_VECTOR + :no-value: .. versionadded:: 2.2.0 -.. data:: DB_TYPE_XMLTYPE - - Describes columns, attributes or array elements in a database that are of - type SYS.XMLTYPE. +.. autodata:: DB_TYPE_XMLTYPE + :no-value: .. versionadded:: 2.0.0 @@ -1612,23 +981,6 @@ version of python-oracledb. .. deprecated:: cx_Oracle 8.0 -Other Types ------------ - -All of these types are extensions to the DB API definition. - -.. autoclass:: ApiType - - This type object is the Python type of the database API type constants - :data:`BINARY`, :data:`DATETIME`, :data:`NUMBER`, :data:`ROWID` and - :data:`STRING`. - - -.. autoclass:: DbType - - This type object is the Python type of the - :ref:`database type constants `. - .. _tpcconstants: Two-Phase Commit (TPC) Constants @@ -1638,29 +990,17 @@ The constants for the two-phase commit (TPC) functions :meth:`~Connection.tpc_begin()` and :meth:`~Connection.tpc_end()` are listed below. -.. data:: TPC_BEGIN_JOIN - - Joins an existing TPC transaction. - -.. data:: TPC_BEGIN_NEW - - Creates a new TPC transaction. +.. autodata:: TPC_BEGIN_JOIN -.. data:: TPC_BEGIN_PROMOTE +.. autodata:: TPC_BEGIN_NEW - Promotes a local transaction to a TPC transaction. +.. autodata:: TPC_BEGIN_PROMOTE -.. data:: TPC_BEGIN_RESUME +.. 
autodata:: TPC_BEGIN_RESUME - Resumes an existing TPC transaction. +.. autodata:: TPC_END_NORMAL -.. data:: TPC_END_NORMAL - - Ends the TPC transaction participation normally. - -.. data:: TPC_END_SUSPEND - - Suspends the TPC transaction. +.. autodata:: TPC_END_SUSPEND .. _vectorformatconstants: @@ -1677,39 +1017,16 @@ possible values for the :attr:`FetchInfo.vector_format` attribute. The integer constants for the vector format constants were replaced with the enumeration ``VectorFormat``. -.. data:: VECTOR_FORMAT_BINARY - - This constant is used to represent the storage format of VECTOR columns - using 8-bit unsigned integers. - - This enumerated value can also be identified by - ``oracledb.VectorFormat.BINARY``. +.. autodata:: VECTOR_FORMAT_BINARY .. versionadded:: 2.3.0 -.. data:: VECTOR_FORMAT_FLOAT32 - - This constant is used to represent the storage format of VECTOR columns - using 32-bit floating point numbers. - - This enumerated value can also be identified by - ``oracledb.VectorFormat.FLOAT32``. - -.. data:: VECTOR_FORMAT_FLOAT64 - - This constant is used to represent the storage format of VECTOR columns - using 64-bit floating point numbers. +.. autodata:: VECTOR_FORMAT_FLOAT32 - This enumerated value can also be identified by - ``oracledb.VectorFormat.FLOAT64``. +.. autodata:: VECTOR_FORMAT_FLOAT64 -.. data:: VECTOR_FORMAT_INT8 +.. autodata:: VECTOR_FORMAT_INT8 - This constant is used to represent the storage format of VECTOR columns - using 8-bit signed integers. - - This enumerated value can also be identified by - ``oracledb.VectorFormat.INT8``. .. _exceptions: @@ -1718,80 +1035,26 @@ Oracledb Exceptions See :ref:`exception` for usage information. -.. exception:: Warning - - Exception raised for important warnings and defined by the DB API but not - actually used by python-oracledb. - -.. exception:: Error - - Exception that is the base class of all other exceptions defined by - python-oracledb and is a subclass of the Python StandardError exception - (defined in the module exceptions). - -.. exception:: InterfaceError - - Exception raised for errors that are related to the database interface - rather than the database itself. It is a subclass of Error. - - Exception messages of this class will have the prefix DPY and an error - number in the range 1000 - 1999. - -.. exception:: DatabaseError - - Exception raised for errors that are related to the database. It is a - subclass of Error. - - Exception messages of this class will have the prefix DPY and an error - number in the range 4000 - 4999. - -.. exception:: DataError - - Exception raised for errors that are due to problems with the processed - data. It is a subclass of DatabaseError. - - Exception messages of this class are generated by the database and will - have a prefix such as ORA - -.. exception:: OperationalError - - Exception raised for errors that are related to the operation of the - database but are not necessarily under the control of the programmer. It is - a subclass of DatabaseError. - - Exception messages of this class will have the prefix DPY and an error - number in the range 6000 - 6999. - -.. exception:: IntegrityError - - Exception raised when the relational integrity of the database is affected. - It is a subclass of DatabaseError. +.. autoexception:: Warning - Exception messages of this class are generated by the database and will - have a prefix such as ORA +.. autoexception:: Error -.. exception:: InternalError +.. 
autoexception:: DataError - Exception raised when the database encounters an internal error. It is a - subclass of DatabaseError. +.. autoexception:: DatabaseError - Exception messages of this class will have the prefix DPY and an error - number in the range 5000 - 5999. +.. autoexception:: IntegrityError -.. exception:: ProgrammingError +.. autoexception:: InterfaceError - Exception raised for programming errors. It is a subclass of DatabaseError. +.. autoexception:: InternalError - Exception messages of this class will have the prefix DPY and an error - number in the range 2000 - 2999. +.. autoexception:: NotSupportedError -.. exception:: NotSupportedError +.. autoexception:: OperationalError - Exception raised when a method or database API was used which is not - supported by the database. It is a subclass of DatabaseError. +.. autoexception:: ProgrammingError - Exception messages of this class will have the prefix DPY and an error - number in the range 3000 - 3999. .. _exchandling: diff --git a/src/oracledb/__init__.py b/src/oracledb/__init__.py index 51560b75..856beeaf 100644 --- a/src/oracledb/__init__.py +++ b/src/oracledb/__init__.py @@ -43,179 +43,58 @@ from . import base_impl, thick_impl, thin_impl from .base_impl import ( - # type classes ApiType as ApiType, DbType as DbType, - # database types - DB_TYPE_BFILE as DB_TYPE_BFILE, - DB_TYPE_BINARY_DOUBLE as DB_TYPE_BINARY_DOUBLE, - DB_TYPE_BINARY_FLOAT as DB_TYPE_BINARY_FLOAT, - DB_TYPE_BINARY_INTEGER as DB_TYPE_BINARY_INTEGER, - DB_TYPE_BLOB as DB_TYPE_BLOB, - DB_TYPE_BOOLEAN as DB_TYPE_BOOLEAN, - DB_TYPE_CHAR as DB_TYPE_CHAR, - DB_TYPE_CLOB as DB_TYPE_CLOB, - DB_TYPE_CURSOR as DB_TYPE_CURSOR, - DB_TYPE_DATE as DB_TYPE_DATE, - DB_TYPE_INTERVAL_DS as DB_TYPE_INTERVAL_DS, - DB_TYPE_INTERVAL_YM as DB_TYPE_INTERVAL_YM, - DB_TYPE_JSON as DB_TYPE_JSON, - DB_TYPE_LONG as DB_TYPE_LONG, - DB_TYPE_LONG_NVARCHAR as DB_TYPE_LONG_NVARCHAR, - DB_TYPE_LONG_RAW as DB_TYPE_LONG_RAW, - DB_TYPE_NCHAR as DB_TYPE_NCHAR, - DB_TYPE_NCLOB as DB_TYPE_NCLOB, - DB_TYPE_NUMBER as DB_TYPE_NUMBER, - DB_TYPE_NVARCHAR as DB_TYPE_NVARCHAR, - DB_TYPE_OBJECT as DB_TYPE_OBJECT, - DB_TYPE_RAW as DB_TYPE_RAW, - DB_TYPE_ROWID as DB_TYPE_ROWID, - DB_TYPE_TIMESTAMP as DB_TYPE_TIMESTAMP, - DB_TYPE_TIMESTAMP_LTZ as DB_TYPE_TIMESTAMP_LTZ, - DB_TYPE_TIMESTAMP_TZ as DB_TYPE_TIMESTAMP_TZ, - DB_TYPE_UNKNOWN as DB_TYPE_UNKNOWN, - DB_TYPE_UROWID as DB_TYPE_UROWID, - DB_TYPE_VARCHAR as DB_TYPE_VARCHAR, - DB_TYPE_VECTOR as DB_TYPE_VECTOR, - DB_TYPE_XMLTYPE as DB_TYPE_XMLTYPE, - # API types - BINARY as BINARY, - DATETIME as DATETIME, - NUMBER as NUMBER, - ROWID as ROWID, - STRING as STRING, - # flags for tpc_begin() - TPC_TXN_FLAGS_JOIN as TPC_BEGIN_JOIN, # noqa: F401 - TPC_TXN_FLAGS_NEW as TPC_BEGIN_NEW, # noqa: F401 - TPC_TXN_FLAGS_PROMOTE as TPC_BEGIN_PROMOTE, # noqa: F401 - TPC_TXN_FLAGS_RESUME as TPC_BEGIN_RESUME, # noqa: F401 ) from .enums import ( - # authentication modes AuthMode as AuthMode, - AUTH_MODE_DEFAULT as AUTH_MODE_DEFAULT, - AUTH_MODE_PRELIM as AUTH_MODE_PRELIM, - AUTH_MODE_SYSASM as AUTH_MODE_SYSASM, - AUTH_MODE_SYSBKP as AUTH_MODE_SYSBKP, - AUTH_MODE_SYSDBA as AUTH_MODE_SYSDBA, - AUTH_MODE_SYSDGD as AUTH_MODE_SYSDGD, - AUTH_MODE_SYSKMT as AUTH_MODE_SYSKMT, - AUTH_MODE_SYSOPER as AUTH_MODE_SYSOPER, - AUTH_MODE_SYSRAC as AUTH_MODE_SYSRAC, - # pipeline operation types PipelineOpType as PipelineOpType, - PIPELINE_OP_TYPE_CALL_FUNC as PIPELINE_OP_TYPE_CALL_FUNC, - PIPELINE_OP_TYPE_CALL_PROC as PIPELINE_OP_TYPE_CALL_PROC, - PIPELINE_OP_TYPE_COMMIT as 
PIPELINE_OP_TYPE_COMMIT, - PIPELINE_OP_TYPE_EXECUTE as PIPELINE_OP_TYPE_EXECUTE, - PIPELINE_OP_TYPE_EXECUTE_MANY as PIPELINE_OP_TYPE_EXECUTE_MANY, - PIPELINE_OP_TYPE_FETCH_ALL as PIPELINE_OP_TYPE_FETCH_ALL, - PIPELINE_OP_TYPE_FETCH_MANY as PIPELINE_OP_TYPE_FETCH_MANY, - PIPELINE_OP_TYPE_FETCH_ONE as PIPELINE_OP_TYPE_FETCH_ONE, - # pool "get" modes PoolGetMode as PoolGetMode, - POOL_GETMODE_WAIT as POOL_GETMODE_WAIT, - POOL_GETMODE_NOWAIT as POOL_GETMODE_NOWAIT, - POOL_GETMODE_FORCEGET as POOL_GETMODE_FORCEGET, - POOL_GETMODE_TIMEDWAIT as POOL_GETMODE_TIMEDWAIT, - # purity values Purity as Purity, - PURITY_DEFAULT as PURITY_DEFAULT, - PURITY_NEW as PURITY_NEW, - PURITY_SELF as PURITY_SELF, - # vector formats VectorFormat as VectorFormat, - VECTOR_FORMAT_BINARY as VECTOR_FORMAT_BINARY, - VECTOR_FORMAT_FLOAT32 as VECTOR_FORMAT_FLOAT32, - VECTOR_FORMAT_FLOAT64 as VECTOR_FORMAT_FLOAT64, - VECTOR_FORMAT_INT8 as VECTOR_FORMAT_INT8, ) -from .version import __version__ as __version__ - -from .constants import ( - # mandated DB API constants - apilevel as apilevel, - threadsafety as threadsafety, - paramstyle as paramstyle, - # AQ delivery modes - MSG_BUFFERED as MSG_BUFFERED, - MSG_PERSISTENT as MSG_PERSISTENT, - MSG_PERSISTENT_OR_BUFFERED as MSG_PERSISTENT_OR_BUFFERED, - # AQ dequeue modes - DEQ_BROWSE as DEQ_BROWSE, - DEQ_LOCKED as DEQ_LOCKED, - DEQ_REMOVE as DEQ_REMOVE, - DEQ_REMOVE_NODATA as DEQ_REMOVE_NODATA, - # AQ dequeue navigation modes - DEQ_FIRST_MSG as DEQ_FIRST_MSG, - DEQ_NEXT_MSG as DEQ_NEXT_MSG, - DEQ_NEXT_TRANSACTION as DEQ_NEXT_TRANSACTION, - # AQ dequeue visibility modes - DEQ_IMMEDIATE as DEQ_IMMEDIATE, - DEQ_ON_COMMIT as DEQ_ON_COMMIT, - # AQ dequeue wait modes - DEQ_NO_WAIT as DEQ_NO_WAIT, - DEQ_WAIT_FOREVER as DEQ_WAIT_FOREVER, - # AQ enqueue visibility modes - ENQ_IMMEDIATE as ENQ_IMMEDIATE, - ENQ_ON_COMMIT as ENQ_ON_COMMIT, - # AQ message states - MSG_EXPIRED as MSG_EXPIRED, - MSG_PROCESSED as MSG_PROCESSED, - MSG_READY as MSG_READY, - MSG_WAITING as MSG_WAITING, - # AQ other constants - MSG_NO_DELAY as MSG_NO_DELAY, - MSG_NO_EXPIRATION as MSG_NO_EXPIRATION, - # shutdown modes - DBSHUTDOWN_ABORT as DBSHUTDOWN_ABORT, - DBSHUTDOWN_FINAL as DBSHUTDOWN_FINAL, - DBSHUTDOWN_IMMEDIATE as DBSHUTDOWN_IMMEDIATE, - DBSHUTDOWN_TRANSACTIONAL as DBSHUTDOWN_TRANSACTIONAL, - DBSHUTDOWN_TRANSACTIONAL_LOCAL as DBSHUTDOWN_TRANSACTIONAL_LOCAL, - # subscription grouping classes - SUBSCR_GROUPING_CLASS_NONE as SUBSCR_GROUPING_CLASS_NONE, - SUBSCR_GROUPING_CLASS_TIME as SUBSCR_GROUPING_CLASS_TIME, - # subscription grouping types - SUBSCR_GROUPING_TYPE_SUMMARY as SUBSCR_GROUPING_TYPE_SUMMARY, - SUBSCR_GROUPING_TYPE_LAST as SUBSCR_GROUPING_TYPE_LAST, - # subscription namespaces - SUBSCR_NAMESPACE_AQ as SUBSCR_NAMESPACE_AQ, - SUBSCR_NAMESPACE_DBCHANGE as SUBSCR_NAMESPACE_DBCHANGE, - # subscription protocols - SUBSCR_PROTO_HTTP as SUBSCR_PROTO_HTTP, - SUBSCR_PROTO_MAIL as SUBSCR_PROTO_MAIL, - SUBSCR_PROTO_CALLBACK as SUBSCR_PROTO_CALLBACK, - SUBSCR_PROTO_SERVER as SUBSCR_PROTO_SERVER, - # subscription quality of service - SUBSCR_QOS_BEST_EFFORT as SUBSCR_QOS_BEST_EFFORT, - SUBSCR_QOS_DEFAULT as SUBSCR_QOS_DEFAULT, - SUBSCR_QOS_DEREG_NFY as SUBSCR_QOS_DEREG_NFY, - SUBSCR_QOS_QUERY as SUBSCR_QOS_QUERY, - SUBSCR_QOS_RELIABLE as SUBSCR_QOS_RELIABLE, - SUBSCR_QOS_ROWIDS as SUBSCR_QOS_ROWIDS, - # event types - EVENT_AQ as EVENT_AQ, - EVENT_DEREG as EVENT_DEREG, - EVENT_NONE as EVENT_NONE, - EVENT_OBJCHANGE as EVENT_OBJCHANGE, - EVENT_QUERYCHANGE as EVENT_QUERYCHANGE, - EVENT_SHUTDOWN as EVENT_SHUTDOWN, - 
EVENT_SHUTDOWN_ANY as EVENT_SHUTDOWN_ANY, - EVENT_STARTUP as EVENT_STARTUP, - # operation codes - OPCODE_ALLOPS as OPCODE_ALLOPS, - OPCODE_ALLROWS as OPCODE_ALLROWS, - OPCODE_ALTER as OPCODE_ALTER, - OPCODE_DELETE as OPCODE_DELETE, - OPCODE_DROP as OPCODE_DROP, - OPCODE_INSERT as OPCODE_INSERT, - OPCODE_UPDATE as OPCODE_UPDATE, - # flags for tpc_end() - TPC_END_NORMAL as TPC_END_NORMAL, - TPC_END_SUSPEND as TPC_END_SUSPEND, +from . import constants, version + +from .arrow_array import ( + ArrowArray as ArrowArray, +) + +from .constructors import ( + Binary as Binary, + Date as Date, + DateFromTicks as DateFromTicks, + Time as Time, + TimeFromTicks as TimeFromTicks, + Timestamp as Timestamp, + TimestampFromTicks as TimestampFromTicks, +) + +from .dataframe import ( + DataFrame as DataFrame, +) + +from .dbobject import ( + DbObject as DbObject, + DbObjectAttr as DbObjectAttr, + DbObjectType as DbObjectType, +) + +from .defaults import ( + Defaults as Defaults, +) + +from .driver_mode import ( + is_thin_mode as is_thin_mode, +) + +from .dsn import ( + makedsn as makedsn, +) + +from .errors import ( + _Error as _Error, ) from .exceptions import ( @@ -231,11 +110,17 @@ ProgrammingError as ProgrammingError, ) -from .errors import _Error as _Error +from .fetch_info import ( + FetchInfo as FetchInfo, +) -from .defaults import ( - defaults as defaults, - Defaults as Defaults, +from .future import ( + __future__ as __future__, +) + +from .lob import ( + LOB as LOB, + AsyncLOB as AsyncLOB, ) from .pipeline import ( @@ -245,19 +130,1026 @@ create_pipeline as create_pipeline, ) -from .connection import ( +from .soda import ( + SodaDatabase as SodaDatabase, + SodaCollection as SodaCollection, + SodaDocument as SodaDocument, + SodaDocCursor as SodaDocCursor, + SodaOperation as SodaOperation, +) + +from .sparse_vector import ( + SparseVector as SparseVector, +) + +from .utils import ( + clientversion as clientversion, + enable_thin_mode as enable_thin_mode, + from_arrow as from_arrow, + init_oracle_client as init_oracle_client, + register_params_hook as register_params_hook, + register_password_type as register_password_type, + register_protocol as register_protocol, + unregister_params_hook as unregister_params_hook, +) + +from .var import ( + Var as Var, +) + + +# module attributes +apilevel: str = "2.0" +""" +A string constant stating the Python DB API level supported by python-oracledb. +""" + +defaults: Defaults = Defaults() +""" +The defaults object for setting default behaviors of python-oracledb. +""" + +paramstyle: str = "named" +""" +A string constant stating the type of parameter marker formatting expected by +the interface. Currently 'named' as in 'where name = :name'. +""" + +threadsafety: int = 2 +""" +An integer constant stating the level of thread safety that python-oracledb +supports. Currently 2, which means that threads may share the module and +connections, but not cursors. Sharing means that a thread may use a resource +without wrapping it using a mutex semaphore to implement resource locking. +""" + +__version__: str = version.__version__ +""" +A string constant stating the version of the module. +""" + + +# API types +BINARY: ApiType = base_impl.BINARY +""" +This type object is used to describe columns in a database that contain binary +data. The database types :data:`DB_TYPE_RAW` and :data:`DB_TYPE_LONG_RAW` will +compare equal to this value. If a variable is created with this type, the +database type :data:`DB_TYPE_RAW` will be used. 
+""" + +DATETIME: ApiType = base_impl.DATETIME +""" +This type object is used to describe columns in a database that are dates. The +database types :data:`DB_TYPE_DATE`, :data:`DB_TYPE_TIMESTAMP`, +:data:`DB_TYPE_TIMESTAMP_LTZ` and :data:`DB_TYPE_TIMESTAMP_TZ` will all compare +equal to this value. If a variable is created with this type, the database type +:data:`DB_TYPE_DATE` will be used. +""" + +NUMBER: ApiType = base_impl.NUMBER +""" +This type object is used to describe columns in a database that are numbers. +The database types :data:`DB_TYPE_BINARY_DOUBLE`, :data:`DB_TYPE_BINARY_FLOAT`, +:data:`DB_TYPE_BINARY_INTEGER` and :data:`DB_TYPE_NUMBER` will all compare +equal to this value. If a variable is created with this type, the database type +:data:`DB_TYPE_NUMBER` will be used. +""" + +ROWID: ApiType = base_impl.ROWID +""" +This type object is used to describe the pseudo column "rowid". The database +types :data:`DB_TYPE_ROWID` and :data:`DB_TYPE_UROWID` will compare equal to +this value. If a variable is created with this type, the database type +:data:`DB_TYPE_VARCHAR` will be used. +""" + +STRING: ApiType = base_impl.STRING +""" +This type object is used to describe columns in a database that are strings. +The database types :data:`DB_TYPE_CHAR`, :data:`DB_TYPE_LONG`, +:data:`DB_TYPE_NCHAR`, :data:`DB_TYPE_NVARCHAR` and :data:`DB_TYPE_VARCHAR` +will all compare equal to this value. If a variable is created with this type, +the database type :data:`DB_TYPE_VARCHAR` will be used. +""" + + +# connection authorization modes +AUTH_MODE_DEFAULT: AuthMode = AuthMode.DEFAULT +""" +This constant is used to specify that default authentication is to take place. +This is the default value if no mode is passed at all. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``DEFAULT_AUTH`` constant that was used in the +obsolete cx_Oracle driver, and was the default ``mode`` value. +""" + +AUTH_MODE_PRELIM: AuthMode = AuthMode.PRELIM +""" +This constant is used to specify that preliminary authentication is to be used. +This is needed for performing database startup and shutdown. + +It can only be used in python-oracledb Thick mode for standalone connections. + +This constant deprecates the ``PRELIM_AUTH`` constant that was used in the +obsolete cx_Oracle driver. +""" + +AUTH_MODE_SYSASM: AuthMode = AuthMode.SYSASM +""" +This constant is used to specify that SYSASM access is to be acquired. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``SYSASM`` constant that was used in the obsolete +cx_Oracle driver. +""" + +AUTH_MODE_SYSBKP: AuthMode = AuthMode.SYSBKP +""" +This constant is used to specify that SYSBACKUP access is to be acquired. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``SYSBKP`` constant that was used in the +obsolete cx_Oracle driver. +""" + +AUTH_MODE_SYSDBA: AuthMode = AuthMode.SYSDBA +""" +This constant is used to specify that SYSDBA access is to be acquired. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``SYSDBA`` constant that was used in the obsolete +cx_Oracle driver. 
+""" + +AUTH_MODE_SYSDGD: AuthMode = AuthMode.SYSDGD +""" +This constant is used to specify that SYSDG access is to be acquired. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``SYSDGD`` constant that was used in the obsolete +cx_Oracle driver. +""" + +AUTH_MODE_SYSKMT: AuthMode = AuthMode.SYSKMT +""" +This constant is used to specify that SYSKM access is to be acquired. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``SYSKMT`` constant that was used in the obsolete +cx_Oracle driver. +""" + +AUTH_MODE_SYSOPER: AuthMode = AuthMode.SYSOPER +""" +This constant is used to specify that SYSOPER access is to be acquired. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``SYSOPER`` constant that was used in the obsolete +cx_Oracle driver. +""" + +AUTH_MODE_SYSRAC: AuthMode = AuthMode.SYSRAC +""" +This constant is used to specify that SYSRAC access is to be acquired. + +It can be used for standalone and pooled connections in python-oracledb Thin +mode, and for standalone connections in Thick mode. + +This constant deprecates the ``SYSRAC`` constant that was used in the obsolete +cx_Oracle driver. +""" + + +# database shutdown modes +DBSHUTDOWN_ABORT: int = constants.DBSHUTDOWN_ABORT +""" +This constant is used to specify that the caller should not wait for current +processing to complete or for users to disconnect from the database. This +should only be used in unusual circumstances since database recovery may be +necessary upon next startup. +""" + +DBSHUTDOWN_FINAL: int = constants.DBSHUTDOWN_FINAL +""" +This constant is used to specify that the instance can be truly halted. This +should only be done after the database has been shutdown with one of the other +modes (except abort) and the database has been closed and dismounted using the +appropriate SQL commands. +""" + +DBSHUTDOWN_IMMEDIATE: int = constants.DBSHUTDOWN_IMMEDIATE +""" +This constant is used to specify that all uncommitted transactions should be +rolled back and any connected users should be disconnected. +""" + +DBSHUTDOWN_TRANSACTIONAL: int = constants.DBSHUTDOWN_TRANSACTIONAL +""" +This constant is used to specify that further connections to the database +should be prohibited and no new transactions should be allowed. It then waits +for all active transactions to complete. +""" + +DBSHUTDOWN_TRANSACTIONAL_LOCAL: int = constants.DBSHUTDOWN_TRANSACTIONAL_LOCAL +""" +This constant is used to specify that further connections to the database +should be prohibited and no new transactions should be allowed. It then waits +for only local active transactions to complete. +""" + + +# database types +DB_TYPE_BFILE: DbType = base_impl.DB_TYPE_BFILE +""" +Describes columns, attributes or array elements in a database that are of type +BFILE. It will compare equal to the DB API type :data:`BINARY`. +""" + +DB_TYPE_BINARY_DOUBLE: DbType = base_impl.DB_TYPE_BINARY_DOUBLE +""" +Describes columns, attributes or array elements in a database that are of type +BINARY_DOUBLE. It will compare equal to the DB API type :data:`NUMBER`. +""" + +DB_TYPE_BINARY_FLOAT: DbType = base_impl.DB_TYPE_BINARY_FLOAT +""" +Describes columns, attributes or array elements in a database that are of type +BINARY_FLOAT. 
It will compare equal to the DB API type :data:`NUMBER`. +""" + +DB_TYPE_BINARY_INTEGER: DbType = base_impl.DB_TYPE_BINARY_INTEGER +""" +Describes attributes or array elements in a database that are of type +BINARY_INTEGER. It will compare equal to the DB API type :data:`NUMBER`. +""" + +DB_TYPE_BLOB: DbType = base_impl.DB_TYPE_BLOB +""" +Describes columns, attributes or array elements in a database that are of type +BLOB. It will compare equal to the DB API type :data:`BINARY`. +""" + +DB_TYPE_BOOLEAN: DbType = base_impl.DB_TYPE_BOOLEAN +""" +Describes attributes or array elements in a database that are of type BOOLEAN. +It is only available in Oracle 12.1 and higher and only within PL/SQL. +""" + +DB_TYPE_CHAR: DbType = base_impl.DB_TYPE_CHAR +""" +Describes columns, attributes or array elements in a database that are of type +CHAR. It will compare equal to the DB API type :data:`STRING`. + +Note that these are fixed length string values and behave differently from +VARCHAR2. +""" + +DB_TYPE_CLOB: DbType = base_impl.DB_TYPE_CLOB +""" +Describes columns, attributes or array elements in a database that are of type +CLOB. It will compare equal to the DB API type :data:`STRING`. +""" + +DB_TYPE_CURSOR: DbType = base_impl.DB_TYPE_CURSOR +""" +Describes columns in a database that are of type CURSOR. In PL/SQL, these are +known as REF CURSOR. +""" + +DB_TYPE_DATE: DbType = base_impl.DB_TYPE_DATE +""" +Describes columns, attributes or array elements in a database that are of type +DATE. It will compare equal to the DB API type :data:`DATETIME`. +""" + +DB_TYPE_INTERVAL_DS: DbType = base_impl.DB_TYPE_INTERVAL_DS +""" +Describes columns, attributes or array elements in a database that are of type +INTERVAL DAY TO SECOND. +""" + +DB_TYPE_INTERVAL_YM: DbType = base_impl.DB_TYPE_INTERVAL_YM +""" +Describes columns, attributes or array elements in a database that are of type +INTERVAL YEAR TO MONTH. +""" + +DB_TYPE_JSON: DbType = base_impl.DB_TYPE_JSON +""" +Describes columns in a database that are of type JSON (with Oracle Database 21 +or later). +""" + +DB_TYPE_LONG: DbType = base_impl.DB_TYPE_LONG +""" +Describes columns, attributes or array elements in a database that are of type +LONG. It will compare equal to the DB API type :data:`STRING`. +""" + +DB_TYPE_LONG_NVARCHAR: DbType = base_impl.DB_TYPE_LONG_NVARCHAR +""" +This constant can be used in output type handlers when fetching NCLOB columns +as a string. (Note a type handler is not needed if +:data:`oracledb.defaults.fetch_lobs `, or the equivalent +execution parameter, is set to *False*). For IN binds, this constant can be +used to create a bind variable in :meth:`Cursor.var()` or via +:meth:`Cursor.setinputsizes()`. The ``DB_TYPE_LONG_NVARCHAR`` value won't be +shown in query metadata since it is not a database type. + +It will compare equal to the DB API type :data:`STRING`. +""" + +DB_TYPE_LONG_RAW: DbType = base_impl.DB_TYPE_LONG_RAW +""" +Describes columns, attributes or array elements in a database that are of type +LONG RAW. It will compare equal to the DB API type :data:`BINARY`. +""" + +DB_TYPE_NCHAR: DbType = base_impl.DB_TYPE_NCHAR +""" +Describes columns, attributes or array elements in a database that are of type +NCHAR. It will compare equal to the DB API type :data:`STRING`. + +Note that these are fixed length string values and behave differently from +NVARCHAR2. +""" + +DB_TYPE_NCLOB: DbType = base_impl.DB_TYPE_NCLOB +""" +Describes columns, attributes or array elements in a database that are of type +NCLOB. 
It will compare equal to the DB API type :data:`STRING`. +""" + +DB_TYPE_NUMBER: DbType = base_impl.DB_TYPE_NUMBER +""" +Describes columns, attributes or array elements in a database that are of type +NUMBER. It will compare equal to the DB API type :data:`NUMBER`. +""" + +DB_TYPE_NVARCHAR: DbType = base_impl.DB_TYPE_NVARCHAR +""" +Describes columns, attributes or array elements in a database that are of type +NVARCHAR2. It will compare equal to the DB API type :data:`STRING`. +""" + +DB_TYPE_OBJECT: DbType = base_impl.DB_TYPE_OBJECT +""" +Describes columns, attributes or array elements in a database that are an +instance of a named SQL or PL/SQL type. +""" + +DB_TYPE_RAW: DbType = base_impl.DB_TYPE_RAW +""" +Describes columns, attributes or array elements in a database that are of type +RAW. It will compare equal to the DB API type :data:`BINARY`. +""" + +DB_TYPE_ROWID: DbType = base_impl.DB_TYPE_ROWID +""" +Describes columns, attributes or array elements in a database that are of type +ROWID or UROWID. It will compare equal to the DB API type :data:`ROWID`. +""" + +DB_TYPE_TIMESTAMP: DbType = base_impl.DB_TYPE_TIMESTAMP +""" +Describes columns, attributes or array elements in a database that are of type +TIMESTAMP. It will compare equal to the DB API type :data:`DATETIME`. +""" + +DB_TYPE_TIMESTAMP_LTZ: DbType = base_impl.DB_TYPE_TIMESTAMP_LTZ +""" +Describes columns, attributes or array elements in a database that are of type +TIMESTAMP WITH LOCAL TIME ZONE. It will compare equal to the DB API type +:data:`DATETIME`. +""" + +DB_TYPE_TIMESTAMP_TZ: DbType = base_impl.DB_TYPE_TIMESTAMP_TZ +""" +Describes columns, attributes or array elements in a database that are of type +TIMESTAMP WITH TIME ZONE. It will compare equal to the DB API type +:data:`DATETIME`. +""" + +DB_TYPE_UNKNOWN: DbType = base_impl.DB_TYPE_UNKNOWN +""" +Describes columns, attributes or array elements in a database that are of an +unknown type. +""" + +DB_TYPE_UROWID: DbType = base_impl.DB_TYPE_UROWID +""" +Describes columns, attributes or array elements in a database that are of type +UROWID. It will compare equal to the DB API type :data:`ROWID`. +""" + +DB_TYPE_VARCHAR: DbType = base_impl.DB_TYPE_VARCHAR +""" +Describes columns, attributes or array elements in a database that are of type +VARCHAR2. It will compare equal to the DB API type :data:`STRING`. +""" + +DB_TYPE_VECTOR: DbType = base_impl.DB_TYPE_VECTOR +""" +Describes columns, attributes or array elements in a database that are of type +VECTOR (with Oracle Database 23 or later). +""" + +DB_TYPE_XMLTYPE: DbType = base_impl.DB_TYPE_XMLTYPE +""" +Describes columns, attributes or array elements in a database that are of type +SYS.XMLTYPE. +""" + + +# AQ dequeue modes +DEQ_BROWSE: int = constants.DEQ_BROWSE +""" +This constant is used to specify that dequeue should read the message without +acquiring any lock on the message (equivalent to a select statement). +""" + +DEQ_LOCKED: int = constants.DEQ_LOCKED +""" +This constant is used to specify that dequeue should read and obtain a write +lock on the message for the duration of the transaction (equivalent to a select +for update statement). +""" + +DEQ_REMOVE: int = constants.DEQ_REMOVE +""" +This constant is used to specify that dequeue should read the message and +update or delete it. This is the default value. +""" + +DEQ_REMOVE_NODATA: int = constants.DEQ_REMOVE_NODATA +""" +This constant is used to specify that dequeue should confirm receipt of the +message but not deliver the actual message content. 
+""" + + +# AQ dequeue navigation modes +DEQ_FIRST_MSG: int = constants.DEQ_FIRST_MSG +""" +This constant is used to specify that dequeue should retrieve the first +available message that matches the search criteria. This resets the +position to the beginning of the queue. +""" + +DEQ_NEXT_MSG: int = constants.DEQ_NEXT_MSG +""" +This constant is used to specify that dequeue should retrieve the next +available message that matches the search criteria. If the previous message +belongs to a message group, AQ retrieves the next available message that +matches the search criteria and belongs to the message group. This is the +default. +""" + +DEQ_NEXT_TRANSACTION: int = constants.DEQ_NEXT_TRANSACTION +""" +This constant is used to specify that dequeue should skip the remainder of the +transaction group and retrieve the first message of the next transaction group. +This option can only be used if message grouping is enabled for the current +queue. +""" + + +# AQ dequeue visibility modes +DEQ_IMMEDIATE: int = constants.DEQ_IMMEDIATE +""" +This constant is used to specify that dequeue should perform its work as part +of an independent transaction. +""" + +DEQ_ON_COMMIT: int = constants.DEQ_ON_COMMIT +""" +This constant is used to specify that dequeue should be part of the current +transaction. This is the default value. +""" + + +# AQ dequeue wait modes +DEQ_NO_WAIT: int = constants.DEQ_NO_WAIT +""" +This constant is used to specify that dequeue not wait for messages to be +available for dequeuing. +""" + +DEQ_WAIT_FOREVER: int = constants.DEQ_WAIT_FOREVER +""" +This constant is used to specify that dequeue should wait forever for messages +to be available for dequeuing. This is the default value. +""" + + +# AQ enqueue visibility modes +ENQ_IMMEDIATE: int = constants.ENQ_IMMEDIATE +""" +This constant is used to specify that enqueue should perform its work as +part of an independent transaction. + +The use of this constant with bulk enqueuing is only supported in +python-oracledb Thick mode. +""" + +ENQ_ON_COMMIT: int = constants.ENQ_ON_COMMIT +""" +This constant is used to specify that enqueue should be part of the current +transaction. This is the default value. +""" + + +# event types +EVENT_AQ: int = constants.EVENT_AQ +""" +This constant is used to specify that one or more messages are available for +dequeuing on the queue specified when the subscription was created. +""" + +EVENT_DEREG: int = constants.EVENT_DEREG +""" +This constant is used to specify that the subscription has been deregistered +and no further notifications will be sent. +""" + +EVENT_NONE: int = constants.EVENT_NONE +""" +This constant is used to specify no information is available about the event. +""" + +EVENT_OBJCHANGE: int = constants.EVENT_OBJCHANGE +""" +This constant is used to specify that a database change has taken place on a +table registered with the :meth:`Subscription.registerquery()` method. +""" + +EVENT_QUERYCHANGE: int = constants.EVENT_QUERYCHANGE +""" +This constant is used to specify that the result set of a query registered with +the :meth:`Subscription.registerquery()` method has been changed. +""" + +EVENT_SHUTDOWN: int = constants.EVENT_SHUTDOWN +""" +This constant is used to specify that the instance is in the process of being +shut down. +""" + +EVENT_SHUTDOWN_ANY: int = constants.EVENT_SHUTDOWN_ANY +""" +This constant is used to specify that any instance (when running RAC) is in the +process of being shut down. 
+""" + +EVENT_STARTUP: int = constants.EVENT_STARTUP +""" +This constant is used to specify that the instance is in the process of being +started up. +""" + + +# AQ delivery modes +MSG_BUFFERED: int = constants.MSG_BUFFERED +""" +This constant is used to specify that enqueue or dequeue operations should +enqueue or dequeue buffered messages, respectively. For multi-consumer queues, +a `subscriber `__ with buffered delivery mode +needs to be created prior to enqueuing buffered messages. + +This mode is not supported for bulk array operations in python-oracledb Thick +mode. +""" + +MSG_PERSISTENT: int = constants.MSG_PERSISTENT +""" +This constant is used to specify that enqueue/dequeue operations should enqueue +or dequeue persistent messages. This is the default value. +""" + +MSG_PERSISTENT_OR_BUFFERED: int = constants.MSG_PERSISTENT_OR_BUFFERED +""" +This constant is used to specify that dequeue operations should dequeue either +persistent or buffered messages. +""" + + +# AQ message states +MSG_EXPIRED: int = constants.MSG_EXPIRED +""" +This constant is used to specify that the message has been moved to the +exception queue. +""" + +MSG_PROCESSED: int = constants.MSG_PROCESSED +""" +This constant is used to specify that the message has been processed and has +been retained. +""" + +MSG_READY: int = constants.MSG_READY +""" +This constant is used to specify that the message is ready to be processed. +""" + +MSG_WAITING: int = constants.MSG_WAITING +""" +This constant is used to specify that the message delay has not yet been +reached. +""" + + +# other AQ constants +MSG_NO_DELAY: int = constants.MSG_NO_DELAY +""" +This constant is a possible value for the :attr:`~MessageProperties.delay` +attribute of the message properties object passed as the ``msgproperties`` +parameter to the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` and +:meth:`Queue.enqone()` or :meth:`Queue.enqmany()` methods. It specifies that +no delay should be imposed and the message should be immediately available for +dequeuing. This is also the default value. +""" + +MSG_NO_EXPIRATION: int = constants.MSG_NO_EXPIRATION +""" +This constant is a possible value for the :attr:`~MessageProperties.expiration` +attribute of the message properties object passed as the ``msgproperties`` +parameter to the :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` and +:meth:`Queue.enqone()` or :meth:`Queue.enqmany()` methods. It specifies that +the message never expires. This is also the default value. +""" + + +# operation codes (CQN) +OPCODE_ALLOPS: int = constants.OPCODE_ALLOPS +""" +This constant is used to specify that messages should be sent for all +operations. +""" + +OPCODE_ALLROWS: int = constants.OPCODE_ALLROWS +""" +This constant is used to specify that the table or query has been completely +invalidated. +""" + +OPCODE_ALTER: int = constants.OPCODE_ALTER +""" +This constant is used to specify that messages should be sent when a registered +table has been altered in some fashion by DDL, or that the message identifies a +table that has been altered. +""" + +OPCODE_DELETE: int = constants.OPCODE_DELETE +""" +This constant is used to specify that messages should be sent when data is +deleted, or that the message identifies a row that has been deleted. +""" + +OPCODE_DROP: int = constants.OPCODE_DROP +""" +This constant is used to specify that messages should be sent when a registered +table has been dropped, or that the message identifies a table that has been +dropped. 
+""" + +OPCODE_INSERT: int = constants.OPCODE_INSERT +""" +This constant is used to specify that messages should be sent when data is +inserted, or that the message identifies a row that has been inserted. +""" + +OPCODE_UPDATE: int = constants.OPCODE_UPDATE +""" +This constant is used to specify that messages should be sent when data is +updated, or that the message identifies a row that has been updated. +""" + + +# pipeline operation types +PIPELINE_OP_TYPE_CALL_FUNC: PipelineOpType = PipelineOpType.CALL_FUNC +""" +This constant identifies the type of operation as the calling of a stored +function. +""" + +PIPELINE_OP_TYPE_CALL_PROC: PipelineOpType = PipelineOpType.CALL_PROC +""" +This constant identifies the type of operation as the calling of a stored +procedure. +""" + +PIPELINE_OP_TYPE_COMMIT: PipelineOpType = PipelineOpType.COMMIT +""" +This constant identifies the type of operation as the performing of a commit. +""" + +PIPELINE_OP_TYPE_EXECUTE: PipelineOpType = PipelineOpType.EXECUTE +""" +This constant identifies the type of operation as the executing of a statement. +""" + +PIPELINE_OP_TYPE_EXECUTE_MANY: PipelineOpType = PipelineOpType.EXECUTE_MANY +""" +This constant identifies the type of operations as the executing of a statement +multiple times. +""" + +PIPELINE_OP_TYPE_FETCH_ALL: PipelineOpType = PipelineOpType.FETCH_ALL +""" +This constant identifies the type of operation as the executing of a query and +returning all of the rows from the result set. +""" + +PIPELINE_OP_TYPE_FETCH_MANY: PipelineOpType = PipelineOpType.FETCH_MANY +""" +This constant identifies the type of operation as the executing of a query and +returning up to the specified number of rows from the result set. +""" + +PIPELINE_OP_TYPE_FETCH_ONE: PipelineOpType = PipelineOpType.FETCH_ONE +""" +This constant identifies the type of operation as the executing of a query and +returning the first row of the result set. +""" + + +# connection pool "get" modes +POOL_GETMODE_FORCEGET: PoolGetMode = PoolGetMode.FORCEGET +""" +This constant is used to specify that a new connection should be created and +returned by :meth:`ConnectionPool.acquire()` if there are no free connections +available in the pool and the pool is already at its maximum size. + +When a connection acquired in this mode is eventually released back to the +pool, it will be dropped and not added to the pool if the pool is still at its +maximum size. + +This constant deprecates the ``SPOOL_ATTRVAL_FORCEGET`` constant that was used +in the obsolete cx_Oracle driver. +""" + +POOL_GETMODE_NOWAIT: PoolGetMode = PoolGetMode.NOWAIT +""" +This constant is used to specify that an exception should be raised by +:meth:`ConnectionPool.acquire()` when all currently created connections are +already in use and so :meth:`~ConnectionPool.acquire()` cannot immediately +return a connection. Note the exception may occur even if the pool is smaller +than its maximum size. + +This constant deprecates the ``SPOOL_ATTRVAL_NOWAIT`` constant that was used in +the obsolete cx_Oracle driver, and was the default ``getmode`` value. +""" + +POOL_GETMODE_TIMEDWAIT: PoolGetMode = PoolGetMode.TIMEDWAIT +""" +This constant is used to specify that :meth:`ConnectionPool.acquire()` should +wait for a period of time (defined by the ``wait_timeout`` parameter) for a +connection to become available before returning with an error. + +This constant deprecates the ``SPOOL_ATTRVAL_TIMEDWAIT`` constant that was used +in the obsolete cx_Oracle driver. 
+""" + +POOL_GETMODE_WAIT: PoolGetMode = PoolGetMode.WAIT +""" +This constant is used to specify that :meth:`ConnectionPool.acquire()` should +wait until a connection is available if there are currently no free connections +available in the pool. This is the default value. + +This constant deprecates the ``SPOOL_ATTRVAL_WAIT`` constant that was used in +the obsolete cx_Oracle driver. +""" + + +# connection pool purity +PURITY_DEFAULT: Purity = Purity.DEFAULT +""" +This constant is used to specify that the purity of the session is the default +value identified by Oracle (see Oracle's documentation for more information). +This is the default value. + +This constant deprecates the ``ATTR_PURITY_DEFAULT`` constant that was used in +the obsolete cx_Oracle driver, and was the default ``purity`` value. +""" + +PURITY_NEW: Purity = Purity.NEW +""" +This constant is used to specify that the session acquired from the pool should +be new and not have any prior session state. + +This constant deprecates the ``ATTR_PURITY_NEW`` constant that was used in the +obsolete cx_Oracle driver. +""" + +PURITY_SELF: Purity = Purity.SELF +""" +This constant is used to specify that the session acquired from the pool need +not be new and may have prior session state. + +This constant deprecates the ``ATTR_PURITY_SELF`` constant that was used in the +obsolete cx_Oracle driver. +""" + + +# subscription grouping classes +SUBSCR_GROUPING_CLASS_NONE: int = constants.SUBSCR_GROUPING_CLASS_NONE +""" +This constant is used to specify that no grouping should take place. +""" + +SUBSCR_GROUPING_CLASS_TIME: int = constants.SUBSCR_GROUPING_CLASS_TIME +""" +This constant is used to specify that events are to be grouped by the period of +time in which they are received. +""" + + +# subscription grouping types +SUBSCR_GROUPING_TYPE_SUMMARY: int = constants.SUBSCR_GROUPING_TYPE_SUMMARY +""" +This constant is used to specify that when events are grouped a summary of the +events should be sent instead of the individual events. This is the default +value. +""" + +SUBSCR_GROUPING_TYPE_LAST: int = constants.SUBSCR_GROUPING_TYPE_LAST +""" +This constant is used to specify that when events are grouped the last event +that makes up the group should be sent instead of the individual events. +""" + + +# subscription namespaces +SUBSCR_NAMESPACE_AQ: int = constants.SUBSCR_NAMESPACE_AQ +""" +This constant is used to specify that notifications should be sent when a queue +has messages available to dequeue. +""" + +SUBSCR_NAMESPACE_DBCHANGE: int = constants.SUBSCR_NAMESPACE_DBCHANGE +""" +This constant is used to specify that database change notification or query +change notification messages are to be sent. This is the default value. +""" + + +# subscription protocols +SUBSCR_PROTO_CALLBACK: int = constants.SUBSCR_PROTO_CALLBACK +""" +This constant is used to specify that notifications will be sent to the +callback routine identified when the subscription was created. It is the +default value and the only value currently supported. +""" + +SUBSCR_PROTO_HTTP: int = constants.SUBSCR_PROTO_HTTP +""" +This constant is used to specify that notifications will be sent to an HTTP +URL when a message is generated. This value is currently not supported. +""" + +SUBSCR_PROTO_MAIL: int = constants.SUBSCR_PROTO_MAIL +""" +This constant is used to specify that notifications will be sent to an e-mail +address when a message is generated. This value is currently not supported. 
+""" + +SUBSCR_PROTO_SERVER: int = constants.SUBSCR_PROTO_SERVER +""" +This constant is used to specify that notifications will be sent to a PL/SQL +procedure when a message is generated. This value is currently not supported. +""" + + +# subscription quality of service +SUBSCR_QOS_BEST_EFFORT: int = constants.SUBSCR_QOS_BEST_EFFORT +""" +This constant is used to specify that best effort filtering for query result +set changes is acceptable. False positive notifications may be received. This +behaviour may be suitable for caching applications. +""" + +SUBSCR_QOS_DEFAULT: int = constants.SUBSCR_QOS_DEFAULT +""" +This constant is used to specify that the default behavior for subscriptions +should be used. +""" + +SUBSCR_QOS_DEREG_NFY: int = constants.SUBSCR_QOS_DEREG_NFY +""" +This constant is used to specify that the subscription should be automatically +unregistered after the first notification is received. +""" + +SUBSCR_QOS_QUERY: int = constants.SUBSCR_QOS_QUERY +""" +This constant is used to specify that notifications should be sent if the +result set of the registered query changes. By default, no false positive +notifications will be generated. +""" + +SUBSCR_QOS_RELIABLE: int = constants.SUBSCR_QOS_RELIABLE +""" +This constant is used to specify that notifications should not be lost in the +event of database failure. +""" + +SUBSCR_QOS_ROWIDS: int = constants.SUBSCR_QOS_ROWIDS +""" +This constant is used to specify that the rowids of the inserted, updated or +deleted rows should be included in the message objects that are sent. +""" + + +# flags for tpc_begin() +TPC_BEGIN_JOIN: int = base_impl.TPC_TXN_FLAGS_JOIN +""" +This constant is used to join an existing TPC transaction. +""" + +TPC_BEGIN_NEW: int = base_impl.TPC_TXN_FLAGS_NEW +""" +This constant is used to create a new TPC transaction. +""" + +TPC_BEGIN_PROMOTE: int = base_impl.TPC_TXN_FLAGS_PROMOTE +""" +This constant is used to promote a local transaction to a TPC transaction. +""" + +TPC_BEGIN_RESUME: int = base_impl.TPC_TXN_FLAGS_RESUME +""" +This constant is used to resume an existing TPC transaction. +""" + + +# flags for tpc_end() +TPC_END_NORMAL: int = constants.TPC_END_NORMAL +""" +This constant is used to end TPC transaction participation normally. +""" + +TPC_END_SUSPEND: int = constants.TPC_END_SUSPEND +""" +This constant is used to suspend a TPC transaction. +""" + + +# vector formats +VECTOR_FORMAT_BINARY: VectorFormat = VectorFormat.BINARY +""" +This constant is used to represent the storage format of VECTOR columns using +8-bit unsigned integers. +""" + +VECTOR_FORMAT_FLOAT32: VectorFormat = VectorFormat.FLOAT32 +""" +This constant is used to represent the storage format of VECTOR columns using +32-bit floating point numbers. +""" + +VECTOR_FORMAT_FLOAT64: VectorFormat = VectorFormat.FLOAT64 +""" +This constant is used to represent the storage format of VECTOR columns using +64-bit floating point numbers. +""" + +VECTOR_FORMAT_INT8: VectorFormat = VectorFormat.INT8 +""" +This constant is used to represent the storage format of VECTOR columns using +8-bit signed integers. 
+""" + + +from .connection import ( # noqa: E402 AsyncConnection as AsyncConnection, connect as connect, connect_async as connect_async, Connection as Connection, ) -from .cursor import ( +from .cursor import ( # noqa: E402 AsyncCursor as AsyncCursor, Cursor as Cursor, ) -from .pool import ( +from .pool import ( # noqa: E402 AsyncConnectionPool as AsyncConnectionPool, ConnectionPool as ConnectionPool, create_pool as create_pool, @@ -265,7 +1157,7 @@ get_pool as get_pool, ) -from .subscr import ( +from .subscr import ( # noqa: E402 Subscription as Subscription, Message as Message, MessageQuery as MessageQuery, @@ -273,7 +1165,7 @@ MessageTable as MessageTable, ) -from .aq import ( +from .aq import ( # noqa: E402 Queue as Queue, AsyncQueue as AsyncQueue, DeqOptions as DeqOptions, @@ -281,75 +1173,11 @@ MessageProperties as MessageProperties, ) -from .soda import ( - SodaDatabase as SodaDatabase, - SodaCollection as SodaCollection, - SodaDocument as SodaDocument, - SodaDocCursor as SodaDocCursor, - SodaOperation as SodaOperation, -) - -from .connect_params import ConnectParams as ConnectParams - -from .pool_params import PoolParams as PoolParams - -from .lob import ( - LOB as LOB, - AsyncLOB as AsyncLOB, -) - -from .dbobject import ( - DbObject as DbObject, - DbObjectAttr as DbObjectAttr, - DbObjectType as DbObjectType, -) +from .connect_params import ConnectParams as ConnectParams # noqa: E402 -from .fetch_info import FetchInfo as FetchInfo - -from .var import Var as Var - -from .dsn import makedsn as makedsn - -from .driver_mode import is_thin_mode as is_thin_mode - -from .utils import ( - clientversion as clientversion, - enable_thin_mode as enable_thin_mode, - from_arrow as from_arrow, - init_oracle_client as init_oracle_client, - register_params_hook as register_params_hook, - register_password_type as register_password_type, - register_protocol as register_protocol, - unregister_params_hook as unregister_params_hook, -) - -from .constructors import ( - Binary as Binary, - Date as Date, - DateFromTicks as DateFromTicks, - Time as Time, - TimeFromTicks as TimeFromTicks, - Timestamp as Timestamp, - TimestampFromTicks as TimestampFromTicks, -) - -from .future import ( - future as __future__, # noqa: F401 -) - -from .sparse_vector import ( - SparseVector as SparseVector, -) - -from .arrow_array import ( - ArrowArray as ArrowArray, -) - -from .dataframe import ( - DataFrame as DataFrame, -) +from .pool_params import PoolParams as PoolParams # noqa: E402 -from . import builtin_hooks +from . import builtin_hooks # noqa: E402 IntervalYM = collections.namedtuple("IntervalYM", ["years", "months"]) @@ -358,6 +1186,7 @@ class JsonId(bytes): pass +# initialize implementations package = sys.modules[__name__] base_impl.init_base_impl(package) thick_impl.init_thick_impl(package) diff --git a/src/oracledb/constants.py b/src/oracledb/constants.py index 6f0cdb9c..e7b556dd 100644 --- a/src/oracledb/constants.py +++ b/src/oracledb/constants.py @@ -28,11 +28,6 @@ # Contains the constants defined by the package. 
# ----------------------------------------------------------------------------- -# mandated DB API constants -apilevel = "2.0" -threadsafety = 2 -paramstyle = "named" - # AQ delivery modes MSG_BUFFERED = 2 MSG_PERSISTENT = 1 diff --git a/src/oracledb/enums.py b/src/oracledb/enums.py index b68519d6..64ad2122 100644 --- a/src/oracledb/enums.py +++ b/src/oracledb/enums.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -74,34 +74,3 @@ class VectorFormat(enum.IntEnum): FLOAT32 = base_impl.VECTOR_FORMAT_FLOAT32 FLOAT64 = base_impl.VECTOR_FORMAT_FLOAT64 INT8 = base_impl.VECTOR_FORMAT_INT8 - - -# provide aliases for all enumerated values -AUTH_MODE_DEFAULT = AuthMode.DEFAULT -AUTH_MODE_PRELIM = AuthMode.PRELIM -AUTH_MODE_SYSASM = AuthMode.SYSASM -AUTH_MODE_SYSBKP = AuthMode.SYSBKP -AUTH_MODE_SYSDBA = AuthMode.SYSDBA -AUTH_MODE_SYSDGD = AuthMode.SYSDGD -AUTH_MODE_SYSKMT = AuthMode.SYSKMT -AUTH_MODE_SYSOPER = AuthMode.SYSOPER -AUTH_MODE_SYSRAC = AuthMode.SYSRAC -PIPELINE_OP_TYPE_CALL_FUNC = PipelineOpType.CALL_FUNC -PIPELINE_OP_TYPE_CALL_PROC = PipelineOpType.CALL_PROC -PIPELINE_OP_TYPE_COMMIT = PipelineOpType.COMMIT -PIPELINE_OP_TYPE_EXECUTE = PipelineOpType.EXECUTE -PIPELINE_OP_TYPE_EXECUTE_MANY = PipelineOpType.EXECUTE_MANY -PIPELINE_OP_TYPE_FETCH_ALL = PipelineOpType.FETCH_ALL -PIPELINE_OP_TYPE_FETCH_MANY = PipelineOpType.FETCH_MANY -PIPELINE_OP_TYPE_FETCH_ONE = PipelineOpType.FETCH_ONE -POOL_GETMODE_FORCEGET = PoolGetMode.FORCEGET -POOL_GETMODE_NOWAIT = PoolGetMode.NOWAIT -POOL_GETMODE_TIMEDWAIT = PoolGetMode.TIMEDWAIT -POOL_GETMODE_WAIT = PoolGetMode.WAIT -PURITY_DEFAULT = Purity.DEFAULT -PURITY_NEW = Purity.NEW -PURITY_SELF = Purity.SELF -VECTOR_FORMAT_BINARY = VectorFormat.BINARY -VECTOR_FORMAT_FLOAT32 = VectorFormat.FLOAT32 -VECTOR_FORMAT_FLOAT64 = VectorFormat.FLOAT64 -VECTOR_FORMAT_INT8 = VectorFormat.INT8 diff --git a/src/oracledb/exceptions.py b/src/oracledb/exceptions.py index 940e77dd..b262cac2 100644 --- a/src/oracledb/exceptions.py +++ b/src/oracledb/exceptions.py @@ -30,40 +30,96 @@ class Warning(Exception): - pass + """ + Exception raised for warnings. + + Exception messages of this class will have the prefix DPY and an error + number in the range 9000 - 9999. + """ class Error(Exception): - pass + """ + Exception that is the base class of all other exceptions defined by + python-oracledb. + """ class DatabaseError(Error): - pass + """ + Exception raised for errors that are related to the database. It is a + subclass of Error. + + Exception messages of this class will have the prefix DPY and an error + number in the range 4000 - 4999. + """ class DataError(DatabaseError): - pass + """ + Exception raised for errors that are due to problems with the processed + data. It is a subclass of DatabaseError. + + Exception messages of this class are generated by the database and will + have a prefix such as ORA. + """ class IntegrityError(DatabaseError): - pass + """ + Exception raised when the relational integrity of the database is affected. + It is a subclass of DatabaseError. + + Exception messages of this class are generated by the database and will + have a prefix such as ORA. 
+ """ class InterfaceError(Error): - pass + """ + Exception raised for errors that are related to the database interface + rather than the database itself. It is a subclass of Error. + + Exception messages of this class will have the prefix DPY and an error + number in the range 1000 - 1999. + """ class InternalError(DatabaseError): - pass + """ + Exception raised when the database encounters an internal error. It is a + subclass of DatabaseError. + + Exception messages of this class will have the prefix DPY and an error + number in the range 5000 - 5999. + """ class NotSupportedError(DatabaseError): - pass + """ + Exception raised when a method or database API was used which is not + supported by the database. It is a subclass of DatabaseError. + + Exception messages of this class will have the prefix DPY and an error + number in the range 3000 - 3999. + """ class OperationalError(DatabaseError): - pass + """ + Exception raised for errors that are related to the operation of the + database but are not necessarily under the control of the programmer. It is + a subclass of DatabaseError. + + Exception messages of this class will have the prefix DPY and an error + number in the range 6000 - 6999. + """ class ProgrammingError(DatabaseError): - pass + """ + Exception raised for programming errors. It is a subclass of DatabaseError. + + Exception messages of this class will have the prefix DPY and an error + number in the range 2000 - 2999. + """ diff --git a/src/oracledb/future.py b/src/oracledb/future.py index 16270ac4..dff545d1 100644 --- a/src/oracledb/future.py +++ b/src/oracledb/future.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2023, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -43,4 +43,4 @@ def __setattr__(self, name, value): return super().__setattr__(name, value) -future = Future() +__future__ = Future() From 63bd8f78c82a59e36568e16f473630a393fb7b71 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Fri, 15 Aug 2025 13:46:04 -0600 Subject: [PATCH 185/239] Added optional dependencies [oci_config], [azure_config], [oci_auth] and [azure_auth] to simplify installation of required packages for centralized configuration providers and cloud native authentication plugins. --- doc/src/release_notes.rst | 5 ++++ doc/src/user_guide/installation.rst | 43 +++++++++++++++++++++-------- pyproject.toml | 8 ++++++ 3 files changed, 45 insertions(+), 11 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 02d2c2ce..db753bb4 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -35,6 +35,11 @@ Common Changes :attr:`oracledb.defaults.fetch_lobs ` and :attr:`oracledb.defaults.fetch_decimals ` are now stored with the operation and used during pipeline execution. +#) Added optional dependencies [oci_config], [azure_config], [oci_auth] and + [azure_auth] to simplify installation of required packages for + :ref:`Centralized Configuration Provider ` + support and :ref:`Cloud Native Authentication ` + support. #) Fixed bug when attempting to execute an empty statement (`issue 525 `__). #) API documentation is now generated from the source code. 
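As a purely illustrative aside, the optional dependencies noted above follow
standard pip "extras" syntax, so an application that needs both a centralized
configuration provider and the matching cloud native authentication plugin can
combine the extras in a single command. The extra names are the ones introduced
by this change; combining them in one invocation is ordinary pip behaviour
rather than anything specific to python-oracledb::

    python -m pip install 'oracledb[oci_config,oci_auth]'
    python -m pip install 'oracledb[azure_config,azure_auth]'
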
diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 18accaba..72ebf20b 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -1080,7 +1080,13 @@ Install Modules for the OCI Object Storage Centralized Configuration Provider For python-oracledb to use an :ref:`Oracle Cloud Infrastructure (OCI) Object Storage configuration provider `, you must install the -`OCI `__ package:: +`OCI `__ package. This can be done: + +- By using the recommended optional [oci_config] dependency:: + + python -m pip install oracledb[oci_config] + +- Or, by installing the package manually:: python -m pip install oci @@ -1094,21 +1100,24 @@ Install Modules for the Azure App Centralized Configuration Provider For python-oracledb to use an :ref:`Azure App Configuration Provider `, you must install the `Azure App Configuration -`__, `Azure Core -`__, and `Azure Identity -`__ packages:: +`__, `Azure Identity +`__, and `Azure Key Vault Secrets +`__ packages. +This can be done: - python -m pip install azure-appconfiguration azure-core azure-identity +- By using the recommended optional [azure_config] dependency:: -If your password is stored in the Azure Key vault, then you additionally need -to install the `Azure Key Vault Secrets `__ package:: + python -m pip install oracledb[azure_config] - python -m pip install azure-keyvault-secrets +- Or, by installing the packages manually:: + + python -m pip install azure-appconfiguration azure-identity azure-keyvault-secrets See :ref:`azureappstorageprovider` for information on using this configuration provider with python-oracledb. +.. _cloudnativemodules: + Installing Cloud Native Authentication Modules for python-oracledb ================================================================== @@ -1122,7 +1131,13 @@ Install Modules for the OCI Cloud Native Authentication Plugin For python-oracledb to use the OCI Cloud Native Authentication Plugin, you must install the `Python SDK for Oracle Cloud Infrastructure -`__ package:: +`__ package. This can be done: + +- By using the recommended optional [oci_auth] dependency:: + + python -m pip install oracledb[oci_auth] + +- Or, by installing the package manually:: python -m pip install oci @@ -1140,7 +1155,13 @@ Install Modules for the Azure Cloud Native Authentication Plugin For python-oracledb to use the Azure Cloud Native Authentication Plugin, you must install the `Microsoft Authentication Library (MSAL) for Python -`__ package:: +`__ package. This can be done: + +- By using the recommended optional [azure_auth] dependency:: + + python -m pip install oracledb[azure_auth] + +- Or, by installing the package manually:: python -m pip install msal diff --git a/pyproject.toml b/pyproject.toml index 974792e9..860d2abb 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -83,3 +83,11 @@ test = [ "pandas", "pyarrow", ] +oci_config = ["oci"] +oci_auth = ["oci"] +azure_config = [ + "azure-appconfiguration", + "azure-identity", + "azure-keyvault-secrets" +] +azure_auth = ["msal"] From d463b33bf5ec645cd949a054f3ab70422dfb4709 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 27 Aug 2025 11:05:11 -0600 Subject: [PATCH 186/239] Add link to release note. 
--- doc/src/release_notes.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index db753bb4..b53afcd9 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -39,7 +39,8 @@ Common Changes [azure_auth] to simplify installation of required packages for :ref:`Centralized Configuration Provider ` support and :ref:`Cloud Native Authentication ` - support. + support + (`issue 512 `__). #) Fixed bug when attempting to execute an empty statement (`issue 525 `__). #) API documentation is now generated from the source code. From 47208ff39046b9dccc248bb6a4325cd448381db3 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Wed, 27 Aug 2025 11:05:29 -0600 Subject: [PATCH 187/239] Pin Cython to 3.1.x instead of 3.1.0 as requested (#530). --- doc/src/release_notes.rst | 2 ++ pyproject.toml | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index b53afcd9..d43f9e07 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -43,6 +43,8 @@ Common Changes (`issue 512 `__). #) Fixed bug when attempting to execute an empty statement (`issue 525 `__). +#) Pin Cython to 3.1.x instead of 3.1.0 as requested + (`issue 530 `__). #) API documentation is now generated from the source code. #) Internal change: typing_extensions is now a dependency. diff --git a/pyproject.toml b/pyproject.toml index 860d2abb..75714532 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,7 +2,7 @@ requires = [ "setuptools >= 77.0.0", "wheel", - "cython == 3.1", + "cython ~= 3.1", ] build-backend = "setuptools.build_meta" From efc76ff37bab6df96830f93bbf7fdef18c60ad19 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:22:25 -0600 Subject: [PATCH 188/239] Internal change: small performance improvement sending bytes on the network transport. --- doc/src/release_notes.rst | 3 +++ src/oracledb/impl/thin/transport.pyx | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index d43f9e07..7e123d8c 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -19,6 +19,9 @@ oracledb `3.4.0 Date: Thu, 11 Sep 2025 13:23:59 -0600 Subject: [PATCH 189/239] Documentation improvements. 
--- doc/README.md | 2 +- doc/src/api_manual/async_connection.rst | 2 +- doc/src/api_manual/async_connection_pool.rst | 4 +- doc/src/api_manual/async_cursor.rst | 3 +- doc/src/api_manual/async_lob.rst | 2 +- doc/src/api_manual/connection.rst | 2 + doc/src/api_manual/connection_pool.rst | 6 +- doc/src/api_manual/deprecations.rst | 2 +- doc/src/api_manual/module.rst | 2 + doc/src/api_manual/soda.rst | 8 +-- doc/src/conf.py | 2 +- doc/src/release_notes.rst | 6 +- doc/src/user_guide/appendix_b.rst | 73 ++++++++++---------- doc/src/user_guide/appendix_c.rst | 16 ++--- doc/src/user_guide/connection_handling.rst | 37 +++++----- doc/src/user_guide/cqn.rst | 27 ++++---- doc/src/user_guide/exception_handling.rst | 13 ++-- doc/src/user_guide/globalization.rst | 44 ++++++------ doc/src/user_guide/ha.rst | 6 +- doc/src/user_guide/installation.rst | 2 +- doc/src/user_guide/introduction.rst | 19 ++--- doc/src/user_guide/soda.rst | 2 +- doc/src/user_guide/startup.rst | 2 +- doc/src/user_guide/tracing.rst | 43 +++++++++--- doc/src/user_guide/tuning.rst | 8 +-- src/oracledb/connect_params.py | 8 ++- src/oracledb/connection.py | 13 ++-- src/oracledb/pool.py | 16 ++--- src/oracledb/pool_params.py | 8 ++- tests/README.md | 2 +- utils/fields.cfg | 7 +- utils/templates/connection.py | 5 +- utils/templates/pool.py | 8 +-- 33 files changed, 220 insertions(+), 180 deletions(-) diff --git a/doc/README.md b/doc/README.md index 400cba0f..fb9635fd 100644 --- a/doc/README.md +++ b/doc/README.md @@ -15,7 +15,7 @@ To build the documentation locally: 1. Install Sphinx and the Read the Docs theme using the Python package manager ``pip``, for example: - python -m pip install -r requirements.txt + python -m pip install --upgrade -r requirements.txt You can alternatively install these from pre-built packages for your operating system. diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index ada4e9fb..46c782f9 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -23,7 +23,7 @@ AsyncConnection Class .. note:: - AsyncConnection objects are only supported in the python-oracledb Thin + AsyncConnection objects are only supported in python-oracledb Thin mode. .. note:: diff --git a/doc/src/api_manual/async_connection_pool.rst b/doc/src/api_manual/async_connection_pool.rst index 60022bf8..064dc14d 100644 --- a/doc/src/api_manual/async_connection_pool.rst +++ b/doc/src/api_manual/async_connection_pool.rst @@ -20,8 +20,8 @@ AsyncConnectionPool Class .. note:: - AsyncConnectionPool objects are only supported in the python-oracledb - Thin mode. + AsyncConnectionPool objects are only supported in python-oracledb Thin + mode. .. _asynconnpoolmeth: diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index a4d92488..7f53ab79 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -20,8 +20,7 @@ AsyncCursor Class .. note:: - AsyncCursor objects are only supported in the python-oracledb Thin - mode. + AsyncCursor objects are only supported in python-oracledb Thin mode. .. _asynccursormeth: diff --git a/doc/src/api_manual/async_lob.rst b/doc/src/api_manual/async_lob.rst index 842dad86..983cf879 100644 --- a/doc/src/api_manual/async_lob.rst +++ b/doc/src/api_manual/async_lob.rst @@ -21,7 +21,7 @@ AsyncLOB Class .. note:: - AsyncLOB objects are only supported in the python-oracledb Thin mode. + AsyncLOB objects are only supported in python-oracledb Thin mode. .. 
_asynclobmeth: diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 720a8740..d7d8f472 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -422,6 +422,8 @@ Connection Attributes .. autoproperty:: Connection.thin + See :ref:`vsessconinfo`. + .. dbapiattributeextension:: .. autoproperty:: Connection.transaction_in_progress diff --git a/doc/src/api_manual/connection_pool.rst b/doc/src/api_manual/connection_pool.rst index 0cde3d79..13e58123 100644 --- a/doc/src/api_manual/connection_pool.rst +++ b/doc/src/api_manual/connection_pool.rst @@ -17,8 +17,8 @@ ConnectionPool Class SessionPool came from the `Oracle Call Interface (OCI) session pool `__. This - implementation is only used in the python-oracledb Thick mode and is not - available in the Thin mode). + implementation is only used in python-oracledb Thick mode and is not + available in Thin mode). .. dbapiobjectextension:: @@ -152,6 +152,8 @@ ConnectionPool Attributes .. autoproperty:: ConnectionPool.thin + See :ref:`vsessconinfo`. + .. autoproperty:: ConnectionPool.timeout .. autoproperty:: ConnectionPool.username diff --git a/doc/src/api_manual/deprecations.rst b/doc/src/api_manual/deprecations.rst index 92bb3dc3..4125c8d5 100644 --- a/doc/src/api_manual/deprecations.rst +++ b/doc/src/api_manual/deprecations.rst @@ -180,7 +180,7 @@ used for new development. * - ``Cursor.executemanyprepared()`` - Use :meth:`Cursor.executemany()` instead. * - Previously deprecated Advanced Queuing (AQ) API - - Use the new :ref:`AQ API ` instead. AQ is only available in the python-oracledb Thick mode. + - Use the new :ref:`AQ API ` instead. AQ is only available in python-oracledb Thick mode. * - ``Connection.deq()`` - Replace with :meth:`Queue.deqone()` or :meth:`Queue.deqmany()` * - ``Connection.deqoptions()`` diff --git a/doc/src/api_manual/module.rst b/doc/src/api_manual/module.rst index 9bfc9278..52f89c9d 100644 --- a/doc/src/api_manual/module.rst +++ b/doc/src/api_manual/module.rst @@ -214,6 +214,8 @@ Oracledb Methods .. autofunction:: is_thin_mode + See :ref:`vsessconinfo`. + .. dbapimethodextension:: .. versionadded:: 1.1.0 diff --git a/doc/src/api_manual/soda.rst b/doc/src/api_manual/soda.rst index 7f70e46e..8c2841db 100644 --- a/doc/src/api_manual/soda.rst +++ b/doc/src/api_manual/soda.rst @@ -14,7 +14,7 @@ JSON strings. See the :ref:`user manual ` for examples. .. note:: - SODA is only supported in the python-oracledb Thick mode. See + SODA is only supported in python-oracledb Thick mode. See :ref:`enablingthick`. .. _sodarequirements: @@ -30,14 +30,14 @@ DBA: SQL> grant soda_app, create table to myuser; -Advanced users who are using Oracle sequences for keys will also need the CREATE -SEQUENCE privilege. +Advanced users who are using Oracle sequences for keys will also need the +CREATE SEQUENCE privilege. SODA requires Oracle Client 18.3 or higher and Oracle Database 18.1 and higher. .. note:: - SODA APIs are only supported in the python-oracledb Thick mode. See + SODA APIs are only supported in python-oracledb Thick mode. See :ref:`enablingthick`. If you are using Oracle Database 21c (or later) and create new collections diff --git a/doc/src/conf.py b/doc/src/conf.py index 9bf9d7ff..ebb643d7 100644 --- a/doc/src/conf.py +++ b/doc/src/conf.py @@ -44,7 +44,7 @@ templates_path = [".templates"] # The suffix of source filenames. -source_suffix = ".rst" +source_suffix = {".rst": "restructuredtext"} # The root toctree document. 
root_doc = master_doc = "index" diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 7e123d8c..c25e7ac1 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -995,12 +995,12 @@ Common Changes #) Error ``DPY-2016: variable array size of %d is too small (should be at least %d)`` is now raised when :meth:`Cursor.executemany()` is called with an integer number of iterations that is too large for the existing bind - variables. Previously, the python-oracledb Thin mode raised ``IndexError`` + variables. Previously, python-oracledb Thin mode raised ``IndexError`` and python-oracledb Thick mode raised ``DPI-1018: array size of %d is too small``. #) Error ``DPY-1001: not connected to database`` is now raised when an attempt is made to perform an operation on a LOB using a closed connection. - Previously, the python-oracledb Thin mode raised an ``AttributeError`` + Previously, python-oracledb Thin mode raised an ``AttributeError`` exception and python-oracledb Thick mode raised ``DPI-1040: LOB was already closed``. #) Fixed bug in :meth:`ConnectParams.get_connect_string()` when a value for @@ -1809,7 +1809,7 @@ oracledb 1.0.0 (May 2022) :ref:`'Thick' mode ` with some additional functionality. Both modes support the Python Database API v2.0 Specification. #) Added a :attr:`Connection.thin` attribute which shows whether the - connection was established in the python-oracledb Thin mode or Thick mode. + connection was established in python-oracledb Thin mode or Thick mode. #) Creating connections or connection pools now requires keyword parameters be passed. This brings python-oracledb into compliance with the Python Database API specification PEP 249. diff --git a/doc/src/user_guide/appendix_b.rst b/doc/src/user_guide/appendix_b.rst index ef9626d4..5c69ae37 100644 --- a/doc/src/user_guide/appendix_b.rst +++ b/doc/src/user_guide/appendix_b.rst @@ -61,7 +61,7 @@ equivalent, must always be used. Oracle Net Services and Client Configuration Files -------------------------------------------------- -In the python-oracledb Thin mode: +In python-oracledb Thin mode: - The location of any ``tnsnames.ora`` files must explicitly be passed to the application. @@ -78,7 +78,7 @@ See :ref:`optnetfiles` and :ref:`optclientfiles` for more information. Token Based Authentication -------------------------- -In the python-oracledb Thin mode: +In python-oracledb Thin mode: - When connecting to Oracle Cloud Database with mutual TLS (mTLS) using OAuth2 tokens, you need to explicitly set the ``config_dir``, ``wallet_location``, @@ -98,10 +98,10 @@ When connecting with mutual TLS (mTLS) also known as two-way TLS, for example to Oracle Autonomous Database in Oracle Cloud using a wallet, the certificate must be in the correct format. -For the python-oracledb Thin mode, the certificate must be in a Privacy -Enhanced Mail (PEM) ``ewallet.pem`` file. In python-oracledb Thick mode the -certificate must be in a ``cwallet.sso`` file. See :ref:`autonomousdb` for -more information. +For python-oracledb Thin mode, the certificate must be in a Privacy Enhanced +Mail (PEM) ``ewallet.pem`` file. In python-oracledb Thick mode the certificate +must be in a ``cwallet.sso`` file. See :ref:`autonomousdb` for more +information. Native Network Encryption and Checksumming ------------------------------------------ @@ -140,15 +140,15 @@ replace ``SessionPool``. A new :func:`oracledb.create_pool()` method is now the recommended way to create a connection pool. 
The use of the equivalent ``SessionPool()`` constructor is :ref:`deprecated `. -The :func:`~oracledb.create_pool()` method in the python-oracledb Thin mode -differs from the python-oracledb Thick mode in the following ways: +The :func:`~oracledb.create_pool()` method in python-oracledb Thin mode differs +from python-oracledb Thick mode in the following ways: * Not all the parameters of the :func:`oracledb.create_pool()` method are applicable to both python-oracledb modes. Each mode ignores unrecognized parameters. The parameters that are ignored in Thin mode include ``events``, ``tag``, ``matchanytag``, ``shardingkey``, ``supershardingkey``, and - ``handle`` parameters. The parameters that are ignored in the Thick mode - include ``wallet_password``, ``disable_oob``, and ``debug_jdwp`` parameters. + ``handle`` parameters. The parameters that are ignored in Thick mode include + ``wallet_password``, ``disable_oob``, and ``debug_jdwp`` parameters. * The python-oracledb Thin mode only supports :ref:`homogeneous ` pools. @@ -166,13 +166,13 @@ differs from the python-oracledb Thick mode in the following ways: is used, then this behavior will not be an issue. With this new default value, any immediate :meth:`ConnectionPool.acquire()` calls will wait for the connections to be created by the daemon thread. This improves application - start up time when compared to the python-oracledb Thick mode, where + start up time when compared to python-oracledb Thick mode, where :func:`oracledb.create_pool()` will not return control to the application until all ``pool.min`` connections have been created. - If the old default value ``POOL_GETMODE_NOWAIT`` is required, then the application - could check if :attr:`ConnectionPool.opened` has reached :attr:`ConnectionPool.min` - and then continue with application start up. + If the old default value ``POOL_GETMODE_NOWAIT`` is required, then the + application could check if :attr:`ConnectionPool.opened` has reached + :attr:`ConnectionPool.min` and then continue with application start up. * In python-oracledb Thick mode, when you close a connection pool with the parameter ``force=True``, the underlying Oracle Client libraries wait for the @@ -185,10 +185,11 @@ differs from the python-oracledb Thick mode in the following ways: up its end of the connections. * In python-oracledb Thin mode, the ``cclass`` parameter value is not used to - tag connections in the application connection pool. It is only used for :ref:`drcp`. + tag connections in the application connection pool. It is only used for + :ref:`drcp`. -* In python-oracledb Thin mode, the connection pool supports all the :ref:`connection - mode privileges `. +* In python-oracledb Thin mode, the connection pool supports all the + :ref:`connection mode privileges `. * In python-oracledb Thick mode, when the ``thick_mode_dsn_passthrough`` value in effect is *True*, the ``pool_name`` parameter can be used to specify a @@ -209,15 +210,15 @@ XMLType database data types has some small differences. See Query Metadata in Thin and Thick Modes ====================================== -In python-oracledb Thin mode, :data:`Cursor.description` metadata can distinguish -the ROWID and UROWID database types. The UROWID database type shows the new value -``DB_TYPE_UROWID`` and the database type ROWID uses the existing value -``DB_TYPE_ROWID``. +In python-oracledb Thin mode, :data:`Cursor.description` metadata can +distinguish the ROWID and UROWID database types. 
The UROWID database type shows +the new value ``DB_TYPE_UROWID`` and the database type ROWID uses the existing +value ``DB_TYPE_ROWID``. -In python-oracledb Thick mode, the value ``DB_TYPE_ROWID`` is shown for both ROWID -and UROWID database types. In python-oracledb Thick and Thin modes, comparison with -the type ``oracledb.ROWID`` (defined in the Python DB API) will match both ROWID and -UROWID database types. +In python-oracledb Thick mode, the value ``DB_TYPE_ROWID`` is shown for both +ROWID and UROWID database types. In python-oracledb Thick and Thin modes, +comparison with the type ``oracledb.ROWID`` (defined in the Python DB API) will +match both ROWID and UROWID database types. .. _implicitresultsdiff: @@ -240,11 +241,11 @@ cursors are independently handled in Thin mode. Statement Caching in Thin and Thick Modes ========================================= -The :ref:`statement cache ` implemented in the python-oracledb Thin -mode is capable of determining when different database data types are used for -the same bind variables when a statement is re-executed. This capability is -not supported in the Oracle Client libraries that are used in python-oracledb -Thick mode. Note changing the type of bind variables for the same SQL text is +The :ref:`statement cache ` implemented in python-oracledb Thin mode +is capable of determining when different database data types are used for the +same bind variables when a statement is re-executed. This capability is not +supported in the Oracle Client libraries that are used in python-oracledb Thick +mode. Note changing the type of bind variables for the same SQL text is inappropriate and gives indeterminate results in both modes. Duplicate SQL Bind Variable Placeholders in Thin and Thick Modes @@ -260,22 +261,22 @@ This does not apply to PL/SQL blocks. Error Handling in Thin and Thick Modes ====================================== -The python-oracledb Thin and Thick modes handle some errors differently. See +Python-oracledb Thin and Thick modes handle some errors differently. See :ref:`errorhandling`. Globalization in Thin and Thick Modes ===================================== All NLS environment variables, and the ``ORA_TZFILE`` environment variable, are -ignored by the python-oracledb Thin mode. Use Python's capabilities instead. +ignored by python-oracledb Thin mode. Use Python's capabilities instead. -The python-oracledb Thin mode can only use NCHAR, NVARCHAR2, and NCLOB data -when Oracle Database's secondary character set is AL16UTF16. +Python-oracledb Thin mode can only use NCHAR, NVARCHAR2, and NCLOB data when +Oracle Database's secondary character set is AL16UTF16. See :ref:`globalization`. Tracing in Thin and Thick Modes =============================== -In the python-oracledb Thin mode, low level tracing is different because there -are no Oracle Client libraries. See :ref:`tracingsql`. +In python-oracledb Thin mode, low level tracing is different because there are +no Oracle Client libraries. See :ref:`tracingsql`. diff --git a/doc/src/user_guide/appendix_c.rst b/doc/src/user_guide/appendix_c.rst index d892797a..576621a6 100644 --- a/doc/src/user_guide/appendix_c.rst +++ b/doc/src/user_guide/appendix_c.rst @@ -99,7 +99,7 @@ following steps: 1. Install the new python-oracledb module:: - python -m pip install oracledb + python -m pip install oracledb --upgrade See :ref:`installation` for more details. @@ -248,13 +248,13 @@ following steps: used by cx_Oracle and python-oracledb in Thick mode are mostly returned unchanged from cx_Oracle 8.3. 
Some exceptions shown below. - Note that the python-oracledb driver error messages can also vary between Thin - and Thick modes. See :ref:`errorhandling`. + Note that the python-oracledb driver error messages can also vary between + Thin and Thick modes. See :ref:`errorhandling`. **ConnectionPool.acquire() Message Differences** - :meth:`ConnectionPool.acquire()` ORA errors will be mapped to DPY errors. For - example:: + :meth:`ConnectionPool.acquire()` ORA errors will be mapped to DPY errors. + For example:: DPY-4005: timed out waiting for the connection pool to return a connection @@ -385,9 +385,9 @@ need to be made in addition to the common :ref:`commonupgrade`: The :func:`oracledb.clientversion()` function shows the version of the Oracle Client libraries being used. Since Oracle Client libraries are not - used in the python-oracledb Thin mode, this function cannot be called. If it - is called before calling :func:`oracledb.init_oracle_client()`, an exception - is thrown. + used in python-oracledb Thin mode, this function cannot be called. If it is + called before calling :func:`oracledb.init_oracle_client()`, an exception is + thrown. 5. To connect using a :ref:`TNS Alias ` from a ``tnsnames.ora`` file (see :ref:`optnetfiles`) in python-oracledb Thin mode, you should diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index cb5e8537..bc1b30df 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -547,9 +547,9 @@ The valid keys for the "pyo" object are shown in :ref:`pyoparams`. JDBC and Oracle SQL Developer Connection Strings ------------------------------------------------ -The python-oracledb connection string syntax is different from Java JDBC and the -common Oracle SQL Developer syntax. If these JDBC connection strings reference -a service name like:: +The python-oracledb connection string syntax is different from Java JDBC and +the common Oracle SQL Developer syntax. If these JDBC connection strings +reference a service name like:: jdbc:oracle:thin:@hostname:port/service_name @@ -2711,7 +2711,7 @@ session states. In order to retrieve a connection with a desired state, the .. note:: - Connection tagging is only supported in the python-oracledb Thick mode. See + Connection tagging is only supported in python-oracledb Thick mode. See :ref:`enablingthick` . When python-oracledb is using Oracle Client libraries 12.2 or later, then @@ -2757,7 +2757,7 @@ PL/SQL Callback .. note:: - PL/SQL Callbacks are only supported in the python-oracledb Thick mode. See + PL/SQL Callbacks are only supported in python-oracledb Thick mode. See :ref:`enablingthick`. When python-oracledb uses Oracle Client 12.2 or later, the session callback can @@ -3087,9 +3087,10 @@ better DRCP usage tracking in the database. In the database monitoring views, the class name shown will be the value specified in the application prefixed with the user name. -If ``cclass`` was not specified during pool creation, then the python-oracledb -Thin mode generates a unique connection class with the prefix "DPY" while the -Thick mode generates a unique connection class with the prefix "OCI". +If ``cclass`` was not specified during pool creation, then python-oracledb Thin +mode generates a unique connection class with the prefix "DPY" while +python-oracledb Thick mode generates a unique connection class with the prefix +"OCI". 
To create a connection pool requesting DRCP pooled servers be used, and specifying a class name, you can call: @@ -3639,7 +3640,7 @@ service. .. note:: Connecting to Oracle Database using external authentication is only - supported in the python-oracledb Thick mode. See :ref:`enablingthick`. + supported in python-oracledb Thick mode. See :ref:`enablingthick`. .. _extauthwithwallet: @@ -4089,8 +4090,8 @@ introduced in python-oracledb 1.1 instead. See .. note:: - OAuth 2.0 Token-Based Authentication Connection Strings is only supported in - the python-oracledb Thick mode. See :ref:`enablingthick`. + OAuth 2.0 Token-Based Authentication Connection Strings is only supported + in python-oracledb Thick mode. See :ref:`enablingthick`. There are different ways to retrieve Entra ID OAuth2 tokens. Some of the ways to retrieve OAuth2 tokens are detailed in `Examples of Retrieving Entra ID OAuth2 @@ -4559,7 +4560,7 @@ parameters introduced in python-oracledb 1.1 instead. See .. note:: OCI IAM Token-Based Authentication Connection Strings is only supported in - the python-oracledb Thick mode. See :ref:`enablingthick`. + python-oracledb Thick mode. See :ref:`enablingthick`. The Oracle Cloud Infrastructure command line interface (OCI-CLI) can be used externally to get tokens and private keys from OCI IAM, for example with the @@ -5140,9 +5141,9 @@ One-way TLS Connection to Oracle Autonomous Database With one-way TLS, python-oracledb applications can connect to Oracle ADB without using a wallet. Both Thin and Thick modes of the python-oracledb -driver support one-way TLS. Applications that use the python-oracledb Thick -mode, can connect to the Oracle ADB through one-way TLS only when using Oracle -Client library versions 19.14 (or later) or 21.5 (or later). +driver support one-way TLS. Applications that use python-oracledb Thick mode +can connect to the Oracle ADB through one-way TLS only when using Oracle Client +library versions 19.14 (or later) or 21.5 (or later). To enable one-way TLS for an ADB instance, complete the following steps in an Oracle Cloud console in the **Autonomous Database Information** section of the @@ -5237,9 +5238,9 @@ parameter to the desired network alias from the :ref:`tnsnames.ora containing :ref:`tnsnames.ora `. The ``wallet_location`` parameter is the directory containing the PEM file. In this example the files are in the same directory. The ``wallet_password`` parameter should be set to -the password created in the cloud console when downloading the wallet. For -example, to connect as the ADMIN user using the ``mydb_low`` network service -name: +the password created in the cloud console when downloading the wallet. It is +not the database password. For example, to connect as the ADMIN user using the +``mydb_low`` network service name: .. code-block:: python diff --git a/doc/src/user_guide/cqn.rst b/doc/src/user_guide/cqn.rst index 4dca8da6..78530b4b 100644 --- a/doc/src/user_guide/cqn.rst +++ b/doc/src/user_guide/cqn.rst @@ -18,8 +18,8 @@ table changes, the cached values must then be updated with the new information. .. note:: - Continuous Query Notification (CQN) is only supported in the - python-oracledb Thick mode. See :ref:`enablingthick`. + Continuous Query Notification (CQN) is only supported in python-oracledb + Thick mode. See :ref:`enablingthick`. CQN notification behavior is widely configurable. 
Choices include specifying what types of SQL should trigger a notification, whether notifications should @@ -63,11 +63,12 @@ later, subscriptions can set the optional ``client_initiated`` parameter to True, see ``Connection.subscribe()`` below. The default CQN connection mode typically means that the machine running -python-oracledb needs a fixed IP address. Note :meth:`Connection.subscribe()` does -not verify that this reverse connection is possible. If there is any problem -sending a notification, then the callback method will not be invoked. -Configuration options can include an IP address and port on which python-oracledb will -listen for notifications; otherwise, the database chooses values. +python-oracledb needs a fixed IP address. Note :meth:`Connection.subscribe()` +does not verify that this reverse connection is possible. If there is any +problem sending a notification, then the callback method will not be invoked. +Configuration options can include an IP address and port on which +python-oracledb will listen for notifications; otherwise, the database chooses +values. Creating a Subscription @@ -91,8 +92,8 @@ See :ref:`subscr-qos` for the quality of service values that are supported. See :ref:`subscr-namespaces` and :ref:`subscr-protocols` for the namespaces and protocols that are supported. -See :ref:`Subscription Objects ` for more details on the subscription object that is -created. +See :ref:`Subscription Objects ` for more details on the +subscription object that is created. When using Oracle Database and Oracle client libraries 19.4, or later, the optional subscription parameter ``client_initiated`` can be set: @@ -102,10 +103,10 @@ optional subscription parameter ``client_initiated`` can be set: connection.subscribe(callback=my_callback, client_initiated=True) This enables CQN "client initiated" connections which internally use the same -approach as normal python-oracledb connections to the database, and do not require the -database to be able to connect back to the application. Since client initiated -connections do not need special network configuration they have ease-of-use and -security advantages. +approach as normal python-oracledb connections to the database, and do not +require the database to be able to connect back to the application. Since +client initiated connections do not need special network configuration they +have ease-of-use and security advantages. Registering Queries diff --git a/doc/src/user_guide/exception_handling.rst b/doc/src/user_guide/exception_handling.rst index 13138c61..4c715732 100644 --- a/doc/src/user_guide/exception_handling.rst +++ b/doc/src/user_guide/exception_handling.rst @@ -10,7 +10,7 @@ All exceptions raised by python-oracledb are inherited from :attr:`oracledb.Error`. See :ref:`exceptions` and :ref:`exchandling` for information about attributes. -See :ref:`errorhandling` for differences between the python-oracledb Thin and +See :ref:`errorhandling` for differences between python-oracledb Thin and :ref:`Thick ` modes. Applications can catch exceptions as needed. For example, when trying to add a @@ -56,8 +56,7 @@ Error Handling in Thin and Thick Modes The Thin and Thick modes of python-oracledb return some errors differently. -The python-oracledb Thin mode code generates error messages with the prefix -"DPY". +The python-oracledb Thin mode generates error messages with the prefix "DPY". 
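For example, the prefix of a raised error can be inspected through the error
object's ``full_code`` attribute, as in this sketch (the connect string is a
deliberately unreachable placeholder):

.. code-block:: python

    import oracledb

    try:
        oracledb.connect(user="hr", password="example_password",
                         dsn="nonexistent.example.com/orclpdb")
    except oracledb.Error as e:
        (error_obj,) = e.args
        # in Thin mode a connection failure like this is reported with a
        # "DPY"-prefixed code
        print(error_obj.full_code)
        print(error_obj.message)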
In python-oracledb :ref:`Thick ` mode: @@ -67,7 +66,7 @@ In python-oracledb :ref:`Thick ` mode: - The `ODPI-C `__ code layer generates error messages with the prefix "DPI". -- The python-oracledb Thick mode code generates error messages with the prefix +- The python-oracledb Thick mode generates error messages with the prefix "DPY". Errors generated by the Oracle Database itself commonly have the error prefix @@ -76,8 +75,8 @@ Errors generated by the Oracle Database itself commonly have the error prefix Some differences between python-oracledb Thin and Thick mode errors are shown in the examples below: -* Binding: When binding is incorrect, the python-oracledb Thick mode may - generate an Oracle Client library error such as:: +* Binding: When binding is incorrect, python-oracledb Thick mode may generate + an Oracle Client library error such as:: ORA-01008: not all variables bound @@ -91,7 +90,7 @@ in the examples below: Other messages are returned directly from Python and may vary accordingly. The traditional Oracle connection errors with prefix "ORA" are not shown. For example, the scenarios detailed below show how the connection and network - error messages might differ between the python-oracledb Thin and Thick modes. + error messages might differ between python-oracledb Thin and Thick modes. * Scenario 1: The given host does not have a database listener running. diff --git a/doc/src/user_guide/globalization.rst b/doc/src/user_guide/globalization.rst index 80709052..e0922476 100644 --- a/doc/src/user_guide/globalization.rst +++ b/doc/src/user_guide/globalization.rst @@ -13,14 +13,14 @@ Database Character Set ---------------------- Data fetched from and sent to Oracle Database will be mapped between the -`database character set -`__ -and the "Oracle client" character set of the Oracle Client libraries used by -python-oracledb. If data cannot be correctly mapped between client and server -character sets, then it may be corrupted or queries may fail with :ref:`"codec -can't decode byte" `. +`database character set `__ and the "Oracle client" +character set of the Oracle Client libraries used by python-oracledb. If data +cannot be correctly mapped between client and server character sets, then it +may be corrupted or queries may fail with +:ref:`"codec can't decode byte" `. -All database character sets are supported by the python-oracledb. +All database character sets are supported by python-oracledb. .. _findingcharset: @@ -35,12 +35,12 @@ To find the database character set, execute the query: Database National Character Set ------------------------------- -For the secondary `national character set -`__ -used for NCHAR, NVARCHAR2, and NCLOB data types: +For the secondary `national character set `__ used for +NCHAR, NVARCHAR2, and NCLOB data types: -- AL16UTF16 is supported by both the python-oracledb Thin and Thick modes -- UTF8 is not supported by the python-oracledb Thin mode +- AL16UTF16 is supported by both python-oracledb Thin and Thick modes +- UTF8 is not supported by python-oracledb Thin mode To find the database's national character set, execute the query: @@ -194,8 +194,8 @@ names. The territory ("JAPAN") specifies conventions such as the default date, monetary, and numeric formats. If the language is not specified, then the value defaults to AMERICAN. If the territory is not specified, then the value is derived from the language value. See `Choosing a Locale with the NLS_LANG -Environment Variable -`__ +Environment Variable `__. 
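As a sketch of the pattern described in the next paragraph, ``NLS_LANG`` can
be set from Python before any connection or pool is created (the values shown
are examples only, and the character set portion is ignored by
python-oracledb):

.. code-block:: python

    import os

    # must be set before python-oracledb creates any connection or pool;
    # only used by Thick mode, which is enabled by init_oracle_client()
    os.environ["NLS_LANG"] = "JAPANESE_JAPAN.AL32UTF8"

    import oracledb

    oracledb.init_oracle_client()

    connection = oracledb.connect(user="hr", password="example_password",
                                  dsn="dbhost.example.com/orclpdb")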
If the ``NLS_LANG`` environment variable is set in the application with ``os.environ['NLS_LANG']``, it must be set before any connection pool is @@ -206,9 +206,9 @@ Any client character set value in the ``NLS_LANG`` variable, for example Client Character Set`_. Other Oracle globalization variables, such as ``NLS_DATE_FORMAT`` can also be -set to change the behavior of python-oracledb Thick, see `Setting NLS Parameters -`__. +set to change the behavior of python-oracledb Thick, see `Setting NLS +Parameters `__. For more information, see the `Database Globalization Support Guide `__. @@ -220,7 +220,7 @@ Thin Mode Locale-aware Number and Date Conversions .. note:: - All NLS environment variables are ignored by the python-oracledb Thin mode. + All NLS environment variables are ignored by python-oracledb Thin mode. Also the ``ORA_TZFILE`` variable is ignored. .. note:: @@ -230,10 +230,10 @@ Thin Mode Locale-aware Number and Date Conversions mode``. Data stored with a numeric offset such as ``+00:00`` can be fetched. -In the python-oracledb Thin mode, output type handlers need to be used to -perform date and number localizations. The examples below show a simple -conversion and also how the Python locale module can be used. Type handlers -like those below can also be used in python-oracledb Thick mode. +In python-oracledb Thin mode, output type handlers need to be used to perform +date and number localizations. The examples below show a simple conversion and +also how the Python locale module can be used. Type handlers like those below +can also be used in python-oracledb Thick mode. To convert numbers: diff --git a/doc/src/user_guide/ha.rst b/doc/src/user_guide/ha.rst index 7777aea3..47b40621 100644 --- a/doc/src/user_guide/ha.rst +++ b/doc/src/user_guide/ha.rst @@ -97,7 +97,7 @@ must connect to a FAN-enabled database service. The application should have .. note:: - FAN is only supported in the python-oracledb Thick mode. See + FAN is only supported in python-oracledb Thick mode. See :ref:`enablingthick`. FAN support is useful for planned and unplanned outages. It provides immediate @@ -141,8 +141,8 @@ applications. .. note:: - Oracle AC and TAC functionality is only supported in the python-oracledb - Thick mode. See :ref:`enablingthick`. + Oracle AC and TAC functionality is only supported in python-oracledb Thick + mode. See :ref:`enablingthick`. When AC or TAC are configured on the database service, they are transparently available to python-oracledb applications. diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 72ebf20b..4c79af70 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -6,7 +6,7 @@ Installing python-oracledb *************************** -The python-oracledb driver allows Python 3 applications to connect to Oracle +The python-oracledb driver allows Python applications to connect to Oracle Database. The python-oracledb driver is the renamed, major version successor to cx_Oracle diff --git a/doc/src/user_guide/introduction.rst b/doc/src/user_guide/introduction.rst index 8c9b1a6c..098264d1 100644 --- a/doc/src/user_guide/introduction.rst +++ b/doc/src/user_guide/introduction.rst @@ -64,11 +64,12 @@ or later. This Thin mode does not need Oracle Client libraries. The figure shows the architecture of python-oracledb. Users interact with a Python application, for example by making web requests. The application program makes calls to python-oracledb functions. 
The connection from python-oracledb -Thin mode to the Oracle Database is established directly. The database can be -on the same machine as Python, or it can be remote. +Thin mode to Oracle Database is established directly by python-oracledb over +the Oracle Net protocol. The database can be on the same machine as Python, or +it can be remote. -The Oracle Net behavior can optionally be configured by using a -``tnsnames.ora`` file and with application settings. See :ref:`optnetfiles`. +The behavior of Oracle Net can optionally be configured with application +settings, or by using a ``tnsnames.ora`` file, see :ref:`optnetfiles`. python-oracledb Thick Mode Architecture --------------------------------------- @@ -89,10 +90,10 @@ later. The figure shows the architecture of the python-oracledb Thick mode. Users interact with a Python application, for example by making web requests. The application program makes calls to python-oracledb functions. Internally, -python-oracledb dynamically loads Oracle Client libraries. Connections from -python-oracledb Thick mode to Oracle Database are established using the Oracle -Client libraries. The database can be on the same machine as Python, or it can -be remote. +python-oracledb dynamically loads Oracle Client libraries. Connections from +python-oracledb Thick mode to Oracle Database are established by the Oracle +Client libraries over the Oracle Net protocol. The database can be on the same +machine as Python, or it can be remote. To use python-oracledb Thick mode, the Oracle Client libraries must be installed separately, see :ref:`installation`. The libraries can be from an @@ -108,7 +109,7 @@ Some behaviors of the Oracle Client libraries can optionally be configured with an ``oraaccess.xml`` file, for example to enable auto-tuning of a statement cache. See :ref:`optclientfiles`. -The Oracle Net behavior can optionally be configured with files such as +The behavior of Oracle Net can optionally be configured with files such as ``tnsnames.ora`` and ``sqlnet.ora``, for example to enable :ref:`network encryption `. See :ref:`optnetfiles`. diff --git a/doc/src/user_guide/soda.rst b/doc/src/user_guide/soda.rst index 4261542c..971c43c0 100644 --- a/doc/src/user_guide/soda.rst +++ b/doc/src/user_guide/soda.rst @@ -17,7 +17,7 @@ existing :ref:`Oracle Database 23ai JSON-Relational Duality Views .. note:: - SODA is only supported in the python-oracledb Thick mode. See + SODA is only supported in python-oracledb Thick mode. See :ref:`enablingthick`. SODA uses a SQL schema to store documents, but you do not need to know SQL or diff --git a/doc/src/user_guide/startup.rst b/doc/src/user_guide/startup.rst index b0fa97d5..634cf075 100644 --- a/doc/src/user_guide/startup.rst +++ b/doc/src/user_guide/startup.rst @@ -11,7 +11,7 @@ python-oracledb. .. note:: - Database start up and shut down functionality is only supported in the + Database start up and shut down functionality is only supported in python-oracledb Thick mode. See :ref:`enablingthick`. =========================== diff --git a/doc/src/user_guide/tracing.rst b/doc/src/user_guide/tracing.rst index 991847fd..81a77e1b 100644 --- a/doc/src/user_guide/tracing.rst +++ b/doc/src/user_guide/tracing.rst @@ -563,23 +563,41 @@ Finding the python-oracledb Mode ================================ The boolean attributes :attr:`Connection.thin` and :attr:`ConnectionPool.thin` -can be used to show the current mode of a python-oracledb connection or pool, -respectively. 
The method :meth:`oracledb.is_thin_mode()` can also be used, but -review its usage notes about when its return value may change. +can be used to find whether python-oracledb is in Thin or Thick mode. -For example, to show the mode used by a connection: +For example, to show the current python-oracledb mode: .. code-block:: python print(connection.thin) -The python-oracledb version can be shown with :data:`oracledb.__version__`: +The method :meth:`oracledb.is_thin_mode()` can also be used to find the +mode. Immediately after python-oracledb is imported, +:meth:`oracledb.is_thin_mode()` will return *True* indicating that +python-oracledb defaults to Thin mode. However if a call to +:meth:`oracledb.init_oracle_client()` is made and it returns successfully, then +:meth:`oracledb.is_thin_mode()` will return *False*, indicating that Thick mode +is enabled. Once the first standalone connection or connection pool is +created, or a successful call to :meth:`~oracledb.init_oracle_client()` is +made, or :meth:`oracledb.enable_thin_mode()` is called, then python-oracledb’s +mode is fixed and the value returned by :meth:`oracledb.is_thin_mode()` will +never change for the lifetime of the process. + +For example: .. code-block:: python - print(oracledb.__version__) + print(oracledb.is_thin_mode()) + oracledb.init_oracle_client() + print(oracledb.is_thin_mode()) + +If the call to :meth:`~oracledb.init_oracle_client()`, succeeds, the code above +prints:: -Version and mode information can also be seen in the Oracle Database data + True + False + +Mode and version information can also be seen in the Oracle Database data dictionary table `V$SESSION_CONNECT_INFO `__: @@ -595,16 +613,23 @@ id=GUID-9F0DCAEA-A67E-4183-89E7-B1555DC591CE>`__: In python-oracledb Thin mode, the output will be like:: - python-oracledb thn : 3.2.0 + python-oracledb thn : 3.4.0 In python-oracledb Thick mode, the output will be like:: - python-oracledb thk : 3.2.0 + python-oracledb thk : 3.4.0 Note that you may not see these values if you have set :attr:`oracledb.defaults.driver_name ` or the ``driver_name`` parameter in :meth:`oracledb.init_oracle_client()`. +The python-oracledb version can also be shown with +:data:`oracledb.__version__`: + +.. code-block:: python + + print(oracledb.__version__) + Low Level Python-oracledb Driver Tracing ======================================== diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index da0c7dd1..1bc2d479 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -265,8 +265,8 @@ Application-wide defaults can be set using oracledb.defaults.prefetchrows = 1000 oracledb.defaults.arraysize = 1000 -When using python-oracledb in the Thick mode, prefetching can also be tuned in -an external :ref:`oraaccess.xml ` file, which may be useful for +When using python-oracledb in Thick mode, prefetching can also be tuned in an +external :ref:`oraaccess.xml ` file, which may be useful for tuning an application when modifying its code is not feasible. Setting the sizes with ``oracledb.defaults`` attributes or with @@ -649,8 +649,8 @@ mostly static, lookup tables, such as for postal codes. CRC reduces network .. note:: - Client Result Caching is only supported in the python-oracledb Thick mode. - See :ref:`enablingthick`. + Client Result Caching is only supported in python-oracledb Thick mode. See + :ref:`enablingthick`. The cache is at the application process level. Access and invalidation is managed by the Oracle Client libraries. 
This removes the need for extra diff --git a/src/oracledb/connect_params.py b/src/oracledb/connect_params.py index dca162b4..28a19e67 100644 --- a/src/oracledb/connect_params.py +++ b/src/oracledb/connect_params.py @@ -131,7 +131,9 @@ def __init__( (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. This value is only used in python-oracledb Thin mode + is encrypted. This is not the database password. For Oracle + Autonomous Database this is the password created when downloading the + wallet. This value is only used in python-oracledb Thin mode. (default: None) - ``access_token``: a string, or a 2-tuple, or a callable. If it is a @@ -1033,7 +1035,9 @@ def set( the database - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. This value is only used in python-oracledb Thin mode + is encrypted. This is not the database password. For Oracle + Autonomous Database this is the password created when downloading the + wallet. This value is only used in python-oracledb Thin mode. - ``access_token``: a string, or a 2-tuple, or a callable. If it is a string, it specifies an Entra ID OAuth2 token used for Open diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 6fe43814..99c15c51 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -741,9 +741,8 @@ def tag(self, value: str) -> None: @property def thin(self) -> bool: """ - This read-only attribute returns a boolean indicating if the connection - was established with the python-oracledb Thin mode (*True*) or - python-oracledb Thick mode (*False*). + This read-only attribute returns a boolean indicating if + python-oracledb is in Thin mode (*True*) or Thick mode (*False*). """ self._verify_connected() return self._impl.thin @@ -1770,7 +1769,9 @@ def connect( (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode + encrypted. This is not the database password. For Oracle Autonomous + Database this is the password created when downloading the wallet. This + value is only used in python-oracledb Thin mode. (default: None) - ``access_token``: a string, or a 2-tuple, or a callable. If it is a @@ -2984,7 +2985,9 @@ def connect_async( (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode + encrypted. This is not the database password. For Oracle Autonomous + Database this is the password created when downloading the wallet. This + value is only used in python-oracledb Thin mode. (default: None) - ``access_token``: a string, or a 2-tuple, or a callable. If it is a diff --git a/src/oracledb/pool.py b/src/oracledb/pool.py index e16d4f95..43088d03 100644 --- a/src/oracledb/pool.py +++ b/src/oracledb/pool.py @@ -298,12 +298,8 @@ def stmtcachesize(self, value: int) -> None: @property def thin(self) -> bool: """ - This read-only attribute returns a boolean which indicates the - python-oracledb mode in which the pool was created. If the value of - this attribute is *True*, it indicates that the pool was created in the - python-oracledb Thin mode. If the value of this attribute is *False*, - it indicates that the pool was created in the python-oracledb Thick - mode. + This read-only attribute returns a boolean indicating if + python-oracledb is in Thin mode (*True*) or Thick mode (*False*). 
""" self._verify_open() return not isinstance(self._impl, thick_impl.ThickPoolImpl) @@ -820,7 +816,9 @@ def create_pool( (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode + encrypted. This is not the database password. For Oracle Autonomous + Database this is the password created when downloading the wallet. This + value is only used in python-oracledb Thin mode. (default: None) - ``access_token``: a string, or a 2-tuple, or a callable. If it is a @@ -1410,7 +1408,9 @@ def create_pool_async( (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it is - encrypted. This value is only used in python-oracledb Thin mode + encrypted. This is not the database password. For Oracle Autonomous + Database this is the password created when downloading the wallet. This + value is only used in python-oracledb Thin mode. (default: None) - ``access_token``: a string, or a 2-tuple, or a callable. If it is a diff --git a/src/oracledb/pool_params.py b/src/oracledb/pool_params.py index a9f7a680..f35cefa9 100644 --- a/src/oracledb/pool_params.py +++ b/src/oracledb/pool_params.py @@ -212,7 +212,9 @@ def __init__( (default: None) - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. This value is only used in python-oracledb Thin mode + is encrypted. This is not the database password. For Oracle + Autonomous Database this is the password created when downloading the + wallet. This value is only used in python-oracledb Thin mode. (default: None) - ``access_token``: a string, or a 2-tuple, or a callable. If it is a @@ -811,7 +813,9 @@ def set( the database - ``wallet_password``: the password to use to decrypt the wallet, if it - is encrypted. This value is only used in python-oracledb Thin mode + is encrypted. This is not the database password. For Oracle + Autonomous Database this is the password created when downloading the + wallet. This value is only used in python-oracledb Thin mode. - ``access_token``: a string, or a 2-tuple, or a callable. If it is a string, it specifies an Entra ID OAuth2 token used for Open diff --git a/tests/README.md b/tests/README.md index be963101..68a38677 100644 --- a/tests/README.md +++ b/tests/README.md @@ -11,7 +11,7 @@ This directory contains the test suite for python-oracledb. 2. Install tox: - python -m pip install tox + python -m pip install tox --upgrade 3. Run the test suite by issuing the following command in the top-level directory of your oracledb installation: diff --git a/utils/fields.cfg b/utils/fields.cfg index 18d5abd9..281735eb 100644 --- a/utils/fields.cfg +++ b/utils/fields.cfg @@ -177,8 +177,10 @@ description = type = str hidden = True description = - the password to use to decrypt the wallet, if it is encrypted. This value - is only used in python-oracledb Thin mode + the password to use to decrypt the wallet, if it is encrypted. This is not + the database password. For Oracle Autonomous Database this is the password + created when downloading the wallet. This value is only used in + python-oracledb Thin mode. [access_token] type = Union[str, tuple, Callable] @@ -323,7 +325,6 @@ description = type = str source = description description = - the directory where the wallet can be found. In python-oracledb Thin mode this must be the directory containing the PEM-encoded wallet file ewallet.pem. 
In python-oracledb Thick mode this must be the directory diff --git a/utils/templates/connection.py b/utils/templates/connection.py index 83f83044..887ba0a4 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -739,9 +739,8 @@ def tag(self, value: str) -> None: @property def thin(self) -> bool: """ - This read-only attribute returns a boolean indicating if the connection - was established with the python-oracledb Thin mode (*True*) or - python-oracledb Thick mode (*False*). + This read-only attribute returns a boolean indicating if + python-oracledb is in Thin mode (*True*) or Thick mode (*False*). """ self._verify_connected() return self._impl.thin diff --git a/utils/templates/pool.py b/utils/templates/pool.py index 6e89751f..9c3cc8f3 100644 --- a/utils/templates/pool.py +++ b/utils/templates/pool.py @@ -296,12 +296,8 @@ def stmtcachesize(self, value: int) -> None: @property def thin(self) -> bool: """ - This read-only attribute returns a boolean which indicates the - python-oracledb mode in which the pool was created. If the value of - this attribute is *True*, it indicates that the pool was created in the - python-oracledb Thin mode. If the value of this attribute is *False*, - it indicates that the pool was created in the python-oracledb Thick - mode. + This read-only attribute returns a boolean indicating if + python-oracledb is in Thin mode (*True*) or Thick mode (*False*). """ self._verify_open() return not isinstance(self._impl, thick_impl.ThickPoolImpl) From 98a9db4d2c5dec9b746d26de97b1b52793358aff Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:24:29 -0600 Subject: [PATCH 190/239] Refactored encoding of Oracle data types; fixed bug when binding values of type datetime.date to timestamp variables. --- doc/src/release_notes.rst | 5 + src/oracledb/base_impl.pxd | 15 +- src/oracledb/impl/base/buffer.pyx | 310 ++++----------------------- src/oracledb/impl/base/constants.pxi | 15 ++ src/oracledb/impl/base/encoders.pyx | 298 +++++++++++++++++++++++++ src/oracledb/impl/base/oson.pyx | 13 +- src/oracledb/impl/base/types.pyx | 24 +-- src/oracledb/impl/base/vector.pyx | 7 +- src/oracledb/impl/thin/dbobject.pyx | 3 +- tests/test_2600_timestamp_var.py | 13 ++ tests/test_4800_timestamp_ltz_var.py | 13 ++ tests/test_4900_timestamp_tz_var.py | 15 +- 12 files changed, 431 insertions(+), 300 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index c25e7ac1..ad659619 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -19,6 +19,11 @@ oracledb `3.4.0 &value - all_bits = ptr[0] - b7 = all_bits & 0xff - b6 = (all_bits >> 8) & 0xff - b5 = (all_bits >> 16) & 0xff - b4 = (all_bits >> 24) & 0xff - b3 = (all_bits >> 32) & 0xff - b2 = (all_bits >> 40) & 0xff - b1 = (all_bits >> 48) & 0xff - b0 = (all_bits >> 56) & 0xff - if b0 & 0x80 == 0: - b0 = b0 | 0x80 - else: - b0 = ~b0 - b1 = ~b1 - b2 = ~b2 - b3 = ~b3 - b4 = ~b4 - b5 = ~b5 - b6 = ~b6 - b7 = ~b7 - buf[0] = b0 - buf[1] = b1 - buf[2] = b2 - buf[3] = b3 - buf[4] = b4 - buf[5] = b5 - buf[6] = b6 - buf[7] = b7 - if write_length: - self.write_uint8(8) - self.write_raw(buf, 8) + cdef char_type buf[ORA_TYPE_SIZE_BINARY_DOUBLE] + encode_binary_double(buf, value) + self._write_raw_bytes_and_length(buf, sizeof(buf)) - cdef int write_binary_float(self, float value, - bint write_length=True) except -1: + cdef int write_binary_float(self, float value) except -1: """ Writes a float value to the buffer in Oracle canonical floating point format. 
""" - cdef: - uint8_t b0, b1, b2, b3 - uint32_t all_bits - char_type buf[4] - uint32_t *ptr - ptr = &value - all_bits = ptr[0] - b3 = all_bits & 0xff - b2 = (all_bits >> 8) & 0xff - b1 = (all_bits >> 16) & 0xff - b0 = (all_bits >> 24) & 0xff - if b0 & 0x80 == 0: - b0 = b0 | 0x80 - else: - b0 = ~b0 - b1 = ~b1 - b2 = ~b2 - b3 = ~b3 - buf[0] = b0 - buf[1] = b1 - buf[2] = b2 - buf[3] = b3 - if write_length: - self.write_uint8(4) - self.write_raw(buf, 4) + cdef char_type buf[ORA_TYPE_SIZE_BINARY_FLOAT] + encode_binary_float(buf, value) + self._write_raw_bytes_and_length(buf, sizeof(buf)) cdef int write_bool(self, bint value) except -1: """ Writes a boolean value to the buffer. """ - if value: - self.write_uint8(2) - self.write_uint16be(0x0101) - else: - self.write_uint16be(0x0100) + cdef: + char_type buf[ORA_TYPE_SIZE_BOOLEAN] + ssize_t buflen + encode_boolean(buf, &buflen, value) + self._write_raw_bytes_and_length(buf, buflen) cdef int write_bytes(self, bytes value) except -1: """ @@ -579,75 +521,40 @@ cdef class Buffer: cpython.PyBytes_AsStringAndSize(value, &ptr, &value_len) self._write_raw_bytes_and_length(ptr, value_len) - cdef int write_interval_ds(self, object value, - bint write_length=True) except -1: + cdef int write_interval_ds(self, object value) except -1: """ Writes an interval to the buffer in Oracle Interval Day To Second format. """ - cdef: - int32_t days, seconds, fseconds - char_type buf[11] - days = cydatetime.timedelta_days(value) - encode_uint32be(buf, days + TNS_DURATION_MID) - seconds = cydatetime.timedelta_seconds(value) - buf[4] = (seconds // 3600) + TNS_DURATION_OFFSET - seconds = seconds % 3600 - buf[5] = (seconds // 60) + TNS_DURATION_OFFSET - buf[6] = (seconds % 60) + TNS_DURATION_OFFSET - fseconds = cydatetime.timedelta_microseconds(value) * 1000 - encode_uint32be(&buf[7], fseconds + TNS_DURATION_MID) - if write_length: - self.write_uint8(sizeof(buf)) - self.write_raw(buf, sizeof(buf)) - - cdef int write_interval_ym(self, object value, - bint write_length=True) except -1: + cdef char_type buf[ORA_TYPE_SIZE_INTERVAL_DS] + encode_interval_ds(buf, value) + self._write_raw_bytes_and_length(buf, sizeof(buf)) + + cdef int write_interval_ym(self, object value) except -1: """ - Writes an interval to the buffer in Oracle Interval Day To Second + Writes an interval to the buffer in Oracle Interval Year To Month format. """ - cdef: - int32_t years, months - char_type buf[5] - years = ( value)[0] - months = ( value)[1] - encode_uint32be(buf, years + TNS_DURATION_MID) - buf[4] = months + TNS_DURATION_OFFSET - if write_length: - self.write_uint8(sizeof(buf)) - self.write_raw(buf, sizeof(buf)) - - cdef int write_oracle_date(self, object value, uint8_t length, - bint write_length=True) except -1: + cdef char_type buf[ORA_TYPE_SIZE_INTERVAL_YM] + encode_interval_ym(buf, value) + self._write_raw_bytes_and_length(buf, sizeof(buf)) + + cdef int write_oracle_date(self, object value, uint8_t length) except -1: """ Writes a date to the buffer in Oracle Date format. 
""" - cdef: - unsigned int year - char_type buf[13] - uint32_t fsecond - year = cydatetime.PyDateTime_GET_YEAR(value) - buf[0] = ((year // 100) + 100) - buf[1] = ((year % 100) + 100) - buf[2] = cydatetime.PyDateTime_GET_MONTH(value) - buf[3] = cydatetime.PyDateTime_GET_DAY(value) - buf[4] = cydatetime.PyDateTime_DATE_GET_HOUR(value) + 1 - buf[5] = cydatetime.PyDateTime_DATE_GET_MINUTE(value) + 1 - buf[6] = cydatetime.PyDateTime_DATE_GET_SECOND(value) + 1 - if length > 7: - fsecond = \ - cydatetime.PyDateTime_DATE_GET_MICROSECOND(value) * 1000 - if fsecond == 0 and length <= 11: + cdef char_type buf[ORA_TYPE_SIZE_TIMESTAMP_TZ] + if length == 7: + encode_date(buf, value) + elif length == 11: + encode_timestamp(buf, value) + # the protocol requires that if the fractional seconds are zero + # that the value be transmitted as a date, not a timestamp! + if decode_uint32be(&buf[7]) == 0: length = 7 - else: - encode_uint32be(&buf[7], fsecond) - if length > 11: - buf[11] = TZ_HOUR_OFFSET - buf[12] = TZ_MINUTE_OFFSET - if write_length: - self.write_uint8(length) - self.write_raw(buf, length) + else: + encode_timestamp_tz(buf, value) + self._write_raw_bytes_and_length(buf, length) cdef int write_oracle_number(self, bytes num_bytes) except -1: """ @@ -655,147 +562,10 @@ cdef class Buffer: buffer. """ cdef: - uint8_t num_digits = 0, digit, num_pairs, pair_num, digits_pos - bint exponent_is_negative = False, append_sentinel = False - ssize_t num_bytes_length, exponent_pos, pos = 0 - bint is_negative = False, prepend_zero = False - uint8_t digits[NUMBER_AS_TEXT_CHARS] - int16_t decimal_point_index - int8_t exponent_on_wire - const char_type *ptr - int16_t exponent - - # zero length string cannot be converted - num_bytes_length = len(num_bytes) - if num_bytes_length == 0: - errors._raise_err(errors.ERR_NUMBER_STRING_OF_ZERO_LENGTH) - elif num_bytes_length > NUMBER_AS_TEXT_CHARS: - errors._raise_err(errors.ERR_NUMBER_STRING_TOO_LONG) - - # check to see if number is negative (first character is '-') - ptr = num_bytes - if ptr[0] == b'-': - is_negative = True - pos += 1 - - # scan for digits until the decimal point or exponent indicator found - while pos < num_bytes_length: - if ptr[pos] == b'.' 
or ptr[pos] == b'e' or ptr[pos] == b'E': - break - if ptr[pos] < b'0' or ptr[pos] > b'9': - errors._raise_err(errors.ERR_INVALID_NUMBER) - digit = ptr[pos] - b'0' - pos += 1 - if digit == 0 and num_digits == 0: - continue - digits[num_digits] = digit - num_digits += 1 - decimal_point_index = num_digits - - # scan for digits following the decimal point, if applicable - if pos < num_bytes_length and ptr[pos] == b'.': - pos += 1 - while pos < num_bytes_length: - if ptr[pos] == b'e' or ptr[pos] == b'E': - break - digit = ptr[pos] - b'0' - pos += 1 - if digit == 0 and num_digits == 0: - decimal_point_index -= 1 - continue - digits[num_digits] = digit - num_digits += 1 - - # handle exponent, if applicable - if pos < num_bytes_length and (ptr[pos] == b'e' or ptr[pos] == b'E'): - pos += 1 - if pos < num_bytes_length: - if ptr[pos] == b'-': - exponent_is_negative = True - pos += 1 - elif ptr[pos] == b'+': - pos += 1 - exponent_pos = pos - while pos < num_bytes_length: - if ptr[pos] < b'0' or ptr[pos] > b'9': - errors._raise_err(errors.ERR_NUMBER_WITH_INVALID_EXPONENT) - pos += 1 - if exponent_pos == pos: - errors._raise_err(errors.ERR_NUMBER_WITH_EMPTY_EXPONENT) - exponent = int(ptr[exponent_pos:pos]) - if exponent_is_negative: - exponent = -exponent - decimal_point_index += exponent - - # if there is anything left in the string, that indicates an invalid - # number as well - if pos < num_bytes_length: - errors._raise_err(errors.ERR_CONTENT_INVALID_AFTER_NUMBER) - - # skip trailing zeros - while num_digits > 0 and digits[num_digits - 1] == 0: - num_digits -= 1 - - # value must be less than 1e126 and greater than 1e-129; the number of - # digits also cannot exceed the maximum precision of Oracle numbers - if num_digits > NUMBER_MAX_DIGITS or decimal_point_index > 126 \ - or decimal_point_index < -129: - errors._raise_err(errors.ERR_ORACLE_NUMBER_NO_REPR) - - # if the exponent is odd, prepend a zero - if decimal_point_index % 2 == 1: - prepend_zero = True - if num_digits > 0: - digits[num_digits] = 0 - num_digits += 1 - decimal_point_index += 1 - - # determine the number of digit pairs; if the number of digits is odd, - # append a zero to make the number of digits even - if num_digits % 2 == 1: - digits[num_digits] = 0 - num_digits += 1 - num_pairs = num_digits // 2 - - # append a sentinel 102 byte for negative numbers if there is room - if is_negative and num_digits > 0 and num_digits < NUMBER_MAX_DIGITS: - append_sentinel = True - - # write length of number - self.write_uint8(num_pairs + 1 + append_sentinel) - - # if the number of digits is zero, the value is itself zero since all - # leading and trailing zeros are removed from the digits string; this - # is a special case - if num_digits == 0: - self.write_uint8(128) - return 0 - - # write the exponent - exponent_on_wire = (decimal_point_index / 2) + 192 - if is_negative: - exponent_on_wire = ~exponent_on_wire - self.write_uint8(exponent_on_wire) - - # write the mantissa bytes - digits_pos = 0 - for pair_num in range(num_pairs): - if pair_num == 0 and prepend_zero: - digit = digits[digits_pos] - digits_pos += 1 - else: - digit = digits[digits_pos] * 10 + digits[digits_pos + 1] - digits_pos += 2 - if is_negative: - digit = 101 - digit - else: - digit += 1 - self.write_uint8(digit) - - # append 102 byte for negative numbers if the number of digits is less - # than the maximum allowable - if append_sentinel: - self.write_uint8(102) + char_type buf[ORA_TYPE_SIZE_NUMBER] + ssize_t buflen + encode_number(buf, &buflen, num_bytes) + 
self._write_raw_bytes_and_length(buf, buflen) cdef int write_raw(self, const char_type *data, ssize_t length) except -1: """ diff --git a/src/oracledb/impl/base/constants.pxi b/src/oracledb/impl/base/constants.pxi index 96bedc11..b8101316 100644 --- a/src/oracledb/impl/base/constants.pxi +++ b/src/oracledb/impl/base/constants.pxi @@ -86,6 +86,21 @@ cdef enum: TNS_VECTOR_FLAG_NORM_RESERVED = 0x0010 TNS_VECTOR_FLAG_SPARSE = 0x0020 +# data type buffer sizes +cdef enum: + ORA_TYPE_SIZE_BINARY_DOUBLE = 8 + ORA_TYPE_SIZE_BINARY_FLOAT = 4 + ORA_TYPE_SIZE_BOOLEAN = 4 + ORA_TYPE_SIZE_DATE = 7 + ORA_TYPE_SIZE_INTERVAL_DS = 11 + ORA_TYPE_SIZE_INTERVAL_YM = 5 + ORA_TYPE_SIZE_MAX = 22 + ORA_TYPE_SIZE_NUMBER = 22 + ORA_TYPE_SIZE_ROWID = 18 + ORA_TYPE_SIZE_TIMESTAMP = 11 + ORA_TYPE_SIZE_TIMESTAMP_TZ = 13 + + # general constants cdef enum: TNS_MAX_SHORT_LENGTH = 252 diff --git a/src/oracledb/impl/base/encoders.pyx b/src/oracledb/impl/base/encoders.pyx index 76d94d1e..aed7e420 100644 --- a/src/oracledb/impl/base/encoders.pyx +++ b/src/oracledb/impl/base/encoders.pyx @@ -28,6 +28,304 @@ # Cython file defining the low-level encoding routines used by the driver. #------------------------------------------------------------------------------ +cdef inline void encode_binary_double(char_type *buf, double value): + """ + Encodes a double in the format expected by the Oracle Database for + BINARY_DOUBLE. + """ + cdef: + uint8_t b0, b1, b2, b3, b4, b5, b6, b7 + uint64_t all_bits + uint64_t *ptr + ptr = &value + all_bits = ptr[0] + b7 = all_bits & 0xff + b6 = (all_bits >> 8) & 0xff + b5 = (all_bits >> 16) & 0xff + b4 = (all_bits >> 24) & 0xff + b3 = (all_bits >> 32) & 0xff + b2 = (all_bits >> 40) & 0xff + b1 = (all_bits >> 48) & 0xff + b0 = (all_bits >> 56) & 0xff + if b0 & 0x80 == 0: + b0 = b0 | 0x80 + else: + b0 = ~b0 + b1 = ~b1 + b2 = ~b2 + b3 = ~b3 + b4 = ~b4 + b5 = ~b5 + b6 = ~b6 + b7 = ~b7 + buf[0] = b0 + buf[1] = b1 + buf[2] = b2 + buf[3] = b3 + buf[4] = b4 + buf[5] = b5 + buf[6] = b6 + buf[7] = b7 + + +cdef inline void encode_binary_float(char_type *buf, float value): + """ + Encodes a float in the format expected by the Oracle Database for + BINARY_FLOAT. + """ + cdef: + uint8_t b0, b1, b2, b3 + uint32_t all_bits + uint32_t *ptr + ptr = &value + all_bits = ptr[0] + b3 = all_bits & 0xff + b2 = (all_bits >> 8) & 0xff + b1 = (all_bits >> 16) & 0xff + b0 = (all_bits >> 24) & 0xff + if b0 & 0x80 == 0: + b0 = b0 | 0x80 + else: + b0 = ~b0 + b1 = ~b1 + b2 = ~b2 + b3 = ~b3 + buf[0] = b0 + buf[1] = b1 + buf[2] = b2 + buf[3] = b3 + + +cdef inline void encode_boolean(char_type *buf, ssize_t *buflen, bint value): + """ + Encodes a boolean in the format expected by the Oracle Database for + BOOLEAN. + """ + if value: + buflen[0] = 3 + buf[0] = 2 + encode_uint16be(&buf[1], 0x0101) + else: + buflen[0] = 2 + encode_uint16be(buf, 0x0100) + + +cdef inline void encode_date(char_type *buf, object value): + """ + Encodes a datetime.date or datetime.datetime object in the format exepcted + by the Oracle Database for DATE. 
+ """ + cdef unsigned int year + year = cydatetime.PyDateTime_GET_YEAR(value) + buf[0] = ((year // 100) + 100) + buf[1] = ((year % 100) + 100) + buf[2] = cydatetime.PyDateTime_GET_MONTH(value) + buf[3] = cydatetime.PyDateTime_GET_DAY(value) + buf[4] = cydatetime.PyDateTime_DATE_GET_HOUR(value) + 1 + buf[5] = cydatetime.PyDateTime_DATE_GET_MINUTE(value) + 1 + buf[6] = cydatetime.PyDateTime_DATE_GET_SECOND(value) + 1 + + +cdef inline void encode_interval_ds(char_type *buf, object value): + """ + Encodes a datetime.timedelta object in the format exepcted by the Oracle + Database for INTERVAL DAY TO SECOND. + """ + cdef int32_t days, seconds, fseconds + days = cydatetime.timedelta_days(value) + encode_uint32be(buf, days + TNS_DURATION_MID) + seconds = cydatetime.timedelta_seconds(value) + buf[4] = (seconds // 3600) + TNS_DURATION_OFFSET + seconds = seconds % 3600 + buf[5] = (seconds // 60) + TNS_DURATION_OFFSET + buf[6] = (seconds % 60) + TNS_DURATION_OFFSET + fseconds = cydatetime.timedelta_microseconds(value) * 1000 + encode_uint32be(&buf[7], fseconds + TNS_DURATION_MID) + + +cdef int encode_interval_ym(char_type *buf, object value) except -1: + """ + Encodes a IntervalYM object in the format exepcted by the Oracle Database + for INTERVAL YEAR TO MONTH. + """ + cdef int32_t years, months + years = ( value)[0] + months = ( value)[1] + encode_uint32be(buf, years + TNS_DURATION_MID) + buf[4] = months + TNS_DURATION_OFFSET + + +cdef int encode_number(char_type *buf, ssize_t *buflen, bytes value) except -1: + """ + Encodes bytes representing numeric data in the format exepcted by the + Oracle Database for NUMBER. + """ + cdef: + uint8_t num_digits = 0, digit, num_pairs, pair_num, digits_pos + bint is_negative = False, prepend_zero = False + ssize_t value_length, exponent_pos, pos = 0 + uint8_t digits[NUMBER_AS_TEXT_CHARS] + bint exponent_is_negative = False + int16_t decimal_point_index + int8_t exponent_on_wire + const char_type *ptr + int16_t exponent + + # zero length string cannot be converted + value_length = len(value) + if value_length == 0: + errors._raise_err(errors.ERR_NUMBER_STRING_OF_ZERO_LENGTH) + elif value_length > NUMBER_AS_TEXT_CHARS: + errors._raise_err(errors.ERR_NUMBER_STRING_TOO_LONG) + + # check to see if number is negative (first character is '-') + ptr = value + if ptr[0] == b'-': + is_negative = True + pos += 1 + + # scan for digits until the decimal point or exponent indicator found + while pos < value_length: + if ptr[pos] == b'.' 
or ptr[pos] == b'e' or ptr[pos] == b'E': + break + if ptr[pos] < b'0' or ptr[pos] > b'9': + errors._raise_err(errors.ERR_INVALID_NUMBER) + digit = ptr[pos] - b'0' + pos += 1 + if digit == 0 and num_digits == 0: + continue + digits[num_digits] = digit + num_digits += 1 + decimal_point_index = num_digits + + # scan for digits following the decimal point, if applicable + if pos < value_length and ptr[pos] == b'.': + pos += 1 + while pos < value_length: + if ptr[pos] == b'e' or ptr[pos] == b'E': + break + digit = ptr[pos] - b'0' + pos += 1 + if digit == 0 and num_digits == 0: + decimal_point_index -= 1 + continue + digits[num_digits] = digit + num_digits += 1 + + # handle exponent, if applicable + if pos < value_length and (ptr[pos] == b'e' or ptr[pos] == b'E'): + pos += 1 + if pos < value_length: + if ptr[pos] == b'-': + exponent_is_negative = True + pos += 1 + elif ptr[pos] == b'+': + pos += 1 + exponent_pos = pos + while pos < value_length: + if ptr[pos] < b'0' or ptr[pos] > b'9': + errors._raise_err(errors.ERR_NUMBER_WITH_INVALID_EXPONENT) + pos += 1 + if exponent_pos == pos: + errors._raise_err(errors.ERR_NUMBER_WITH_EMPTY_EXPONENT) + exponent = int(ptr[exponent_pos:pos]) + if exponent_is_negative: + exponent = -exponent + decimal_point_index += exponent + + # if there is anything left in the string, that indicates an invalid + # number as well + if pos < value_length: + errors._raise_err(errors.ERR_CONTENT_INVALID_AFTER_NUMBER) + + # skip trailing zeros + while num_digits > 0 and digits[num_digits - 1] == 0: + num_digits -= 1 + + # value must be less than 1e126 and greater than 1e-129; the number of + # digits also cannot exceed the maximum precision of Oracle numbers + if num_digits > NUMBER_MAX_DIGITS or decimal_point_index > 126 \ + or decimal_point_index < -129: + errors._raise_err(errors.ERR_ORACLE_NUMBER_NO_REPR) + + # if the exponent is odd, prepend a zero + if decimal_point_index % 2 == 1: + prepend_zero = True + if num_digits > 0: + digits[num_digits] = 0 + num_digits += 1 + decimal_point_index += 1 + + # determine the number of digit pairs; if the number of digits is odd, + # append a zero to make the number of digits even + if num_digits % 2 == 1: + digits[num_digits] = 0 + num_digits += 1 + num_pairs = num_digits // 2 + + # if the number of digits is zero, the value is itself zero since all + # leading and trailing zeros are removed from the digits string; this + # is a special case + if num_digits == 0: + buf[0] = 128 + buflen[0] = 1 + return 0 + + # the total length of the buffer will be the number of pairs (each of which + # are encoded in a single byte) plus a single byte for the exponent + buflen[0] = num_pairs + 1 + + # encode the exponent + exponent_on_wire = (decimal_point_index / 2) + 192 + if is_negative: + exponent_on_wire = ~exponent_on_wire + buf[0] = exponent_on_wire + + # encode the mantissa bytes + digits_pos = 0 + for pair_num in range(num_pairs): + if pair_num == 0 and prepend_zero: + digit = digits[digits_pos] + digits_pos += 1 + else: + digit = digits[digits_pos] * 10 + digits[digits_pos + 1] + digits_pos += 2 + if is_negative: + digit = 101 - digit + else: + digit += 1 + buf[pair_num + 1] = digit + + # append a sentinel 102 byte for negative numbers if the number of digits + # is less than the maximum allowable + if is_negative and num_digits < NUMBER_MAX_DIGITS: + buf[num_pairs + 1] = 102 + buflen[0] += 1 + + +cdef inline void encode_timestamp(char_type *buf, object value): + """ + Encodes a datetime.date or datetime.datetime object in the format 
exepcted + by the Oracle Database for TIMESTAMP (WITH LOCAL TIME ZONE). + """ + cdef uint32_t fsecond = 0 + encode_date(buf, value) + if isinstance(value, PY_TYPE_DATETIME): + fsecond = \ + cydatetime.PyDateTime_DATE_GET_MICROSECOND(value) * 1000 + encode_uint32be(&buf[7], fsecond) + + +cdef inline void encode_timestamp_tz(char_type *buf, object value): + """ + Encodes a datetime.date or datetime.datetime object in the format exepcted + by the Oracle Database for TIMESTAMP WITH TIME ZONE. + """ + encode_timestamp(buf, value) + buf[11] = TZ_HOUR_OFFSET + buf[12] = TZ_MINUTE_OFFSET + + cdef inline void encode_uint16be(char_type *buf, uint16_t value): """ Encodes a 16-bit integer in big endian order (most significant byte first). diff --git a/src/oracledb/impl/base/oson.pyx b/src/oracledb/impl/base/oson.pyx index b11db429..86607df3 100644 --- a/src/oracledb/impl/base/oson.pyx +++ b/src/oracledb/impl/base/oson.pyx @@ -587,6 +587,7 @@ cdef class OsonTreeSegment(GrowableBuffer): Encode a value (node) in the OSON tree segment. """ cdef: + char_type buf[ORA_TYPE_SIZE_MAX] VectorEncoder vector_encoder uint32_t value_len bytes value_bytes @@ -626,20 +627,24 @@ cdef class OsonTreeSegment(GrowableBuffer): elif isinstance(value, PY_TYPE_DATETIME): if cydatetime.PyDateTime_DATE_GET_MICROSECOND(value) == 0: self.write_uint8(TNS_JSON_TYPE_TIMESTAMP7) - self.write_oracle_date(value, 7, write_length=False) + encode_date(buf, value) + self.write_raw(buf, ORA_TYPE_SIZE_DATE) else: self.write_uint8(TNS_JSON_TYPE_TIMESTAMP) - self.write_oracle_date(value, 11, write_length=False) + encode_timestamp(buf, value) + self.write_raw(buf, ORA_TYPE_SIZE_TIMESTAMP) # handle dates elif isinstance(value, PY_TYPE_DATE): self.write_uint8(TNS_JSON_TYPE_DATE) - self.write_oracle_date(value, 7, write_length=False) + encode_date(buf, value) + self.write_raw(buf, ORA_TYPE_SIZE_DATE) # handle timedeltas elif isinstance(value, PY_TYPE_TIMEDELTA): self.write_uint8(TNS_JSON_TYPE_INTERVAL_DS) - self.write_interval_ds(value, write_length=False) + encode_interval_ds(buf, value) + self.write_raw(buf, ORA_TYPE_SIZE_INTERVAL_DS) # handle strings elif isinstance(value, str): diff --git a/src/oracledb/impl/base/types.pyx b/src/oracledb/impl/base/types.pyx index 39a71543..c00ce582 100644 --- a/src/oracledb/impl/base/types.pyx +++ b/src/oracledb/impl/base/types.pyx @@ -128,7 +128,7 @@ DB_TYPE_BINARY_DOUBLE = DbType( NATIVE_TYPE_NUM_DOUBLE, ORA_TYPE_NUM_BINARY_DOUBLE, PY_TYPE_NUM_FLOAT, - buffer_size_factor=8 + buffer_size_factor=ORA_TYPE_SIZE_BINARY_DOUBLE ) DB_TYPE_BINARY_FLOAT = DbType( @@ -138,7 +138,7 @@ DB_TYPE_BINARY_FLOAT = DbType( NATIVE_TYPE_NUM_FLOAT, ORA_TYPE_NUM_BINARY_FLOAT, PY_TYPE_NUM_FLOAT, - buffer_size_factor=4 + buffer_size_factor=ORA_TYPE_SIZE_BINARY_FLOAT ) DB_TYPE_BINARY_INTEGER = DbType( @@ -148,7 +148,7 @@ DB_TYPE_BINARY_INTEGER = DbType( NATIVE_TYPE_NUM_INT64, ORA_TYPE_NUM_BINARY_INTEGER, PY_TYPE_NUM_INT, - buffer_size_factor=22 + buffer_size_factor=ORA_TYPE_SIZE_NUMBER ) DB_TYPE_BLOB = DbType( @@ -168,7 +168,7 @@ DB_TYPE_BOOLEAN = DbType( NATIVE_TYPE_NUM_BOOLEAN, ORA_TYPE_NUM_BOOLEAN, PY_TYPE_NUM_BOOL, - buffer_size_factor=4 + buffer_size_factor=ORA_TYPE_SIZE_BOOLEAN ) DB_TYPE_CHAR = DbType( @@ -211,7 +211,7 @@ DB_TYPE_DATE = DbType( NATIVE_TYPE_NUM_TIMESTAMP, ORA_TYPE_NUM_DATE, PY_TYPE_NUM_DATETIME, - buffer_size_factor=7 + buffer_size_factor=ORA_TYPE_SIZE_DATE ) DB_TYPE_INTERVAL_DS = DbType( @@ -221,7 +221,7 @@ DB_TYPE_INTERVAL_DS = DbType( NATIVE_TYPE_NUM_INTERVAL_DS, ORA_TYPE_NUM_INTERVAL_DS, 
PY_TYPE_NUM_TIMEDELTA, - buffer_size_factor=11 + buffer_size_factor=ORA_TYPE_SIZE_INTERVAL_DS ) DB_TYPE_INTERVAL_YM = DbType( @@ -231,7 +231,7 @@ DB_TYPE_INTERVAL_YM = DbType( NATIVE_TYPE_NUM_INTERVAL_YM, ORA_TYPE_NUM_INTERVAL_YM, PY_TYPE_NUM_ORACLE_INTERVAL_YM, - buffer_size_factor=5 + buffer_size_factor=ORA_TYPE_SIZE_INTERVAL_YM ) DB_TYPE_JSON = DbType( @@ -305,7 +305,7 @@ DB_TYPE_NUMBER = DbType( NATIVE_TYPE_NUM_BYTES, ORA_TYPE_NUM_NUMBER, PY_TYPE_NUM_FLOAT, - buffer_size_factor=22 + buffer_size_factor=ORA_TYPE_SIZE_NUMBER ) DB_TYPE_NVARCHAR = DbType( @@ -347,7 +347,7 @@ DB_TYPE_ROWID = DbType( NATIVE_TYPE_NUM_ROWID, ORA_TYPE_NUM_ROWID, PY_TYPE_NUM_STR, - buffer_size_factor=18 + buffer_size_factor=ORA_TYPE_SIZE_ROWID ) DB_TYPE_TIMESTAMP = DbType( @@ -357,7 +357,7 @@ DB_TYPE_TIMESTAMP = DbType( NATIVE_TYPE_NUM_TIMESTAMP, ORA_TYPE_NUM_TIMESTAMP, PY_TYPE_NUM_DATETIME, - buffer_size_factor=11 + buffer_size_factor=ORA_TYPE_SIZE_TIMESTAMP ) DB_TYPE_TIMESTAMP_LTZ = DbType( @@ -367,7 +367,7 @@ DB_TYPE_TIMESTAMP_LTZ = DbType( NATIVE_TYPE_NUM_TIMESTAMP, ORA_TYPE_NUM_TIMESTAMP_LTZ, PY_TYPE_NUM_DATETIME, - buffer_size_factor=11 + buffer_size_factor=ORA_TYPE_SIZE_TIMESTAMP ) DB_TYPE_TIMESTAMP_TZ = DbType( @@ -377,7 +377,7 @@ DB_TYPE_TIMESTAMP_TZ = DbType( NATIVE_TYPE_NUM_TIMESTAMP, ORA_TYPE_NUM_TIMESTAMP_TZ, PY_TYPE_NUM_DATETIME, - buffer_size_factor=13 + buffer_size_factor=ORA_TYPE_SIZE_TIMESTAMP_TZ ) DB_TYPE_UNKNOWN = DbType( diff --git a/src/oracledb/impl/base/vector.pyx b/src/oracledb/impl/base/vector.pyx index 449c0ade..48fbf88a 100644 --- a/src/oracledb/impl/base/vector.pyx +++ b/src/oracledb/impl/base/vector.pyx @@ -164,6 +164,7 @@ cdef class VectorEncoder(GrowableBuffer): uint8_t *uint8_ptr = value.data.as_uchars float *float_ptr = value.data.as_floats int8_t *int8_ptr = value.data.as_schars + char_type buf[8] uint32_t i if vector_format == VECTOR_FORMAT_INT8: self.write_raw( int8_ptr, num_elements) @@ -172,9 +173,11 @@ cdef class VectorEncoder(GrowableBuffer): else: for i in range(num_elements): if vector_format == VECTOR_FORMAT_FLOAT32: - self.write_binary_float(float_ptr[i], write_length=False) + encode_binary_float(buf, float_ptr[i]) + self.write_raw(buf, 4) elif vector_format == VECTOR_FORMAT_FLOAT64: - self.write_binary_double(double_ptr[i], write_length=False) + encode_binary_double(buf, double_ptr[i]) + self.write_raw(buf, 8) cdef uint8_t _get_vector_format(self, array.array value): """ diff --git a/src/oracledb/impl/thin/dbobject.pyx b/src/oracledb/impl/thin/dbobject.pyx index b9d147c8..5b3e48da 100644 --- a/src/oracledb/impl/thin/dbobject.pyx +++ b/src/oracledb/impl/thin/dbobject.pyx @@ -280,7 +280,8 @@ cdef class ThinDbObjectImpl(BaseDbObjectImpl): elif ora_type_num == ORA_TYPE_NUM_BOOLEAN: buf.write_uint8(4) buf.write_uint32be(value) - elif ora_type_num in (ORA_TYPE_NUM_DATE, ORA_TYPE_NUM_TIMESTAMP, + elif ora_type_num in (ORA_TYPE_NUM_DATE, + ORA_TYPE_NUM_TIMESTAMP, ORA_TYPE_NUM_TIMESTAMP_TZ, ORA_TYPE_NUM_TIMESTAMP_LTZ): buf.write_oracle_date(value, metadata.dbtype._buffer_size_factor) diff --git a/tests/test_2600_timestamp_var.py b/tests/test_2600_timestamp_var.py index 6784c645..99b3857f 100644 --- a/tests/test_2600_timestamp_var.py +++ b/tests/test_2600_timestamp_var.py @@ -222,6 +222,19 @@ def test_2610(self): ) self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) + def test_2611(self): + "2611 - test binding a timestamp with datetime.date as input" + self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + self.cursor.execute( + """ + select * + from 
TestTimestamps + where trunc(TimestampCol) = :value + """, + value=datetime.date(2002, 12, 14), + ) + self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_4800_timestamp_ltz_var.py b/tests/test_4800_timestamp_ltz_var.py index a726f4c5..8b9bcd13 100644 --- a/tests/test_4800_timestamp_ltz_var.py +++ b/tests/test_4800_timestamp_ltz_var.py @@ -226,6 +226,19 @@ def test_4810(self): ) self.assertEqual(self.cursor.fetchall(), [self.data_by_key[10]]) + def test_4811(self): + "4811 - test binding a timestamp with datetime.date as input" + self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) + self.cursor.execute( + """ + select * + from TestTimestampLTZs + where trunc(TimestampLTZCol) = :value + """, + value=datetime.date(2022, 6, 12), + ) + self.assertEqual(self.cursor.fetchall(), [self.data_by_key[10]]) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_4900_timestamp_tz_var.py b/tests/test_4900_timestamp_tz_var.py index c8b60298..197e67e0 100644 --- a/tests/test_4900_timestamp_tz_var.py +++ b/tests/test_4900_timestamp_tz_var.py @@ -207,7 +207,7 @@ def test_4909(self): def test_4910(self): "4910 - test binding a timestamp with zero fractional seconds" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) self.cursor.execute( """ select * @@ -218,6 +218,19 @@ def test_4910(self): ) self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) + def test_4911(self): + "4911 - test binding a timestamp with datetime.date as input" + self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) + self.cursor.execute( + """ + select * + from TestTimestampTZs + where trunc(TimestampTZCol) = :value + """, + value=datetime.date(2022, 6, 8), + ) + self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) + if __name__ == "__main__": test_env.run_test_cases() From 8f498c64ec9236124d155ae3385ab998a55419b0 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:24:59 -0600 Subject: [PATCH 191/239] Fixed bug validating the database host during connection. --- doc/src/release_notes.rst | 1 + src/oracledb/errors.py | 4 ++++ src/oracledb/impl/thin/capabilities.pyx | 3 ++- src/oracledb/impl/thin/constants.pxi | 1 + src/oracledb/impl/thin/messages/auth.pyx | 30 ++++++++---------------- src/oracledb/impl/thin/protocol.pyx | 21 ++++++++++++++++- 6 files changed, 38 insertions(+), 22 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ad659619..2d9b34ff 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -23,6 +23,7 @@ Thin Mode Changes types :attr:`oracledb.DB_TYPE_TIMESTAMP`, :attr:`oracledb.DB_TYPE_TIMESTAMP_TZ` and :attr:`oracledb.DB_TYPE_TIMESTAMP_LTZ`. +#) Fixed bug validating the database host during connection. #) Internal change: refactor encoding of Oracle data types. #) Internal change: small performance improvement sending bytes on the network transport. 
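In outline, the fix below checks the AUTH_SVR_RESPONSE value the server returns during authentication: the value is hex-decoded, decrypted with the negotiated session ("combo") key, and must carry the SERVER_TO_CLIENT marker, otherwise ERR_INVALID_SERVER_RESPONSE is raised. The following is only a simplified plain-Python sketch of that check; validate_server_response and the decrypt callable are illustrative stand-ins for the driver's internal decrypt_cbc() helper and are not part of the public API.

    def validate_server_response(session_data, combo_key, decrypt):
        # session_data holds the AUTH_* key/value pairs returned by the server
        value = session_data.get("AUTH_SVR_RESPONSE")
        response = None
        if value is not None:
            # the response is hex-encoded and encrypted with the session key
            response = decrypt(combo_key, bytes.fromhex(value))
        # a missing or malformed response means the host cannot be trusted
        if response is None or response[16:32] != b"SERVER_TO_CLIENT":
            raise ValueError("invalid server response to connection request")
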
diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 404b9cee..8bacd232 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -363,6 +363,7 @@ def _raise_not_supported(feature: str) -> None: ERR_INVALID_SSL_VERSION = 4032 ERR_EXCEEDED_IDLE_TIME = 4033 ERR_INVALID_PASSWORD_TYPE = 4034 +ERR_INVALID_SERVER_RESPONSE = 4035 # error numbers that result in InternalError ERR_MESSAGE_TYPE_UNKNOWN = 5000 @@ -674,6 +675,9 @@ def _raise_not_supported(feature: str) -> None: "The name on the server certificate does not match the expected " 'value: "{expected_name}"' ), + ERR_INVALID_SERVER_RESPONSE: ( + "invalid server response to connection request" + ), ERR_INVALID_SERVER_TYPE: "invalid server_type: {server_type}", ERR_INVALID_SERVICE_NAME: ( 'Service "{service_name}" is not registered with the listener at ' diff --git a/src/oracledb/impl/thin/capabilities.pyx b/src/oracledb/impl/thin/capabilities.pyx index b07cd143..59380bc2 100644 --- a/src/oracledb/impl/thin/capabilities.pyx +++ b/src/oracledb/impl/thin/capabilities.pyx @@ -106,7 +106,8 @@ cdef class Capabilities: TNS_CCAP_O7LOGON | TNS_CCAP_O8LOGON_LONG_IDENTIFIER | \ TNS_CCAP_O9LOGON_LONG_PASSWORD self.compile_caps[TNS_CCAP_FEATURE_BACKPORT] = \ - TNS_CCAP_CTB_IMPLICIT_POOL + TNS_CCAP_CTB_IMPLICIT_POOL | \ + TNS_CCAP_CTB_OAUTH_MSG_ON_ERR self.compile_caps[TNS_CCAP_FIELD_VERSION] = self.ttc_field_version self.compile_caps[TNS_CCAP_SERVER_DEFINE_CONV] = 1 self.compile_caps[TNS_CCAP_DEQUEUE_WITH_SELECTOR] = 1 diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index 3acc08cd..9d72320f 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -455,6 +455,7 @@ cdef enum: TNS_CCAP_O8LOGON_LONG_IDENTIFIER = 64 TNS_CCAP_O9LOGON_LONG_PASSWORD = 0x80 TNS_CCAP_CTB_IMPLICIT_POOL = 0x08 + TNS_CCAP_CTB_OAUTH_MSG_ON_ERR = 0x10 TNS_CCAP_END_OF_CALL_STATUS = 0x01 TNS_CCAP_IND_RCD = 0x08 TNS_CCAP_FAST_BVEC = 0x20 diff --git a/src/oracledb/impl/thin/messages/auth.pyx b/src/oracledb/impl/thin/messages/auth.pyx index b6a3f621..d52c2c20 100644 --- a/src/oracledb/impl/thin/messages/auth.pyx +++ b/src/oracledb/impl/thin/messages/auth.pyx @@ -206,6 +206,7 @@ cdef class AuthMessage(Message): cdef int _process_return_parameters(self, ReadBuffer buf) except -1: cdef: + bytes encoded_response, response uint16_t num_params, i str key, value buf.read_ub2(&num_params) @@ -222,26 +223,15 @@ cdef class AuthMessage(Message): if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: self.function_code = TNS_FUNC_AUTH_PHASE_TWO elif not self.change_password: - self.conn_impl._session_id = \ - int(self.session_data["AUTH_SESSION_ID"]) - self.conn_impl._serial_num = \ - int(self.session_data["AUTH_SERIAL_NUM"]) - self.conn_impl._db_domain = \ - self.session_data.get("AUTH_SC_DB_DOMAIN") - self.conn_impl._db_name = \ - self.session_data.get("AUTH_SC_DBUNIQUE_NAME") - self.conn_impl._max_open_cursors = \ - int(self.session_data.get("AUTH_MAX_OPEN_CURSORS", 0)) - self.conn_impl._service_name = \ - self.session_data.get("AUTH_SC_SERVICE_NAME") - self.conn_impl._instance_name = \ - self.session_data.get("AUTH_INSTANCENAME") - self.conn_impl._max_identifier_length = \ - int(self.session_data.get("AUTH_MAX_IDEN_LENGTH", 30)) - self.conn_impl.server_version = self._get_version_tuple(buf) - self.conn_impl.supports_bool = \ - buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1 - self.conn_impl._edition = self.edition + response = None + value = self.session_data.get("AUTH_SVR_RESPONSE") + if 
value is not None: + encoded_response = bytes.fromhex(value) + response = decrypt_cbc( + self.conn_impl._combo_key, encoded_response + ) + if response is None or response[16:32] != b"SERVER_TO_CLIENT": + errors._raise_err(errors.ERR_INVALID_SERVER_RESPONSE) cdef int _set_params(self, ConnectParamsImpl params, Description description) except -1: diff --git a/src/oracledb/impl/thin/protocol.pyx b/src/oracledb/impl/thin/protocol.pyx index df36f47a..82946fc5 100644 --- a/src/oracledb/impl/thin/protocol.pyx +++ b/src/oracledb/impl/thin/protocol.pyx @@ -176,8 +176,27 @@ cdef class BaseProtocol: the packet may indicate EOF for the initial connection that is established. """ + cdef: + dict session_data = auth_message.session_data + ReadBuffer buf = self._read_buf + conn_impl._session_id = \ + int(session_data["AUTH_SESSION_ID"]) + conn_impl._serial_num = \ + int(session_data["AUTH_SERIAL_NUM"]) + conn_impl._db_domain = session_data.get("AUTH_SC_DB_DOMAIN") + conn_impl._db_name = session_data.get("AUTH_SC_DBUNIQUE_NAME") + conn_impl._max_open_cursors = \ + int(session_data.get("AUTH_MAX_OPEN_CURSORS", 0)) + conn_impl._service_name = session_data.get("AUTH_SC_SERVICE_NAME") + conn_impl._instance_name = session_data.get("AUTH_INSTANCENAME") + conn_impl._max_identifier_length = \ + int(session_data.get("AUTH_MAX_IDEN_LENGTH", 30)) + conn_impl.server_version = auth_message._get_version_tuple(buf) + conn_impl.supports_bool = \ + buf._caps.ttc_field_version >= TNS_CCAP_FIELD_VERSION_23_1 + conn_impl._edition = auth_message.edition conn_impl.warning = auth_message.warning - self._read_buf._pending_error_num = 0 + buf._pending_error_num = 0 self._in_connect = False cdef int _send_marker(self, WriteBuffer buf, uint8_t marker_type): From 331a33b880020392e03b90af8b8945c054e4ecf3 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:25:40 -0600 Subject: [PATCH 192/239] Added missing cdef declaration. --- src/oracledb/impl/thin/transport.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oracledb/impl/thin/transport.pyx b/src/oracledb/impl/thin/transport.pyx index f49dbb8f..38f9eaae 100644 --- a/src/oracledb/impl/thin/transport.pyx +++ b/src/oracledb/impl/thin/transport.pyx @@ -367,7 +367,7 @@ cdef class Transport: """ Writes a packet on the transport. """ - data = buf._data[:buf._pos] + cdef bytes data = buf._data[:buf._pos] if DEBUG_PACKETS: self._print_packet("Sending packet", data) try: From 4dd92abab8ed9bed0bf4fa11d7392cb62289086b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:27:08 -0600 Subject: [PATCH 193/239] Internal change: refactoring of Arrow schema handling. 
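The refactoring moves per-column schema state (Arrow type, name, precision/scale, time unit, and child type for vectors) out of ArrowArrayImpl into a new ArrowSchemaImpl class defined in schema.pyx; arrays now hold a schema_impl reference, and OracleMetadata builds the schema once via _set_arrow_schema(). A rough pure-Python model of the resulting relationship, for illustration only (the real classes are the Cython implementations changed below):

    class SchemaSketch:
        # schema metadata is created once per column and shared
        def __init__(self, arrow_type, name, precision=0, scale=0,
                     time_unit=None, child_arrow_type=None):
            self.arrow_type = arrow_type
            self.name = name
            self.precision = precision
            self.scale = scale
            self.time_unit = time_unit
            self.child_arrow_type = child_arrow_type

    class ArraySketch:
        # each array references a schema instead of copying its fields
        def __init__(self, schema):
            self.schema_impl = schema
            self.values = []  # stands in for the nanoarrow buffers

        def append(self, value):
            self.values.append(value)
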
--- src/oracledb/arrow_impl.pxd | 26 ++- src/oracledb/arrow_impl.pyx | 1 + src/oracledb/base_impl.pxd | 15 +- src/oracledb/impl/arrow/array.pyx | 320 +++++--------------------- src/oracledb/impl/arrow/dataframe.pyx | 14 +- src/oracledb/impl/arrow/schema.pyx | 248 ++++++++++++++++++++ src/oracledb/impl/base/bind_var.pyx | 9 +- src/oracledb/impl/base/converters.pyx | 86 +++---- src/oracledb/impl/base/metadata.pyx | 106 ++++++--- src/oracledb/impl/base/var.pyx | 46 +--- 10 files changed, 463 insertions(+), 408 deletions(-) create mode 100644 src/oracledb/impl/arrow/schema.pyx diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index f944cc03..49d7276e 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -86,8 +86,8 @@ cdef extern from "nanoarrow.h": NANOARROW_TIME_UNIT_MICRO NANOARROW_TIME_UNIT_NANO +cdef class ArrowSchemaImpl: -cdef class ArrowArrayImpl: cdef: int32_t precision int32_t scale @@ -96,17 +96,28 @@ cdef class ArrowArrayImpl: ArrowType arrow_type ArrowTimeUnit time_unit int time_factor - ArrowArray *arrow_array ArrowSchema *arrow_schema ArrowType child_arrow_type int child_element_size + cdef bint _is_sparse_vector(self) except* + cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1 + cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1 + cdef int populate_from_schema(self, ArrowSchema* schema) except -1 + cdef int populate_from_metadata(self, ArrowType arrow_type, str name, + int8_t precision, int8_t scale, + ArrowTimeUnit time_unit, + ArrowType child_arrow_type) except -1 + + +cdef class ArrowArrayImpl: + cdef: + ArrowArray *arrow_array + ArrowSchemaImpl schema_impl + cdef int _get_is_null(self, int64_t index, bint* is_null) except -1 cdef int _get_list_info(self, int64_t index, ArrowArray* arrow_array, int64_t* offset, int64_t* num_elements) except -1 - cdef bint _is_sparse_vector(self) except * - cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1 - cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1 cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1 cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1 cdef int append_double(self, double value) except -1 @@ -135,10 +146,7 @@ cdef class ArrowArrayImpl: cdef object get_vector(self, int64_t index, bint* is_null) cdef int populate_from_array(self, ArrowSchema* schema, ArrowArray* array) except -1 - cdef int populate_from_metadata(self, ArrowType arrow_type, str name, - int8_t precision, int8_t scale, - ArrowTimeUnit time_unit, - ArrowType child_arrow_type) except -1 + cdef int populate_from_schema(self, ArrowSchemaImpl schema_impl) except -1 cdef class DataFrameImpl: diff --git a/src/oracledb/arrow_impl.pyx b/src/oracledb/arrow_impl.pyx index a0aea3c3..5cbe629a 100644 --- a/src/oracledb/arrow_impl.pyx +++ b/src/oracledb/arrow_impl.pyx @@ -52,5 +52,6 @@ else: uint32_template = array.array("L") include "impl/arrow/utils.pyx" +include "impl/arrow/schema.pyx" include "impl/arrow/array.pyx" include "impl/arrow/dataframe.pyx" diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index f64f7004..10437495 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -43,6 +43,7 @@ from .arrow_impl cimport ( ArrowTimeUnit, ArrowType, ArrowArrayImpl, + ArrowSchemaImpl ) cdef enum: @@ -444,14 +445,14 @@ cdef class OracleMetadata: readonly uint32_t vector_dimensions readonly uint8_t vector_format readonly uint8_t vector_flags - ArrowType _arrow_type + ArrowSchemaImpl 
_schema_impl uint8_t _py_type_num cdef int _finalize_init(self) except -1 - cdef int _set_arrow_type(self) except -1 + cdef int _set_arrow_schema(self) except -1 cdef OracleMetadata copy(self) @staticmethod - cdef OracleMetadata from_arrow_array(ArrowArrayImpl array) + cdef OracleMetadata from_arrow_schema(ArrowSchemaImpl schema_impl) @staticmethod cdef OracleMetadata from_type(object typ) @staticmethod @@ -750,8 +751,6 @@ cdef class BaseVarImpl: cdef object _get_scalar_value(self, uint32_t pos) cdef int _on_reset_bind(self, uint32_t num_rows) except -1 cdef int _resize(self, uint32_t new_size) except -1 - cdef int _set_metadata_from_arrow_array(self, - ArrowArrayImpl array) except -1 cdef int _set_metadata_from_type(self, object typ) except -1 cdef int _set_metadata_from_value(self, object value, bint is_plsql) except -1 @@ -986,12 +985,12 @@ cdef struct OracleData: cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, OracleData* data, - ArrowArrayImpl arrow_array, + ArrowArrayImpl array_impl, ssize_t array_index) cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, OracleMetadata to_metadatda, OracleData* data, - ArrowArrayImpl arrow_array) except -1 + ArrowArrayImpl array_impl) except -1 cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, OracleMetadata to_metadatda, OracleData* data, @@ -1000,7 +999,7 @@ cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, cdef object convert_python_to_oracle_data(OracleMetadata metadata, OracleData* data, object value) -cdef int convert_vector_to_arrow(ArrowArrayImpl arrow_array, +cdef int convert_vector_to_arrow(ArrowArrayImpl array_impl, object vector) except -1 cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer) cdef uint16_t decode_uint16be(const char_type *buf) diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 149cbb5d..1edc25b9 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -33,18 +33,12 @@ cdef class ArrowArrayImpl: def __cinit__(self): self.arrow_array = \ cpython.PyMem_Calloc(1, sizeof(ArrowArray)) - self.arrow_schema = \ - cpython.PyMem_Calloc(1, sizeof(ArrowSchema)) def __dealloc__(self): if self.arrow_array != NULL: if self.arrow_array.release != NULL: ArrowArrayRelease(self.arrow_array) cpython.PyMem_Free(self.arrow_array) - if self.arrow_schema != NULL: - if self.arrow_schema.release != NULL: - ArrowSchemaRelease(self.arrow_schema) - cpython.PyMem_Free(self.arrow_schema) cdef int _get_is_null(self, int64_t index, bint* is_null) except -1: """ @@ -77,65 +71,6 @@ cdef class ArrowArrayImpl: end_offset = offsets[index + 1] num_elements[0] = end_offset - offsets[index] - cdef bint _is_sparse_vector(self) except *: - """ - Returns a boolean indicating if the schema refers to a sparse vector. - This requires a structure containing the keys for number of dimensions, - indices and values. 
- """ - cdef: - ArrowSchemaView view - ArrowSchema *schema - if self.arrow_schema.n_children != 3: - return False - schema = self.arrow_schema.children[0] - _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) - if view.type != NANOARROW_TYPE_INT64 \ - or schema.name != b"num_dimensions": - return False - schema = self.arrow_schema.children[1] - _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) - if view.type != NANOARROW_TYPE_LIST or schema.name != b"indices": - return False - _check_nanoarrow(ArrowSchemaViewInit(&view, schema.children[0], NULL)) - if view.type != NANOARROW_TYPE_UINT32: - return False - schema = self.arrow_schema.children[2] - _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) - if view.type != NANOARROW_TYPE_LIST or schema.name != b"values": - return False - _check_nanoarrow(ArrowSchemaViewInit(&view, schema.children[0], NULL)) - self._set_child_arrow_type(view.type) - return True - - cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1: - """ - Set the child Arrow type and the corresponding element size in bytes. - """ - self.child_arrow_type = child_arrow_type - if child_arrow_type == NANOARROW_TYPE_DOUBLE: - self.child_element_size = sizeof(double) - elif child_arrow_type == NANOARROW_TYPE_FLOAT: - self.child_element_size = sizeof(float) - elif child_arrow_type == NANOARROW_TYPE_INT8: - self.child_element_size = sizeof(int8_t) - elif child_arrow_type == NANOARROW_TYPE_UINT8: - self.child_element_size = sizeof(uint8_t) - - cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1: - """ - Sets the time unit and the corresponding factor. - """ - self.time_unit = time_unit - if time_unit == NANOARROW_TIME_UNIT_MILLI: - self.time_factor = 1_000 - elif time_unit == NANOARROW_TIME_UNIT_MICRO: - self.time_factor = 1_000_000 - elif time_unit == NANOARROW_TIME_UNIT_NANO: - self.time_factor = 1_000_000_000 - else: - self.time_factor = 1 - cdef int append_bytes(self, void* ptr, int64_t num_bytes) except -1: """ Append a value of type bytes to the array. 
@@ -159,7 +94,8 @@ cdef class ArrowArrayImpl: ArrowDecimal decimal decimal_view.data = ptr decimal_view.size_bytes = num_bytes - ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + ArrowDecimalInit(&decimal, 128, self.schema_impl.precision, + self.schema_impl.scale) _check_nanoarrow(ArrowDecimalSetDigits(&decimal, decimal_view)) _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, &decimal)) @@ -205,31 +141,34 @@ cdef class ArrowArrayImpl: array._get_is_null(index, &is_null) if is_null: self.append_null() - elif array.arrow_type in (NANOARROW_TYPE_INT64, - NANOARROW_TYPE_TIMESTAMP): + elif array.schema_impl.arrow_type in ( + NANOARROW_TYPE_INT64, + NANOARROW_TYPE_TIMESTAMP + ): data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_int64 = data_buffer.data self.append_int64(as_int64[index]) - elif array.arrow_type == NANOARROW_TYPE_DOUBLE: + elif array.schema_impl.arrow_type == NANOARROW_TYPE_DOUBLE: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_double = data_buffer.data self.append_double(as_double[index]) - elif array.arrow_type == NANOARROW_TYPE_FLOAT: + elif array.schema_impl.arrow_type == NANOARROW_TYPE_FLOAT: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_float = data_buffer.data self.append_double(as_float[index]) - elif array.arrow_type == NANOARROW_TYPE_BOOL: + elif array.schema_impl.arrow_type == NANOARROW_TYPE_BOOL: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_bool = ArrowBitGet(data_buffer.data, index) self.append_int64(as_bool) - elif array.arrow_type == NANOARROW_TYPE_DECIMAL128: + elif array.schema_impl.arrow_type == NANOARROW_TYPE_DECIMAL128: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) - ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + ArrowDecimalInit(&decimal, 128, self.schema_impl.precision, + self.schema_impl.scale) ptr = data_buffer.data + index * 16 ArrowDecimalSetBytes(&decimal, ptr) _check_nanoarrow(ArrowArrayAppendDecimal(self.arrow_array, &decimal)) - elif array.arrow_type in ( + elif array.schema_impl.arrow_type in ( NANOARROW_TYPE_BINARY, NANOARROW_TYPE_STRING ): @@ -246,7 +185,7 @@ cdef class ArrowArrayImpl: finally: cpython.PyMem_Free(temp) - elif array.arrow_type in ( + elif array.schema_impl.arrow_type in ( NANOARROW_TYPE_LARGE_BINARY, NANOARROW_TYPE_LARGE_STRING ): @@ -273,13 +212,13 @@ cdef class ArrowArrayImpl: """ Append a vector to the array. 
""" - if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + if self.schema_impl.child_arrow_type == NANOARROW_TYPE_FLOAT: append_float_array(self.arrow_array, value) - elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_DOUBLE: append_double_array(self.arrow_array, value) - elif self.child_arrow_type == NANOARROW_TYPE_INT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_INT8: append_int8_array(self.arrow_array, value) - elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_UINT8: append_uint8_array(self.arrow_array, value) cdef int append_sparse_vector(self, @@ -292,7 +231,7 @@ cdef class ArrowArrayImpl: cdef ArrowArray *array # validate that the array supports sparse vectors - if self.arrow_type != NANOARROW_TYPE_STRUCT: + if self.schema_impl.arrow_type != NANOARROW_TYPE_STRUCT: errors._raise_err(errors.ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED) # append number of dimensions @@ -305,13 +244,13 @@ cdef class ArrowArrayImpl: # append values array array = self.arrow_array.children[2] - if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + if self.schema_impl.child_arrow_type == NANOARROW_TYPE_FLOAT: append_float_array(array, values) - elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_DOUBLE: append_double_array(array, values) - elif self.child_arrow_type == NANOARROW_TYPE_INT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_INT8: append_int8_array(array, values) - elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_UINT8: append_uint8_array(array, values) # indicate structure is completed @@ -347,11 +286,11 @@ cdef class ArrowArrayImpl: char *source_ptr self._get_is_null(index, is_null) if not is_null[0]: - if self.arrow_type == NANOARROW_TYPE_FIXED_SIZE_BINARY: + if self.schema_impl.arrow_type == NANOARROW_TYPE_FIXED_SIZE_BINARY: source_ptr = self.arrow_array.buffers[1] - start_offset = index * self.fixed_size - end_offset = start_offset + self.fixed_size - elif self.arrow_type in ( + start_offset = index * self.schema_impl.fixed_size + end_offset = start_offset + self.schema_impl.fixed_size + elif self.schema_impl.arrow_type in ( NANOARROW_TYPE_BINARY, NANOARROW_TYPE_STRING ): @@ -378,7 +317,8 @@ cdef class ArrowArrayImpl: self._get_is_null(index, is_null) if not is_null[0]: ptr = self.arrow_array.buffers[1] - ArrowDecimalInit(&decimal, 128, self.precision, self.scale) + ArrowDecimalInit(&decimal, 128, self.schema_impl.precision, + self.schema_impl.scale) ArrowDecimalSetBytes(&decimal, ptr + index * 16) ArrowBufferInit(&buf) try: @@ -459,20 +399,20 @@ cdef class ArrowArrayImpl: arrow_array = self.arrow_array.children[2] self._get_list_info(index, arrow_array, &offset, &num_elements) source_buf = arrow_array.children[0].buffers[1] + \ - offset * self.child_element_size - if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + offset * self.schema_impl.child_element_size + if self.schema_impl.child_arrow_type == NANOARROW_TYPE_FLOAT: values = array.clone(float_template, num_elements, False) - elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_DOUBLE: values = array.clone(double_template, num_elements, False) - elif self.child_arrow_type == NANOARROW_TYPE_INT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_INT8: values = array.clone(int8_template, num_elements, False) - elif 
self.child_arrow_type == NANOARROW_TYPE_UINT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_UINT8: values = array.clone(uint8_template, num_elements, False) else: errors._raise_err(errors.ERR_UNEXPECTED_DATA, - data=self.child_arrow_type) + data=self.schema_impl.child_arrow_type) memcpy(values.data.as_voidptr, source_buf, - num_elements * self.child_element_size) + num_elements * self.schema_impl.child_element_size) return (num_dimensions, indices, values) cdef object get_vector(self, int64_t index, bint* is_null): @@ -487,27 +427,27 @@ cdef class ArrowArrayImpl: char *source_buf self._get_is_null(index, is_null) if not is_null[0]: - if self.arrow_type == NANOARROW_TYPE_FIXED_SIZE_LIST: - offset = index * self.fixed_size - num_elements = self.fixed_size + if self.schema_impl.arrow_type == NANOARROW_TYPE_FIXED_SIZE_LIST: + offset = index * self.schema_impl.fixed_size + num_elements = self.schema_impl.fixed_size else: self._get_list_info(index, self.arrow_array, &offset, &num_elements) source_buf = self.arrow_array.children[0].buffers[1] + \ - offset * self.child_element_size - if self.child_arrow_type == NANOARROW_TYPE_FLOAT: + offset * self.schema_impl.child_element_size + if self.schema_impl.child_arrow_type == NANOARROW_TYPE_FLOAT: result = array.clone(float_template, num_elements, False) - elif self.child_arrow_type == NANOARROW_TYPE_DOUBLE: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_DOUBLE: result = array.clone(double_template, num_elements, False) - elif self.child_arrow_type == NANOARROW_TYPE_INT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_INT8: result = array.clone(int8_template, num_elements, False) - elif self.child_arrow_type == NANOARROW_TYPE_UINT8: + elif self.schema_impl.child_arrow_type == NANOARROW_TYPE_UINT8: result = array.clone(uint8_template, num_elements, False) else: errors._raise_err(errors.ERR_UNEXPECTED_DATA, - data=self.child_arrow_type) + data=self.schema_impl.child_arrow_type) memcpy(result.data.as_voidptr, source_buf, - num_elements * self.child_element_size) + num_elements * self.schema_impl.child_element_size) return result @classmethod @@ -528,6 +468,7 @@ cdef class ArrowArrayImpl: array_capsule, "arrow_array" ) array_impl = ArrowArrayImpl.__new__(ArrowArrayImpl) + array_impl.schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) array_impl.populate_from_array(arrow_schema, arrow_array) return array_impl @@ -536,135 +477,22 @@ cdef class ArrowArrayImpl: """ Populate the array from another array. 
""" - cdef ArrowSchemaView schema_view - ArrowSchemaMove(schema, self.arrow_schema) + self.schema_impl.populate_from_schema(schema) ArrowArrayMove(array, self.arrow_array) - memset(&schema_view, 0, sizeof(ArrowSchemaView)) - _check_nanoarrow( - ArrowSchemaViewInit(&schema_view, self.arrow_schema, NULL) - ) - self.arrow_type = schema_view.type - self.name = schema.name.decode() - self.precision = schema_view.decimal_precision - self.scale = schema_view.decimal_scale - self.fixed_size = schema_view.fixed_size - if schema_view.type == NANOARROW_TYPE_TIMESTAMP: - self._set_time_unit(schema_view.time_unit) - elif schema_view.type in ( - NANOARROW_TYPE_FIXED_SIZE_LIST, - NANOARROW_TYPE_LIST - ): - _check_nanoarrow( - ArrowSchemaViewInit( - &schema_view, self.arrow_schema.children[0], NULL - ) - ) - self._set_child_arrow_type(schema_view.type) - elif schema_view.type not in ( - NANOARROW_TYPE_BINARY, - NANOARROW_TYPE_BOOL, - NANOARROW_TYPE_DECIMAL128, - NANOARROW_TYPE_DOUBLE, - NANOARROW_TYPE_FIXED_SIZE_BINARY, - NANOARROW_TYPE_FLOAT, - NANOARROW_TYPE_INT64, - NANOARROW_TYPE_LARGE_BINARY, - NANOARROW_TYPE_LARGE_STRING, - NANOARROW_TYPE_STRING, - ) and not ( - schema_view.type == NANOARROW_TYPE_STRUCT - and self._is_sparse_vector() - ): - errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_FORMAT, - schema_format=schema.format.decode()) - if self.child_arrow_type != 0 and self.child_element_size == 0: - errors._raise_err( - errors.ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT, - schema_format=schema.children[0].format.decode() - ) - - cdef int populate_from_metadata(self, ArrowType arrow_type, str name, - int8_t precision, int8_t scale, - ArrowTimeUnit time_unit, - ArrowType child_arrow_type) except -1: - """ - Populate the array from the supplied metadata. - """ - cdef ArrowType storage_type = arrow_type - self.arrow_type = arrow_type - self._set_time_unit(time_unit) - self._set_child_arrow_type(child_arrow_type) - self.name = name - if arrow_type == NANOARROW_TYPE_TIMESTAMP: - storage_type = NANOARROW_TYPE_INT64 - - _check_nanoarrow(ArrowArrayInitFromType(self.arrow_array, - storage_type)) - if arrow_type == NANOARROW_TYPE_DECIMAL128: - self.precision = precision - self.scale = scale - ArrowSchemaInit(self.arrow_schema) - _check_nanoarrow( - ArrowSchemaSetTypeDecimal( - self.arrow_schema, - arrow_type, - precision, - scale - ) - ) - elif arrow_type == NANOARROW_TYPE_STRUCT: - # Currently struct is used for Sparse vector only - build_arrow_schema_for_sparse_vector(self.arrow_schema, - child_arrow_type) - else: - _check_nanoarrow( - ArrowSchemaInitFromType( - self.arrow_schema, - storage_type - ) - ) - if arrow_type == NANOARROW_TYPE_TIMESTAMP: - _check_nanoarrow( - ArrowSchemaSetTypeDateTime( - self.arrow_schema, - arrow_type, - time_unit, - NULL - ) - ) - if arrow_type == NANOARROW_TYPE_LIST: - # Set the schema for child using child_arrow_type - _check_nanoarrow( - ArrowSchemaSetType( - self.arrow_schema.children[0], - child_arrow_type - ) - ) - _check_nanoarrow( - ArrowArrayInitFromSchema( - self.arrow_array, - self.arrow_schema, - NULL - ) - ) - elif arrow_type == NANOARROW_TYPE_STRUCT: - _check_nanoarrow( - ArrowArrayInitFromSchema( - self.arrow_array, - self.arrow_schema, - NULL - ) - ) - else: # primitive type array init - _check_nanoarrow( - ArrowArrayInitFromType( - self.arrow_array, - storage_type - ) + cdef int populate_from_schema(self, ArrowSchemaImpl schema_impl) except -1: + """ + Populate the array from a schema. 
+ """ + self.schema_impl = schema_impl + _check_nanoarrow( + ArrowArrayInitFromSchema( + self.arrow_array, + self.schema_impl.arrow_schema, + NULL ) + ) _check_nanoarrow(ArrowArrayStartAppending(self.arrow_array)) - _check_nanoarrow(ArrowSchemaSetName(self.arrow_schema, name.encode())) def get_array_capsule(self): """ @@ -686,14 +514,15 @@ cdef class ArrowArrayImpl: Internal method for getting the data type associated with the array. """ cdef char buffer[81] - ArrowSchemaToString(self.arrow_schema, buffer, sizeof(buffer), 0) + ArrowSchemaToString(self.schema_impl.arrow_schema, buffer, + sizeof(buffer), 0) return buffer.decode() def get_name(self): """ Internal method for getting the name associated with the array. """ - return self.name + return self.schema_impl.name def get_null_count(self): """ @@ -711,16 +540,7 @@ cdef class ArrowArrayImpl: """ Internal method for getting a PyCapsule pointer to the schema. """ - cdef ArrowSchema *schema - schema = cpython.PyMem_Malloc(sizeof(ArrowSchema)) - try: - _check_nanoarrow(ArrowSchemaDeepCopy(self.arrow_schema, schema)) - except: - cpython.PyMem_Free(schema) - raise - return cpython.PyCapsule_New( - schema, 'arrow_schema', &pycapsule_schema_deleter - ) + return self.schema_impl.get_schema_capsule() cdef void pycapsule_array_deleter(object array_capsule) noexcept: @@ -735,17 +555,3 @@ cdef void pycapsule_array_deleter(object array_capsule) noexcept: if array.release != NULL: ArrowArrayRelease(array) cpython.PyMem_Free(array) - - -cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: - """ - Called when the PyCapsule pointer is no longer required and performs the - necessary cleanup. - """ - cdef ArrowSchema* schema - schema = cpython.PyCapsule_GetPointer( - schema_capsule, "arrow_schema" - ) - if schema.release != NULL: - ArrowSchemaRelease(schema) - cpython.PyMem_Free(schema) diff --git a/src/oracledb/impl/arrow/dataframe.pyx b/src/oracledb/impl/arrow/dataframe.pyx index 7a4f3ceb..0af5a590 100644 --- a/src/oracledb/impl/arrow/dataframe.pyx +++ b/src/oracledb/impl/arrow/dataframe.pyx @@ -41,7 +41,8 @@ cdef class DataFrameImpl: ArrowSchema arrow_schema ArrowArray arrow_array DataFrameImpl df_impl - ArrowArrayImpl array + ArrowArrayImpl array_impl + ArrowSchemaImpl schema_impl ssize_t i df_impl = DataFrameImpl.__new__(DataFrameImpl) df_impl.arrays = [] @@ -52,10 +53,11 @@ cdef class DataFrameImpl: _check_nanoarrow(arrow_stream.get_schema(arrow_stream, &arrow_schema)) _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) for i in range(arrow_schema.n_children): - array = ArrowArrayImpl.__new__(ArrowArrayImpl) - array.populate_from_array(arrow_schema.children[i], - arrow_array.children[i]) - df_impl.arrays.append(array) + array_impl = ArrowArrayImpl.__new__(ArrowArrayImpl) + array_impl.schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) + array_impl.populate_from_array(arrow_schema.children[i], + arrow_array.children[i]) + df_impl.arrays.append(array_impl) _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) if arrow_array.release != NULL: raise NotImplementedError("multiple chunks not supported") @@ -105,7 +107,7 @@ cdef class DataFrameImpl: ) _check_nanoarrow( ArrowSchemaDeepCopy( - array_impl.arrow_schema, schema.children[i] + array_impl.schema_impl.arrow_schema, schema.children[i] ) ) diff --git a/src/oracledb/impl/arrow/schema.pyx b/src/oracledb/impl/arrow/schema.pyx new file mode 100644 index 00000000..3cf5c9f9 --- /dev/null +++ b/src/oracledb/impl/arrow/schema.pyx @@ -0,0 +1,248 @@ 
+#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# schema.pyx +# +# Cython implementation of the ArrowSchemaImpl class. +#------------------------------------------------------------------------------ + +cdef class ArrowSchemaImpl: + + def __cinit__(self): + self.arrow_schema = \ + cpython.PyMem_Calloc(1, sizeof(ArrowSchema)) + + def __dealloc__(self): + if self.arrow_schema != NULL: + if self.arrow_schema.release != NULL: + ArrowSchemaRelease(self.arrow_schema) + cpython.PyMem_Free(self.arrow_schema) + + cdef bint _is_sparse_vector(self) except*: + """ + Returns a boolean indicating if the schema refers to a sparse vector. + This requires a structure containing the keys for number of dimensions, + indices and values. + """ + cdef: + ArrowSchemaView view + ArrowSchema *schema + if self.arrow_schema.n_children != 3: + return False + schema = self.arrow_schema.children[0] + _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) + if view.type != NANOARROW_TYPE_INT64 \ + or schema.name != b"num_dimensions": + return False + schema = self.arrow_schema.children[1] + _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) + if view.type != NANOARROW_TYPE_LIST or schema.name != b"indices": + return False + _check_nanoarrow(ArrowSchemaViewInit(&view, schema.children[0], NULL)) + if view.type != NANOARROW_TYPE_UINT32: + return False + schema = self.arrow_schema.children[2] + _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) + if view.type != NANOARROW_TYPE_LIST or schema.name != b"values": + return False + _check_nanoarrow(ArrowSchemaViewInit(&view, schema.children[0], NULL)) + self._set_child_arrow_type(view.type) + return True + + cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1: + """ + Set the child Arrow type and the corresponding element size in bytes. + """ + self.child_arrow_type = child_arrow_type + if child_arrow_type == NANOARROW_TYPE_DOUBLE: + self.child_element_size = sizeof(double) + elif child_arrow_type == NANOARROW_TYPE_FLOAT: + self.child_element_size = sizeof(float) + elif child_arrow_type == NANOARROW_TYPE_INT8: + self.child_element_size = sizeof(int8_t) + elif child_arrow_type == NANOARROW_TYPE_UINT8: + self.child_element_size = sizeof(uint8_t) + + cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1: + """ + Sets the time unit and the corresponding factor. 
+ """ + self.time_unit = time_unit + if time_unit == NANOARROW_TIME_UNIT_MILLI: + self.time_factor = 1_000 + elif time_unit == NANOARROW_TIME_UNIT_MICRO: + self.time_factor = 1_000_000 + elif time_unit == NANOARROW_TIME_UNIT_NANO: + self.time_factor = 1_000_000_000 + else: + self.time_factor = 1 + + @classmethod + def from_arrow_schema(cls, obj): + cdef: + ArrowSchema* arrow_schema + ArrowSchemaImpl schema_impl + schema_capsule = obj.__arrow_c_schema__() + arrow_schema = cpython.PyCapsule_GetPointer( + schema_capsule, "arrow_schema" + ) + schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) + schema_impl.populate_from_schema(arrow_schema) + return schema_impl + + cdef int populate_from_schema(self, ArrowSchema* schema) except -1: + """ + Populate the schema from another schema. + """ + cdef ArrowSchemaView schema_view + ArrowSchemaMove(schema, self.arrow_schema) + memset(&schema_view, 0, sizeof(ArrowSchemaView)) + _check_nanoarrow( + ArrowSchemaViewInit(&schema_view, self.arrow_schema, NULL) + ) + self.arrow_type = schema_view.type + self.name = schema.name.decode() + self.precision = schema_view.decimal_precision + self.scale = schema_view.decimal_scale + self.fixed_size = schema_view.fixed_size + if schema_view.type == NANOARROW_TYPE_TIMESTAMP: + self._set_time_unit(schema_view.time_unit) + elif schema_view.type in ( + NANOARROW_TYPE_FIXED_SIZE_LIST, + NANOARROW_TYPE_LIST + ): + _check_nanoarrow( + ArrowSchemaViewInit( + &schema_view, self.arrow_schema.children[0], NULL + ) + ) + self._set_child_arrow_type(schema_view.type) + elif schema_view.type not in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_BOOL, + NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_DOUBLE, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_FLOAT, + NANOARROW_TYPE_INT64, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING, + NANOARROW_TYPE_STRING, + ) and not ( + schema_view.type == NANOARROW_TYPE_STRUCT + and self._is_sparse_vector() + ): + errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_FORMAT, + schema_format=schema.format.decode()) + if self.child_arrow_type != 0 and self.child_element_size == 0: + errors._raise_err( + errors.ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT, + schema_format=schema.children[0].format.decode() + ) + + cdef int populate_from_metadata(self, ArrowType arrow_type, str name, + int8_t precision, int8_t scale, + ArrowTimeUnit time_unit, + ArrowType child_arrow_type) except -1: + """ + Populate the schema from the supplied metadata. 
+ """ + cdef ArrowType storage_type = arrow_type + self.arrow_type = arrow_type + self._set_time_unit(time_unit) + self._set_child_arrow_type(child_arrow_type) + self.name = name + if arrow_type == NANOARROW_TYPE_TIMESTAMP: + storage_type = NANOARROW_TYPE_INT64 + + if arrow_type == NANOARROW_TYPE_DECIMAL128: + self.precision = precision + self.scale = scale + ArrowSchemaInit(self.arrow_schema) + _check_nanoarrow( + ArrowSchemaSetTypeDecimal( + self.arrow_schema, + arrow_type, + precision, + scale + ) + ) + elif arrow_type == NANOARROW_TYPE_STRUCT: + # Currently struct is used for Sparse vector only + build_arrow_schema_for_sparse_vector(self.arrow_schema, + child_arrow_type) + else: + _check_nanoarrow( + ArrowSchemaInitFromType( + self.arrow_schema, + storage_type + ) + ) + if arrow_type == NANOARROW_TYPE_TIMESTAMP: + _check_nanoarrow( + ArrowSchemaSetTypeDateTime( + self.arrow_schema, + arrow_type, + time_unit, + NULL + ) + ) + if arrow_type == NANOARROW_TYPE_LIST: + # Set the schema for child using child_arrow_type + _check_nanoarrow( + ArrowSchemaSetType( + self.arrow_schema.children[0], + child_arrow_type + ) + ) + _check_nanoarrow(ArrowSchemaSetName(self.arrow_schema, name.encode())) + + def get_schema_capsule(self): + """ + Internal method for getting a PyCapsule pointer to the schema. + """ + cdef ArrowSchema *schema + schema = cpython.PyMem_Malloc(sizeof(ArrowSchema)) + try: + _check_nanoarrow(ArrowSchemaDeepCopy(self.arrow_schema, schema)) + except: + cpython.PyMem_Free(schema) + raise + return cpython.PyCapsule_New( + schema, 'arrow_schema', &pycapsule_schema_deleter + ) + +cdef void pycapsule_schema_deleter(object schema_capsule) noexcept: + """ + Called when the PyCapsule pointer is no longer required and performs the + necessary cleanup. + """ + cdef ArrowSchema* schema + schema = cpython.PyCapsule_GetPointer( + schema_capsule, "arrow_schema" + ) + if schema.release != NULL: + ArrowSchemaRelease(schema) + cpython.PyMem_Free(schema) diff --git a/src/oracledb/impl/base/bind_var.pyx b/src/oracledb/impl/base/bind_var.pyx index f754cc01..4bd64e1f 100644 --- a/src/oracledb/impl/base/bind_var.pyx +++ b/src/oracledb/impl/base/bind_var.pyx @@ -34,7 +34,7 @@ cdef class BindVar: cdef int _create_var_from_arrow_array(self, object conn, BaseCursorImpl cursor_impl, - ArrowArrayImpl array) except -1: + ArrowArrayImpl array_impl) except -1: """ Creates a variable given an Arrow array. """ @@ -42,10 +42,11 @@ cdef class BindVar: BaseVarImpl var_impl int64_t length var_impl = cursor_impl._create_var_impl(conn) - array.get_length(&length) + array_impl.get_length(&length) var_impl.num_elements = length - var_impl._set_metadata_from_arrow_array(array) - var_impl._arrow_array = array + var_impl.metadata = \ + OracleMetadata.from_arrow_schema(array_impl.schema_impl) + var_impl._arrow_array = array_impl var_impl._finalize_init() self.var_impl = var_impl diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 1a9cdeee..6c20cd8a 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -31,7 +31,7 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, OracleData* data, - ArrowArrayImpl arrow_array, + ArrowArrayImpl array_impl, ssize_t array_index): """ Converts the value stored in Arrow format to an OracleData structure. 
@@ -44,21 +44,21 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, tuple sparse_info bytes temp_bytes - arrow_type = metadata._arrow_type + arrow_type = metadata._schema_impl.arrow_type if arrow_type == NANOARROW_TYPE_INT64: - arrow_array.get_int64(array_index, &data.is_null, &int64_value) + array_impl.get_int64(array_index, &data.is_null, &int64_value) if not data.is_null: temp_bytes = str(int64_value).encode() convert_bytes_to_oracle_data(&data.buffer, temp_bytes) return temp_bytes elif arrow_type == NANOARROW_TYPE_DOUBLE: - arrow_array.get_double(array_index, &data.is_null, + array_impl.get_double(array_index, &data.is_null, &data.buffer.as_double) elif arrow_type == NANOARROW_TYPE_FLOAT: - arrow_array.get_float(array_index, &data.is_null, + array_impl.get_float(array_index, &data.is_null, &data.buffer.as_float) elif arrow_type == NANOARROW_TYPE_BOOL: - arrow_array.get_bool(array_index, &data.is_null, &data.buffer.as_bool) + array_impl.get_bool(array_index, &data.is_null, &data.buffer.as_bool) elif arrow_type in ( NANOARROW_TYPE_BINARY, NANOARROW_TYPE_STRING, @@ -67,30 +67,30 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, NANOARROW_TYPE_LARGE_STRING ): rb = &data.buffer.as_raw_bytes - arrow_array.get_bytes(array_index, &data.is_null, &rb.ptr, + array_impl.get_bytes(array_index, &data.is_null, &rb.ptr, &rb.num_bytes) elif arrow_type == NANOARROW_TYPE_TIMESTAMP: - arrow_array.get_int64(array_index, &data.is_null, &int64_value) + array_impl.get_int64(array_index, &data.is_null, &int64_value) if not data.is_null: - seconds = int64_value // arrow_array.time_factor - useconds = int64_value % arrow_array.time_factor + seconds = int64_value // array_impl.schema_impl.time_factor + useconds = int64_value % array_impl.schema_impl.time_factor days = seconds // (24 * 60 * 60) seconds = seconds % (24 * 60 * 60) - if arrow_array.time_factor == 1_000: + if array_impl.schema_impl.time_factor == 1_000: useconds *= 1_000 - elif arrow_array.time_factor == 1_000_000_000: + elif array_impl.schema_impl.time_factor == 1_000_000_000: useconds //= 1_000 return EPOCH_DATE + \ cydatetime.timedelta_new(days, seconds, useconds) elif arrow_type == NANOARROW_TYPE_DECIMAL128: - temp_bytes = arrow_array.get_decimal(array_index, &data.is_null) + temp_bytes = array_impl.get_decimal(array_index, &data.is_null) if not data.is_null: convert_bytes_to_oracle_data(&data.buffer, temp_bytes) return temp_bytes elif arrow_type in (NANOARROW_TYPE_LIST, NANOARROW_TYPE_FIXED_SIZE_LIST): - return arrow_array.get_vector(array_index, &data.is_null) + return array_impl.get_vector(array_index, &data.is_null) elif arrow_type == NANOARROW_TYPE_STRUCT: - sparse_info = arrow_array.get_sparse_vector(array_index, &data.is_null) + sparse_info = array_impl.get_sparse_vector(array_index, &data.is_null) if sparse_info is not None: sparse_impl = SparseVectorImpl.__new__(SparseVectorImpl) sparse_impl.num_dimensions = sparse_info[0] @@ -117,7 +117,7 @@ cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer): return output -cdef int convert_date_to_arrow_timestamp(ArrowArrayImpl arrow_array, +cdef int convert_date_to_arrow_timestamp(ArrowArrayImpl array_impl, OracleDataBuffer *buffer) except -1: """ Converts a DATE, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE or TIMESTAMP @@ -129,8 +129,8 @@ cdef int convert_date_to_arrow_timestamp(ArrowArrayImpl arrow_array, int64_t ts dt = convert_date_to_python(buffer) td = dt - EPOCH_DATE - ts = int(cydatetime.total_seconds(td) * arrow_array.time_factor) - 
arrow_array.append_int64(ts) + ts = int(cydatetime.total_seconds(td) * array_impl.schema_impl.time_factor) + array_impl.append_int64(ts) cdef object convert_interval_ds_to_python(OracleDataBuffer *buffer): @@ -155,7 +155,7 @@ cdef object convert_interval_ym_to_python(OracleDataBuffer *buffer): return PY_TYPE_INTERVAL_YM(value.years, value.months) -cdef int convert_number_to_arrow_decimal(ArrowArrayImpl arrow_array, +cdef int convert_number_to_arrow_decimal(ArrowArrayImpl array_impl, OracleDataBuffer *buffer) except -1: """ Converts a NUMBER value stored in the buffer to Arrow DECIMAL128. @@ -178,8 +178,8 @@ cdef int convert_number_to_arrow_decimal(ArrowArrayImpl arrow_array, raise ValueError("Value cannot be represented as Arrow Decimal128") # integers can be handled directly - if value.is_integer and arrow_array.scale == 0: - return arrow_array.append_decimal(value.chars, value.num_chars) + if value.is_integer and array_impl.schema_impl.scale == 0: + return array_impl.append_decimal(value.chars, value.num_chars) # Arrow expects a string of digits without the decimal point; if the number # does not contain at least the number of digits after the decimal point @@ -198,32 +198,32 @@ cdef int convert_number_to_arrow_decimal(ArrowArrayImpl arrow_array, if actual_scale > 0: memcpy(&digits[num_digits], &value.chars[num_digits + 1], actual_scale) num_digits += actual_scale - while actual_scale < arrow_array.scale: + while actual_scale < array_impl.schema_impl.scale: digits[num_digits] = b'0' num_digits += 1 actual_scale += 1 - arrow_array.append_decimal(digits, num_digits) + array_impl.append_decimal(digits, num_digits) -cdef int convert_number_to_arrow_double(ArrowArrayImpl arrow_array, +cdef int convert_number_to_arrow_double(ArrowArrayImpl array_impl, OracleDataBuffer *buffer) except -1: """ Converts a NUMBER value stored in the buffer to Arrow DOUBLE. """ cdef OracleNumber *value = &buffer.as_number if value.is_max_negative_value: - arrow_array.append_double(-1.0e126) + array_impl.append_double(-1.0e126) else: - arrow_array.append_double(atof(value.chars[:value.num_chars])) + array_impl.append_double(atof(value.chars[:value.num_chars])) -cdef int convert_number_to_arrow_int64(ArrowArrayImpl arrow_array, +cdef int convert_number_to_arrow_int64(ArrowArrayImpl array_impl, OracleDataBuffer *buffer) except -1: """ Converts a NUMBER value stored in the buffer to Arrow INT64. """ cdef OracleNumber *value = &buffer.as_number - arrow_array.append_int64(atoi(value.chars[:value.num_chars])) + array_impl.append_int64(atoi(value.chars[:value.num_chars])) cdef object convert_number_to_python_decimal(OracleDataBuffer *buffer): @@ -301,7 +301,7 @@ cdef object convert_str_to_python(OracleDataBuffer *buffer, uint8_t csfrm, cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, OracleMetadata to_metadata, OracleData* data, - ArrowArrayImpl arrow_array) except -1: + ArrowArrayImpl array_impl) except -1: """ Converts the value stored in OracleData to Arrow format. 
""" @@ -312,21 +312,21 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, # NULL values if data.is_null: - return arrow_array.append_null() + return array_impl.append_null() - arrow_type = to_metadata._arrow_type + arrow_type = to_metadata._schema_impl.arrow_type db_type_num = from_metadata.dbtype.num if arrow_type == NANOARROW_TYPE_INT64: - convert_number_to_arrow_int64(arrow_array, &data.buffer) + convert_number_to_arrow_int64(array_impl, &data.buffer) elif arrow_type == NANOARROW_TYPE_DOUBLE: if db_type_num == DB_TYPE_NUM_NUMBER: - convert_number_to_arrow_double(arrow_array, &data.buffer) + convert_number_to_arrow_double(array_impl, &data.buffer) else: - arrow_array.append_double(data.buffer.as_double) + array_impl.append_double(data.buffer.as_double) elif arrow_type == NANOARROW_TYPE_FLOAT: - arrow_array.append_float(data.buffer.as_float) + array_impl.append_float(data.buffer.as_float) elif arrow_type == NANOARROW_TYPE_BOOL: - arrow_array.append_int64(data.buffer.as_bool) + array_impl.append_int64(data.buffer.as_bool) elif arrow_type in ( NANOARROW_TYPE_BINARY, NANOARROW_TYPE_STRING, @@ -334,11 +334,11 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, NANOARROW_TYPE_LARGE_STRING ): rb = &data.buffer.as_raw_bytes - arrow_array.append_bytes( rb.ptr, rb.num_bytes) + array_impl.append_bytes( rb.ptr, rb.num_bytes) elif arrow_type == NANOARROW_TYPE_TIMESTAMP: - convert_date_to_arrow_timestamp(arrow_array, &data.buffer) + convert_date_to_arrow_timestamp(array_impl, &data.buffer) elif arrow_type == NANOARROW_TYPE_DECIMAL128: - convert_number_to_arrow_decimal(arrow_array, &data.buffer) + convert_number_to_arrow_decimal(array_impl, &data.buffer) cdef object convert_oracle_data_to_python(OracleMetadata from_metadata, @@ -554,16 +554,16 @@ cdef object convert_python_to_oracle_data(OracleMetadata metadata, return value -cdef int convert_vector_to_arrow(ArrowArrayImpl arrow_array, +cdef int convert_vector_to_arrow(ArrowArrayImpl array_impl, object vector) except -1: """ Converts the vector to the format required by the Arrow array. """ if vector is None: - arrow_array.append_null() + array_impl.append_null() elif isinstance(vector, PY_TYPE_SPARSE_VECTOR): - arrow_array.append_sparse_vector(vector.num_dimensions, + array_impl.append_sparse_vector(vector.num_dimensions, vector.indices, vector.values) else: - arrow_array.append_vector( vector) + array_impl.append_vector( vector) diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 28b2b03e..8f8af5ea 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -54,54 +54,83 @@ cdef class OracleMetadata: else: self._py_type_num = PY_TYPE_NUM_FLOAT - cdef int _set_arrow_type(self) except -1: + cdef int _set_arrow_schema(self) except -1: """ - Determine the arrow type to use for the data. + Sets the Arrow schema given the metadata. 
""" cdef: + ArrowType arrow_type, child_arrow_type = NANOARROW_TYPE_NA + ArrowTimeUnit time_unit = NANOARROW_TIME_UNIT_SECOND uint8_t py_type_num = self._py_type_num uint32_t db_type_num = self.dbtype.num + if db_type_num == DB_TYPE_NUM_NUMBER: if py_type_num == PY_TYPE_NUM_DECIMAL \ and self.precision > 0 and self.precision <= 38: - self._arrow_type = NANOARROW_TYPE_DECIMAL128 + arrow_type = NANOARROW_TYPE_DECIMAL128 elif py_type_num == PY_TYPE_NUM_STR: - self._arrow_type = NANOARROW_TYPE_STRING + arrow_type = NANOARROW_TYPE_STRING elif py_type_num == PY_TYPE_NUM_INT and self.scale == 0 \ and 0 < self.precision <= 18: - self._arrow_type = NANOARROW_TYPE_INT64 + arrow_type = NANOARROW_TYPE_INT64 else: - self._arrow_type = NANOARROW_TYPE_DOUBLE + arrow_type = NANOARROW_TYPE_DOUBLE elif db_type_num in (DB_TYPE_NUM_CHAR, DB_TYPE_NUM_VARCHAR, DB_TYPE_NUM_NCHAR, DB_TYPE_NUM_NVARCHAR): - self._arrow_type = NANOARROW_TYPE_STRING + arrow_type = NANOARROW_TYPE_STRING elif db_type_num == DB_TYPE_NUM_BINARY_FLOAT: - self._arrow_type = NANOARROW_TYPE_FLOAT + arrow_type = NANOARROW_TYPE_FLOAT elif db_type_num == DB_TYPE_NUM_BINARY_DOUBLE: - self._arrow_type = NANOARROW_TYPE_DOUBLE + arrow_type = NANOARROW_TYPE_DOUBLE elif db_type_num == DB_TYPE_NUM_BOOLEAN: - self._arrow_type = NANOARROW_TYPE_BOOL + arrow_type = NANOARROW_TYPE_BOOL elif db_type_num in (DB_TYPE_NUM_DATE, DB_TYPE_NUM_TIMESTAMP, DB_TYPE_NUM_TIMESTAMP_LTZ, DB_TYPE_NUM_TIMESTAMP_TZ): - self._arrow_type = NANOARROW_TYPE_TIMESTAMP + arrow_type = NANOARROW_TYPE_TIMESTAMP + if self.scale > 0 and self.scale <= 3: + time_unit = NANOARROW_TIME_UNIT_MILLI + elif self.scale > 3 and self.scale <= 6: + time_unit = NANOARROW_TIME_UNIT_MICRO + elif self.scale > 6 and self.scale <= 9: + time_unit = NANOARROW_TIME_UNIT_NANO elif db_type_num == DB_TYPE_NUM_LONG_RAW: - self._arrow_type = NANOARROW_TYPE_LARGE_BINARY + arrow_type = NANOARROW_TYPE_LARGE_BINARY elif db_type_num in (DB_TYPE_NUM_LONG_VARCHAR, DB_TYPE_NUM_LONG_NVARCHAR): - self._arrow_type = NANOARROW_TYPE_LARGE_STRING + arrow_type = NANOARROW_TYPE_LARGE_STRING elif db_type_num == DB_TYPE_NUM_RAW: - self._arrow_type = NANOARROW_TYPE_BINARY + arrow_type = NANOARROW_TYPE_BINARY elif db_type_num == DB_TYPE_NUM_VECTOR: if self.vector_flags & VECTOR_META_FLAG_SPARSE_VECTOR: - self._arrow_type = NANOARROW_TYPE_STRUCT + arrow_type = NANOARROW_TYPE_STRUCT + else: + arrow_type = NANOARROW_TYPE_LIST + if self.vector_format == VECTOR_FORMAT_FLOAT32: + child_arrow_type = NANOARROW_TYPE_FLOAT + elif self.vector_format == VECTOR_FORMAT_FLOAT64: + child_arrow_type = NANOARROW_TYPE_DOUBLE + elif self.vector_format == VECTOR_FORMAT_INT8: + child_arrow_type = NANOARROW_TYPE_INT8 + elif self.vector_format == VECTOR_FORMAT_BINARY: + child_arrow_type = NANOARROW_TYPE_UINT8 else: - self._arrow_type = NANOARROW_TYPE_LIST + errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT) else: errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_TYPE, db_type_name=self.dbtype.name) + self._schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) + self._schema_impl.populate_from_metadata( + arrow_type, + self.name, + self.precision, + self.scale, + time_unit, + child_arrow_type, + ) + cdef OracleMetadata copy(self): """ Create a copy of the metadata and return it. 
@@ -125,43 +154,44 @@ cdef class OracleMetadata: return metadata @staticmethod - cdef OracleMetadata from_arrow_array(ArrowArrayImpl array): + cdef OracleMetadata from_arrow_schema(ArrowSchemaImpl schema_impl): """ Returns a new OracleMetadata instance with attributes set from an Arrow array. """ - cdef OracleMetadata metadata = OracleMetadata.__new__(OracleMetadata) - metadata.name = array.name - if array.arrow_type in (NANOARROW_TYPE_DECIMAL128, - NANOARROW_TYPE_INT64): + cdef: + OracleMetadata metadata = OracleMetadata.__new__(OracleMetadata) + ArrowType arrow_type = schema_impl.arrow_type + if arrow_type in (NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_INT64): metadata.dbtype = DB_TYPE_NUMBER - elif array.arrow_type == NANOARROW_TYPE_STRING: + elif arrow_type == NANOARROW_TYPE_STRING: metadata.dbtype = DB_TYPE_VARCHAR - elif array.arrow_type in (NANOARROW_TYPE_BINARY, - NANOARROW_TYPE_FIXED_SIZE_BINARY): + elif arrow_type in (NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_FIXED_SIZE_BINARY): metadata.dbtype = DB_TYPE_RAW - elif array.arrow_type == NANOARROW_TYPE_FLOAT: + elif arrow_type == NANOARROW_TYPE_FLOAT: metadata.dbtype = DB_TYPE_BINARY_FLOAT - elif array.arrow_type == NANOARROW_TYPE_DOUBLE: + elif arrow_type == NANOARROW_TYPE_DOUBLE: metadata.dbtype = DB_TYPE_BINARY_DOUBLE - elif array.arrow_type == NANOARROW_TYPE_BOOL: + elif arrow_type == NANOARROW_TYPE_BOOL: metadata.dbtype = DB_TYPE_BOOLEAN - elif array.arrow_type == NANOARROW_TYPE_TIMESTAMP: + elif arrow_type == NANOARROW_TYPE_TIMESTAMP: metadata.dbtype = DB_TYPE_TIMESTAMP - elif array.arrow_type == NANOARROW_TYPE_LARGE_STRING: + elif arrow_type == NANOARROW_TYPE_LARGE_STRING: metadata.dbtype = DB_TYPE_LONG - elif array.arrow_type == NANOARROW_TYPE_LARGE_BINARY: + elif arrow_type == NANOARROW_TYPE_LARGE_BINARY: metadata.dbtype = DB_TYPE_LONG_RAW - elif array.arrow_type in (NANOARROW_TYPE_LIST, - NANOARROW_TYPE_STRUCT, - NANOARROW_TYPE_FIXED_SIZE_LIST): + elif arrow_type in (NANOARROW_TYPE_LIST, + NANOARROW_TYPE_STRUCT, + NANOARROW_TYPE_FIXED_SIZE_LIST): metadata.dbtype = DB_TYPE_VECTOR else: - errors._raise_err(errors.ERR_UNEXPECTED_DATA, - data=array.arrow_type) - metadata._arrow_type = array.arrow_type - metadata.precision = array.precision - metadata.scale = array.scale + errors._raise_err(errors.ERR_UNEXPECTED_DATA, data=arrow_type) + metadata._schema_impl = schema_impl + metadata.name = schema_impl.name + metadata.precision = schema_impl.precision + metadata.scale = schema_impl.scale return metadata @staticmethod diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index 149d907e..20e9b616 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -252,43 +252,10 @@ cdef class BaseVarImpl: Creates an Arrow array based on the type information selected by the user. 
""" - cdef: - ArrowTimeUnit time_unit = NANOARROW_TIME_UNIT_SECOND - ArrowType child_arrow_type = NANOARROW_TYPE_NA - - self.metadata._set_arrow_type() - if self.metadata._arrow_type == NANOARROW_TYPE_TIMESTAMP: - if self.metadata.scale > 0 and self.metadata.scale <= 3: - time_unit = NANOARROW_TIME_UNIT_MILLI - elif self.metadata.scale > 3 and self.metadata.scale <= 6: - time_unit = NANOARROW_TIME_UNIT_MICRO - elif self.metadata.scale > 6 and self.metadata.scale <= 9: - time_unit = NANOARROW_TIME_UNIT_NANO - - if self.metadata._arrow_type in ( - NANOARROW_TYPE_LIST, - NANOARROW_TYPE_STRUCT - ): - if self.metadata.vector_format == VECTOR_FORMAT_FLOAT32: - child_arrow_type = NANOARROW_TYPE_FLOAT - elif self.metadata.vector_format == VECTOR_FORMAT_FLOAT64: - child_arrow_type = NANOARROW_TYPE_DOUBLE - elif self.metadata.vector_format == VECTOR_FORMAT_INT8: - child_arrow_type = NANOARROW_TYPE_INT8 - elif self.metadata.vector_format == VECTOR_FORMAT_BINARY: - child_arrow_type = NANOARROW_TYPE_UINT8 - else: - errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT) - + if self.metadata._schema_impl is None: + self.metadata._set_arrow_schema() self._arrow_array = ArrowArrayImpl.__new__(ArrowArrayImpl) - self._arrow_array.populate_from_metadata( - arrow_type=self.metadata._arrow_type, - name=self.metadata.name, - precision=self.metadata.precision, - scale=self.metadata.scale, - time_unit=time_unit, - child_arrow_type=child_arrow_type, - ) + self._arrow_array.populate_from_schema(self.metadata._schema_impl) cdef int _finalize_init(self) except -1: """ @@ -359,13 +326,6 @@ cdef class BaseVarImpl: self.metadata.buffer_size = 0 self.metadata._finalize_init() - cdef int _set_metadata_from_arrow_array(self, - ArrowArrayImpl array) except -1: - """ - Sets the type and size of the variable given an Arrow Array. - """ - self.metadata = OracleMetadata.from_arrow_array(array) - cdef int _set_metadata_from_type(self, object typ) except -1: """ Sets the type and size of the variable given a Python type. From 597a4d4ab240933ce68fdfc40caf9d735b0154d8 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:28:03 -0600 Subject: [PATCH 194/239] Added support for all of the signed and unsigned fixed width integer types when ingesting data frames supporting the Arrow PyCapsule interface. Previously only int64 was supported. --- doc/src/release_notes.rst | 3 ++ src/oracledb/arrow_impl.pxd | 10 ++++- src/oracledb/base_impl.pyx | 6 +++ src/oracledb/impl/arrow/array.pyx | 39 +++++++++++++++++--- src/oracledb/impl/arrow/schema.pyx | 7 ++++ src/oracledb/impl/base/converters.pyx | 35 ++++++++++++++---- src/oracledb/impl/base/metadata.pyx | 13 ++++++- tests/sql/create_schema.sql | 1 + tests/test_8900_dataframe_ingestion.py | 39 ++++++++++++++++++++ tests/test_9000_dataframe_ingestion_async.py | 39 ++++++++++++++++++++ 10 files changed, 175 insertions(+), 17 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 2d9b34ff..b8086b41 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -37,6 +37,9 @@ Thick Mode Changes Common Changes ++++++++++++++ +#) Added support for all of the signed and unsigned fixed width integer types + when ingesting data frames supporting the Arrow PyCapsule interface. + Previously only int64 was supported. #) Added ``fetch_lobs`` and ``fetch_decimals`` parameters where applicable to the methods used for fetching rows or dataframes from the database. 
Note that for the creation of pipeline operations, if these parameters are not diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index 49d7276e..8223350f 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -68,6 +68,8 @@ cdef extern from "nanoarrow.h": NANOARROW_TYPE_FIXED_SIZE_LIST NANOARROW_TYPE_FLOAT NANOARROW_TYPE_INT8 + NANOARROW_TYPE_INT16 + NANOARROW_TYPE_INT32 NANOARROW_TYPE_INT64 NANOARROW_TYPE_LARGE_BINARY NANOARROW_TYPE_LARGE_STRING @@ -77,7 +79,9 @@ cdef extern from "nanoarrow.h": NANOARROW_TYPE_STRUCT NANOARROW_TYPE_TIMESTAMP NANOARROW_TYPE_UINT8 + NANOARROW_TYPE_UINT16 NANOARROW_TYPE_UINT32 + NANOARROW_TYPE_UINT64 NANOARROW_TYPE_UNINITIALIZED cpdef enum ArrowTimeUnit: @@ -139,10 +143,12 @@ cdef class ArrowArrayImpl: double* value) except -1 cdef int get_float(self, int64_t index, bint* is_null, float* value) except -1 - cdef int get_int64(self, int64_t index, bint* is_null, - int64_t* value) except -1 + cdef int get_int(self, ArrowType arrow_type, int64_t index, bint* is_null, + int64_t* value) except -1 cdef int get_length(self, int64_t* length) except -1 cdef object get_sparse_vector(self, int64_t index, bint* is_null) + cdef int get_uint(self, ArrowType arrow_type, int64_t index, bint* is_null, + uint64_t* value) except -1 cdef object get_vector(self, int64_t index, bint* is_null) cdef int populate_from_array(self, ArrowSchema* schema, ArrowArray* array) except -1 diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index a0553d4e..208a27f7 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -41,6 +41,7 @@ from libc.stdint cimport UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX from libc.stdlib cimport atoi, atof from libc.string cimport memcpy from cpython cimport array +from cpython.conversion cimport PyOS_snprintf from .constants import VECTOR_META_FLAG_SPARSE_VECTOR @@ -59,6 +60,8 @@ from .arrow_impl cimport ( NANOARROW_TYPE_FIXED_SIZE_LIST, NANOARROW_TYPE_FLOAT, NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, NANOARROW_TYPE_INT64, NANOARROW_TYPE_LIST, NANOARROW_TYPE_LARGE_BINARY, @@ -67,6 +70,9 @@ from .arrow_impl cimport ( NANOARROW_TYPE_STRUCT, NANOARROW_TYPE_TIMESTAMP, NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64, ArrowArrayImpl, ) diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 1edc25b9..83f6ffca 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -351,16 +351,24 @@ cdef class ArrowArrayImpl: ptr = self.arrow_array.buffers[1] value[0] = ptr[index] - cdef int get_int64(self, int64_t index, bint* is_null, - int64_t* value) except -1: + cdef int get_int(self, ArrowType arrow_type, int64_t index, bint* is_null, + int64_t* value) except -1: """ - Return an int64_t value at the specified index from the Arrow array. + Return an int64_t value at the specified index from the Arrow array + for all signed integer types. 
""" - cdef int64_t* ptr + cdef const void* ptr self._get_is_null(index, is_null) if not is_null[0]: - ptr = self.arrow_array.buffers[1] - value[0] = ptr[index] + ptr = self.arrow_array.buffers[1] + if arrow_type == NANOARROW_TYPE_INT8: + value[0] = ( ptr)[index] + elif arrow_type == NANOARROW_TYPE_INT16: + value[0] = ( ptr)[index] + elif arrow_type == NANOARROW_TYPE_INT32: + value[0] = ( ptr)[index] + else: + value[0] = ( ptr)[index] cdef int get_length(self, int64_t* length) except -1: """ @@ -415,6 +423,25 @@ cdef class ArrowArrayImpl: num_elements * self.schema_impl.child_element_size) return (num_dimensions, indices, values) + cdef int get_uint(self, ArrowType arrow_type, int64_t index, bint* is_null, + uint64_t* value) except -1: + """ + Return a uint64_t value at the specified index from the Arrow array + for all unsigned integer types. + """ + cdef const void* ptr + self._get_is_null(index, is_null) + if not is_null[0]: + ptr = self.arrow_array.buffers[1] + if arrow_type == NANOARROW_TYPE_UINT8: + value[0] = ( ptr)[index] + elif arrow_type == NANOARROW_TYPE_UINT16: + value[0] = ( ptr)[index] + elif arrow_type == NANOARROW_TYPE_UINT32: + value[0] = ( ptr)[index] + else: + value[0] = ( ptr)[index] + cdef object get_vector(self, int64_t index, bint* is_null): """ Return a vector value at the specified index from the Arrow array. diff --git a/src/oracledb/impl/arrow/schema.pyx b/src/oracledb/impl/arrow/schema.pyx index 3cf5c9f9..28f6b50f 100644 --- a/src/oracledb/impl/arrow/schema.pyx +++ b/src/oracledb/impl/arrow/schema.pyx @@ -146,10 +146,17 @@ cdef class ArrowSchemaImpl: NANOARROW_TYPE_DOUBLE, NANOARROW_TYPE_FIXED_SIZE_BINARY, NANOARROW_TYPE_FLOAT, + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, NANOARROW_TYPE_INT64, NANOARROW_TYPE_LARGE_BINARY, NANOARROW_TYPE_LARGE_STRING, NANOARROW_TYPE_STRING, + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64, ) and not ( schema_view.type == NANOARROW_TYPE_STRUCT and self._is_sparse_vector() diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 6c20cd8a..bf5fa3ce 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -37,18 +37,39 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, Converts the value stored in Arrow format to an OracleData structure. 
""" cdef: - int64_t int64_value, days, seconds, useconds + int64_t int_value, days, seconds, useconds SparseVectorImpl sparse_impl ArrowType arrow_type + uint64_t uint_value OracleRawBytes* rb tuple sparse_info bytes temp_bytes + ssize_t buf_len + char buf[21] arrow_type = metadata._schema_impl.arrow_type - if arrow_type == NANOARROW_TYPE_INT64: - array_impl.get_int64(array_index, &data.is_null, &int64_value) + if arrow_type in ( + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, + NANOARROW_TYPE_INT64, + ): + array_impl.get_int(arrow_type, array_index, &data.is_null, &int_value) + if not data.is_null: + buf_len = PyOS_snprintf(buf, sizeof(buf), "%lld", int_value) + temp_bytes = buf[:buf_len] + convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + return temp_bytes + elif arrow_type in ( + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64, + ): + array_impl.get_uint(arrow_type, array_index, &data.is_null, &uint_value) if not data.is_null: - temp_bytes = str(int64_value).encode() + buf_len = PyOS_snprintf(buf, sizeof(buf), "%llu", uint_value) + temp_bytes = buf[:buf_len] convert_bytes_to_oracle_data(&data.buffer, temp_bytes) return temp_bytes elif arrow_type == NANOARROW_TYPE_DOUBLE: @@ -70,10 +91,10 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, array_impl.get_bytes(array_index, &data.is_null, &rb.ptr, &rb.num_bytes) elif arrow_type == NANOARROW_TYPE_TIMESTAMP: - array_impl.get_int64(array_index, &data.is_null, &int64_value) + array_impl.get_int(arrow_type, array_index, &data.is_null, &int_value) if not data.is_null: - seconds = int64_value // array_impl.schema_impl.time_factor - useconds = int64_value % array_impl.schema_impl.time_factor + seconds = int_value // array_impl.schema_impl.time_factor + useconds = int_value % array_impl.schema_impl.time_factor days = seconds // (24 * 60 * 60) seconds = seconds % (24 * 60 * 60) if array_impl.schema_impl.time_factor == 1_000: diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 8f8af5ea..36167d7f 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -162,8 +162,17 @@ cdef class OracleMetadata: cdef: OracleMetadata metadata = OracleMetadata.__new__(OracleMetadata) ArrowType arrow_type = schema_impl.arrow_type - if arrow_type in (NANOARROW_TYPE_DECIMAL128, - NANOARROW_TYPE_INT64): + if arrow_type in ( + NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, + NANOARROW_TYPE_INT64, + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64, + ): metadata.dbtype = DB_TYPE_NUMBER elif arrow_type == NANOARROW_TYPE_STRING: metadata.dbtype = DB_TYPE_VARCHAR diff --git a/tests/sql/create_schema.sql b/tests/sql/create_schema.sql index 6ffc4076..da4b29c7 100644 --- a/tests/sql/create_schema.sql +++ b/tests/sql/create_schema.sql @@ -410,6 +410,7 @@ create table &main_user..TestDataframe ( CreditScore number(3, 0), LastUpdated timestamp, DecimalData number(15, 4), + IntegerData number(20), FloatData binary_float, DoubleData binary_double, RawData raw(100), diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py index 95428cb3..02eefb33 100644 --- a/tests/test_8900_dataframe_ingestion.py +++ b/tests/test_8900_dataframe_ingestion.py @@ -859,6 +859,45 @@ def test_8918(self): fetched_df = pyarrow.table(odf) self.assertTrue(fetched_df.equals(df)) + def test_8919(self): + "8919 - 
test ingestion with various integer data types" + scenarios = [ + ([-(2**7), 0, 2**7 - 1], pyarrow.int8()), + ([-(2**15), 0, 2**15 - 1], pyarrow.int16()), + ([-(2**31), 0, 2**31 - 1], pyarrow.int32()), + ([-(2**63), 0, 2**63 - 1], pyarrow.int64()), + ([0, 2**7, 2**8 - 1], pyarrow.uint8()), + ([0, 2**15, 2**16 - 1], pyarrow.uint16()), + ([0, 2**31, 2**32 - 1], pyarrow.uint32()), + ([0, 2**63, 2**64 - 1], pyarrow.uint64()), + ] + names = ["Id", "IntegerData"] + for values, dtype in scenarios: + with self.subTest(dtype=str(dtype)): + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, IntegerData) + values (:1, :2) + """, + df, + ) + self.conn.commit() + self.cursor.execute( + """ + select to_char(IntegerData) + from TestDataFrame + order by Id + """ + ) + fetched_values = [int(s) for s, in self.cursor] + self.assertEqual(fetched_values, values) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py index 5179dd74..96fbd9ea 100644 --- a/tests/test_9000_dataframe_ingestion_async.py +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -860,6 +860,45 @@ async def test_9018(self): fetched_df = pyarrow.table(odf) self.assertTrue(fetched_df.equals(df)) + async def test_9019(self): + "9019 - test ingestion with various integer data types" + scenarios = [ + ([-(2**7), 0, 2**7 - 1], pyarrow.int8()), + ([-(2**15), 0, 2**15 - 1], pyarrow.int16()), + ([-(2**31), 0, 2**31 - 1], pyarrow.int32()), + ([-(2**63), 0, 2**63 - 1], pyarrow.int64()), + ([0, 2**7, 2**8 - 1], pyarrow.uint8()), + ([0, 2**15, 2**16 - 1], pyarrow.uint16()), + ([0, 2**31, 2**32 - 1], pyarrow.uint32()), + ([0, 2**63, 2**64 - 1], pyarrow.uint64()), + ] + names = ["Id", "IntegerData"] + for values, dtype in scenarios: + with self.subTest(dtype=str(dtype)): + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, IntegerData) + values (:1, :2) + """, + df, + ) + await self.conn.commit() + await self.cursor.execute( + """ + select to_char(IntegerData) + from TestDataFrame + order by Id + """ + ) + fetched_values = [int(s) async for s, in self.cursor] + self.assertEqual(fetched_values, values) + if __name__ == "__main__": test_env.run_test_cases() From 7a1e9e0be9b848be795185c1a9e103b13c38f6f2 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:34:53 -0600 Subject: [PATCH 195/239] Fixed bug when attempting to convert an integer that cannot be represented as a native C int value to an Arrow data frame. 
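For illustration, the behaviour exercised by the new tests looks roughly like the sketch below; the credentials and connect string are placeholders. The bound values fit in 64 bits but not in a native C ``int``, which previously triggered the conversion bug:

.. code-block:: python

    import pyarrow
    import oracledb

    # placeholder credentials and connect string
    connection = oracledb.connect(
        user="hr", password="hr_password", dsn="localhost/orclpdb1"
    )

    odf = connection.fetch_df_all(
        """
        select cast(:1 as number(15)) as a,
               cast(:2 as number(15)) as b
        from dual
        """,
        [-(2**40), 2**41],
    )

    # the large values should round-trip intact
    print(pyarrow.table(odf).to_pylist())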
--- doc/src/release_notes.rst | 2 ++ src/oracledb/base_impl.pxd | 2 +- src/oracledb/base_impl.pyx | 3 ++- src/oracledb/errors.py | 8 +++++++ src/oracledb/impl/base/converters.pyx | 22 ++++++++++++++++---- src/oracledb/impl/base/decoders.pyx | 4 ++++ src/oracledb/impl/thick/var.pyx | 2 +- tests/sql/create_schema.sql | 3 ++- tests/test_8000_dataframe.py | 15 +++++++++++++ tests/test_8100_dataframe_async.py | 15 +++++++++++++ tests/test_8900_dataframe_ingestion.py | 6 +++--- tests/test_9000_dataframe_ingestion_async.py | 6 +++--- 12 files changed, 74 insertions(+), 14 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index b8086b41..d213a7ce 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -57,6 +57,8 @@ Common Changes (`issue 525 `__). #) Pin Cython to 3.1.x instead of 3.1.0 as requested (`issue 530 `__). +#) Fixed bug when attempting to convert an integer that cannot be represented + as a native C ``int`` value to an Arrow data frame. #) API documentation is now generated from the source code. #) Internal change: typing_extensions is now a dependency. diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 10437495..0c986abb 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -958,7 +958,7 @@ cdef struct OracleNumber: bint is_integer bint is_max_negative_value uint8_t num_chars - char_type chars[172] + char_type chars[173] cdef struct OracleRawBytes: diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index 208a27f7..92b17320 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -35,10 +35,11 @@ cimport cython cimport cpython cimport cpython.datetime as cydatetime +from libc cimport errno from libc.stdint cimport int8_t, int16_t, int32_t, int64_t from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from libc.stdint cimport UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX -from libc.stdlib cimport atoi, atof +from libc.stdlib cimport strtod, strtoll from libc.string cimport memcpy from cpython cimport array from cpython.conversion cimport PyOS_snprintf diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 8bacd232..175e0778 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -364,6 +364,8 @@ def _raise_not_supported(feature: str) -> None: ERR_EXCEEDED_IDLE_TIME = 4033 ERR_INVALID_PASSWORD_TYPE = 4034 ERR_INVALID_SERVER_RESPONSE = 4035 +ERR_CANNOT_CONVERT_TO_ARROW_INTEGER = 4036 +ERR_CANNOT_CONVERT_TO_ARROW_DOUBLE = 4037 # error numbers that result in InternalError ERR_MESSAGE_TYPE_UNKNOWN = 5000 @@ -559,6 +561,12 @@ def _raise_not_supported(feature: str) -> None: "insufficient to hold {required_buffer_len} bytes" ), ERR_CALL_TIMEOUT_EXCEEDED: "call timeout of {timeout} ms exceeded", + ERR_CANNOT_CONVERT_TO_ARROW_DOUBLE: ( + "{value} cannot be converted to an Arrow double" + ), + ERR_CANNOT_CONVERT_TO_ARROW_INTEGER: ( + "{value} cannot be converted to an Arrow integer" + ), ERR_CANNOT_PARSE_CONNECT_STRING: 'cannot parse connect string "{data}"', ERR_COLUMN_TRUNCATED: ( "column truncated to {col_value_len} {unit}. " diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index bf5fa3ce..17148f8f 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -231,11 +231,18 @@ cdef int convert_number_to_arrow_double(ArrowArrayImpl array_impl, """ Converts a NUMBER value stored in the buffer to Arrow DOUBLE. 
""" - cdef OracleNumber *value = &buffer.as_number + cdef: + OracleNumber *value = &buffer.as_number + double double_value if value.is_max_negative_value: array_impl.append_double(-1.0e126) else: - array_impl.append_double(atof(value.chars[:value.num_chars])) + errno.errno = 0 + double_value = strtod(( value.chars), NULL) + if errno.errno != 0: + errors._raise_err(errors.ERR_CANNOT_CONVERT_TO_ARROW_DOUBLE, + value=value.chars[:value.num_chars].decode()) + array_impl.append_double(double_value) cdef int convert_number_to_arrow_int64(ArrowArrayImpl array_impl, @@ -243,8 +250,15 @@ cdef int convert_number_to_arrow_int64(ArrowArrayImpl array_impl, """ Converts a NUMBER value stored in the buffer to Arrow INT64. """ - cdef OracleNumber *value = &buffer.as_number - array_impl.append_int64(atoi(value.chars[:value.num_chars])) + cdef: + OracleNumber *value = &buffer.as_number + int64_t int64_value + errno.errno = 0 + int64_value = strtoll(( value.chars), NULL, 0) + if errno.errno != 0: + errors._raise_err(errors.ERR_CANNOT_CONVERT_TO_ARROW_INTEGER, + value=value.chars[:value.num_chars].decode()) + array_impl.append_int64(int64_value) cdef object convert_number_to_python_decimal(OracleDataBuffer *buffer): diff --git a/src/oracledb/impl/base/decoders.pyx b/src/oracledb/impl/base/decoders.pyx index 373d9356..11f3894d 100644 --- a/src/oracledb/impl/base/decoders.pyx +++ b/src/oracledb/impl/base/decoders.pyx @@ -196,6 +196,7 @@ cdef int decode_number(const uint8_t* ptr, ssize_t num_bytes, if is_positive: output.num_chars = 1 output.chars[0] = 48 # zero + output.chars[1] = 0 # null terminator else: output.is_max_negative_value = True return 0 @@ -270,6 +271,9 @@ cdef int decode_number(const uint8_t* ptr, ssize_t num_bytes, output.chars[output.num_chars] = 48 # zero output.num_chars += 1 + # include null terminator for use by strtoll() and strtoull() + output.chars[output.num_chars] = 0 + cdef inline uint16_t decode_uint16be(const char_type *buf): """ diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 16d09fc3..9b9d79ae 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -490,7 +490,7 @@ cdef class ThickVarImpl(BaseVarImpl): ora_data.buffer.as_number.is_integer = \ memchr(as_bytes.ptr, b'.', as_bytes.length) == NULL; memcpy(ora_data.buffer.as_number.chars, as_bytes.ptr, - as_bytes.length); + as_bytes.length + 1); ora_data.buffer.as_number.num_chars = as_bytes.length; elif ora_type_num == DPI_ORACLE_TYPE_VECTOR: vector = _convert_vector_to_python(data.value.asVector) diff --git a/tests/sql/create_schema.sql b/tests/sql/create_schema.sql index da4b29c7..0e144b9b 100644 --- a/tests/sql/create_schema.sql +++ b/tests/sql/create_schema.sql @@ -410,7 +410,8 @@ create table &main_user..TestDataframe ( CreditScore number(3, 0), LastUpdated timestamp, DecimalData number(15, 4), - IntegerData number(20), + IntegerData number(15), + LongIntegerData number(38), FloatData binary_float, DoubleData binary_double, RawData raw(100), diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index d8c7fbec..4837c67e 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -1868,6 +1868,21 @@ def test_8076(self): ] self.__test_df_interop(data) + def test_8077(self): + "8077 - test fetching large integers" + data = (-(2**40), 2**41) + ora_df = self.conn.fetch_df_all( + """ + select + cast(:1 as number(15)), + cast(:2 as number(15)) + from dual + """, + data, + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + 
self.assertEqual([data], self.__get_data_from_df(fetched_df)) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 62bb8c17..51d416a2 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -1638,6 +1638,21 @@ async def test_8165(self): ] await self.__test_df_interop(data) + async def test_8166(self): + "8166 - test fetching large integers" + data = (-(2**40), 2**41) + ora_df = await self.conn.fetch_df_all( + """ + select + cast(:1 as number(15)), + cast(:2 as number(15)) + from dual + """, + data, + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + self.assertEqual([data], self.__get_data_from_df(fetched_df)) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py index 02eefb33..566aaee1 100644 --- a/tests/test_8900_dataframe_ingestion.py +++ b/tests/test_8900_dataframe_ingestion.py @@ -871,7 +871,7 @@ def test_8919(self): ([0, 2**31, 2**32 - 1], pyarrow.uint32()), ([0, 2**63, 2**64 - 1], pyarrow.uint64()), ] - names = ["Id", "IntegerData"] + names = ["Id", "LongIntegerData"] for values, dtype in scenarios: with self.subTest(dtype=str(dtype)): arrays = [ @@ -882,7 +882,7 @@ def test_8919(self): self.cursor.execute("delete from TestDataFrame") self.cursor.executemany( """ - insert into TestDataFrame (Id, IntegerData) + insert into TestDataFrame (Id, LongIntegerData) values (:1, :2) """, df, @@ -890,7 +890,7 @@ def test_8919(self): self.conn.commit() self.cursor.execute( """ - select to_char(IntegerData) + select to_char(LongIntegerData) from TestDataFrame order by Id """ diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py index 96fbd9ea..6c515f8e 100644 --- a/tests/test_9000_dataframe_ingestion_async.py +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -872,7 +872,7 @@ async def test_9019(self): ([0, 2**31, 2**32 - 1], pyarrow.uint32()), ([0, 2**63, 2**64 - 1], pyarrow.uint64()), ] - names = ["Id", "IntegerData"] + names = ["Id", "LongIntegerData"] for values, dtype in scenarios: with self.subTest(dtype=str(dtype)): arrays = [ @@ -883,7 +883,7 @@ async def test_9019(self): await self.cursor.execute("delete from TestDataFrame") await self.cursor.executemany( """ - insert into TestDataFrame (Id, IntegerData) + insert into TestDataFrame (Id, LongIntegerData) values (:1, :2) """, df, @@ -891,7 +891,7 @@ async def test_9019(self): await self.conn.commit() await self.cursor.execute( """ - select to_char(IntegerData) + select to_char(LongIntegerData) from TestDataFrame order by Id """ From a13093a424a9a5889055f3baf61f0d6d98d581a5 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:36:02 -0600 Subject: [PATCH 196/239] Documentation updates. 
--- doc/src/release_notes.rst | 17 +- doc/src/user_guide/bind.rst | 2 +- doc/src/user_guide/connection_handling.rst | 292 ++++++++++++--------- doc/src/user_guide/installation.rst | 289 +++++++++++--------- doc/src/user_guide/sql_execution.rst | 57 ++-- doc/src/user_guide/tuning.rst | 2 +- src/oracledb/connection.py | 4 +- utils/templates/connection.py | 4 +- 8 files changed, 388 insertions(+), 279 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index d213a7ce..e43e9573 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -19,10 +19,11 @@ oracledb `3.4.0 ` and @@ -53,10 +54,10 @@ Common Changes support and :ref:`Cloud Native Authentication ` support (`issue 512 `__). -#) Fixed bug when attempting to execute an empty statement - (`issue 525 `__). #) Pin Cython to 3.1.x instead of 3.1.0 as requested (`issue 530 `__). +#) Fixed bug when attempting to execute an empty statement + (`issue 525 `__). #) Fixed bug when attempting to convert an integer that cannot be represented as a native C ``int`` value to an Arrow data frame. #) API documentation is now generated from the source code. diff --git a/doc/src/user_guide/bind.rst b/doc/src/user_guide/bind.rst index b7ba8c48..6e826a17 100644 --- a/doc/src/user_guide/bind.rst +++ b/doc/src/user_guide/bind.rst @@ -31,7 +31,7 @@ more than once with different data values. If you do not use bind variables, Oracle must reparse and cache multiple statements. When using bind variables, Oracle Database may be able to reuse the statement execution plan and context. -.. warning:: +.. important:: Never concatenate or interpolate user data into SQL statements: diff --git a/doc/src/user_guide/connection_handling.rst b/doc/src/user_guide/connection_handling.rst index bc1b30df..122a46f9 100644 --- a/doc/src/user_guide/connection_handling.rst +++ b/doc/src/user_guide/connection_handling.rst @@ -240,8 +240,8 @@ the database service you wanted ("doesnotexist") does not exist there. Technically, the error means the listener does not know about the service at the moment. So you might also get this error if the database is currently restarting. -This error is similar to the ``ORA-12514`` error that you may see when connecting -with python-oracledb in Thick mode, or with some other Oracle tools. +This error is similar to the ``ORA-12514`` error that you may see when +connecting with python-oracledb Thick mode, or with some other Oracle tools. The solution is to use a valid service name in the connection string. You can: @@ -924,8 +924,8 @@ The following configuration providers are supported by python-oracledb: - :ref:`Microsoft Azure App Centralized Configuration Provider ` -To use python-oracledb :ref:`Centralized Configuration Provider -` functionality in Thick mode, you should set +To use :ref:`Centralized Configuration Provider ` +functionality in python-oracledb Thick mode, you should set :attr:`oracledb.defaults.thick_mode_dsn_passthrough ` to *False*. Alternatively use :meth:`ConnectParams.parse_connect_string()`, see :ref:`usingconnparams`. @@ -5000,7 +5000,7 @@ You can encrypt data transferred between the Oracle Database and python-oracledb so that unauthorized parties are not able to view plain text values as the data passes over the network. -Both python-oracledb Thin and Thick modes support TLS. Refer to the `Oracle +Both python-oracledb Thin and Thick modes support TLS. Refer to the `Oracle Database Security Guide `__ for more configuration information. 
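As a quick sanity check that encryption is actually in use on a connection, you can query ``V$SESSION_CONNECT_INFO`` (the connected user needs access to that view). The credentials and TCPS connect string below are only placeholders:

.. code-block:: python

    import oracledb

    # placeholder credentials and TCPS connect string
    connection = oracledb.connect(
        user="hr",
        password="hr_password",
        dsn="tcps://dbhost.example.com:1522/orclpdb1",
    )

    with connection.cursor() as cursor:
        cursor.execute(
            """
            select network_service_banner
            from v$session_connect_info
            where sid = sys_context('USERENV', 'SID')
            """
        )
        for (banner,) in cursor:
            print(banner)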
@@ -5123,79 +5123,103 @@ Connecting to Oracle Cloud Autonomous Databases ================================================ Python applications can connect to Oracle Autonomous Database (ADB) in Oracle -Cloud using one-way TLS (Transport Layer Security) or mutual TLS -(mTLS). One-way TLS and mTLS provide enhanced security for authentication and -encryption. +Cloud using one-way TLS (Transport Layer Security) or mutual TLS (mTLS), +depending on how the database instance is configured. One-way TLS and mTLS +provide enhanced security for authentication and encryption. A database username and password are still required for your application -connections. If you need to create a new database schema so you do not login -as the privileged ADMIN user, refer to the relevant Oracle Cloud documentation, -for example see `Create Database Users `__ in the -Oracle Autonomous Database manual. +connections. Refer to the relevant Oracle Cloud documentation, for example see +`Create Database Users `__. .. _onewaytls: One-way TLS Connection to Oracle Autonomous Database ---------------------------------------------------- -With one-way TLS, python-oracledb applications can connect to Oracle ADB -without using a wallet. Both Thin and Thick modes of the python-oracledb -driver support one-way TLS. Applications that use python-oracledb Thick mode -can connect to the Oracle ADB through one-way TLS only when using Oracle Client -library versions 19.14 (or later) or 21.5 (or later). +With one-way TLS, the python-oracledb host machine must be in the Access +Control List (ACL) of the ADB instance. Applications then connect to Oracle ADB +by passing the database username, password, and appropriate connection +string. A wallet is not used. -To enable one-way TLS for an ADB instance, complete the following steps in an -Oracle Cloud console in the **Autonomous Database Information** section of the -ADB instance details: +Both python-oracledb Thin and Thick modes support one-way TLS. + +Allowing One-way TLS Access to Oracle Autonomous Database ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +To create an ADB instance that allows one-way TLS, choose the access setting +*Secure access from allowed IPs and VCNs only* in the Oracle Cloud console +during instance creation. Then specify the IP addresses, hostnames, CIDR +blocks, Virtual Cloud networks (VCN), or Virtual Cloud network OCIDs where +Python will be running. The ACL limits access to only the resources that have +been defined and blocks all other incoming traffic. + +Alternatively, to enable one-way TLS on an existing database, complete the +following steps in the Oracle Cloud console in the **Autonomous Database +Information** section of the ADB instance: 1. Click the **Edit** link next to *Access Control List* to update the Access - Control List (ACL). The **Edit Access Control List** dialog box is displayed. + Control List (ACL). -2. In the **Edit Access Control List** dialog box, select the type of address - list entries and the corresponding values. You can include the required IP - addresses, hostnames, or Virtual Cloud Networks (VCNs). The ACL limits - access to only the IP addresses or VCNs that have been defined and blocks - all other incoming traffic. +2. In the displayed **Edit Access Control List** dialog box, select the type of + address list entries and the corresponding values. You can include the IP + addresses, hostnames, CIDR blocks, Virtual Cloud networks (VCN), or Virtual + Cloud network OCIDs where Python will be running. 3. 
Navigate back to the ADB instance details page and click the **Edit** link - next to *Mutual TLS (mTLS) Authentication*. The **Edit Mutual TLS Authentication** - is displayed. + next to *Mutual TLS (mTLS) Authentication*. -4. In the **Edit Mutual TLS Authentication** dialog box, deselect the +4. In the displayed **Edit Mutual TLS Authentication** dialog box, deselect the **Require mutual TLS (mTLS) authentication** check box to disable the mTLS requirement on Oracle ADB and click **Save Changes**. -5. Navigate back to the ADB instance details page and click **DB Connection** on - the top of the page. A **Database Connection** dialog box is displayed. +Connecting with python-oracledb Thin or Thick modes using One-way TLS ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +When your database has been enabled to allow one-way TLS, you can connect with +python-oracledb by following these steps: -6. In the Database Connection dialog box, select TLS from the **Connection Strings** - drop-down list. +1. Navigate to the ADB instance details page on the Cloud console and click + **Database connection** at the top of the page. -7. Copy the appropriate Connection String of the database instance used by your application. +2. In the displayed **Database Connection** dialog box, select TLS from the + **Connection Strings** drop-down list. -Applications can connect to your Oracle ADB instance using the database -credentials and the copied :ref:`Connect Descriptor `. For +3. Copy the appropriate Connection String for the connection service level you + want. + +Applications can connect using database credentials and the copied +:ref:`connection string `. Do *not* pass wallet parameters. For example, to connect as the ADMIN user: .. code-block:: python - cs = '''(description = (retry_count=20)(retry_delay=3)(address=(protocol=tcps) - (port=1522)(host=xxx.oraclecloud.com))(connect_data=(service_name=xxx.adb.oraclecloud.com)) - (security=(ssl_server_dn_match=yes)(ssl_server_cert_dn="CN=xxx.oraclecloud.com, - O=Oracle Corporation, L=Redwood City, T=California, C=US")))''' + cs = '''(description= (retry_count=20)(retry_delay=3)(address=(protocol=tcps)(port=1521) + (host=adb.abcdef.oraclecloud.com)) + (connect_data=(service_name=abcde_mydb_high.adb.oraclecloud.com)) + (security=(ssl_server_dn_match=yes)))''' connection = oracledb.connect(user="admin", password=pw, dsn=cs) -You can download the ADB connection wallet using the **DB Connection** button -and extract the :ref:`tnsnames.ora ` file, or create one yourself -if you prefer to keep connections strings out of application code, see -:ref:`netservice`. +If you prefer to keep connection descriptors out of application code, you can +add the descriptor with a :ref:`TNS Alias ` to a :ref:`tnsnames.ora +` file, and use the TNS alias as the ``dsn`` value. + +Not having the ACL correctly configured is a common cause of connection +errors. To aid troubleshooting, remove ``(retry_count=20)(retry_delay=3)`` from +the connect descriptor so that errors are returned faster. If network +configuration issues are suspected then, for initial troubleshooting with a +disposable database, you can update the ACL to contain a CIDR block of +``0.0.0.0/0``, however this means *anybody* can attempt to connect to your +database so you should recreate the database immediately after identifying a +working, more restrictive ACL. -You may be interested in the blog post `Easy wallet-less connections to Oracle -Autonomous Databases in Python -`__. 
+To connect with python-oracledb Thick mode requires Oracle Client library +versions 19.14 (or later), or 21.5 (or later), or 23.3 (or later). If you have +also been experimenting with mTLS and your environment has ``sqlnet.ora`` and +``tnsnames.ora`` files set up, then remove these before using python-oracledb +Thick mode with one-way TLS to avoid configuration clashes. .. _twowaytls: @@ -5206,101 +5230,123 @@ To enable python-oracledb connections to Oracle Autonomous Database in Oracle Cloud using mTLS, a wallet needs to be downloaded from the cloud console. mTLS is sometimes called Two-way TLS. -Install the Wallet and Network Configuration Files +Allowing mTLS Access to Oracle Autonomous Database ++++++++++++++++++++++++++++++++++++++++++++++++++ -From the Oracle Cloud console for the database, download the wallet zip file -using the **DB Connection** button. The zip contains the wallet and network -configuration files. When downloading the zip, the cloud console will ask you -to create a wallet password. This password is used by python-oracledb in Thin -mode, but not in Thick mode. +When creating an ADB instance in the Oracle Cloud console, choose the access +setting "Secure access from everywhere". + +.. _getwallet: + +Downloading the Database Wallet ++++++++++++++++++++++++++++++++ + +After your Autonomous Database has been enabled to allow mTLS, download its +``wallet.zip`` file which contains the certificate and network configuration +files: + +1. Navigate to the ADB instance details page on the Oracle Cloud console and + click **Database connection** at the top of the page. + +2. In the displayed **Database Connection** dialog box, select the "Download + Wallet" button in the *Download client credentials (Wallet)* section. The + cloud console will ask you to create a wallet password. This password is + required by python-oracledb in Thin mode, but not used in Thick mode. -Note: keep wallet files in a secure location and only share them and the +**Note**: Keep wallet files in a secure location and only share them and the password with authorized users. -**In python-oracledb Thin mode** +Connecting with python-oracledb Thin mode using mTLS +++++++++++++++++++++++++++++++++++++++++++++++++++++ -For python-oracledb in Thin mode, only two files from the zip are needed: +For python-oracledb Thin mode, unzip the :ref:`wallet.zip ` file. +Only two files from it are needed: -- ``tnsnames.ora`` - Maps net service names used for application connection - strings to your database services +- ``tnsnames.ora`` - Maps TNS Aliases used for application connection strings + to your database services - ``ewallet.pem`` - Enables SSL/TLS connections in Thin mode. Keep this file secure If you do not have a PEM file, see :ref:`createpem`. -Unzip the wallet zip file and move the required files to a location such as +Move the two files to a directory that is accessible by your application. In +this example, the files are located in the same directory, ``/opt/OracleCloud/MYDB``. -Connection can be made using your database credentials and setting the ``dsn`` -parameter to the desired network alias from the :ref:`tnsnames.ora -` file. The ``config_dir`` parameter indicates the directory -containing :ref:`tnsnames.ora `. The ``wallet_location`` -parameter is the directory containing the PEM file. In this example the files -are in the same directory. The ``wallet_password`` parameter should be set to -the password created in the cloud console when downloading the wallet. It is -not the database password. 
For example, to connect as the ADMIN user using the -``mydb_low`` network service name: +A connection can be made by using your database credentials and setting the +``dsn`` parameter to the desired :ref:`TNS Alias ` from the +:ref:`tnsnames.ora ` file. The ``config_dir`` parameter indicates +the directory containing :ref:`tnsnames.ora `. The +``wallet_location`` parameter is the directory containing the PEM file. The +``wallet_password`` parameter should be set to the password created in the +cloud console when downloading the wallet. It is not the database user or ADMIN +password. For example, to connect as the ADMIN user using the ``mydb_low`` TNS +Alias: .. code-block:: python - connection = oracledb.connect(user="admin", password=pw, dsn="mydb_low", - config_dir="/opt/OracleCloud/MYDB", - wallet_location="/opt/OracleCloud/MYDB", - wallet_password=wp) + connection = oracledb.connect( + user="admin", + password=pw, # database password for ADMIN + dsn="mydb_low", # TNS Alias from tnsnames.ora + config_dir="/opt/OracleCloud/MYDB", # directory with tnsnames.ora + wallet_location="/opt/OracleCloud/MYDB", # directory with ewallet.pem + wallet_password=wp # not a database user password + ) -**In python-oracledb Thick mode** +Connecting with python-oracledb Thick mode using mTLS ++++++++++++++++++++++++++++++++++++++++++++++++++++++ -For python-oracledb in Thick mode, only these files from the zip are needed: +For python-oracledb Thick mode, unzip the :ref:`wallet.zip ` file. +Only three files from it are needed: -- ``tnsnames.ora`` - Maps net service names used for application connection - strings to your database services +- ``tnsnames.ora`` - Maps :ref:`TNS Aliases ` used for application + connection strings to your database services - ``sqlnet.ora`` - Configures Oracle Network settings -- ``cwallet.sso`` - Enables SSL/TLS connections in Thick mode. Keep this file - secure +- ``cwallet.sso`` - Enables SSL/TLS connections in python-oracledb Thick mode. + Keep this file secure -Unzip the wallet zip file. There are two options for placing the required -files: +There are two options for placing the required files: -- Move the three files to the ``network/admin`` directory of the client - libraries used by your application. For example if you are using Instant - Client 19c and it is in ``$HOME/instantclient_19_15``, then you would put the - wallet files in ``$HOME/instantclient_19_15/network/admin/``. +1. Move the three files to the ``network/admin`` directory of the client + libraries used by your application. For example, if you are using Oracle + Instant Client 23ai and it is in ``$HOME/instantclient_23_9``, then you would + put the wallet files in ``$HOME/instantclient_23_9/network/admin/``. - Connection can be made using your database credentials and setting the - ``dsn`` parameter to the desired network alias from the :ref:`tnsnames.ora - ` file. For example, to connect as the ADMIN user using the - ``mydb_low`` network service name: + A connection can be made using your database credentials and setting the + ``dsn`` parameter to the desired :ref:`TNS Alias ` from the + :ref:`tnsnames.ora ` file. For example, to connect as the ADMIN + user using the ``mydb_low`` TNS Alias: - .. code-block:: python + .. code-block:: python - connection = oracledb.connect(user="admin", password=pw, dsn="mydb_low") + connection = oracledb.connect(user="admin", password=pw, dsn="mydb_low") -- Alternatively, move the three files to any accessible directory, for example - ``/opt/OracleCloud/MYDB``. +2. 
Alternatively, move the three files to any accessible directory, for example + ``/opt/OracleCloud/MYDB``. - Then edit ``sqlnet.ora`` and change the wallet location directory to the - directory containing the ``cwallet.sso`` file. For example:: + Then edit ``sqlnet.ora`` and change the wallet location directory to the + directory containing the ``cwallet.sso`` file. For example:: - WALLET_LOCATION = (SOURCE = (METHOD = file) (METHOD_DATA = (DIRECTORY="/opt/OracleCloud/MYDB"))) - SSL_SERVER_DN_MATCH=yes + WALLET_LOCATION = (SOURCE = (METHOD = file) (METHOD_DATA = (DIRECTORY="/opt/OracleCloud/MYDB"))) + SSL_SERVER_DN_MATCH=yes - Since the ``tnsnames.ora`` and ``sqlnet.ora`` files are not in the default - location, your application needs to indicate where they are, either with the - ``config_dir`` parameter to :meth:`oracledb.init_oracle_client()`, or using - the ``TNS_ADMIN`` environment variable. See :ref:`Optional Oracle Net - Configuration Files `. (Neither of these settings are needed, - and you do not need to edit ``sqlnet.ora``, if you have put all the files in - the ``network/admin`` directory.) + Since the ``tnsnames.ora`` and ``sqlnet.ora`` files are not in the default + location, your application needs to indicate where they are, either with the + ``config_dir`` parameter to :meth:`oracledb.init_oracle_client()`, or by + using the ``TNS_ADMIN`` environment variable. See :ref:`Optional Oracle Net + Configuration Files `. (Neither of these settings are needed, + and you do not need to edit ``sqlnet.ora``, if you have put all the files in + the ``network/admin`` directory.) - For example, to connect as the ADMIN user using the ``mydb_low`` network - service name: + For example, to connect as the ADMIN user using the ``mydb_low`` TNS + alias: - .. code-block:: python + .. code-block:: python - oracledb.init_oracle_client(config_dir="/opt/OracleCloud/MYDB") + oracledb.init_oracle_client(config_dir="/opt/OracleCloud/MYDB") - connection = oracledb.connect(user="admin", password=pw, dsn="mydb_low") + connection = oracledb.connect(user="admin", password=pw, dsn="mydb_low") In python-oracle Thick mode, to create mTLS connections in one Python process @@ -5317,6 +5363,8 @@ When python-oracledb is using Oracle Client libraries 19c, or later, you can optionally use :ref:`Easy Connect ` syntax to connect to Oracle Autonomous Database. +This section discuss the parameters for mTLS connection. + The mapping from the cloud :ref:`tnsnames.ora ` entries to an Easy Connect string is:: @@ -5338,11 +5386,12 @@ Then your applications can connect using the connection string: connection = oracledb.connect(user="hr", password=userpwd, dsn=dsn) The ``wallet_location`` parameter needs to be set to the directory containing -the ``cwallet.sso`` or ``ewallet.pem`` file from the wallet zip. The other -wallet files, including ``tnsnames.ora``, are not needed when you use the Easy -Connect syntax. +the ``cwallet.sso`` or ``ewallet.pem`` file extracted from the :ref:`wallet.zip +` file. The other files, including ``tnsnames.ora``, are not needed +when you use the Easy Connect syntax. 
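If you prefer to build the connection programmatically instead of embedding everything in one string, the same settings can be carried by a ``ConnectParams`` object. This is only a sketch for python-oracledb Thin mode; the host, port, service name, wallet directory, and passwords are placeholders:

.. code-block:: python

    import oracledb

    params = oracledb.ConnectParams(
        protocol="tcps",
        host="adb.ap-sydney-1.oraclecloud.com",
        port=1522,
        service_name="abcde_mydb_high.adb.oraclecloud.com",
        wallet_location="/opt/OracleCloud/MYDB",   # directory containing ewallet.pem
        wallet_password="wallet_password",         # not the database password
    )
    connection = oracledb.connect(user="hr", password="hr_password", params=params)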
-You can add other Easy Connect parameters to the connection string, for example:: +You can add other Easy Connect parameters to the connection string, for +example:: dsn = dsn + "&https_proxy=myproxy.example.com&https_proxy_port=80" @@ -5515,11 +5564,11 @@ connection strings, wallet locations, and wallet password (if required) in each wallet_password=walletpw) The ``config_dir`` parameter is the directory containing the :ref:`tnsnames.ora -` file. The ``wallet_location`` parameter is the directory -containing the ``ewallet.pem`` file. If you are using Oracle Autonomous +` file. The ``wallet_location`` parameter is the directory +containing the ``ewallet.pem`` file. If you are using Oracle Autonomous Database, both of these paths are typically the same directory where the -``wallet.zip`` file was extracted. The ``dsn`` should specify a TCPS -connection. +:ref:`wallet.zip ` file was extracted. The ``dsn`` should specify a +TCPS connection. **In python-oracledb Thick mode** @@ -5528,7 +5577,7 @@ containing the ``MY_WALLET_DIRECTORY`` option needs to be created: .. code-block:: python - dsn = "mydb_high" # one of the network aliases from tnsnames.ora + dsn = "mydb_high" # one of the TNS Aliases from tnsnames.ora params = oracledb.ConnectParams(config_dir="path_to_unzipped_wallet", wallet_location="path_location_of_sso_file") params.parse_connect_string(dsn) @@ -5536,10 +5585,11 @@ containing the ``MY_WALLET_DIRECTORY`` option needs to be created: connection = oracledb.connect(user=user_name, password=password, dsn=dsn) The ``config_dir`` parameter should be the directory containing the -:ref:`tnsnames.ora ` and ``sqlnet.ora`` files. The +:ref:`tnsnames.ora ` and ``sqlnet.ora`` files. The ``wallet_location`` parameter is the directory containing the ``cwallet.sso`` -file. If you are using Oracle Autonomous Database, both of these paths are -typically the same directory where the ``wallet.zip`` file was extracted. +file. If you are using Oracle Autonomous Database, both of these paths are +typically the same directory where the :ref:`wallet.zip ` file was +extracted. .. note:: diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 4c79af70..676b1114 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -152,6 +152,11 @@ version is in use. The attribute :attr:`Connection.version` can be used to determine which Oracle Database version a connection is accessing. These attributes can then be used to adjust the application behavior accordingly. +Note Oracle maintains a list of database versions that problem resolution and +error correction can be obtained for, see `Release Schedule of Current Database +Releases `__. + .. _instreq: Installation Requirements @@ -173,8 +178,8 @@ To use python-oracledb, you need: Basic or Basic Light packages, from a full Oracle Client installation (such as installed by Oracle's GUI installer), or from those included in Oracle Database if Python is on the same machine as the database. Oracle Client - libraries versions 23, 21, 19, 18, 12, and 11.2 are supported where available - on Linux, Windows and macOS. Oracle's standard client-server version + libraries versions 23, 21, 19, 18, 12, and 11.2 can be used where available + on Linux, Windows, and macOS. Oracle's standard client-server version interoperability allows connection to both older and newer databases. - An Oracle Database either local or remote, on-premises or in the Cloud. 
@@ -233,8 +238,8 @@ Optionally Install Oracle Client By default, python-oracledb runs in a Thin mode which connects directly to Oracle Database so no further installation steps are required. However, to use additional features available in :ref:`Thick mode ` you need -Oracle Client libraries installed. Oracle Client versions 23, 21, 19, 18, 12 -and 11.2 are supported. +Oracle Client libraries installed. Oracle Client versions 23, 21, 19, 18, 12, +and 11.2 can be used. - If your database is on a remote computer, then download the free `Oracle Instant Client @@ -276,16 +281,14 @@ To use python-oracledb Thick mode with Oracle Instant Client zip files: `__ Oracle Instant Client 23ai will connect to Oracle Database 19 or later. - Oracle Instant Client 21c will connect to Oracle Database 12.1 or later. Oracle Instant Client 19c will connect to Oracle Database 11.2 or later. - It is recommended to keep up to date with the latest Oracle Instant Client - release updates of your desired major version. Oracle Database 23ai and 19c - are Long Term Support Releases whereas Oracle Database 21c is an Innovation - Release. + Oracle Database 23ai and 19c are Long Term Support Releases. Note Oracle + Database 23ai 32-bit clients are not available on any platform, however you + can use older 32-bit clients to connect to Oracle Database 23ai. - Note Oracle Database 23ai 32-bit clients are not available on any platform, - however, you can use older 32-bit clients to connect to Oracle Database 23ai. + It is recommended to keep up to date with the latest Oracle Instant Client + release updates of your desired major version. 2. Unzip the package into a single directory that is accessible to your application. For example: @@ -300,17 +303,30 @@ To use python-oracledb Thick mode with Oracle Instant Client zip files: installed in unsafe paths, such as from a user directory. You may need to install under a directory like ``/opt`` or ``/usr/local``. -3. Install the ``libaio`` package with sudo or as the root user. For example:: +3. Install the ``libaio`` package with sudo or as the root user. For example: + + .. code-block:: shell - sudo yum install libaio + sudo dnf install libaio - On some Linux distributions this package is called ``libaio1`` instead. + On some Linux distributions this package is called ``libaio1`` or + ``libaio1t64`` instead. + + If you have ``libaio1t64`` installed but Instant Client still fails to find + ``libaio.so.1``, you may need to create a symbolic link from + ``libaio.so.1t64`` to ``libaio.so.1`` (similar to `the patch here + `__). + +4. When using Oracle Instant Client 19 on Linux versions such as Oracle Linux + 8, you may need to manually install the ``libnsl`` package to make + ``libnsl.so`` available: + + .. code-block:: shell - When using Oracle Instant Client 19 on recent Linux versions such as Oracle - Linux 8, you may need to manually install the ``libnsl`` package to make - ``libnsl.so`` available. + sudo dnf install libnsl -4. If there is no other Oracle software on the machine that will be +5. If there is no other Oracle software on the machine that will be impacted, permanently add Instant Client to the runtime link path. For example, with sudo or as the root user: @@ -321,7 +337,9 @@ To use python-oracledb Thick mode with Oracle Instant Client zip files: Alternatively, set the environment variable ``LD_LIBRARY_PATH`` to the appropriate directory for the Instant Client version. For - example:: + example: + + .. 
code-block:: shell export LD_LIBRARY_PATH=/opt/oracle/instantclient_21_6:$LD_LIBRARY_PATH @@ -329,7 +347,7 @@ To use python-oracledb Thick mode with Oracle Instant Client zip files: other daemons commonly reset environment variables so using ``ldconfig`` is generally preferred instead. -5. If you use optional Oracle configuration files such as ``tnsnames.ora``, +6. If you use optional Oracle configuration files such as ``tnsnames.ora``, ``sqlnet.ora``, or ``oraaccess.xml`` with Instant Client, then put the files in an accessible directory, for example in ``/opt/oracle/your_config_dir``. Then use: @@ -347,7 +365,7 @@ To use python-oracledb Thick mode with Oracle Instant Client zip files: This is the default Oracle configuration directory for executables linked with this Instant Client. -6. Call :meth:`oracledb.init_oracle_client()` in your application, if it is not +7. Call :meth:`oracledb.init_oracle_client()` in your application, if it is not already used. Oracle Instant Client RPMs @@ -355,86 +373,100 @@ Oracle Instant Client RPMs To use python-oracledb with Oracle Instant Client RPMs: -1. Download an Oracle 23, 21, 19, 18, 12, or 11.2 "Basic" or "Basic Light" RPM - matching your Python architecture: +1a. Download and install Oracle Instant Client "Basic" or "Basic Light" - - `Linux 64-bit (x86-64) - `__ - - `Linux 32-bit (x86) - `__ - - `Linux Arm 64-bit (aarch64) - `__ + Download and install an Oracle 23, 21, 19, 18, 12, or 11.2 "Basic" or "Basic + Light" RPM matching your Python architecture from: - Alternatively, Oracle's yum server has convenient repositories, see `Oracle - Database Instant Client for Oracle Linux - `__ instructions. The - repositories are: + - `Linux 64-bit (x86-64) `__ + - `Linux Arm 64-bit (aarch64) `__ - - Oracle Linux 9 (x86-64) + Install the downloaded RPM with sudo or as the root user. For example: - - `Instant Client 23 for Oracle Linux 9 (x86-64) - `__ + .. code-block:: shell - - `Instant Client 19 for Oracle Linux 9 (x86-64) - `__ + sudo dnf install oracle-instantclient-basic-23.9.0.25.07-1.el9.x86_64.rpm - - Oracle Linux 8 (x86-64) + Dnf will automatically install required dependencies, such as ``libaio``. - - `Instant Client 23 for Oracle Linux 8 (x86-64) - `__ + It is recommended to keep up to date with the latest Oracle Instant Client + release updates of your desired major version. - - `Instant Client 21 for Oracle Linux 8 (x86-64) - `__ + Oracle Database 23ai and 19c are Long Term Support Releases. Oracle Instant + Client 23ai will connect to Oracle Database 19 or later. Oracle Instant + Client 19c will connect to Oracle Database 11.2 or later. - - `Instant Client 19 for Oracle Linux 8 (x86-64) - `__ + Note Oracle Database 23ai 32-bit clients are not available on any platform, + however you can use older 32-bit clients to connect to Oracle Database + 23ai. - - Oracle Linux 8 (aarch64) +1b. Alternatively, install Instant Client from Oracle's yum server - - `Instant Client 19 for Oracle Linux Arm 8 (aarch64) - `__ + See `Oracle Database Instant Client for Oracle Linux + `__ instructions. The + repositories are: - - Oracle Linux 7 (x86-64) + - 23ai on Oracle Linux 9 - - `Instant Client 21 for Oracle Linux 7 (x86-64) - `__ + - `Instant Client 23 for Oracle Linux 9 (x86-64) `__ - - `Instant Client 19 and 18 for Oracle Linux 7 (x86-64) - `__ + - `Instant Client 23 for Oracle Linux 9 (aarch64) `__ - - Oracle Linux 7 (aarch64) + .. 
code-block:: shell - - `Instant Client 19 for Oracle Linux Arm 7 (aarch64) - `__ + sudo dnf install oracle-instantclient-release-23ai-el9 + sudo dnf install oracle-instantclient-basic - - Oracle Linux 6 (x86-64) + - 19c on Oracle Linux 9: - - `Instant Client 18 for Oracle Linux 6 (x86-64) - `__ + - `Instant Client 19 for Oracle Linux 9 (x86-64) `__ - Oracle Instant Client 23ai will connect to Oracle Database 19 or later. - Oracle Instant Client 21c will connect to Oracle Database 12.1 or later. - Oracle Instant Client 19c will connect to Oracle Database 11.2 or later. + - `Instant Client 19 for Oracle Linux 9 (aarch64) `__ - It is recommended to keep up to date with the latest Oracle Instant Client - release updates of your desired major version. Oracle Database 23ai and 19c - are Long Term Support Releases whereas Oracle Database 21c is an Innovation - Release. + .. code-block:: shell - Note Oracle Database 23ai 32-bit clients are not available on any platform, - however, you can use older 32-bit clients to connect to Oracle Database 23ai. + sudo dnf install oracle-instantclient-release-el9 + sudo dnf install oracle-instantclient19.XX-basic -2. Install the downloaded RPM with sudo or as the root user. For example: + - 23ai on Oracle Linux 8 - .. code-block:: shell + - `Instant Client 23 for Oracle Linux 8 (x86-64) `__ + + - `Instant Client 23 for Oracle Linux 8 (aarch64) `__ + + .. code-block:: shell + + sudo dnf install oracle-instantclient-release-23ai-el8 + sudo dnf install oracle-instantclient-basic + + - 19c on Oracle Linux 8: - sudo yum install oracle-instantclient-basic-21.6.0.0.0-1.x86_64.rpm + - `Instant Client 19 for Oracle Linux 8 (x86-64) `__ - Yum will automatically install required dependencies, such as ``libaio``. + - `Instant Client 19 for Oracle Linux 8 (aarch64) `__ - When using Oracle Instant Client 19 on recent Linux versions such as Oracle - Linux 8, you may need to manually install the ``libnsl`` package to make - ``libnsl.so`` available. + .. code-block:: shell + + sudo dnf install -y oracle-release-el8 + sudo dnf install -y oracle-instantclient19.XX-basic + +2. When using Oracle Instant Client 19 on Linux versions such as Oracle Linux + 8, you may need to manually install the ``libnsl`` package to make + ``libnsl.so`` available: + + .. code-block:: shell + + sudo dnf install libnsl 3. For Instant Client 19 or later, the system library search path is automatically configured during installation. @@ -451,7 +483,9 @@ To use python-oracledb with Oracle Instant Client RPMs: Alternatively, for version 18 and earlier, every shell running Python will need to have the environment variable ``LD_LIBRARY_PATH`` set to the appropriate directory for the - Instant Client version. For example:: + Instant Client version. For example: + + .. code-block:: shell export LD_LIBRARY_PATH=/usr/lib/oracle/18.5/client64/lib:$LD_LIBRARY_PATH @@ -489,7 +523,7 @@ as installed by Oracle's GUI installer). The libraries must be either 32-bit or 64-bit, matching your Python architecture. Note Oracle Database 23ai 32-bit clients are not available on any -platform, however, you can use older 32-bit clients to connect to Oracle +platform, however you can use older 32-bit clients to connect to Oracle Database 23ai. 1. Set required Oracle environment variables by running the Oracle environment @@ -505,8 +539,9 @@ Database 23ai. source /u01/app/oracle/product/11.2.0/xe/bin/oracle_env.sh -2. 
Optional Oracle configuration files such as ``tnsnames.ora``, ``sqlnet.ora``, - or ``oraaccess.xml`` can be placed in ``$ORACLE_HOME/network/admin``. +2. Optional Oracle configuration files such as ``tnsnames.ora``, + ``sqlnet.ora``, or ``oraaccess.xml`` can be placed in + ``$ORACLE_HOME/network/admin``. Alternatively, Oracle configuration files can be put in another, accessible directory. Then set the environment variable ``TNS_ADMIN`` to that @@ -526,7 +561,9 @@ Install python-oracledb Use Python's `pip `__ package to install python-oracledb from Python's package repository `PyPI -`__:: +`__: + +.. code-block:: shell python -m pip install oracledb --upgrade @@ -548,12 +585,12 @@ By default, python-oracledb runs in a Thin mode which connects directly to Oracle Database so no further installation steps are required. However, to use additional features available in :ref:`Thick mode ` you need Oracle Client libraries installed. Oracle Client versions 21, 19, 18, 12, and -11.2 are supported. +11.2 can be used. - If your database is on a remote computer, then download the free `Oracle - Instant Client - `__ "Basic" - or "Basic Light" package for your operating system architecture. + Instant Client `__ "Basic" or "Basic Light" package for your operating system + architecture. - Alternatively, use the client libraries already available in a locally installed database such as the free `Oracle Database Express Edition ("XE") @@ -578,12 +615,11 @@ Oracle Instant Client Zip Files To use python-oracledb in Thick mode with Oracle Instant Client zip files: 1. Download an Oracle 21, 19, 18, 12, or 11.2 "Basic" or "Basic Light" zip - file: `64-bit - `__ - or `32-bit - `__, + file: `64-bit `__ or `32-bit `__, matching your Python architecture. Note Oracle Database 23ai 32-bit clients - are not available on any platform, however, you can use older 32-bit clients + are not available on any platform, however you can use older 32-bit clients to connect to Oracle Database 23ai. The latest version is recommended. Oracle Instant Client 19 will connect to @@ -635,7 +671,9 @@ Configure Oracle Instant Client oracledb.init_oracle_client() * Another way to set ``PATH`` is to use a batch file that sets it before - Python is executed, for example:: + Python is executed, for example: + + .. code-block:: shell REM mypy.bat SET PATH=C:\oracle\instantclient_19_22;%PATH% @@ -680,7 +718,7 @@ installed by Oracle's GUI installer). The Oracle libraries must be either 32-bit or 64-bit, matching your Python architecture. Note Oracle Database 23ai 32-bit clients are not available on -any platform, however, you can use older 32-bit clients to connect to Oracle +any platform, however you can use older 32-bit clients to connect to Oracle Database 23ai. 1. Set the environment variable ``PATH`` to include the path that contains @@ -895,7 +933,9 @@ To install python-oracledb on a computer that is not connected to the internet, download a python-oracledb wheel package from Python's package repository `PyPI `__. Use the file appropriate for your operating system and python version. Transfer this file to the offline -computer and install it with:: +computer and install it with: + +.. code-block:: shell python -m pip install "" @@ -954,7 +994,9 @@ Building a python-oracledb package locally 2. Download the source code using one of the following options: - You can clone the source code from `GitHub - `__:: + `__: + + .. 
code-block:: shell git clone --recurse-submodules https://github.com/oracle/python-oracledb.git @@ -968,7 +1010,9 @@ Building a python-oracledb package locally ``python-oracledb-main/src/oracledb/impl/thick/odpi``. - Alternatively, clone the source from `opensource.oracle.com - `__, which mirrors GitHub:: + `__, which mirrors GitHub: + + .. code-block:: shell git clone --recurse-submodules https://opensource.oracle.com/git/oracle/python-oracledb.git git checkout main @@ -980,7 +1024,9 @@ Building a python-oracledb package locally `__ page, download the source package archive, and extract it. -3. With the source code available, build a python-oracledb package by running:: +3. With the source code available, build a python-oracledb package by running: + + .. code-block:: shell cd python-oracledb # the name may vary depending on the download python -m pip install build --upgrade @@ -991,7 +1037,9 @@ Building a python-oracledb package locally For example when using Python 3.12 on macOS you might have the file ``dist/oracledb-3.1.0-cp312-cp312-macosx_14_0_arm64.whl``. -4. Install this package:: +4. Install this package: + + .. code-block:: shell python -m pip install dist/oracledb-3.1.0-cp312-cp312-macosx_14_0_arm64.whl @@ -1028,7 +1076,9 @@ Python versions. 6. When the build has completed, download the "python-oracledb-wheels" artifact, unzip it, and install the one for your architecture and Python - version. For example, when using Python 3.12 on macOS, install:: + version. For example, when using Python 3.12 on macOS, install: + + .. code-block:: shell python -m pip install oracledb-3.1.0-cp312-cp312-macosx_10_13_universal2.whl @@ -1050,11 +1100,15 @@ Registry: - `oraclelinux8-python `__ -For example, you can pull a container for Python 3.12 on Oracle Linux 9 using:: +For example, you can pull a container for Python 3.12 on Oracle Linux 9 using: + +.. code-block:: shell docker pull ghcr.io/oracle/oraclelinux9-python:3.12-oracledb -Or use it in a Dockerfile like:: +Or use it in a Dockerfile like: + +.. code-block:: shell FROM ghcr.io/oracle/oraclelinux9-python:3.12-oracledb @@ -1080,16 +1134,13 @@ Install Modules for the OCI Object Storage Centralized Configuration Provider For python-oracledb to use an :ref:`Oracle Cloud Infrastructure (OCI) Object Storage configuration provider `, you must install the -`OCI `__ package. This can be done: +`OCI `__ package by using the optional +``[oci_config]`` dependency: -- By using the recommended optional [oci_config] dependency:: + .. code-block:: shell python -m pip install oracledb[oci_config] -- Or, by installing the package manually:: - - python -m pip install oci - See :ref:`ociobjstorageprovider` for information on using this configuration provider with python-oracledb. @@ -1102,17 +1153,13 @@ For python-oracledb to use an :ref:`Azure App Configuration Provider `, you must install the `Azure App Configuration `__, `Azure Identity `__, and `Azure Key Vault Secrets -`__ packages. -This can be done: +`__ packages by using the +optional ``[azure_config]`` dependency: -- By using the recommended optional [azure_config] dependency:: +.. code-block:: shell python -m pip install oracledb[azure_config] -- Or, by installing the packages manually:: - - python -m pip install azure-appconfiguration azure-identity azure-keyvault-secrets - See :ref:`azureappstorageprovider` for information on using this configuration provider with python-oracledb. 
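After installing either set of optional dependencies, a quick way to confirm they are available in the Python environment that will run your application is a short import check. This is only an illustrative sketch; the module names are the standard import names of the packages listed above, so adjust the tuple to whichever provider you installed::

    # Sketch: confirm the optional configuration provider dependencies are
    # importable in the current Python environment.
    import importlib

    for name in (
        "oci",                      # installed by oracledb[oci_config]
        "azure.appconfiguration",   # installed by oracledb[azure_config]
        "azure.identity",
        "azure.keyvault.secrets",
    ):
        try:
            importlib.import_module(name)
            print(name, "is available")
        except ImportError:
            print(name, "is NOT installed")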
@@ -1131,16 +1178,13 @@ Install Modules for the OCI Cloud Native Authentication Plugin For python-oracledb to use the OCI Cloud Native Authentication Plugin, you must install the `Python SDK for Oracle Cloud Infrastructure -`__ package. This can be done: +`__ package by using the ``[oci_auth]`` +dependency: -- By using the recommended optional [oci_auth] dependency:: +.. code-block:: shell python -m pip install oracledb[oci_auth] -- Or, by installing the package manually:: - - python -m pip install oci - Review the `OCI SDK installation instructions `__ as needed. @@ -1155,16 +1199,13 @@ Install Modules for the Azure Cloud Native Authentication Plugin For python-oracledb to use the Azure Cloud Native Authentication Plugin, you must install the `Microsoft Authentication Library (MSAL) for Python -`__ package. This can be done: +`__ package by using the optional +``[azure_auth]`` dependency: -- By using the recommended optional [azure_auth] dependency:: +.. code-block:: shell python -m pip install oracledb[azure_auth] -- Or, by installing the package manually:: - - python -m pip install msal - Review the `Microsoft MSAL installation instructions `__ as needed. diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 1964e79b..d9da1013 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -23,22 +23,32 @@ executed. Statements are executed using one of these methods This chapter discusses python-oracledb's synchronous methods. The asynchronous methods and pipelining functionality are discussed in detail in :ref:`asyncio`. -PL/SQL statements are discussed in :ref:`plsqlexecution`. Other chapters -contain information on specific data types and features. See :ref:`batchstmnt`, -:ref:`lobdata`, :ref:`jsondatatype`, and :ref:`xmldatatype`. +PL/SQL statements are discussed in :ref:`plsqlexecution`. The following +chapters contain information on specific data types and features: + +- :ref:`batchstmnt` +- :ref:`pipelining` +- :ref:`lobdata` +- :ref:`jsondatatype` +- :ref:`xmldatatype` + +**Executing SQL Scripts** Python-oracledb can be used to execute individual statements, one at a time. Once a statement has finished execution, only then will the next statement execute. If you try to execute statements concurrently in a single connection, the statements are queued and run consecutively in the order they are executed -in the application code. +in the application code. This includes :ref:`Pipelined statements +`. Python-oracledb does not read SQL*Plus ".sql" files. To read SQL files, use a technique like the one in ``run_sql_script()`` in `samples/sample_env.py `__. -SQL statements should not contain a trailing semicolon (";") or forward slash -("/"). This will fail: +**SQL Statement Syntax** + +SQL statements executed in python-oracledb should not contain a trailing +semicolon (";") or forward slash ("/"). This will fail: .. code-block:: python @@ -50,17 +60,6 @@ This is correct: cursor.execute("select * from MyTable") - -SQL Queries -=========== - -Queries (statements beginning with SELECT or WITH) can be executed using the -method :meth:`Cursor.execute()`. Rows can then be iterated over, or can be -fetched using one of the methods :meth:`Cursor.fetchone()`, -:meth:`Cursor.fetchmany()` or :meth:`Cursor.fetchall()`. There is a -:ref:`default type mapping ` to Python types that can be -optionally :ref:`overridden `. - .. 
IMPORTANT:: Interpolating or concatenating user data with SQL statements, for example @@ -69,6 +68,18 @@ optionally :ref:`overridden `. instead, for example ``cursor.execute("SELECT * FROM mytab WHERE mycol = :mybv", mybv=myvar)``. + +SQL Queries +=========== + +Queries (statements beginning with SELECT or WITH) can be executed using the +method :meth:`Cursor.execute()`. Rows can then be iterated over, or can be +fetched using one of the methods :meth:`Cursor.fetchone()`, +:meth:`Cursor.fetchmany()` or :meth:`Cursor.fetchall()`. This lets you handle +rows directly or stream them if needed. There is a :ref:`default type mapping +` to Python types that can be optionally :ref:`overridden +`. + .. _fetching: Fetch Methods @@ -98,9 +109,10 @@ Rows can be fetched in various ways. break print(row) -- If rows need to be processed in batches, the method :meth:`Cursor.fetchmany()` - can be used. The size of the batch is controlled by the ``size`` parameter, - which defaults to the value of :attr:`Cursor.arraysize`. +- If rows need to be streamed or processed in batches, the method + :meth:`Cursor.fetchmany()` can be used. The size of the batch is controlled + by the ``size`` parameter, which defaults to the value of + :attr:`Cursor.arraysize`. .. code-block:: python @@ -116,8 +128,9 @@ Rows can be fetched in various ways. Note the ``size`` parameter only affects the number of rows returned to the application, not to the internal buffer size used for tuning fetch - performance. That internal buffer size is controlled only by changing - :attr:`Cursor.arraysize`, see :ref:`tuningfetch`. + performance. That internal buffer size is controlled only by changing + :attr:`Cursor.arraysize` or :attr:`oracledb.defaults.arraysize + `, see :ref:`tuningfetch`. - If all of the rows need to be fetched and can be contained in memory, the method :meth:`Cursor.fetchall()` can be used. diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index 1bc2d479..57e28cc6 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -279,7 +279,7 @@ Changing Prefetchrows and Arraysize for Re-executed Statements In python-oracledb, the :attr:`~Cursor.prefetchrows` and :attr:`~Cursor.arraysize` values are only examined when a statement is executed the first time. To change the values for a re-executed statement, create a new -cursor. For example, to change :attr:`~Cursor.arraysize``: +cursor. For example, to change :attr:`~Cursor.arraysize`: .. code-block:: python diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 99c15c51..6d4d365b 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -649,7 +649,9 @@ def sdu(self) -> int: (SDU) that is being used by the connection. The value will be the lesser of the requested python-oracledb size and the maximum size allowed by the database network configuration. It is available only in - python-oracledb Thin mode. + python-oracledb Thin mode. To set the SDU in Thick mode, use a + connection string SDU parameter or set a value for DEFAULT_SDU_SIZE in + a sqlnet.ora configuration file. """ self._verify_connected() return self._impl.get_sdu() diff --git a/utils/templates/connection.py b/utils/templates/connection.py index 887ba0a4..ad962c91 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -647,7 +647,9 @@ def sdu(self) -> int: (SDU) that is being used by the connection. 
The value will be the lesser of the requested python-oracledb size and the maximum size allowed by the database network configuration. It is available only in - python-oracledb Thin mode. + python-oracledb Thin mode. To set the SDU in Thick mode, use a + connection string SDU parameter or set a value for DEFAULT_SDU_SIZE in + a sqlnet.ora configuration file. """ self._verify_connected() return self._impl.get_sdu() From 21b7dadb5a9b28aa591559ffa11bef2140597165 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:37:05 -0600 Subject: [PATCH 197/239] Added support for ingesting Arrow data types date32 and date64 (#534). --- doc/src/release_notes.rst | 3 ++ src/oracledb/arrow_impl.pxd | 2 + src/oracledb/base_impl.pyx | 2 + src/oracledb/impl/arrow/array.pyx | 2 +- src/oracledb/impl/arrow/schema.pyx | 4 ++ src/oracledb/impl/base/converters.pyx | 6 ++- src/oracledb/impl/base/metadata.pyx | 2 + tests/test_8900_dataframe_ingestion.py | 47 ++++++++++++++++++++ tests/test_9000_dataframe_ingestion_async.py | 47 ++++++++++++++++++++ 9 files changed, 113 insertions(+), 2 deletions(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e43e9573..e553e1e7 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -41,6 +41,9 @@ Common Changes #) Added support for all of the signed and unsigned fixed width integer types when ingesting data frames supporting the Arrow PyCapsule interface. Previously only ``int64`` was supported. +#) Added support for types ``date32`` and ``date64`` when ingesting data + frames supporting the Arrow PyCapsule interface as requested + (`issue 534 `__). #) Added ``fetch_lobs`` and ``fetch_decimals`` parameters where applicable to the methods used for fetching rows or data frames from the database. 
Note that for the creation of pipeline operations, if these parameters are not diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index 8223350f..ccff05dd 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -62,6 +62,8 @@ cdef extern from "nanoarrow.h": cpdef enum ArrowType: NANOARROW_TYPE_BOOL NANOARROW_TYPE_BINARY + NANOARROW_TYPE_DATE32 + NANOARROW_TYPE_DATE64 NANOARROW_TYPE_DECIMAL128 NANOARROW_TYPE_DOUBLE NANOARROW_TYPE_FIXED_SIZE_BINARY diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index 92b17320..bcf9e9dd 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -55,6 +55,8 @@ from .arrow_impl cimport ( NANOARROW_TYPE_NA, NANOARROW_TYPE_BOOL, NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_DATE32, + NANOARROW_TYPE_DATE64, NANOARROW_TYPE_DECIMAL128, NANOARROW_TYPE_DOUBLE, NANOARROW_TYPE_FIXED_SIZE_BINARY, diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 83f6ffca..8d404134 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -365,7 +365,7 @@ cdef class ArrowArrayImpl: value[0] = ( ptr)[index] elif arrow_type == NANOARROW_TYPE_INT16: value[0] = ( ptr)[index] - elif arrow_type == NANOARROW_TYPE_INT32: + elif arrow_type in (NANOARROW_TYPE_INT32, NANOARROW_TYPE_DATE32): value[0] = ( ptr)[index] else: value[0] = ( ptr)[index] diff --git a/src/oracledb/impl/arrow/schema.pyx b/src/oracledb/impl/arrow/schema.pyx index 28f6b50f..ea4dae0d 100644 --- a/src/oracledb/impl/arrow/schema.pyx +++ b/src/oracledb/impl/arrow/schema.pyx @@ -129,6 +129,8 @@ cdef class ArrowSchemaImpl: self.fixed_size = schema_view.fixed_size if schema_view.type == NANOARROW_TYPE_TIMESTAMP: self._set_time_unit(schema_view.time_unit) + elif schema_view.type == NANOARROW_TYPE_DATE64: + self._set_time_unit(NANOARROW_TIME_UNIT_MILLI) elif schema_view.type in ( NANOARROW_TYPE_FIXED_SIZE_LIST, NANOARROW_TYPE_LIST @@ -143,6 +145,8 @@ cdef class ArrowSchemaImpl: NANOARROW_TYPE_BINARY, NANOARROW_TYPE_BOOL, NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_DATE32, + NANOARROW_TYPE_DATE64, NANOARROW_TYPE_DOUBLE, NANOARROW_TYPE_FIXED_SIZE_BINARY, NANOARROW_TYPE_FLOAT, diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 17148f8f..cbaca4ac 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -90,7 +90,7 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, rb = &data.buffer.as_raw_bytes array_impl.get_bytes(array_index, &data.is_null, &rb.ptr, &rb.num_bytes) - elif arrow_type == NANOARROW_TYPE_TIMESTAMP: + elif arrow_type in (NANOARROW_TYPE_TIMESTAMP, NANOARROW_TYPE_DATE64): array_impl.get_int(arrow_type, array_index, &data.is_null, &int_value) if not data.is_null: seconds = int_value // array_impl.schema_impl.time_factor @@ -103,6 +103,10 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, useconds //= 1_000 return EPOCH_DATE + \ cydatetime.timedelta_new(days, seconds, useconds) + elif arrow_type == NANOARROW_TYPE_DATE32: + array_impl.get_int(arrow_type, array_index, &data.is_null, &int_value) + if not data.is_null: + return EPOCH_DATE + cydatetime.timedelta_new(int_value, 0, 0) elif arrow_type == NANOARROW_TYPE_DECIMAL128: temp_bytes = array_impl.get_decimal(array_index, &data.is_null) if not data.is_null: diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 36167d7f..dcf3cad8 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ 
b/src/oracledb/impl/base/metadata.pyx @@ -187,6 +187,8 @@ cdef class OracleMetadata: metadata.dbtype = DB_TYPE_BOOLEAN elif arrow_type == NANOARROW_TYPE_TIMESTAMP: metadata.dbtype = DB_TYPE_TIMESTAMP + elif arrow_type in (NANOARROW_TYPE_DATE32, NANOARROW_TYPE_DATE64): + metadata.dbtype = DB_TYPE_DATE elif arrow_type == NANOARROW_TYPE_LARGE_STRING: metadata.dbtype = DB_TYPE_LONG elif arrow_type == NANOARROW_TYPE_LARGE_BINARY: diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py index 566aaee1..ddefbace 100644 --- a/tests/test_8900_dataframe_ingestion.py +++ b/tests/test_8900_dataframe_ingestion.py @@ -898,6 +898,53 @@ def test_8919(self): fetched_values = [int(s) for s, in self.cursor] self.assertEqual(fetched_values, values) + def test_8920(self): + "8920 - test ingestion with alternative date types" + scenarios = [ + ( + [ + datetime.datetime(1915, 9, 11), + None, + datetime.datetime(2045, 2, 28), + ], + pyarrow.date32(), + ), + ( + [ + datetime.datetime(1905, 3, 30), + None, + datetime.datetime(2060, 10, 5), + ], + pyarrow.date64(), + ), + ] + names = ["Id", "DateOfBirth"] + for values, dtype in scenarios: + with self.subTest(dtype=str(dtype)): + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + self.cursor.execute("delete from TestDataFrame") + self.cursor.executemany( + """ + insert into TestDataFrame (Id, DateOfBirth) + values (:1, :2) + """, + df, + ) + self.conn.commit() + self.cursor.execute( + """ + select DateOfBirth + from TestDataFrame + order by Id + """ + ) + fetched_values = [d for d, in self.cursor] + self.assertEqual(fetched_values, values) + if __name__ == "__main__": test_env.run_test_cases() diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py index 6c515f8e..553e85de 100644 --- a/tests/test_9000_dataframe_ingestion_async.py +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -899,6 +899,53 @@ async def test_9019(self): fetched_values = [int(s) async for s, in self.cursor] self.assertEqual(fetched_values, values) + async def test_9020(self): + "9020 - test ingestion with alternative date types" + scenarios = [ + ( + [ + datetime.datetime(1915, 9, 11), + None, + datetime.datetime(2045, 2, 28), + ], + pyarrow.date32(), + ), + ( + [ + datetime.datetime(1905, 3, 30), + None, + datetime.datetime(2060, 10, 5), + ], + pyarrow.date64(), + ), + ] + names = ["Id", "DateOfBirth"] + for values, dtype in scenarios: + with self.subTest(dtype=str(dtype)): + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + await self.cursor.execute("delete from TestDataFrame") + await self.cursor.executemany( + """ + insert into TestDataFrame (Id, DateOfBirth) + values (:1, :2) + """, + df, + ) + await self.conn.commit() + await self.cursor.execute( + """ + select DateOfBirth + from TestDataFrame + order by Id + """ + ) + fetched_values = [d async for d, in self.cursor] + self.assertEqual(fetched_values, values) + if __name__ == "__main__": test_env.run_test_cases() From 9dbecea4256857fb1914ecef115bd6e4733596f4 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:37:38 -0600 Subject: [PATCH 198/239] Update ODPI-C. 
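The ``date32``/``date64`` ingestion support added in the patch above can be driven from application code in the same way as the new tests; a minimal sketch follows (the table name ``MY_DATES`` and the open ``cursor`` are assumptions for illustration only, not part of the patch)::

    # Sketch: bind a pyarrow table containing a date32 column directly to
    # executemany(); pyarrow converts datetime.date values to Arrow date32.
    import datetime
    import pyarrow

    tbl = pyarrow.table(
        [
            pyarrow.array([1, 2], pyarrow.int64()),
            pyarrow.array([datetime.date(2024, 5, 1), None], pyarrow.date32()),
        ],
        names=["ID", "D"],
    )
    cursor.executemany("insert into MY_DATES (ID, D) values (:1, :2)", tbl)

On fetch, the DATE column comes back as ``datetime.datetime`` values, which is what the new tests assert.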
--- src/oracledb/impl/thick/odpi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index 8bba6229..6f2ef1c7 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit 8bba6229f1a186395ad7d2cb475d6f87972bbe14 +Subproject commit 6f2ef1c70cbe702677d8fe94fd2ba059453d3e5c From d4c10171dd95d1f0ea82e5bfc347ee3d48f08802 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 11 Sep 2025 13:42:39 -0600 Subject: [PATCH 199/239] Fix link to point to issue instead of discussion (#535). --- doc/src/release_notes.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e553e1e7..35996cab 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -43,7 +43,7 @@ Common Changes Previously only ``int64`` was supported. #) Added support for types ``date32`` and ``date64`` when ingesting data frames supporting the Arrow PyCapsule interface as requested - (`issue 534 `__). + (`issue 535 `__). #) Added ``fetch_lobs`` and ``fetch_decimals`` parameters where applicable to the methods used for fetching rows or data frames from the database. Note that for the creation of pipeline operations, if these parameters are not From 87908fe400d79f77aa000e4aadf807c03a5d94af Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:26:30 -0600 Subject: [PATCH 200/239] Refactor: rename method to be more accurate. --- src/oracledb/base_impl.pxd | 2 +- src/oracledb/impl/base/metadata.pyx | 50 ++++++++++++++--------------- src/oracledb/impl/base/var.pyx | 2 +- 3 files changed, 27 insertions(+), 27 deletions(-) diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 0c986abb..78b9012d 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -448,8 +448,8 @@ cdef class OracleMetadata: ArrowSchemaImpl _schema_impl uint8_t _py_type_num + cdef int _create_arrow_schema(self) except -1 cdef int _finalize_init(self) except -1 - cdef int _set_arrow_schema(self) except -1 cdef OracleMetadata copy(self) @staticmethod cdef OracleMetadata from_arrow_schema(ArrowSchemaImpl schema_impl) diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index dcf3cad8..43981f04 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -31,32 +31,9 @@ @cython.freelist(30) cdef class OracleMetadata: - cdef int _finalize_init(self) except -1: - """ - Internal method that finalizes the initialization of metadata by - setting the buffer size, max size and default Python type (if they have - not already been set). - """ - if self.dbtype.default_size == 0: - self.max_size = 0 - self.buffer_size = self.dbtype._buffer_size_factor - else: - if self.max_size == 0: - self.max_size = self.dbtype.default_size - self.buffer_size = self.max_size * self.dbtype._buffer_size_factor - if self._py_type_num == 0: - if self.dbtype._ora_type_num != ORA_TYPE_NUM_NUMBER: - self._py_type_num = self.dbtype._default_py_type_num - else: - if self.scale == 0 or \ - (self.scale == -127 and self.precision == 0): - self._py_type_num = PY_TYPE_NUM_INT - else: - self._py_type_num = PY_TYPE_NUM_FLOAT - - cdef int _set_arrow_schema(self) except -1: + cdef int _create_arrow_schema(self) except -1: """ - Sets the Arrow schema given the metadata. + Creates an Arrow schema for the metadata. 
""" cdef: ArrowType arrow_type, child_arrow_type = NANOARROW_TYPE_NA @@ -131,6 +108,29 @@ cdef class OracleMetadata: child_arrow_type, ) + cdef int _finalize_init(self) except -1: + """ + Internal method that finalizes the initialization of metadata by + setting the buffer size, max size and default Python type (if they have + not already been set). + """ + if self.dbtype.default_size == 0: + self.max_size = 0 + self.buffer_size = self.dbtype._buffer_size_factor + else: + if self.max_size == 0: + self.max_size = self.dbtype.default_size + self.buffer_size = self.max_size * self.dbtype._buffer_size_factor + if self._py_type_num == 0: + if self.dbtype._ora_type_num != ORA_TYPE_NUM_NUMBER: + self._py_type_num = self.dbtype._default_py_type_num + else: + if self.scale == 0 or \ + (self.scale == -127 and self.precision == 0): + self._py_type_num = PY_TYPE_NUM_INT + else: + self._py_type_num = PY_TYPE_NUM_FLOAT + cdef OracleMetadata copy(self): """ Create a copy of the metadata and return it. diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index 20e9b616..d06e6153 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -253,7 +253,7 @@ cdef class BaseVarImpl: user. """ if self.metadata._schema_impl is None: - self.metadata._set_arrow_schema() + self.metadata._create_arrow_schema() self._arrow_array = ArrowArrayImpl.__new__(ArrowArrayImpl) self._arrow_array.populate_from_schema(self.metadata._schema_impl) From 71341ffcd2aced6ba274136cb9f68dc159f34750 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:26:49 -0600 Subject: [PATCH 201/239] Ensure that host response check is only performed when using o5logon. --- src/oracledb/impl/thin/messages/auth.pyx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/oracledb/impl/thin/messages/auth.pyx b/src/oracledb/impl/thin/messages/auth.pyx index d52c2c20..01509dd0 100644 --- a/src/oracledb/impl/thin/messages/auth.pyx +++ b/src/oracledb/impl/thin/messages/auth.pyx @@ -222,7 +222,8 @@ cdef class AuthMessage(Message): self.session_data[key] = value if self.function_code == TNS_FUNC_AUTH_PHASE_ONE: self.function_code = TNS_FUNC_AUTH_PHASE_TWO - elif not self.change_password: + elif not self.change_password \ + and self.conn_impl._combo_key is not None: response = None value = self.session_data.get("AUTH_SVR_RESPONSE") if value is not None: From 72993e86d818d981cc0c15836d5f54c8ab2d9484 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:33:25 -0600 Subject: [PATCH 202/239] Fixed bug with error wrapping which could result in garbage characters being introduced. Fixed potential bug when truncation could occur with very large error messages. --- doc/src/release_notes.rst | 4 ++++ src/oracledb/impl/thick/odpi | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 35996cab..fd91b597 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -32,6 +32,9 @@ Thin Mode Changes Thick Mode Changes ++++++++++++++++++ +#) Fixed bug with error wrapping which could result in garbage characters + being introduced. Fixed potential bug when truncation could occur with very + large error messages. #) Executed statements are normalized by removing leading and trailing spaces before being sent to Oracle Database. @@ -1288,6 +1291,7 @@ Thick Mode Changes connection string. #) Fixed memory leak when accessing objects embedded within other objects. 
+ Common Changes ++++++++++++++ diff --git a/src/oracledb/impl/thick/odpi b/src/oracledb/impl/thick/odpi index 6f2ef1c7..a5092566 160000 --- a/src/oracledb/impl/thick/odpi +++ b/src/oracledb/impl/thick/odpi @@ -1 +1 @@ -Subproject commit 6f2ef1c70cbe702677d8fe94fd2ba059453d3e5c +Subproject commit a5092566ac83c17292864b6d72d3f1121fbda54b From a9c11fc39187ee4cd3c9614380a91d8b13219a70 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:33:43 -0600 Subject: [PATCH 203/239] Fixed bug when attempting to append an element to a DbObject which is not actually a collection. --- doc/src/release_notes.rst | 2 ++ src/oracledb/dbobject.py | 1 + tests/test_2300_object_var.py | 33 +++++++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+) diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index fd91b597..e578f2a7 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -66,6 +66,8 @@ Common Changes (`issue 525 `__). #) Fixed bug when attempting to convert an integer that cannot be represented as a native C ``int`` value to an Arrow data frame. +#) Fixed bug when attempting to append an element to a + :ref:`DbObject ` which is not actually a collection. #) API documentation is now generated from the source code. #) Internal change: typing_extensions is now a dependency. diff --git a/src/oracledb/dbobject.py b/src/oracledb/dbobject.py index 5b21eaa2..9b05e637 100644 --- a/src/oracledb/dbobject.py +++ b/src/oracledb/dbobject.py @@ -91,6 +91,7 @@ def append(self, element: Any) -> None: creates an element immediately following the highest index available in the collection. """ + self._ensure_is_collection() self._impl.append(element) def asdict(self) -> dict: diff --git a/tests/test_2300_object_var.py b/tests/test_2300_object_var.py index 7f87bdbd..da7e6d69 100644 --- a/tests/test_2300_object_var.py +++ b/tests/test_2300_object_var.py @@ -939,6 +939,39 @@ def test_2347(self): expected_data = [[4, 8], None, [1, 3, 5], None, [2, 6, 10, 7, 9]] self.assertEqual(plain_obj, expected_data) + def test_2348(self): + "2348 - test using collection methods on an object that is not one" + obj_type = self.conn.gettype("UDT_OBJECT") + obj = obj_type.newobject() + with self.assertRaisesFullCode("DPY-2036"): + obj.append(5) + with self.assertRaisesFullCode("DPY-2036"): + obj.asdict() + with self.assertRaisesFullCode("DPY-2036"): + obj.aslist() + with self.assertRaisesFullCode("DPY-2036"): + obj.delete(5) + with self.assertRaisesFullCode("DPY-2036"): + obj.exists(5) + with self.assertRaisesFullCode("DPY-2036"): + obj.extend([5]) + with self.assertRaisesFullCode("DPY-2036"): + obj.first() + with self.assertRaisesFullCode("DPY-2036"): + obj.getelement(5) + with self.assertRaisesFullCode("DPY-2036"): + obj.last() + with self.assertRaisesFullCode("DPY-2036"): + obj.next(5) + with self.assertRaisesFullCode("DPY-2036"): + obj.prev(5) + with self.assertRaisesFullCode("DPY-2036"): + obj.setelement(5, None) + with self.assertRaisesFullCode("DPY-2036"): + obj.size() + with self.assertRaisesFullCode("DPY-2036"): + obj.trim(0) + if __name__ == "__main__": test_env.run_test_cases() From 4c4a2f577052dc9bae0b44e34e352d5688e9eb91 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:34:07 -0600 Subject: [PATCH 204/239] Refactor: add list of Arrow schema implementations to the data frame. 
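The data frame now carries the Arrow schema implementations alongside the arrays, but the way applications consume it is unchanged. A minimal consumption sketch, assuming an open ``connection``, a queryable table, and a pyarrow version that accepts objects exposing the Arrow C stream interface::

    # Sketch: fetch an Oracle data frame and let pyarrow consume it through
    # the Arrow C stream interface; the query and names are illustrative only.
    import pyarrow

    odf = connection.fetch_df_all(statement="select id, name from mytable")
    tbl = pyarrow.table(odf)
    print(tbl.schema)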
--- src/oracledb/arrow_impl.pxd | 3 ++- src/oracledb/impl/arrow/array.pyx | 14 ++++++++++---- src/oracledb/impl/arrow/dataframe.pyx | 24 ++++++++++++++++++------ src/oracledb/impl/base/cursor.pyx | 6 +++++- 4 files changed, 35 insertions(+), 12 deletions(-) diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index ccff05dd..f8601f49 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -152,11 +152,12 @@ cdef class ArrowArrayImpl: cdef int get_uint(self, ArrowType arrow_type, int64_t index, bint* is_null, uint64_t* value) except -1 cdef object get_vector(self, int64_t index, bint* is_null) - cdef int populate_from_array(self, ArrowSchema* schema, + cdef int populate_from_array(self, ArrowSchemaImpl schema_impl, ArrowArray* array) except -1 cdef int populate_from_schema(self, ArrowSchemaImpl schema_impl) except -1 cdef class DataFrameImpl: cdef: + list schema_impls list arrays diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 8d404134..32efea15 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -484,27 +484,33 @@ cdef class ArrowArrayImpl: object implementing the PyCapsule Arrow array interface. """ cdef: + ArrowSchemaImpl schema_impl ArrowArrayImpl array_impl ArrowSchema *arrow_schema ArrowArray *arrow_array + + # convert schema schema_capsule, array_capsule = obj.__arrow_c_array__() arrow_schema = cpython.PyCapsule_GetPointer( schema_capsule, "arrow_schema" ) + schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) + schema_impl.populate_from_schema(arrow_schema) + + # convert array arrow_array = cpython.PyCapsule_GetPointer( array_capsule, "arrow_array" ) array_impl = ArrowArrayImpl.__new__(ArrowArrayImpl) - array_impl.schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) - array_impl.populate_from_array(arrow_schema, arrow_array) + array_impl.populate_from_array(schema_impl, arrow_array) return array_impl - cdef int populate_from_array(self, ArrowSchema* schema, + cdef int populate_from_array(self, ArrowSchemaImpl schema_impl, ArrowArray* array) except -1: """ Populate the array from another array. 
""" - self.schema_impl.populate_from_schema(schema) + self.schema_impl = schema_impl ArrowArrayMove(array, self.arrow_array) cdef int populate_from_schema(self, ArrowSchemaImpl schema_impl) except -1: diff --git a/src/oracledb/impl/arrow/dataframe.pyx b/src/oracledb/impl/arrow/dataframe.pyx index 0af5a590..ebf853fc 100644 --- a/src/oracledb/impl/arrow/dataframe.pyx +++ b/src/oracledb/impl/arrow/dataframe.pyx @@ -38,24 +38,34 @@ cdef class DataFrameImpl: """ cdef: ArrowArrayStream *arrow_stream + ArrowSchemaImpl schema_impl + ArrowArrayImpl array_impl ArrowSchema arrow_schema ArrowArray arrow_array DataFrameImpl df_impl - ArrowArrayImpl array_impl - ArrowSchemaImpl schema_impl ssize_t i + + # initialization df_impl = DataFrameImpl.__new__(DataFrameImpl) + df_impl.schema_impls = [] df_impl.arrays = [] capsule = obj.__arrow_c_stream__() arrow_stream = cpython.PyCapsule_GetPointer( capsule, "arrow_array_stream" ) + + # populate list of schemas _check_nanoarrow(arrow_stream.get_schema(arrow_stream, &arrow_schema)) + for i in range(arrow_schema.n_children): + schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) + schema_impl.populate_from_schema(arrow_schema.children[i]) + df_impl.schema_impls.append(schema_impl) + + # populate list of arrays _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) for i in range(arrow_schema.n_children): array_impl = ArrowArrayImpl.__new__(ArrowArrayImpl) - array_impl.schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) - array_impl.populate_from_array(arrow_schema.children[i], + array_impl.populate_from_array(df_impl.schema_impls[i], arrow_array.children[i]) df_impl.arrays.append(array_impl) _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) @@ -77,6 +87,7 @@ cdef class DataFrameImpl: encapsulates the arrays found in the data frame. """ cdef: + ArrowSchemaImpl schema_impl ArrowArrayImpl array_impl ArrowArrayStream *stream int64_t i, num_arrays @@ -100,14 +111,15 @@ cdef class DataFrameImpl: ArrowArrayInitFromType(&array, NANOARROW_TYPE_STRUCT) ) _check_nanoarrow(ArrowArrayAllocateChildren(&array, num_arrays)) - for i, array_impl in enumerate(self.arrays): + for i, schema_impl in enumerate(self.schema_impls): + array_impl = self.arrays[i] array.length = array_impl.arrow_array.length copy_arrow_array( array_impl, array_impl.arrow_array, array.children[i] ) _check_nanoarrow( ArrowSchemaDeepCopy( - array_impl.schema_impl.arrow_schema, schema.children[i] + schema_impl.arrow_schema, schema.children[i] ) ) diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index 3c512070..22a35f09 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -552,12 +552,16 @@ cdef class BaseCursorImpl: Flush all buffers and return an Oracle Data frame. """ cdef: + ArrowArrayImpl array_impl DataFrameImpl df_impl BaseVarImpl var_impl df_impl = DataFrameImpl.__new__(DataFrameImpl) + df_impl.schema_impls = [] df_impl.arrays = [] for var_impl in self.fetch_var_impls: - df_impl.arrays.append(var_impl._finish_building_arrow_array()) + array_impl = var_impl._finish_building_arrow_array() + df_impl.schema_impls.append(array_impl.schema_impl) + df_impl.arrays.append(array_impl) return PY_TYPE_DATAFRAME._from_impl(df_impl) def close(self, bint in_del=False): From 57e5b9c1ff0983ee3e1086f72e57e9862440042d Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:35:08 -0600 Subject: [PATCH 205/239] Samll documentation changes. 
--- NOTICE.txt | 2 +- doc/src/api_manual/deprecations.rst | 65 +++++++++++--------------- doc/src/release_notes.rst | 9 +++- doc/src/user_guide/batch_statement.rst | 8 ++++ doc/src/user_guide/initialization.rst | 14 +++--- doc/src/user_guide/installation.rst | 4 +- doc/src/user_guide/sql_execution.rst | 56 ++++++++++++++++++++-- src/oracledb/cursor.py | 3 ++ 8 files changed, 110 insertions(+), 51 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index bc88e116..4cd6fb08 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1 +1 @@ -Copyright (c) 2016, 2024, Oracle and/or its affiliates. +Copyright (c) 2016, 2025, Oracle and/or its affiliates. diff --git a/doc/src/api_manual/deprecations.rst b/doc/src/api_manual/deprecations.rst index 4125c8d5..a8f71cd9 100644 --- a/doc/src/api_manual/deprecations.rst +++ b/doc/src/api_manual/deprecations.rst @@ -15,6 +15,17 @@ python-oracledb. The relevant functionality may be removed in a future version of python-oracledb. The cx_Oracle driver itself is obsolete and should not be used for new development. +.. list-table-with-summary:: Deprecated in python-oracledb 3.4 + :header-rows: 1 + :class: wy-table-responsive + :summary: The first column, Name, displays the deprecated feature. The second column, Comments, includes information about the deprecation and the replacement to use, if applicable. + :name: _deprecations_3_4 + + * - Name + - Comments + * - The x86_64 macOS and 32-bit Windows platforms are deprecated. They will be desupported when the `cryptography `__ package desupports them, see the `cryptography deprecation announcement `__. + - Use arm64 macOS or 64-bit Windows instead. + .. list-table-with-summary:: Deprecated in python-oracledb 3.0 :header-rows: 1 :class: wy-table-responsive @@ -24,8 +35,7 @@ used for new development. * - Name - Comments * - Parameter ``pool`` of :meth:`oracledb.connect()` and :meth:`oracledb.connect_async()` - - Use :meth:`ConnectionPool.acquire()`, or make use of the - :ref:`connection pool cache ` instead + - Use :meth:`ConnectionPool.acquire()`, or make use of the :ref:`connection pool cache ` instead .. list-table-with-summary:: Desupported in python-oracledb 2.0 :header-rows: 1 @@ -36,33 +46,21 @@ used for new development. * - Name - Comments * - ``oracledb.__future__.old_json_col_as_obj`` - - VARCHAR2 and LOB columns created with the ``IS JSON`` check constraint - are now always fetched as JSON. Use an :ref:`output type handler - ` if the old behavior is required. - * - Parameters ``encoding`` and ``nencoding`` of :func:`oracledb.connect()` - and :func:`oracledb.create_pool()`, and the related attributes on the - objects created - - The driver encodings are always UTF-8. Remove uses of ``encoding`` and - ``nencoding`` from your code. - * - Parameter ``threaded`` of :func:`oracledb.connect()` and - :func:`oracledb.create_pool()` + - VARCHAR2 and LOB columns created with the ``IS JSON`` check constraint are now always fetched as JSON. Use an :ref:`output type handler ` if the old behavior is required. + * - Parameters ``encoding`` and ``nencoding`` of :func:`oracledb.connect()` and :func:`oracledb.create_pool()`, and the related attributes on the objects created + - The driver encodings are always UTF-8. Remove uses of ``encoding`` and ``nencoding`` from your code. + * - Parameter ``threaded`` of :func:`oracledb.connect()` and :func:`oracledb.create_pool()` - Threading is always used. Remove uses of ``threaded`` from your code. 
- * - Parameter ``waitTimeout`` of :func:`oracledb.create_pool()` and - ``oracledb.SessionPool()`` + * - Parameter ``waitTimeout`` of :func:`oracledb.create_pool()` and ``oracledb.SessionPool()`` - Replace with parameter ``wait_timeout`` - * - Parameter ``maxLifetimeSession`` of :func:`oracledb.create_pool()` and - ``oracledb.SessionPool()`` + * - Parameter ``maxLifetimeSession`` of :func:`oracledb.create_pool()` and ``oracledb.SessionPool()`` - Replace with parameter ``max_lifetime_session`` - * - Parameter ``sessionCallback`` of :func:`oracledb.create_pool()` and - ``oracledb.SessionPool()`` + * - Parameter ``sessionCallback`` of :func:`oracledb.create_pool()` and ``oracledb.SessionPool()`` - Replace with parameter ``session_callback`` - * - Parameter ``maxSessionsPerShard`` of :func:`oracledb.create_pool()` and - ``oracledb.SessionPool()`` + * - Parameter ``maxSessionsPerShard`` of :func:`oracledb.create_pool()` and ``oracledb.SessionPool()`` - Replace with parameter ``max_sessions_per_shard`` - * - Attribute ``maxBytesPerCharacter`` of the :ref:`Connection object - ` - - The driver encodings are always UTF-8 so this attribute can be replaced by - the constant value 4 + * - Attribute ``maxBytesPerCharacter`` of the :ref:`Connection object ` + - The driver encodings are always UTF-8 so this attribute can be replaced by the constant value 4 * - ``Connection.tnsentry`` - Replace with :attr:`Connection.dsn` * - ``SessionPool.tnsentry`` @@ -76,16 +74,11 @@ used for new development. * - Name - Comments - * - Calling :meth:`Variable.setvalue()` with a string value when the - variable type is one of :data:`oracledb.DB_TYPE_BLOB`, + * - Calling :meth:`Variable.setvalue()` with a string value when the variable type is one of :data:`oracledb.DB_TYPE_BLOB`, :data:`oracledb.DB_TYPE_CLOB` or :data:`oracledb.DB_TYPE_NCLOB`. - - Call :meth:`Connection.createlob()` with the value instead and pass the - result to :meth:`Variable.setvalue()`. - * - Setting an attribute of type :data:`oracledb.DB_TYPE_BLOB`, - :data:`oracledb.DB_TYPE_CLOB` or :data:`oracledb.DB_TYPE_NCLOB` on a - database object to a string value. - - Call :meth:`Connection.createlob()` with the value instead and set the - attribute with the result. + - Call :meth:`Connection.createlob()` with the value instead and pass the result to :meth:`Variable.setvalue()`. + * - Setting an attribute of type :data:`oracledb.DB_TYPE_BLOB`, :data:`oracledb.DB_TYPE_CLOB` or :data:`oracledb.DB_TYPE_NCLOB` on a database object to a string value. + - Call :meth:`Connection.createlob()` with the value instead and set the attribute with the result. .. list-table-with-summary:: Deprecated in python-oracledb 1.4 :header-rows: 1 @@ -95,10 +88,8 @@ used for new development. * - Name - Comments - * - Output type handler with arguments - ``handler(cursor, name, default_type, length, precision, scale)`` - - Replace with ``handler(cursor, metadata)``. See - :ref:`outputtypehandlers`. + * - Output type handler with arguments ``handler(cursor, name, default_type, length, precision, scale)`` + - Replace with ``handler(cursor, metadata)``. See :ref:`outputtypehandlers`. .. list-table-with-summary:: Deprecated in python-oracledb 1.0 :header-rows: 1 diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index e578f2a7..8c8a38ff 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -60,6 +60,12 @@ Common Changes support and :ref:`Cloud Native Authentication ` support (`issue 512 `__). 
+#) The x86_64 macOS and 32-bit Windows platforms are :ref:`deprecated + `. They will be desupported when the `cryptography + `__ package desupports them, see + the `cryptography deprecation announcement `__. #) Pin Cython to 3.1.x instead of 3.1.0 as requested (`issue 530 `__). #) Fixed bug when attempting to execute an empty statement @@ -69,7 +75,8 @@ Common Changes #) Fixed bug when attempting to append an element to a :ref:`DbObject ` which is not actually a collection. #) API documentation is now generated from the source code. -#) Internal change: typing_extensions is now a dependency. +#) Internal change: `typing_extensions `__ is now a dependency. oracledb `3.3.0 `__ (July 2025) diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index 97977583..76d5038e 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -494,6 +494,14 @@ be beneficial. See `samples/load_csv.py `__ for a runnable example. +You should also review whether Oracle's specialized data loading tools and +features suit your environment. These can be faster than using Python. See +`SQL*Loader `__ and `External Tables +`__. + + Creating CSV Files from Oracle Database --------------------------------------- diff --git a/doc/src/user_guide/initialization.rst b/doc/src/user_guide/initialization.rst index 975c6ca2..88d757e2 100644 --- a/doc/src/user_guide/initialization.rst +++ b/doc/src/user_guide/initialization.rst @@ -106,13 +106,13 @@ More details and options are shown in the later sections: use Thin mode. The features supported by Thin mode can be found in :ref:`featuresummary`. -- On any operating system, if you set ``lib_dir`` to the library directory of a - full database or full client installation (such as from running - ``runInstaller``), you will need to have previously set the Oracle environment, - for example by setting the ``ORACLE_HOME`` environment variable. Otherwise you - will get errors like ``ORA-1804``. You should set this variable, and other - Oracle environment variables, before starting Python, as shown in :ref:`Oracle - Environment Variables `. +- On any operating system, if you set the ``lib_dir`` parameter to the library + directory of a full database or full client installation (such as from + running ``runInstaller``), you will need to have previously set the Oracle + environment, for example by setting the ``ORACLE_HOME`` environment + variable. Otherwise you will get errors like ``ORA-1804``. You should set + this variable, and other Oracle environment variables, before starting + Python, as shown in :ref:`Oracle Environment Variables `. - The :meth:`~oracledb.init_oracle_client()` function may be called multiple times in your application but must always pass the same arguments. diff --git a/doc/src/user_guide/installation.rst b/doc/src/user_guide/installation.rst index 676b1114..248e9a5d 100644 --- a/doc/src/user_guide/installation.rst +++ b/doc/src/user_guide/installation.rst @@ -526,8 +526,8 @@ architecture. Note Oracle Database 23ai 32-bit clients are not available on any platform, however you can use older 32-bit clients to connect to Oracle Database 23ai. -1. Set required Oracle environment variables by running the Oracle environment - script. For example: +1. Set required Oracle environment variables, such as ``ORACLE_HOME``, by + running the Oracle environment script. For example: .. 
code-block:: shell diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index d9da1013..3d8c70b9 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -494,11 +494,60 @@ Changing Query Results with Rowfactories Python-oracledb "rowfactories" are methods called for each row retrieved from the database. The :meth:`Cursor.rowfactory` method is called with the tuple fetched from the database before it is returned to the application. The method -can convert the tuple to a different value. +can convert the tuple to a different value or representation. + +A rowfactory should be set on a cursor after a call to :meth:`Cursor.execute()` +before fetching data from the cursor. Calling :meth:`~Cursor.execute()` again +will clear any previous rowfactory. + +**Fetching Rows using a Data Class** + +Python `Data Classes `__ +provide a simple way to encapsulate data. An example of using them with a query +rowfactory is: + +.. code-block:: python + + import dataclasses + import datetime + + . . . + + @dataclasses.dataclass + class MyRow: + employee_id: int + last_name: str + hire_date: datetime.datetime + + cursor.execute( + """select employee_id, last_name, hire_date + from employees + where employee_id < 105 + order by employee_id""") + + cursor.rowfactory = MyRow + + for row in cursor: + print("Number:", row.employee_id) + print("Name:", row.last_name) + print("Hire Date:", row.hire_date) + +The output is:: + + Number: 100 + Name: King + Hire Date: 2003-06-17 00:00:00 + Number: 101 + Name: Kochhar + Hire Date: 2005-09-21 00:00:00 + Number: 102 + Name: De Haan + Hire Date: 2001-01-13 00:00:00 **Fetching Rows as Dictionaries** -For example, to fetch each row of a query as a dictionary: +To fetch each row of a query as a dictionary, you can use +:meth:`Cursor.rowfactory` like: .. code-block:: python @@ -515,7 +564,8 @@ The output is:: 'POSTAL_CODE': '00989', 'CITY': 'Roma', 'STATE_PROVINCE': None, 'COUNTRY_ID': 'IT'} -Also see how ``JSON_OBJECT`` is used in :ref:`jsondatatype`. +Also see how ``JSON_OBJECT`` is used in :ref:`jsondatatype`, since querying +directly as JSON may be preferable. If you join tables where the same column name occurs in both tables with different meanings or values, then use a column alias in the query. Otherwise, diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index f1d9aa9d..0cb53479 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -455,6 +455,9 @@ def rowfactory(self) -> Callable: each row but if this attribute is set, the method is called with the tuple that would normally be returned, and the result of the method is returned instead. + + The ``rowfactory`` attribute should be set after each statement + execution before data is fetched from the cursor. """ self._verify_open() return self._impl.rowfactory From 168c39d3113e7b35805d0d0622af8a91edfa7b1b Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:41:35 -0600 Subject: [PATCH 206/239] Migrate to using pytest as test runner. 
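With the suite migrated to pytest, tests can be run from the command line as described in the updated README, or programmatically. A small sketch using ``pytest.main()``; the paths follow the repository layout in this patch and ``--use-thick-mode`` is the option documented in the updated README::

    # Sketch: equivalent to running "python -m pytest ..." from the shell.
    import pytest

    pytest.main(["tests/test_1000_module.py", "-v"])                     # Thin mode (default)
    pytest.main(["tests/test_1100_connection.py", "--use-thick-mode"])   # Thick mode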
--- doc/src/release_notes.rst | 1 + pyproject.toml | 34 +- tests/README.md | 43 +- tests/conftest.py | 1024 ++++++ tests/create_schema.py | 42 +- tests/drop_schema.py | 19 +- tests/ext/README.md | 2 +- tests/ext/{test_env.py => conftest.py} | 77 +- tests/ext/test_ext_1000_pool_shrink.py | 178 +- tests/ext/test_ext_1100_external_auth.py | 325 +- tests/ext/test_ext_1200_statement_cache.py | 141 +- tests/ext/test_ext_1300_verifier_type.py | 109 +- tests/ext/test_ext_1400_inband_notify.py | 106 +- tests/ext/test_ext_1500_pool_grow.py | 36 +- tests/ext/test_ext_1600_warnings.py | 124 +- tests/ext/test_ext_1700_warnings_async.py | 144 +- tests/ext/test_ext_1800_inband_notif_async.py | 121 +- tests/ext/test_ext_1900_pool_shrink_async.py | 179 +- tests/ext/test_ext_2000_pool_grow_async.py | 50 +- tests/ext/test_ext_2100_bfile_type.py | 279 +- tests/ext/test_ext_2200_bfile_type_async.py | 288 +- tests/ext/test_ext_2300_tg.py | 191 +- tests/ext/test_ext_2400_tg_async.py | 224 +- tests/ext/test_ext_2500_config_cache.py | 230 +- .../test_ext_2600_sessionless_transaction.py | 114 +- ..._ext_2700_sessionless_transaction_async.py | 94 +- tests/test_1000_module.py | 436 ++- tests/test_1100_connection.py | 1666 +++++---- tests/test_1300_cursor_var.py | 911 +++-- tests/test_1400_datetime_var.py | 549 +-- tests/test_1500_types.py | 387 +- tests/test_1600_dml_returning.py | 1154 +++--- tests/test_1700_error.py | 363 +- tests/test_1800_interval_var.py | 430 +-- tests/test_1900_lob_var.py | 1216 ++++--- tests/test_2000_long_var.py | 193 +- tests/test_2100_nchar_var.py | 584 +-- tests/test_2200_number_var.py | 1097 +++--- tests/test_2300_object_var.py | 1906 +++++----- tests/test_2400_pool.py | 1819 +++++----- tests/test_2500_string_var.py | 1062 +++--- tests/test_2600_timestamp_var.py | 408 +-- tests/test_2700_aq_dbobject.py | 1163 +++--- tests/test_2800_aq_bulk.py | 322 +- tests/test_2900_rowid.py | 375 +- tests/test_3000_subscription.py | 650 ++-- tests/test_3100_boolean_var.py | 223 +- tests/test_3200_features_12_1.py | 1352 +++---- tests/test_3300_soda_database.py | 427 +-- tests/test_3400_soda_collection.py | 1964 +++++----- tests/test_3500_json.py | 599 +-- tests/test_3600_outputtypehandler.py | 1419 ++++---- tests/test_3700_var.py | 1121 +++--- tests/test_3800_typehandler.py | 475 ++- tests/test_3900_cursor_execute.py | 1053 +++--- tests/test_4000_cursor_executemany.py | 813 ++--- tests/test_4100_cursor_callproc.py | 806 ++--- tests/test_4200_cursor_scrollable.py | 388 +- tests/test_4300_cursor_other.py | 1956 +++++----- tests/test_4400_tpc.py | 635 ++-- tests/test_4500_connect_params.py | 3211 +++++++++-------- tests/test_4600_type_changes.py | 648 ++-- tests/test_4700_pool_params.py | 272 +- tests/test_4800_timestamp_ltz_var.py | 432 +-- tests/test_4900_timestamp_tz_var.py | 412 +-- tests/test_5000_externalauth.py | 524 +-- tests/test_5100_arrayvar.py | 133 +- tests/test_5200_sql_parser.py | 425 +-- tests/test_5300_connection_async.py | 1312 ++++--- tests/test_5400_cursor_execute_async.py | 1107 +++--- tests/test_5500_pool_async.py | 1169 +++--- tests/test_5600_dbobject_async.py | 1244 +++---- tests/test_5700_lob_var_async.py | 960 ++--- tests/test_5800_cursor_var_async.py | 727 ++-- tests/test_5900_dml_returning_async.py | 983 ++--- tests/test_6000_typehandler_async.py | 421 ++- tests/test_6100_cursor_executemany_async.py | 715 ++-- tests/test_6200_cursor_callproc_async.py | 209 +- tests/test_6300_cursor_other_async.py | 1792 ++++----- tests/test_6400_vector_var.py | 1402 +++---- 
tests/test_6500_vector_interop.py | 430 ++- tests/test_6600_defaults.py | 385 +- tests/test_6700_json_23.py | 310 +- tests/test_6800_error_async.py | 379 +- tests/test_6900_oson.py | 167 +- ..._7000_connection_async_shortcut_methods.py | 659 ++-- tests/test_7100_interval_ym_var.py | 353 +- tests/test_7200_tnsnames.py | 1006 +++--- tests/test_7300_unsupported_features_thin.py | 69 +- tests/test_7400_tpc_async.py | 641 ++-- tests/test_7500_binary_vector.py | 134 +- tests/test_7600_pipelining_async.py | 1859 +++++----- tests/test_7700_sparse_vector.py | 1445 ++++---- tests/test_7800_aq_raw.py | 913 ++--- tests/test_7900_aq_raw_async.py | 784 ++-- tests/test_8000_dataframe.py | 3193 ++++++++-------- tests/test_8100_dataframe_async.py | 2762 +++++++------- tests/test_8200_aq_bulk_async.py | 231 +- tests/test_8300_aq_json.py | 745 ++-- tests/test_8400_aq_dbobject_async.py | 813 ++--- tests/test_8500_aq_json_async.py | 726 ++-- tests/test_8600_cursor_scrollable_async.py | 415 +-- tests/test_8700_sessionless_transaction.py | 1128 +++--- ...test_8800_sessionless_transaction_async.py | 1149 +++--- tests/test_8900_dataframe_ingestion.py | 1693 +++++---- tests/test_9000_dataframe_ingestion_async.py | 1704 +++++---- tests/test_9100_dataframe_vector.py | 613 ++-- tests/test_9200_dataframe_vector_async.py | 624 ++-- tests/test_env.py | 896 ----- tox.ini | 7 +- 110 files changed, 39494 insertions(+), 38934 deletions(-) create mode 100644 tests/conftest.py rename tests/ext/{test_env.py => conftest.py} (62%) delete mode 100644 tests/test_env.py diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 8c8a38ff..3abde50f 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -75,6 +75,7 @@ Common Changes #) Fixed bug when attempting to append an element to a :ref:`DbObject ` which is not actually a collection. #) API documentation is now generated from the source code. +#) The test suite now uses `pytest `__. #) Internal change: `typing_extensions `__ is now a dependency. diff --git a/pyproject.toml b/pyproject.toml index 75714532..c630b8e1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -53,6 +53,23 @@ Documentation = "http://python-oracledb.readthedocs.io" Issues = "https://github.com/oracle/python-oracledb/issues" Source = "https://github.com/oracle/python-oracledb" +[project.optional-dependencies] +test = [ + "anyio", + "numpy", + "pandas", + "pyarrow", + "pytest", +] +oci_config = ["oci"] +oci_auth = ["oci"] +azure_config = [ + "azure-appconfiguration", + "azure-identity", + "azure-keyvault-secrets" +] +azure_auth = ["msal"] + [tool.setuptools] zip-safe = false packages = [ @@ -77,17 +94,8 @@ line-length = 79 target-version = "py39" exclude = ["templates"] -[project.optional-dependencies] -test = [ - "numpy", - "pandas", - "pyarrow", -] -oci_config = ["oci"] -oci_auth = ["oci"] -azure_config = [ - "azure-appconfiguration", - "azure-identity", - "azure-keyvault-secrets" +[tool.pytest.ini_options] +minversion = "8.3.0" +testpaths = [ + "tests" ] -azure_auth = ["msal"] diff --git a/tests/README.md b/tests/README.md index 68a38677..b3a1d831 100644 --- a/tests/README.md +++ b/tests/README.md @@ -1,19 +1,19 @@ This directory contains the test suite for python-oracledb. -1. The schemas and SQL objects that are referenced in the test suite can be - created by running the Python script [create_schema.py][1]. 
The script - requires administrative privileges and will prompt for these credentials as - well as the names of the schemas that will be created, unless a number of - environment variables are set as documented in the Python script - [test_env.py][2]. Run the script using the following command: +1. Install pytest and tox: + + python -m pip install pytest tox --upgrade - python create_schema.py +2. Set the required database credential and related environment variables + documented in [conftest.py][2]. -2. Install tox: +3. The schemas and SQL objects that are referenced in the test suite can be + created by running the Python script [create_schema.py][1]. The script + requires administrative privileges to complete successfully: - python -m pip install tox --upgrade + python -m pytest tests/create_schema.py -3. Run the test suite by issuing the following command in the top-level +4. Run the test suite by issuing the following command in the top-level directory of your oracledb installation: python -m tox @@ -23,22 +23,26 @@ This directory contains the test suite for python-oracledb. Alternatively, you can use the currently installed build of oracledb and run the following command instead: - python -m unittest discover -v -s tests + python -m pytest You may also run each of the test scripts independently, as in: - python test_1000_module.py + python -m pytest tests/test_1000_module.py -4. After running the test suite, the schemas can be dropped by running the + The tests run in thin mode by default. If you wish to run the tests in + thick mode, use the following command: + + python -m pytest --use-thick-mode + +5. After running the test suite, the schemas can be dropped by running the Python script [drop_schema.py][3]. The script requires administrative - privileges and will prompt for these credentials as well as the names of - the schemas that will be dropped, unless a number of environment variables - are set as documented in the Python script [test_env.py][2]. Run the + privileges to complete successfully. A set of environment variables should + be set as documented in the Python script [conftest.py][2]. Run the script using the following command: - python drop_schema.py + python -m pytest tests/drop_schema.py -5. Enable tests that require extra configuration +6. Enable tests that require extra configuration The following test(s) are automatically skipped if their required environment variable(s) and setup is not available. @@ -64,7 +68,8 @@ This directory contains the test suite for python-oracledb. alter user grant connect through ; + [1]: https://github.com/oracle/python-oracledb/blob/main/tests/create_schema.py -[2]: https://github.com/oracle/python-oracledb/blob/main/tests/test_env.py +[2]: https://github.com/oracle/python-oracledb/blob/main/tests/conftest.py [3]: https://github.com/oracle/python-oracledb/blob/main/tests/drop_schema.py [4]: https://python-oracledb.readthedocs.io/en/latest/user_guide/connection_handling.html#connecting-using-external-authentication diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..52ef17b0 --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,1024 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
+# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Sets the environment used by the python-oracledb test suite. Production +# applications should consider using External Authentication to avoid hard +# coded credentials. +# +# You can set values in environment variables to bypass having the test suite +# request the information it requires. +# +# PYO_TEST_MAIN_USER: user used for most test cases +# PYO_TEST_MAIN_PASSWORD: password of user used for most test cases +# PYO_TEST_PROXY_USER: user for testing proxying +# PYO_TEST_PROXY_PASSWORD: password of user for testing proxying +# PYO_TEST_CONNECT_STRING: connect string for test suite +# PYO_TEST_ADMIN_USER: administrative user for test suite +# PYO_TEST_ADMIN_PASSWORD: administrative password for test suite +# PYO_TEST_WALLET_LOCATION: location of wallet file (thin mode, mTLS) +# PYO_TEST_WALLET_PASSWORD: password for wallet file (thin mode, mTLS) +# PYO_TEST_EXTERNAL_USER: user for testing external authentication +# PYO_TEST_EDITION_NAME: name of edition for editioning tests +# PYO_TEST_PLUGINS: list of plugins to import before running tests +# PYO_TEST_ORACLE_CLIENT_PATH: Oracle Client or Instant Client library dir +# +# PYO_TEST_CONNECT_STRING can be set to an Easy Connect string, or a +# Net Service Name from a tnsnames.ora file or external naming service, +# or it can be the name of a local Oracle database instance. +# +# On Windows set PYO_TEST_ORACLE_CLIENT_PATH if Oracle libraries are not in +# PATH. On macOS set the variable to the Instant Client directory. On Linux do +# not set the variable; instead set LD_LIBRARY_PATH or configure ldconfig +# before running Python. +# +# If oracledb is using Instant Client, then an Easy Connect string is generally +# appropriate. The syntax is: +# +# [//]host_name[:port][/service_name][:server_type][/instance_name] +# +# Commonly just the host_name and service_name are needed +# e.g. "localhost/orclpdb1" or "localhost/XEPDB1" +# +# If using a tnsnames.ora file, the file can be in a default +# location such as $ORACLE_HOME/network/admin/tnsnames.ora or +# /etc/tnsnames.ora. Alternatively set the TNS_ADMIN environment +# variable and put the file in $TNS_ADMIN/tnsnames.ora. +# +# The administrative user for cloud databases is ADMIN and the administrative +# user for on premises databases is SYSTEM. 
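# For illustration only, a typical minimal setup before running the suite;
# the user name and connect string below are simply the defaults assumed by
# this file, and the password is a placeholder, not a real credential:
#
#     export PYO_TEST_MAIN_USER=pythontest
#     export PYO_TEST_MAIN_PASSWORD=<your-password>
#     export PYO_TEST_CONNECT_STRING=localhost/orclpdb1
#     python -m pytest
#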
+# ----------------------------------------------------------------------------- + +import importlib +import os +import platform +import secrets +import string + +import numpy +import oracledb +import pandas +import pytest + + +class DefaultsContextManager: + def __init__(self, attribute, desired_value): + self.attribute = attribute + self.desired_value = desired_value + + def __enter__(self): + self.original_value = getattr(oracledb.defaults, self.attribute) + setattr(oracledb.defaults, self.attribute, self.desired_value) + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + setattr(oracledb.defaults, self.attribute, self.original_value) + + +class FullCodeErrorContextManager: + + def __init__(self, full_codes): + self.full_codes = full_codes + if len(full_codes) == 1: + self.message_fragment = f'Error "{full_codes[0]}"' + else: + message_fragment = ", ".join(f'"{s}"' for s in full_codes[:-1]) + message_fragment += f' or "{full_codes[-1]}"' + self.message_fragment = f"One of the errors {message_fragment}" + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, tb): + if exc_type is None: + raise AssertionError(f"{self.message_fragment} was not raised.") + if not issubclass(exc_type, oracledb.Error): + return False + if issubclass(exc_type, oracledb.Error): + self.error_obj = exc_value.args[0] + if self.error_obj.full_code not in self.full_codes: + message = ( + f"{self.message_fragment} should have been raised but " + f'"{self.error_obj.full_code}" was raised instead.' + ) + raise AssertionError(message) + return True + + +class SystemStatInfo: + get_sid_sql = "select sys_context('userenv', 'sid') from dual" + get_stat_sql = """ + select ss.value + from v$sesstat ss, v$statname sn + where ss.sid = :sid + and ss.statistic# = sn.statistic# + and sn.name = :stat_name + """ + stat_name = None + + def _initialize(self, conn, admin_conn): + self.prev_value = 0 + self.admin_conn = admin_conn + with conn.cursor() as cursor: + cursor.execute(self.get_sid_sql) + (self.sid,) = cursor.fetchone() + self.get_value() + + async def _initialize_async(self, conn, admin_conn): + self.prev_value = 0 + self.admin_conn = admin_conn + with conn.cursor() as cursor: + await cursor.execute(self.get_sid_sql) + (self.sid,) = await cursor.fetchone() + await self.get_value_async() + + def get_value(self): + with self.admin_conn.cursor() as cursor: + cursor.execute( + self.get_stat_sql, sid=self.sid, stat_name=self.stat_name + ) + (current_value,) = cursor.fetchone() + diff_value = current_value - self.prev_value + self.prev_value = current_value + return diff_value + + async def get_value_async(self): + with self.admin_conn.cursor() as cursor: + await cursor.execute( + self.get_stat_sql, sid=self.sid, stat_name=self.stat_name + ) + (current_value,) = await cursor.fetchone() + diff_value = current_value - self.prev_value + self.prev_value = current_value + return diff_value + + +class RoundTripInfo(SystemStatInfo): + stat_name = "SQL*Net roundtrips to/from client" + + +class ParseCountInfo(SystemStatInfo): + stat_name = "parse count (total)" + + +class TestEnv: + + def _convert_df_value(self, df_val): + """ + This method converts a dataframe cell value to use with assertions + For e.g. NaN and np.array cannot be compared directly. 
Values are + converted according to the following rules: + - NaN -> None + - np.array -> np.array.tolist() (Python list) + """ + if isinstance(df_val, numpy.ndarray): + return df_val.tolist() + elif pandas.isna(df_val): + return None + elif isinstance(df_val, dict): + return {k: self._convert_df_value(v) for k, v in df_val.items()} + else: + return df_val + + def _get_charset(self, conn): + """ + Determines the character set in use by the database. + """ + with conn.cursor() as cursor: + cursor.execute( + """ + select value + from nls_database_parameters + where parameter = 'NLS_CHARACTERSET' + """ + ) + (value,) = cursor.fetchone() + return value + + def _get_charset_ratios(self, conn): + """ + Calculates the character set ratios used by the database. + """ + cursor = conn.cursor() + cursor.execute( + """ + select + cast('X' as varchar2(1)), + cast('Y' as nvarchar2(1)) + from dual + """ + ) + varchar_info, nvarchar_info = cursor.description + return (varchar_info.internal_size, nvarchar_info.internal_size) + + def _initialize(self): + """ + Initializes the remaining items after establishing a connection to the + database. This is done so that alternative initialization of some + process wide variables in thick mode can take place independently for + some tests. + """ + + # if already initialized, nothing to do! + if self.initialized: + return + + # setup thick mode, if needed + if self.use_thick_mode: + if oracledb.is_thin_mode(): + oracledb.init_oracle_client(lib_dir=self.oracle_client_path) + oracledb.defaults.thick_mode_dsn_passthrough = False + self.client_version = oracledb.clientversion()[:2] + + # import any requested plugins + if self.plugins is not None: + for name in self.plugins.split(","): + module_name = f"oracledb.plugins.{name}" + importlib.import_module(module_name) + + # establish a connection to determine the remaining information + params = self.get_connect_params() + conn = oracledb.connect(dsn=self.connect_string, params=params) + version_parts = conn.version.split(".")[:2] + self.server_version = tuple(int(s) for s in version_parts) + self.is_drcp = self._is_drcp() + self.is_implicit_pooling = self._is_implicit_pooling() + self.is_on_oracle_cloud = self._is_on_oracle_cloud(conn) + self.charset = self._get_charset(conn) + self.charset_ratios = self._get_charset_ratios(conn) + self.sleep_proc_name = ( + "dbms_session.sleep" + if self.server_version >= (18, 0) + else "dbms_lock.sleep" + ) + + # mark environment as fully initialized + self.initialized = True + + def _is_drcp(self): + """ + Calculates whether or not DRCP is being used. + """ + params = oracledb.ConnectParams() + params.parse_connect_string(self.connect_string) + server_type = params.server_type + return ( + server_type == "pooled" + or isinstance(server_type, list) + and "pooled" in server_type + ) + + def _is_implicit_pooling(self): + """ + Calculates whether implicit pooling is being used. + """ + if not self.is_drcp: + return False + params = oracledb.ConnectParams() + params.parse_connect_string(self.connect_string) + pool_boundary = params.pool_boundary + return ( + pool_boundary is not None + or isinstance(pool_boundary, list) + and [s for s in pool_boundary if s] + ) + + def _is_on_oracle_cloud(self, conn): + """ + Calculates whether the database is running on Oracle Cloud. 
+ """ + if self.server_version < (18, 0): + return False + cursor = conn.cursor() + cursor.execute( + """ + select sys_context('userenv', 'cloud_service') + from dual + """ + ) + (service_name,) = cursor.fetchone() + return service_name is not None + + def assert_raises_full_code(self, *full_codes): + """ + Verifies that the block of code raises an exception with the specified + full codes. + """ + return FullCodeErrorContextManager(full_codes) + + def create_schema(self, conn): + """ + Creates the database objects used by the python-oracledb test suite. + """ + self.drop_schema(conn) + self.run_sql_script( + conn, + "create_schema", + main_user=self.main_user, + main_password=self.main_password, + proxy_user=self.proxy_user, + proxy_password=self.proxy_password, + edition_name=self.edition_name, + ) + if self.has_server_version(21): + self.run_sql_script( + conn, "create_schema_21", main_user=self.main_user + ) + if self.has_server_version(23, 4): + self.run_sql_script( + conn, "create_schema_23_4", main_user=self.main_user + ) + if self.has_server_version(23, 5): + self.run_sql_script( + conn, "create_schema_23_5", main_user=self.main_user + ) + if self.has_server_version(23, 7): + self.run_sql_script( + conn, "create_schema_23_7", main_user=self.main_user + ) + if self.is_on_oracle_cloud: + self.run_sql_script( + conn, "create_schema_cloud", main_user=self.main_user + ) + + def drop_schema(self, conn): + """ + Drops the database objects used by the python-oracledb test suite. + """ + self.run_sql_script( + conn, + "drop_schema", + main_user=self.main_user, + proxy_user=self.proxy_user, + edition_name=self.edition_name, + ) + + def defaults_context_manager(self, attribute, desired_value): + """ + Returns a defaults context manager which sets the specified attribute + to the desired value and restores it once the block completes. + """ + return DefaultsContextManager(attribute, desired_value) + + def get_admin_connection(self, use_async=False): + """ + Returns an administrative connection to the database. + """ + if not self.admin_user or not self.admin_password: + pytest.skip("missing administrative credentials") + params = self.get_connect_params() + if self.admin_user.upper() == "SYS": + params = params.copy() + params.set(mode=oracledb.AUTH_MODE_SYSDBA) + method = oracledb.connect_async if use_async else oracledb.connect + return method( + dsn=self.connect_string, + params=params, + user=self.admin_user, + password=self.admin_password, + ) + + def get_admin_connection_async(self): + """ + Returns an administrative connection to the database. 
+ """ + return self.get_admin_connection(use_async=True) + + def get_and_clear_queue( + self, + conn, + queue_name, + payload_type=None, + message="not supported with this client/server combination", + ): + if payload_type == "JSON": + if not self.has_client_and_server_version(21): + pytest.skip(message) + elif isinstance(payload_type, str): + payload_type = conn.gettype(payload_type) + queue = conn.queue(queue_name, payload_type) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + while queue.deqone(): + pass + return conn.queue(queue_name, payload_type) + + async def get_and_clear_queue_async( + self, + conn, + queue_name, + payload_type=None, + message="not supported with this client/server combination", + ): + if payload_type == "JSON": + if not self.has_client_and_server_version(21): + pytest.skip(message) + elif isinstance(payload_type, str): + payload_type = await conn.gettype(payload_type) + queue = conn.queue(queue_name, payload_type) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + while await queue.deqone(): + pass + return conn.queue(queue_name, payload_type) + + def get_connect_params(self): + """ + Returns an instance of ConnectParams used to manage connection + parameters. + """ + return oracledb.ConnectParams( + user=self.main_user, + password=self.main_password, + config_dir=self.wallet_location, + wallet_location=self.wallet_location, + wallet_password=self.wallet_password, + disable_oob=True, + ) + + def get_connection(self, dsn=None, use_async=False, **kwargs): + """ + Returns a connection to the database. + """ + self._initialize() + if dsn is None: + dsn = self.connect_string + method = oracledb.connect_async if use_async else oracledb.connect + return method(dsn=dsn, params=self.get_connect_params(), **kwargs) + + def get_connection_async(self, dsn=None, **kwargs): + """ + Returns a connection to the database using asyncio. + """ + return self.get_connection(dsn, use_async=True, **kwargs) + + def get_data_from_df(self, df): + """ + Returns data from the data frame in a normalized fashion suitable for + comparison. In particular, NaN values cannot be compared to one another + so they are converted to the value None for comparison purposes. + """ + return [ + tuple(self._convert_df_value(v) for v in row) + for row in df.itertuples(index=False, name=None) + ] + + def get_db_object_as_plain_object(self, obj): + """ + Returns a database object as a plain object to make assertions simpler. + """ + if obj.type.iscollection: + element_values = [] + for value in obj.aslist(): + if isinstance(value, oracledb.DbObject): + value = self.get_db_object_as_plain_object(value) + elif isinstance(value, oracledb.LOB): + value = value.read() + element_values.append(value) + return element_values + attr_values = [] + for attribute in obj.type.attributes: + value = getattr(obj, attribute.name) + if isinstance(value, oracledb.DbObject): + value = self.get_db_object_as_plain_object(value) + elif isinstance(value, oracledb.LOB): + value = value.read() + attr_values.append(value) + return tuple(attr_values) + + async def get_db_object_as_plain_object_async(self, obj): + """ + Returns a database object as a plain object to make assertions simpler. 
+ """ + if obj.type.iscollection: + element_values = [] + for value in obj.aslist(): + if isinstance(value, oracledb.DbObject): + value = await self.get_db_object_as_plain_object_async( + value + ) + elif isinstance(value, oracledb.AsyncLOB): + value = await value.read() + element_values.append(value) + return element_values + attr_values = [] + for attribute in obj.type.attributes: + value = getattr(obj, attribute.name) + if isinstance(value, oracledb.DbObject): + value = await self.get_db_object_as_plain_object_async(value) + elif isinstance(value, oracledb.AsyncLOB): + value = await value.read() + attr_values.append(value) + return tuple(attr_values) + + def get_pool(self, use_async=False, **kwargs): + """ + Returns a connection pool for the database. + """ + self._initialize() + method = ( + oracledb.create_pool_async if use_async else oracledb.create_pool + ) + return method( + dsn=self.connect_string, params=self.get_pool_params(), **kwargs + ) + + def get_pool_async(self, **kwargs): + """ + Returns a connection pool for the database using asyncio. + """ + return self.get_pool(use_async=True, **kwargs) + + def get_pool_params(self): + """ + Returns an instance of PoolParams used to manage connection pool + parameters. + """ + return oracledb.PoolParams( + user=self.main_user, + password=self.main_password, + config_dir=self.wallet_location, + wallet_location=self.wallet_location, + wallet_password=self.wallet_password, + disable_oob=True, + ) + + def get_random_string(self, length=10): + """ + Return a random string of the specified length. + """ + return "".join( + secrets.choice(string.ascii_letters) for i in range(length) + ) + + def get_sid_serial(self, conn): + """ + Returns the sid and serial number of the connection as a 2-tuple. + """ + if not self.use_thick_mode: + return (conn.session_id, conn.serial_num) + with conn.cursor() as cursor: + cursor.execute( + """ + select + dbms_debug_jdwp.current_session_id, + dbms_debug_jdwp.current_session_serial + from dual + """ + ) + return cursor.fetchone() + + def has_client_and_server_version(self, major_version, minor_version=0): + """ + Returns a boolean indicating if the test environment is using a client + version and a database with the specified version or later. + """ + if not self.has_client_version(major_version, minor_version): + return False + if not self.has_server_version(major_version, minor_version): + return False + return True + + def has_client_version(self, major_version, minor_version=0): + """ + Returns a boolean indicating if the test environment is using a client + version with the specified version or later. + """ + self._initialize() + if oracledb.is_thin_mode(): + return True + return self.client_version >= (major_version, minor_version) + + def has_server_version(self, major_version, minor_version=0): + """ + Returns a boolean indicating if the test environment is using a server + version with the specified version or later. + """ + self._initialize() + return self.server_version >= (major_version, minor_version) + + def run_sql_script(self, conn, script_name, **kwargs): + """ + Runs the specified script with the specified replacement values. 
+ """ + statement_parts = [] + cursor = conn.cursor() + replace_values = [("&" + k + ".", v) for k, v in kwargs.items()] + [ + ("&" + k, v) for k, v in kwargs.items() + ] + script_dir = os.path.dirname(__file__) + file_name = os.path.join(script_dir, "sql", script_name + ".sql") + for line in open(file_name): + if line.strip() == "/": + statement = "".join(statement_parts).strip() + if statement: + for search_value, replace_value in replace_values: + statement = statement.replace( + search_value, replace_value + ) + try: + cursor.execute(statement) + except: + print("Failed to execute SQL:", statement) + raise + statement_parts = [] + else: + statement_parts.append(line) + cursor.execute( + """ + select name, type, line, position, text + from dba_errors + where owner = upper(:owner) + order by name, type, line, position + """, + owner=self.main_user, + ) + prev_name = prev_obj_type = None + for name, obj_type, line_num, position, text in cursor: + if name != prev_name or obj_type != prev_obj_type: + print("%s (%s)" % (name, obj_type)) + prev_name = name + prev_obj_type = obj_type + print(" %s/%s %s" % (line_num, position, text)) + + +@pytest.fixture +def admin_conn(test_env): + """ + Return an administrative connection to the database using the pytest + configuration. + """ + with test_env.get_admin_connection() as conn: + yield conn + + +@pytest.fixture +def anyio_backend(): + """ + Only asyncio is being tested currently. + """ + return "asyncio" + + +@pytest.fixture +async def async_admin_conn(test_env): + """ + Return an administrative connection to the database using the pytest + configuration with asyncio. + """ + async with test_env.get_admin_connection_async() as conn: + yield conn + + +@pytest.fixture +async def async_conn(test_env): + """ + Return a connection to the database using the pytest configuration with + asyncio + """ + async with test_env.get_connection_async() as conn: + with conn.cursor() as cursor: + await cursor.execute("alter session set time_zone = '+00:00'") + yield conn + + +@pytest.fixture +async def async_cursor(async_conn): + """ + Return a connection to the database using the pytest configuration using + asyncio. + """ + with async_conn.cursor() as cursor: + yield cursor + + +@pytest.fixture +def conn(test_env): + """ + Return a connection to the database using the pytest configuration. + """ + with test_env.get_connection() as conn: + with conn.cursor() as cursor: + cursor.execute("alter session set time_zone = '+00:00'") + yield conn + + +@pytest.fixture +def cursor(conn): + """ + Return a connection to the database using the pytest configuration. + """ + with conn.cursor() as cursor: + yield cursor + + +@pytest.fixture +def disable_fetch_lobs(): + """ + Disables fetching of LOB locators for the duration of the test. + """ + orig_value = oracledb.defaults.fetch_lobs + oracledb.defaults.fetch_lobs = False + yield + oracledb.defaults.fetch_lobs = orig_value + + +def get_env_value(name, default_value=None, required=False): + """ + Returns the value of the environment variable if it is present and the + default value if it is not. If marked as required, the test suite will + immediately fail. 
+ """ + env_name = f"PYO_TEST_{name}" + value = os.environ.get(env_name) + if value is None: + if required: + msg = f"missing value for environment variable {env_name}" + pytest.exit(msg, 1) + return default_value + return value + + +@pytest.fixture +def parse_count_checker(conn, admin_conn, test_env): + """ + Return an object used for checking the round trips on a connection. + """ + if test_env.is_implicit_pooling: + pytest.skip("sessions can change with implicit pooling") + checker = ParseCountInfo() + checker._initialize(conn, admin_conn) + return checker + + +@pytest.fixture +async def parse_count_checker_async(async_conn, async_admin_conn, test_env): + """ + Return an object used for checking the round trips on a connection. + """ + if test_env.is_implicit_pooling: + pytest.skip("sessions can change with implicit pooling") + checker = ParseCountInfo() + await checker._initialize_async(async_conn, async_admin_conn) + return checker + + +def pytest_addoption(parser): + """ + Adds python-oracledb testing options to the command line. + """ + parser.addoption("--use-thick-mode", action="store_true") + + +@pytest.fixture +def round_trip_checker(conn, admin_conn, test_env): + """ + Return an object used for checking the round trips on a connection. + """ + if test_env.is_implicit_pooling: + pytest.skip("sessions can change with implicit pooling") + checker = RoundTripInfo() + checker._initialize(conn, admin_conn) + return checker + + +@pytest.fixture +async def round_trip_checker_async(async_conn, async_admin_conn, test_env): + """ + Return an object used for checking the round trips on a connection. + """ + if test_env.is_implicit_pooling: + pytest.skip("sessions can change with implicit pooling") + checker = RoundTripInfo() + await checker._initialize_async(async_conn, async_admin_conn) + return checker + + +@pytest.fixture +def skip_if_drcp(test_env): + """ + Skips the test if running with DRCP. + """ + test_env._initialize() + if test_env.is_drcp: + pytest.skip("not supported with DRCP") + + +@pytest.fixture +def skip_if_implicit_pooling(test_env): + """ + Skips the test if running with implicit pooling. + """ + test_env._initialize() + if test_env.is_implicit_pooling: + pytest.skip("not supported with implicit pooling") + + +@pytest.fixture +def skip_unless_binary_vectors_supported(test_env): + """ + Skips the test if binary vectors are not supported. + """ + if not test_env.has_client_and_server_version(23, 5): + pytest.skip("no binary vector support") + + +@pytest.fixture +def skip_unless_call_timeout_supported(test_env): + """ + Skips the test if not running in thin mode. + """ + if not test_env.has_client_version(18): + pytest.skip("no call timeout support") + + +@pytest.fixture +def skip_unless_domains_supported(test_env): + """ + Skips the test if domains are not supported. + """ + if not test_env.has_server_version(23): + pytest.skip("no domain support") + + +@pytest.fixture +def skip_unless_json_supported(test_env): + """ + Skips the test if JSON values are not supported. + """ + if not test_env.has_client_and_server_version(12, 2): + pytest.skip("no JSON support") + + +@pytest.fixture +def skip_unless_long_passwords_supported(test_env): + """ + Skips the test if not running in thin mode. + """ + if not test_env.has_client_and_server_version(23): + pytest.skip("no long password support") + + +@pytest.fixture +def skip_unless_native_boolean_supported(test_env): + """ + Skips the test if native booleans are not supported. 
+ """ + if not test_env.has_client_and_server_version(23): + pytest.skip("no native boolean support") + + +@pytest.fixture +def skip_unless_native_json_supported(test_env): + """ + Skips the test if native JSON data is not supported. + """ + if not test_env.has_client_and_server_version(21): + pytest.skip("no native JSON support") + + +@pytest.fixture +def skip_unless_plsql_boolean_supported(test_env): + """ + Skips the test if PL/SQL booleans are not supported. + """ + if not test_env.has_client_and_server_version(12, 1): + pytest.skip("no PL/SQL boolean support") + + +@pytest.fixture +def skip_unless_pool_timed_wait_supported(test_env): + """ + Skips the test if pooled timed wait is not supported. + """ + if not test_env.has_client_and_server_version(12, 2): + pytest.skip("no pool timed wait support") + + +@pytest.fixture +def skip_unless_sessionless_transactions_supported(test_env): + """ + Skips the test if sessionless transactions are not supported. + """ + if not test_env.has_client_and_server_version(23, 6): + pytest.skip("no sessionless transactions support") + + +@pytest.fixture +def skip_unless_sparse_vectors_supported(test_env): + """ + Skips the test if sparse vectors are not supported. + """ + if not test_env.has_client_and_server_version(23, 7): + pytest.skip("no sparse vector support") + + +@pytest.fixture +def skip_unless_thick_mode(test_env): + """ + Skips the test if not running in thick mode. + """ + if not test_env.use_thick_mode: + pytest.skip("requires thick mode") + + +@pytest.fixture +def skip_unless_thin_mode(test_env): + """ + Skips the test if not running in thin mode. + """ + if test_env.use_thick_mode: + pytest.skip("requires thin mode") + + +@pytest.fixture +def skip_unless_vectors_supported(test_env): + """ + Skips the test if vectors are not supported. + """ + if not test_env.has_client_and_server_version(23, 4): + pytest.skip("no vector support") + + +@pytest.fixture +def soda_db(conn, test_env): + """ + Return the SODA database object. + """ + message = "not supported with this client/server combination" + if not test_env.use_thick_mode: + pytest.skip(message) + if not test_env.has_client_version(18, 3): + pytest.skip(message) + if not test_env.has_server_version(18): + pytest.skip(message) + if test_env.has_server_version(20, 1): + if not test_env.has_client_version(20, 1): + pytest.skip(message) + if test_env.has_client_version(23, 3) and platform.system() == "Darwin": + pytest.skip(message) + soda_db = conn.getSodaDatabase() + for name in soda_db.getCollectionNames(): + soda_db.openCollection(name).drop() + return soda_db + + +@pytest.fixture(scope="session") +def test_env(pytestconfig): + """ + Returns an object containing a test environment which can be used to + perform common checks. 
+ """ + env = TestEnv() + env.use_thick_mode = pytestconfig.getoption("--use-thick-mode") + env.main_user = get_env_value("MAIN_USER", default_value="pythontest") + env.main_password = get_env_value("MAIN_PASSWORD", required=True) + env.proxy_user = get_env_value( + "PROXY_USER", default_value="pythontestproxy" + ) + env.proxy_password = get_env_value("PROXY_PASSWORD") + env.connect_string = get_env_value( + "CONNECT_STRING", default_value="localhost/orclpdb1" + ) + env.admin_user = get_env_value("ADMIN_USER", default_value="admin") + env.admin_password = get_env_value("ADMIN_PASSWORD") + if env.use_thick_mode: + env.wallet_location = env.wallet_password = None + else: + env.wallet_location = get_env_value("WALLET_LOCATION") + env.wallet_password = get_env_value("WALLET_PASSWORD") + env.external_user = get_env_value("EXTERNAL_USER") + env.edition_name = get_env_value( + "EDITION_NAME", default_value="pythonedition" + ) + env.plugins = get_env_value("PLUGINS") + env.oracle_client_path = ( + get_env_value("ORACLE_CLIENT_PATH") + if platform.system() in ("Darwin", "Windows") + else None + ) + env.initialized = False + return env diff --git a/tests/create_schema.py b/tests/create_schema.py index 98fd236e..a9c5b4a4 100644 --- a/tests/create_schema.py +++ b/tests/create_schema.py @@ -29,44 +29,6 @@ # necessary for running the python-oracledb test suite. # ----------------------------------------------------------------------------- -import drop_schema -import test_env -# connect as administrative user (usually SYSTEM or ADMIN) -conn = test_env.get_admin_connection() - -# drop existing users and editions, if applicable -drop_schema.drop_schema(conn) - -# create test schemas -print("Creating test schemas...") -test_env.run_sql_script( - conn, - "create_schema", - main_user=test_env.get_main_user(), - main_password=test_env.get_main_password(), - proxy_user=test_env.get_proxy_user(), - proxy_password=test_env.get_proxy_password(), - edition_name=test_env.get_edition_name(), -) -if test_env.has_server_version(21): - test_env.run_sql_script( - conn, "create_schema_21", main_user=test_env.get_main_user() - ) -if test_env.has_server_version(23, 4): - test_env.run_sql_script( - conn, "create_schema_23_4", main_user=test_env.get_main_user() - ) -if test_env.has_server_version(23, 5): - test_env.run_sql_script( - conn, "create_schema_23_5", main_user=test_env.get_main_user() - ) -if test_env.has_server_version(23, 7): - test_env.run_sql_script( - conn, "create_schema_23_7", main_user=test_env.get_main_user() - ) -if test_env.is_on_oracle_cloud(conn): - test_env.run_sql_script( - conn, "create_schema_cloud", main_user=test_env.get_main_user() - ) -print("Done.") +def test_create_schema(admin_conn, test_env): + test_env.create_schema(admin_conn) diff --git a/tests/drop_schema.py b/tests/drop_schema.py index 29ae4339..c9bb4225 100644 --- a/tests/drop_schema.py +++ b/tests/drop_schema.py @@ -32,21 +32,6 @@ # test schemas. 
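Taken together, the fixtures and helpers defined in this conftest are consumed along the following lines in an individual test module. This is only an illustrative sketch (the test names and the missing table name are hypothetical), not code taken from the suite:

    import oracledb


    def test_query_current_user(cursor):
        # the conn/cursor fixtures are provided by conftest.py
        cursor.execute("select user from dual")
        (user,) = cursor.fetchone()
        assert user


    def test_missing_table(cursor, test_env):
        # ORA-00942: table or view does not exist
        with test_env.assert_raises_full_code("ORA-00942"):
            cursor.execute("select * from hypothetical_missing_table")


    def test_defaults_helper(test_env):
        # temporarily override oracledb.defaults for the duration of the block
        with test_env.defaults_context_manager("fetch_lobs", False):
            assert oracledb.defaults.fetch_lobs is False
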
# ----------------------------------------------------------------------------- -import test_env - -def drop_schema(conn): - print("Dropping test schemas...") - test_env.run_sql_script( - conn, - "drop_schema", - main_user=test_env.get_main_user(), - proxy_user=test_env.get_proxy_user(), - edition_name=test_env.get_edition_name(), - ) - - -if __name__ == "__main__": - conn = test_env.get_admin_connection() - drop_schema(conn) - print("Done.") +def test_drop_schema(admin_conn, test_env): + test_env.drop_schema(admin_conn) diff --git a/tests/ext/README.md b/tests/ext/README.md index 79d4d00a..7e1f7311 100644 --- a/tests/ext/README.md +++ b/tests/ext/README.md @@ -8,7 +8,7 @@ file. All of the tests can be run by executing this command: - python -m unittest discover -s tests/ext -v + pytest tests/ext Or each file can be run independently. diff --git a/tests/ext/test_env.py b/tests/ext/conftest.py similarity index 62% rename from tests/ext/test_env.py rename to tests/ext/conftest.py index a5c4b9fd..34ee9ff4 100644 --- a/tests/ext/test_env.py +++ b/tests/ext/conftest.py @@ -29,18 +29,37 @@ import configparser import os -import unittest - -dir_name = os.path.dirname(os.path.dirname(__file__)) -file_name = os.path.join(dir_name, os.path.basename(__file__)) -exec(open(file_name).read(), globals(), locals()) +import pytest DATABASES_SECTION_NAME = "Databases" +@pytest.fixture(scope="session") +def extended_config(test_env): + return ExtendedConfig(test_env) + + +@pytest.fixture +def skip_unless_has_orapki(extended_config): + if not extended_config.get_bool_value("has_orapki"): + pytest.skip("extended configuration has_orapki is disabled") + + +@pytest.fixture +def skip_unless_local_database(extended_config): + if not extended_config.get_bool_value("local_database"): + pytest.skip("extended configuration local_database is disabled") + + +@pytest.fixture +def skip_unless_run_long_tests(extended_config): + if not extended_config.get_bool_value("run_long_tests"): + pytest.skip("extended configuration run_long_tests is disabled") + + class ExtendedConfig: - def __init__(self): + def __init__(self, test_env): default_file_name = os.path.join( os.path.dirname(__file__), "config.ini" ) @@ -50,48 +69,18 @@ def __init__(self): self.parser = configparser.ConfigParser() self.parser.read(file_name) self.section_name = "DEFAULT" - connect_string_to_use = get_connect_string().upper() # noqa: F821 - if self.parser.has_section(DATABASES_SECTION_NAME): for section_name, connect_string in self.parser.items( DATABASES_SECTION_NAME ): - if connect_string.upper() == connect_string_to_use: + if connect_string.upper() == test_env.connect_string.upper(): self.section_name = section_name break - -_extended_config = ExtendedConfig() - - -def get_extended_config_bool(name, fallback=False): - return _extended_config.parser.getboolean( - _extended_config.section_name, name, fallback=fallback - ) - - -def get_extended_config_str(name, fallback=None): - return _extended_config.parser.get( - _extended_config.section_name, name, fallback=fallback - ) - - -def skip_unless_has_orapki(): - return unittest.skipUnless( - get_extended_config_bool("has_orapki"), - "extended configuration has_orapki is disabled", - ) - - -def skip_unless_local_database(): - return unittest.skipUnless( - get_extended_config_bool("local_database"), - "extended configuration local_database is disabled", - ) - - -def skip_unless_run_long_tests(): - return unittest.skipUnless( - get_extended_config_bool("run_long_tests"), - "extended configuration run_long_tests 
is disabled", - ) + def get_bool_value(self, name, fallback=False): + """ + Returns a boolean for a specifically named value. + """ + return self.parser.getboolean( + self.section_name, name, fallback=fallback + ) diff --git a/tests/ext/test_ext_1000_pool_shrink.py b/tests/ext/test_ext_1000_pool_shrink.py index 58575b08..7373f37c 100644 --- a/tests/ext/test_ext_1000_pool_shrink.py +++ b/tests/ext/test_ext_1000_pool_shrink.py @@ -29,92 +29,92 @@ import time -import test_env - - -@test_env.skip_unless_run_long_tests() -class TestCase(test_env.BaseTestCase): - def test_ext_1000(self): - "E1000 - test pool timeout with simple acquire after waiting" - pool = test_env.get_pool(min=3, max=10, increment=1, timeout=5) - conns = [pool.acquire() for i in range(7)] - self.assertEqual(pool.opened, 7) - for conn in conns: - conn.close() - time.sleep(10) - conn = pool.acquire() - self.assertEqual(pool.opened, 3) - - def test_ext_1001(self): - "E1001 - test pool timeout with older connection returned first" - pool = test_env.get_pool(min=2, max=5, increment=1, timeout=3) - conns = [pool.acquire() for i in range(3)] - conns[2].close() - for i in range(10): - with pool.acquire() as conn: - with conn.cursor() as cursor: - cursor.execute("select 1 from dual") - time.sleep(4) - conn = pool.acquire() - self.assertEqual(pool.opened, 3) - - @test_env.skip_unless_thin_mode() - def test_ext_1002(self): - "E1002 - test pool timeout shrinks to min on pool inactivity" - pool = test_env.get_pool(min=3, max=10, increment=2, timeout=4) - conns = [pool.acquire() for i in range(6)] - self.assertEqual(pool.opened, 6) - for conn in conns: - conn.close() - time.sleep(6) - self.assertEqual(pool.opened, 3) - - @test_env.skip_unless_thin_mode() - def test_ext_1003(self): - "E1003 - test pool timeout eliminates extra connections on inactivity" - pool = test_env.get_pool(min=4, max=10, increment=4, timeout=3) - conns = [pool.acquire() for i in range(5)] - self.assertEqual(pool.opened, 5) - time.sleep(2) - self.assertEqual(pool.opened, 8) - time.sleep(3) - self.assertEqual(pool.opened, 5) - del conns - - @test_env.skip_unless_thin_mode() - def test_ext_1004(self): - "E1004 - test pool max_lifetime_session on release" - pool = test_env.get_pool( - min=4, max=10, increment=4, max_lifetime_session=3 - ) - conns = [pool.acquire() for i in range(5)] - self.assertEqual(pool.opened, 5) - time.sleep(2) - self.assertEqual(pool.opened, 8) - time.sleep(2) - for conn in conns: - conn.close() - time.sleep(2) - self.assertEqual(pool.opened, 4) - - @test_env.skip_unless_thin_mode() - def test_ext_1005(self): - "E1005 - test pool max_lifetime_session on acquire" - pool = test_env.get_pool( - min=4, max=10, increment=4, max_lifetime_session=4 - ) - conns = [pool.acquire() for i in range(5)] - self.assertEqual(pool.opened, 5) - time.sleep(2) - self.assertEqual(pool.opened, 8) - for conn in conns: - conn.close() - time.sleep(4) - with pool.acquire(): - pass - time.sleep(2) - self.assertEqual(pool.opened, 4) - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(skip_unless_run_long_tests): + pass + + +def test_ext_1000(test_env): + "E1000 - test pool timeout with simple acquire after waiting" + pool = test_env.get_pool(min=3, max=10, increment=1, timeout=5) + conns = [pool.acquire() for i in range(7)] + assert pool.opened == 7 + for conn in conns: + conn.close() + time.sleep(10) + conn = pool.acquire() + assert pool.opened == 3 + + +def test_ext_1001(test_env): + "E1001 - test 
pool timeout with older connection returned first" + pool = test_env.get_pool(min=2, max=5, increment=1, timeout=3) + conns = [pool.acquire() for i in range(3)] + conns[2].close() + for i in range(10): + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute("select 1 from dual") + time.sleep(4) + conn = pool.acquire() + assert pool.opened == 3 + + +def test_ext_1002(skip_unless_thin_mode, test_env): + "E1002 - test pool timeout shrinks to min on pool inactivity" + pool = test_env.get_pool(min=3, max=10, increment=2, timeout=4) + conns = [pool.acquire() for i in range(6)] + assert pool.opened == 6 + for conn in conns: + conn.close() + time.sleep(6) + assert pool.opened == 3 + + +def test_ext_1003(skip_unless_thin_mode, test_env): + "E1003 - test pool timeout eliminates extra connections on inactivity" + pool = test_env.get_pool(min=4, max=10, increment=4, timeout=3) + conns = [pool.acquire() for i in range(5)] + assert pool.opened == 5 + time.sleep(2) + assert pool.opened == 8 + time.sleep(3) + assert pool.opened == 5 + del conns + + +def test_ext_1004(skip_unless_thin_mode, test_env): + "E1004 - test pool max_lifetime_session on release" + pool = test_env.get_pool( + min=4, max=10, increment=4, max_lifetime_session=3 + ) + conns = [pool.acquire() for i in range(5)] + assert pool.opened == 5 + time.sleep(2) + assert pool.opened == 8 + time.sleep(2) + for conn in conns: + conn.close() + time.sleep(2) + assert pool.opened == 4 + + +def test_ext_1005(skip_unless_thin_mode, test_env): + "E1005 - test pool max_lifetime_session on acquire" + pool = test_env.get_pool( + min=4, max=10, increment=4, max_lifetime_session=4 + ) + conns = [pool.acquire() for i in range(5)] + assert pool.opened == 5 + time.sleep(2) + assert pool.opened == 8 + for conn in conns: + conn.close() + time.sleep(4) + with pool.acquire(): + pass + time.sleep(2) + assert pool.opened == 4 diff --git a/tests/ext/test_ext_1100_external_auth.py b/tests/ext/test_ext_1100_external_auth.py index 1af4e9c1..e65f2182 100644 --- a/tests/ext/test_ext_1100_external_auth.py +++ b/tests/ext/test_ext_1100_external_auth.py @@ -35,157 +35,176 @@ import tempfile import oracledb -import test_env - - -@test_env.skip_unless_thick_mode() -@test_env.skip_unless_has_orapki() -class TestCase(test_env.BaseTestCase): - alias_name = "ext_test_1100" - user = "ext_test_1100_user" - requires_connection = False - - @classmethod - def _build_sqlnet_config(cls): - """ - Builds the sqlnet.ora configuration file. - """ - connect_string = test_env.get_connect_string() - cls.password = test_env.get_random_string() - subprocess.run( - [ - "orapki", - "wallet", - "create", - "-wallet", - cls.temp_dir.name, - "-auto_login_only", - ], - stdout=subprocess.DEVNULL, +import pytest + +ALIAS_NAME = "ext_test_1100" +USER = "ext_test_1100_user" + + +@pytest.fixture(scope="module") +def config(sqlnet_config, tnsnames_config, setup_user): + """ + Builds the configuration used for the tests in this module. Note that if + the Oracle Client libraries have already been initialized then all of these + tests will be skipped. + """ + pass + + +@pytest.fixture(scope="module") +def config_dir(): + """ + Returns the directory where the configuration will be stored. + """ + with tempfile.TemporaryDirectory() as temp_dir: + yield temp_dir + + +@pytest.fixture(scope="module") +def setup_user(config_dir, test_env, user_password): + """ + Builds the user credentials stored in the supplied wallet. 
+ """ + oracledb.init_oracle_client(config_dir=config_dir) + admin_conn = test_env.get_admin_connection() + with admin_conn.cursor() as cursor: + try: + cursor.execute(f"drop user {USER} cascade") + except oracledb.DatabaseError: + pass + cursor.execute( + f"create user {USER} identified by " + f"{user_password}" ) - subprocess.run( - [ - "mkstore", - "-wrl", - cls.temp_dir.name, - "-createCredential", - cls.alias_name, - cls.user, - cls.password, - ], - stdout=subprocess.DEVNULL, - ) - subprocess.run( - [ - "mkstore", - "-wrl", - cls.temp_dir.name, - "-createCredential", - connect_string, - cls.user, - cls.password, - ], - stdout=subprocess.DEVNULL, - ) - contents = ( - "WALLET_LOCATION=(SOURCE=(METHOD=FILE)(METHOD_DATA=" - + f"(DIRECTORY={cls.temp_dir.name})))\n" - + "SQLNET.WALLET_OVERRIDE=TRUE" - ) - file_name = os.path.join(cls.temp_dir.name, "sqlnet.ora") - with open(file_name, "w") as f: - f.write(contents) - - @classmethod - def _build_tnsnames_config(cls): - """ - Builds the tnsnames.ora configuration file. - """ - params = oracledb.ConnectParams() - params.parse_connect_string(test_env.get_connect_string()) - connect_string = params.get_connect_string() - file_name = os.path.join(cls.temp_dir.name, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{cls.alias_name} = {connect_string}") - - @classmethod - def _build_user(cls): - """ - Builds the user credentials stored in the supplied wallet. - """ - admin_conn = test_env.get_admin_connection() - with admin_conn.cursor() as cursor: - try: - cursor.execute(f"drop user {cls.user} cascade") - except oracledb.DatabaseError: - pass - cursor.execute( - f"create user {cls.user} identified by " + f"{cls.password}" - ) - cursor.execute(f"grant create session to {cls.user}") - - @classmethod - def setUpClass(cls): - cls.temp_dir = tempfile.TemporaryDirectory() - cls._build_sqlnet_config() - cls._build_tnsnames_config() - oracledb.init_oracle_client(config_dir=cls.temp_dir.name) - cls._build_user() - - @classmethod - def tearDownClass(cls): - cls.temp_dir.cleanup() - - def test_ext_1100(self): - "E1100 - external authentication with tnsnames alias (implicit)" - with oracledb.connect(dsn=self.alias_name) as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, self.user.upper()) - - def test_ext_1101(self): - "E1101 - external authentication with tnsnames alias (explicit)" - with oracledb.connect(externalauth=True, dsn=self.alias_name) as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, self.user.upper()) - - def test_ext_1102(self): - "E1102 - external authentication with connect string (explicit)" - with oracledb.connect( - externalauth=True, dsn=test_env.get_connect_string() - ) as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, self.user.upper()) - - def test_ext_1103(self): - "E1103 - external authentication with tnsnames alias (explicit)" - pool = oracledb.create_pool( - externalauth=True, dsn=self.alias_name, homogeneous=False - ) - with pool.acquire() as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, self.user.upper()) - - def test_ext_1104(self): - "E1104 - external authentication with connect string (explicit)" - pool = oracledb.create_pool( - externalauth=True, - homogeneous=False, - 
dsn=test_env.get_connect_string(), - ) - with pool.acquire() as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, self.user.upper()) - - -if __name__ == "__main__": - test_env.run_test_cases() + cursor.execute(f"grant create session to {USER}") + + +@pytest.fixture(scope="module") +def sqlnet_config(config_dir, test_env, user_password): + """ + Builds the SQL*Net configuration file. + """ + if not test_env.use_thick_mode: + pytest.skip("requires thick mode") + elif not oracledb.is_thin_mode(): + pytest.skip("must be run separately from all other tests") + subprocess.run( + [ + "orapki", + "wallet", + "create", + "-wallet", + config_dir, + "-auto_login_only", + ], + stdout=subprocess.DEVNULL, + ) + subprocess.run( + [ + "mkstore", + "-wrl", + config_dir, + "-createCredential", + ALIAS_NAME, + USER, + user_password, + ], + stdout=subprocess.DEVNULL, + ) + subprocess.run( + [ + "mkstore", + "-wrl", + config_dir, + "-createCredential", + test_env.connect_string, + USER, + user_password, + ], + stdout=subprocess.DEVNULL, + ) + contents = ( + "WALLET_LOCATION=(SOURCE=(METHOD=FILE)(METHOD_DATA=" + + f"(DIRECTORY={config_dir})))\n" + + "SQLNET.WALLET_OVERRIDE=TRUE" + ) + file_name = os.path.join(config_dir, "sqlnet.ora") + with open(file_name, "w") as f: + f.write(contents) + + +@pytest.fixture(scope="module") +def tnsnames_config(config_dir, test_env): + """ + Builds the tnsnames.ora configuration file. + """ + params = oracledb.ConnectParams() + params.parse_connect_string(test_env.connect_string) + connect_string = params.get_connect_string() + file_name = os.path.join(config_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{ALIAS_NAME} = {connect_string}") + + +@pytest.fixture(scope="module") +def user_password(test_env): + return test_env.get_random_string() + + +@pytest.fixture(autouse=True) +def module_checks(skip_unless_thick_mode, skip_unless_has_orapki, config): + pass + + +def test_ext_1100(): + "E1100 - external authentication with tnsnames alias (implicit)" + with oracledb.connect(dsn=ALIAS_NAME) as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == USER.upper() + + +def test_ext_1101(): + "E1101 - external authentication with tnsnames alias (explicit)" + with oracledb.connect(externalauth=True, dsn=ALIAS_NAME) as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == USER.upper() + + +def test_ext_1102(test_env): + "E1102 - external authentication with connect string (explicit)" + with oracledb.connect( + externalauth=True, dsn=test_env.connect_string + ) as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == USER.upper() + + +def test_ext_1103(): + "E1103 - external authentication with tnsnames alias (explicit)" + pool = oracledb.create_pool( + externalauth=True, dsn=ALIAS_NAME, homogeneous=False + ) + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == USER.upper() + + +def test_ext_1104(test_env): + "E1104 - external authentication with connect string (explicit)" + pool = oracledb.create_pool( + externalauth=True, + homogeneous=False, + dsn=test_env.connect_string, + ) + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute("select user from 
dual") + (user,) = cursor.fetchone() + assert user == USER.upper() diff --git a/tests/ext/test_ext_1200_statement_cache.py b/tests/ext/test_ext_1200_statement_cache.py index d22e501c..a7fec4ce 100644 --- a/tests/ext/test_ext_1200_statement_cache.py +++ b/tests/ext/test_ext_1200_statement_cache.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -28,83 +28,72 @@ import random -import test_env +def test_ext_1200(conn, parse_count_checker): + "E1200 - verify statement cache is used" + statements = [ + "select 0 from dual", + "select 1 from dual", + "select 2 from dual", + ] + conn.stmtcachesize = len(statements) + for i in range(500): + sql = random.choice(statements) + with conn.cursor() as cursor: + cursor.execute(sql) + cursor.fetchall() + assert parse_count_checker.get_value() == 3 -class TestCase(test_env.BaseTestCase): - requires_connection = False - def test_ext_1200(self): - "E1200 - verify statement cache is used" - statements = [ - "select 0 from dual", - "select 1 from dual", - "select 2 from dual", - ] - with test_env.get_connection(stmtcachesize=len(statements)) as conn: - self.setup_parse_count_checker(conn) - for i in range(500): - sql = random.choice(statements) - with conn.cursor() as cursor: - cursor.execute(sql) - cursor.fetchall() - self.assertParseCount(3) +def test_ext_1201(conn, parse_count_checker): + "E1201 - verify statement cache uses LIFO" + statements = [ + "select 0 from dual", + "select 1 from dual", + "select 2 from dual", + ] + conn.stmtcachesize = len(statements) + for sql in statements: + with conn.cursor() as cursor: + cursor.execute(sql) + cursor.fetchall() + with conn.cursor() as cursor: + cursor.execute("begin null; end;") + assert parse_count_checker.get_value() == 4 + for sql in statements[1:]: + with conn.cursor() as cursor: + cursor.execute(sql) + cursor.fetchall() + assert parse_count_checker.get_value() == 0 + with conn.cursor() as cursor: + cursor.execute(statements[0]) + cursor.fetchall() + assert parse_count_checker.get_value() == 1 - def test_ext_1201(self): - "E1201 - verify statement cache uses LIFO" - statements = [ - "select 0 from dual", - "select 1 from dual", - "select 2 from dual", - ] - with test_env.get_connection(stmtcachesize=len(statements)) as conn: - self.setup_parse_count_checker(conn) - for sql in statements: - with conn.cursor() as cursor: - cursor.execute(sql) - cursor.fetchall() - with conn.cursor() as cursor: - cursor.execute("begin null; end;") - self.assertParseCount(4) - for sql in statements[1:]: - with conn.cursor() as cursor: - cursor.execute(sql) - cursor.fetchall() - self.assertParseCount(0) - with conn.cursor() as cursor: - cursor.execute(statements[0]) - cursor.fetchall() - self.assertParseCount(1) - def test_ext_1202(self): - "E1202 - verify copied statement is independent of cached statement" - sql = "select : val from dual" - value1 = "One" - value2 = "Four" - value3 = "Five" - with test_env.get_connection() as conn: - self.setup_parse_count_checker(conn) - with conn.cursor() as cursor: - cursor.execute(sql, [value1]) - rows = cursor.fetchall() - self.assertEqual(rows, [(value1,)]) - self.assertParseCount(1) - with conn.cursor() as cursor: - cursor.execute(sql, [value1]) - 
self.assertParseCount(0) - with conn.cursor() as copyCursor: - copyCursor.execute(sql, [value2]) - rows = copyCursor.fetchall() - self.assertEqual(rows, [(value2,)]) - rows = cursor.fetchall() - self.assertEqual(rows, [(value1,)]) - self.assertParseCount(1) - with conn.cursor() as cursor: - cursor.execute(sql, [value3]) - rows = cursor.fetchall() - self.assertEqual(rows, [(value3,)]) - self.assertParseCount(0) - - -if __name__ == "__main__": - test_env.run_test_cases() +def test_ext_1202(conn, parse_count_checker): + "E1202 - verify copied statement is independent of cached statement" + sql = "select : val from dual" + value1 = "One" + value2 = "Four" + value3 = "Five" + with conn.cursor() as cursor: + cursor.execute(sql, [value1]) + rows = cursor.fetchall() + assert rows == [(value1,)] + assert parse_count_checker.get_value() == 1 + with conn.cursor() as cursor: + cursor.execute(sql, [value1]) + assert parse_count_checker.get_value() == 0 + with conn.cursor() as copyCursor: + copyCursor.execute(sql, [value2]) + rows = copyCursor.fetchall() + assert rows == [(value2,)] + rows = cursor.fetchall() + assert rows == [(value1,)] + assert parse_count_checker.get_value() == 1 + with conn.cursor() as cursor: + cursor.execute(sql, [value3]) + rows = cursor.fetchall() + assert rows == [(value3,)] + assert parse_count_checker.get_value() == 0 diff --git a/tests/ext/test_ext_1300_verifier_type.py b/tests/ext/test_ext_1300_verifier_type.py index 5143d4d0..9a8d37cc 100644 --- a/tests/ext/test_ext_1300_verifier_type.py +++ b/tests/ext/test_ext_1300_verifier_type.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -28,67 +28,64 @@ """ import oracledb -import test_env +import pytest +USER = "ext_test_1300_user" -class TestCase(test_env.BaseTestCase): - requires_connection = False - user = "ext_test_1300_user" - @classmethod - def setUpClass(cls): - cls.admin_conn = test_env.get_admin_connection() - cls.password = test_env.get_random_string() - with cls.admin_conn.cursor() as cursor: - cursor.execute( - "select count(*) from user$ where name = :1", - [cls.user.upper()], - ) - (count,) = cursor.fetchone() - keyword = "create" if count == 0 else "alter" - cursor.execute( - f"{keyword} user {cls.user} identified by {cls.password}" - ) - cursor.execute( - "select spare4 from user$ where name = :1", [cls.user.upper()] - ) - (password_data,) = cursor.fetchone() - cls.verifier_11g, cls.verifier_12c = password_data.split(";") - cursor.execute(f"drop user {cls.user}") +@pytest.fixture(scope="module") +def admin_conn(test_env): + """ + Returns the password to use for the user that is created. + """ + with test_env.get_admin_connection() as conn: + yield conn - @classmethod - def tearDownClass(cls): - user = test_env.get_main_user() - password = test_env.get_main_password() - with cls.admin_conn.cursor() as cursor: - cursor.execute(f"alter user {user} identified by {password}") - def _verify_connection(self, verifier): - """ - Verify the ability to connect to the database using the given verifier. 
- """ - user = test_env.get_main_user() - sql = f"alter user {user} identified by values '{verifier}'" - with self.admin_conn.cursor() as cursor: - cursor.execute(sql) - conn = oracledb.connect( - user=user, - password=self.password, - dsn=test_env.get_connect_string(), - ) - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (fetched_user,) = cursor.fetchone() - self.assertEqual(fetched_user, user.upper()) +@pytest.fixture(scope="module") +def user_password(test_env): + """ + Returns the password to use for the user that is created. + """ + return test_env.get_random_string() - def test_ext_1300(self): - "E1300 - test with an 11g verifier" - self._verify_connection(self.verifier_11g) - def test_ext_1301(self): - "E1301 - test with a 12c verifier" - self._verify_connection(self.verifier_12c) +@pytest.fixture(scope="module") +def verifier_data(admin_conn, test_env, user_password): + """ + Returns the verifier data after creating a user with the given password. + """ + with admin_conn.cursor() as cursor: + cursor.execute( + "select count(*) from user$ where name = :1", + [USER.upper()], + ) + (count,) = cursor.fetchone() + keyword = "create" if count == 0 else "alter" + cursor.execute(f"{keyword} user {USER} identified by {user_password}") + cursor.execute( + "select spare4 from user$ where name = :1", [USER.upper()] + ) + (verifier_data,) = cursor.fetchone() + cursor.execute(f"drop user {USER}") + yield verifier_data + user = test_env.main_user + password = test_env.main_password + cursor.execute(f"alter user {user} identified by {password}") -if __name__ == "__main__": - test_env.run_test_cases() +@pytest.mark.parametrize("ix", [0, 1]) +def test_ext_1300(ix, admin_conn, test_env, verifier_data, user_password): + "E1300 - test with different verifiers" + user = test_env.main_user + verifier = verifier_data.split(";")[ix] + sql = f"alter user {user} identified by values '{verifier}'" + with admin_conn.cursor() as cursor: + cursor.execute(sql) + with oracledb.connect( + user=user, password=user_password, dsn=test_env.connect_string + ) as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (fetched_user,) = cursor.fetchone() + assert fetched_user == user.upper() diff --git a/tests/ext/test_ext_1400_inband_notify.py b/tests/ext/test_ext_1400_inband_notify.py index 49338e04..9277e863 100644 --- a/tests/ext/test_ext_1400_inband_notify.py +++ b/tests/ext/test_ext_1400_inband_notify.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -28,70 +28,62 @@ use. 
""" -import test_env +import pytest -class TestCase(test_env.BaseTestCase): - requires_connection = False - - @classmethod - def setUpClass(cls): - cls.admin_conn = test_env.get_admin_connection() - user = test_env.get_main_user() - with cls.admin_conn.cursor() as cursor: +@pytest.fixture(scope="module", autouse=True) +def setup_user(test_env): + user = test_env.main_user + with test_env.get_admin_connection() as admin_conn: + with admin_conn.cursor() as cursor: cursor.execute(f"grant execute on dbms_tg_dbg to {user}") - - @classmethod - def tearDownClass(cls): - user = test_env.get_main_user() - with cls.admin_conn.cursor() as cursor: + yield cursor.execute(f"revoke execute on dbms_tg_dbg from {user}") - def test_ext_1400(self): - "E1400 - test standalone connection is marked unhealthy" - conn = test_env.get_connection() - self.assertEqual(conn.is_healthy(), True) + +def test_ext_1400(test_env): + "E1400 - test standalone connection is marked unhealthy" + conn = test_env.get_connection() + assert conn.is_healthy() + with conn.cursor() as cursor: + cursor.callproc("dbms_tg_dbg.set_session_drainable") + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == test_env.main_user.upper() + assert not conn.is_healthy() + + +def test_ext_1401(test_env): + "E1401 - test pooled connection that is marked unhealthy" + pool = test_env.get_pool(min=1, max=1, increment=1) + with pool.acquire() as conn: + assert conn.is_healthy() with conn.cursor() as cursor: cursor.callproc("dbms_tg_dbg.set_session_drainable") + info = test_env.get_sid_serial(conn) + assert not conn.is_healthy() + with conn.cursor() as cursor: cursor.execute("select user from dual") (user,) = cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) - self.assertEqual(conn.is_healthy(), False) - - def test_ext_1401(self): - "E1401 - test pooled connection that is marked unhealthy" - pool = test_env.get_pool(min=1, max=1, increment=1) - with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - with conn.cursor() as cursor: - cursor.callproc("dbms_tg_dbg.set_session_drainable") - info = self.get_sid_serial(conn) - self.assertEqual(conn.is_healthy(), False) - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) - with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - new_info = self.get_sid_serial(conn) - self.assertNotEqual(new_info, info) - - def test_ext_1402(self): - "E1402 - test pooled connection is dropped from pool" - pool = test_env.get_pool(min=1, max=1, increment=1) - with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - info = self.get_sid_serial(conn) - with pool.acquire() as conn: - new_info = self.get_sid_serial(conn) - self.assertEqual(new_info, info) - with conn.cursor() as cursor: - cursor.callproc("dbms_tg_dbg.set_session_drainable") - with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - new_info = self.get_sid_serial(conn) - self.assertNotEqual(new_info, info) + assert user == test_env.main_user.upper() + with pool.acquire() as conn: + assert conn.is_healthy() + new_info = test_env.get_sid_serial(conn) + assert new_info != info -if __name__ == "__main__": - test_env.run_test_cases() +def test_ext_1402(test_env): + "E1402 - test pooled connection is dropped from pool" + pool = test_env.get_pool(min=1, max=1, increment=1) + with pool.acquire() as conn: + assert conn.is_healthy() + info = 
test_env.get_sid_serial(conn) + with pool.acquire() as conn: + new_info = test_env.get_sid_serial(conn) + assert new_info == info + with conn.cursor() as cursor: + cursor.callproc("dbms_tg_dbg.set_session_drainable") + with pool.acquire() as conn: + assert conn.is_healthy() + new_info = test_env.get_sid_serial(conn) + assert new_info != info diff --git a/tests/ext/test_ext_1500_pool_grow.py b/tests/ext/test_ext_1500_pool_grow.py index 4f9b2dd6..40d9eb4b 100644 --- a/tests/ext/test_ext_1500_pool_grow.py +++ b/tests/ext/test_ext_1500_pool_grow.py @@ -30,26 +30,18 @@ import time -import test_env - -@test_env.skip_unless_run_long_tests() -class TestCase(test_env.BaseTestCase): - def test_ext_1500(self): - "E1500 - test static pool grows back to the min after sessions killed" - pool = test_env.get_pool(min=5, max=5, increment=1, ping_interval=0) - conns = [pool.acquire() for i in range(5)] - with test_env.get_admin_connection() as admin_conn: - with admin_conn.cursor() as admin_cursor: - for conn in conns: - sid, serial = self.get_sid_serial(conn) - kill_sql = f"alter system kill session '{sid},{serial}'" - admin_cursor.execute(kill_sql) - conns.clear() - conn = pool.acquire() - time.sleep(2) - self.assertEqual(pool.opened, pool.min) - - -if __name__ == "__main__": - test_env.run_test_cases() +def test_ext_1500(skip_unless_run_long_tests, test_env): + "E1500 - test static pool grows back to the min after sessions killed" + pool = test_env.get_pool(min=5, max=5, increment=1, ping_interval=0) + conns = [pool.acquire() for i in range(5)] + with test_env.get_admin_connection() as admin_conn: + with admin_conn.cursor() as admin_cursor: + for conn in conns: + sid, serial = test_env.get_sid_serial(conn) + kill_sql = f"alter system kill session '{sid},{serial}'" + admin_cursor.execute(kill_sql) + conns.clear() + conn = pool.acquire() + time.sleep(2) + assert pool.opened == pool.min diff --git a/tests/ext/test_ext_1600_warnings.py b/tests/ext/test_ext_1600_warnings.py index f5a6b47d..3cd74363 100644 --- a/tests/ext/test_ext_1600_warnings.py +++ b/tests/ext/test_ext_1600_warnings.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -30,26 +30,24 @@ import time import oracledb -import test_env +import pytest +PROFILE_NAME = "profile_ext_test_1600" +USER_NAME = "user_ext_test_1600" -class TestCase(test_env.BaseTestCase): - profile_name = "profile_ext_test_1600" - user_name = "user_ext_test_1600" - requires_connection = False - @classmethod - def setUpClass(cls): - conn = test_env.get_admin_connection() - password = test_env.get_main_password() - cursor = conn.cursor() +@pytest.fixture(scope="module", autouse=True) +def setup_user(test_env): + password = test_env.main_password + with test_env.get_admin_connection() as admin_conn: + cursor = admin_conn.cursor() cursor.execute( f""" declare e_user_missing exception; pragma exception_init(e_user_missing, -1918); begin - execute immediate('drop user {cls.user_name} cascade'); + execute immediate('drop user {USER_NAME} cascade'); exception when e_user_missing then null; @@ -62,78 +60,68 @@ def setUpClass(cls): e_user_missing exception; pragma exception_init(e_user_missing, -2380); begin - execute immediate('drop profile {cls.profile_name}'); + execute immediate('drop profile {PROFILE_NAME}'); exception when e_user_missing then null; end; """ ) - cursor.execute(f"create user {cls.user_name} identified by {password}") - cursor.execute(f"grant create session to {cls.user_name}") + cursor.execute(f"create user {USER_NAME} identified by {password}") + cursor.execute(f"grant create session to {USER_NAME}") cursor.execute( f""" - create profile {cls.profile_name} limit + create profile {PROFILE_NAME} limit password_life_time 1 / 24 / 60 / 60 password_grace_time 7 """ ) - cursor.execute( - f"alter user {cls.user_name} profile {cls.profile_name}" - ) + cursor.execute(f"alter user {USER_NAME} profile {PROFILE_NAME}") time.sleep(2) + yield + cursor.execute(f"drop user {USER_NAME} cascade") + cursor.execute(f"drop profile {PROFILE_NAME}") - @classmethod - def tearDownClass(cls): - conn = test_env.get_admin_connection() - cursor = conn.cursor() - cursor.execute(f"drop user {cls.user_name} cascade") - cursor.execute(f"drop profile {cls.profile_name}") - def test_ext_1600(self): - "E1600 - test standalone connection generates a warning" - password = test_env.get_main_password() - with oracledb.connect( - user=self.user_name, - password=password, - dsn=test_env.get_connect_string(), - ) as conn: - self.assertIn(conn.warning.full_code, ["ORA-28002", "ORA-28098"]) +def test_ext_1600(test_env): + "E1600 - test standalone connection generates a warning" + with oracledb.connect( + user=USER_NAME, + password=test_env.main_password, + dsn=test_env.connect_string, + ) as conn: + assert conn.warning.full_code in ["ORA-28002", "ORA-28098"] - def test_ext_1601_pooled_conn_warning_min_0(self): - "E1601 - test pooled connection generates a warning (min 0)" - password = test_env.get_main_password() - pool = oracledb.create_pool( - user=self.user_name, - password=password, - dsn=test_env.get_connect_string(), - min=0, - max=5, - increment=1, - ) - with pool.acquire() as conn: - self.assertIn(conn.warning.full_code, ["ORA-28002", "ORA-28098"]) - with pool.acquire() as conn: - self.assertIsNone(conn.warning) - pool.close(0) - def test_ext_1602_pooled_conn_warning_min_1(self): - "E1602 - test pooled connection generates a warning (min 1)" - password = test_env.get_main_password() - pool = oracledb.create_pool( - user=self.user_name, - 
password=password, - dsn=test_env.get_connect_string(), - min=1, - max=5, - increment=1, - ) - with pool.acquire() as conn: - self.assertIn(conn.warning.full_code, ["ORA-28002", "ORA-28098"]) - with pool.acquire() as conn: - self.assertIsNone(conn.warning) - pool.close(0) +def test_ext_1601_pooled_conn_warning_min_0(test_env): + "E1601 - test pooled connection generates a warning (min 0)" + pool = oracledb.create_pool( + user=USER_NAME, + password=test_env.main_password, + dsn=test_env.connect_string, + min=0, + max=5, + increment=1, + ) + with pool.acquire() as conn: + assert conn.warning.full_code in ["ORA-28002", "ORA-28098"] + with pool.acquire() as conn: + assert conn.warning is None + pool.close(0) -if __name__ == "__main__": - test_env.run_test_cases() +def test_ext_1602_pooled_conn_warning_min_1(test_env): + "E1602 - test pooled connection generates a warning (min 1)" + pool = oracledb.create_pool( + user=USER_NAME, + password=test_env.main_password, + dsn=test_env.connect_string, + min=1, + max=5, + increment=1, + ) + with pool.acquire() as conn: + assert conn.warning.full_code in ["ORA-28002", "ORA-28098"] + with pool.acquire() as conn: + assert conn.warning is None + pool.close(0) diff --git a/tests/ext/test_ext_1700_warnings_async.py b/tests/ext/test_ext_1700_warnings_async.py index 66a728e9..6efa96e7 100644 --- a/tests/ext/test_ext_1700_warnings_async.py +++ b/tests/ext/test_ext_1700_warnings_async.py @@ -28,118 +28,106 @@ users. """ -import asyncio +import time import oracledb -import test_env +import pytest +PROFILE_NAME = "profile_ext_test_1700" +USER_NAME = "user_ext_test_1700" -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - profile_name = "profile_priv_test_1700" - user_name = "user_priv_test_1700" - requires_connection = False - setup_completed = False - async def __perform_setup(self): - """ - Perform the setup, if necessary. 
- """ - if self.__class__.setup_completed: - return - conn = await test_env.get_admin_connection_async() - password = test_env.get_main_password() - cursor = conn.cursor() - await cursor.execute( +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture(scope="module", autouse=True) +def setup_user(test_env): + password = test_env.main_password + with test_env.get_admin_connection() as admin_conn: + cursor = admin_conn.cursor() + cursor.execute( f""" declare e_user_missing exception; pragma exception_init(e_user_missing, -1918); begin - execute immediate('drop user {self.user_name} cascade'); + execute immediate('drop user {USER_NAME} cascade'); exception when e_user_missing then null; end; """ ) - await cursor.execute( + cursor.execute( f""" declare e_user_missing exception; pragma exception_init(e_user_missing, -2380); begin - execute immediate('drop profile {self.profile_name}'); + execute immediate('drop profile {PROFILE_NAME}'); exception when e_user_missing then null; end; """ ) - await cursor.execute( - f"create user {self.user_name} identified by {password}" - ) - await cursor.execute(f"grant create session to {self.user_name}") - await cursor.execute( + cursor.execute(f"create user {USER_NAME} identified by {password}") + cursor.execute(f"grant create session to {USER_NAME}") + cursor.execute( f""" - create profile {self.profile_name} limit + create profile {PROFILE_NAME} limit password_life_time 1 / 24 / 60 / 60 password_grace_time 7 """ ) - await cursor.execute( - f"alter user {self.user_name} profile {self.profile_name}" - ) - await asyncio.sleep(2) - self.__class__.setup_completed = True + cursor.execute(f"alter user {USER_NAME} profile {PROFILE_NAME}") + time.sleep(2) + yield + cursor.execute(f"drop user {USER_NAME} cascade") + cursor.execute(f"drop profile {PROFILE_NAME}") - async def test_ext_1700(self): - "E1700 - test standalone connection generates a warning" - await self.__perform_setup() - password = test_env.get_main_password() - async with oracledb.connect_async( - user=self.user_name, - password=password, - dsn=test_env.get_connect_string(), - ) as conn: - self.assertIn(conn.warning.full_code, ["ORA-28002", "ORA-28098"]) - async def test_ext_1701(self): - "E1701 - test pooled connection generates a warning (min 0)" - await self.__perform_setup() - password = test_env.get_main_password() - pool = oracledb.create_pool_async( - user=self.user_name, - password=password, - dsn=test_env.get_connect_string(), - min=0, - max=5, - increment=1, - ) - async with pool.acquire() as conn: - self.assertIn(conn.warning.full_code, ["ORA-28002", "ORA-28098"]) - async with pool.acquire() as conn: - self.assertIsNone(conn.warning) - await pool.close(0) +async def test_ext_1700(test_env): + "E1700 - test standalone connection generates a warning" + async with oracledb.connect_async( + user=USER_NAME, + password=test_env.main_password, + dsn=test_env.connect_string, + ) as conn: + assert conn.warning.full_code in ["ORA-28002", "ORA-28098"] - async def test_ext_1702(self): - "E1702 - test pooled connection generates a warning (min 1)" - await self.__perform_setup() - password = test_env.get_main_password() - pool = oracledb.create_pool_async( - user=self.user_name, - password=password, - dsn=test_env.get_connect_string(), - min=1, - max=5, - increment=1, - ) - async with pool.acquire() as conn: - self.assertIn(conn.warning.full_code, ["ORA-28002", "ORA-28098"]) - async with pool.acquire() as conn: - self.assertIsNone(conn.warning) 
- await pool.close(0) + +async def test_ext_1701(test_env): + "E1701 - test pooled connection generates a warning (min 0)" + pool = oracledb.create_pool_async( + user=USER_NAME, + password=test_env.main_password, + dsn=test_env.connect_string, + min=0, + max=5, + increment=1, + ) + async with pool.acquire() as conn: + assert conn.warning.full_code in ["ORA-28002", "ORA-28098"] + async with pool.acquire() as conn: + assert conn.warning is None + await pool.close(0) -if __name__ == "__main__": - test_env.run_test_cases() +async def test_ext_1702(test_env): + "E1702 - test pooled connection generates a warning (min 1)" + pool = oracledb.create_pool_async( + user=USER_NAME, + password=test_env.main_password, + dsn=test_env.connect_string, + min=1, + max=5, + increment=1, + ) + async with pool.acquire() as conn: + assert conn.warning.full_code in ["ORA-28002", "ORA-28098"] + async with pool.acquire() as conn: + assert conn.warning is None + await pool.close(0) diff --git a/tests/ext/test_ext_1800_inband_notif_async.py b/tests/ext/test_ext_1800_inband_notif_async.py index ecc7b1dd..071ccb90 100644 --- a/tests/ext/test_ext_1800_inband_notif_async.py +++ b/tests/ext/test_ext_1800_inband_notif_async.py @@ -35,75 +35,68 @@ # is required. # ----------------------------------------------------------------------------- -import test_env +import pytest -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - requires_connection = False - setup_completed = False +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass - async def __perform_setup(self): - """ - Perform setup, if needed. - """ - if self.__class__.setup_completed: - return - user = test_env.get_main_user() - async with test_env.get_admin_connection(use_async=True) as conn: - with conn.cursor() as cursor: - await cursor.execute(f"grant execute on dbms_tg_dbg to {user}") - self.__class__.setup_completed = True - async def test_ext_1800(self): - "E1800 - test standalone connection is marked unhealthy" - await self.__perform_setup() - async with test_env.get_connection_async() as conn: - self.assertEqual(conn.is_healthy(), True) - with conn.cursor() as cursor: - await cursor.callproc("dbms_tg_dbg.set_session_drainable") - await cursor.execute("select user from dual") - (user,) = await cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) - self.assertEqual(conn.is_healthy(), False) +@pytest.fixture(scope="module", autouse=True) +def setup_user(test_env): + user = test_env.main_user + with test_env.get_admin_connection() as admin_conn: + with admin_conn.cursor() as cursor: + cursor.execute(f"grant execute on dbms_tg_dbg to {user}") + yield + cursor.execute(f"revoke execute on dbms_tg_dbg from {user}") - async def test_ext_1801(self): - "E1801 - test pooled connection that is marked unhealthy" - await self.__perform_setup() - pool = test_env.get_pool_async(min=1, max=1, increment=1) - async with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - with conn.cursor() as cursor: - await cursor.callproc("dbms_tg_dbg.set_session_drainable") - info = await self.get_sid_serial(conn) - self.assertEqual(conn.is_healthy(), False) - with conn.cursor() as cursor: - await cursor.execute("select user from dual") - (user,) = await cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) - async with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - new_info = await self.get_sid_serial(conn) - 
self.assertNotEqual(new_info, info) - await pool.close() - async def test_ext_1802(self): - "E1802 - test pooled connection is dropped from pool" - await self.__perform_setup() - pool = test_env.get_pool_async(min=1, max=1, increment=1) - async with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - info = await self.get_sid_serial(conn) - async with pool.acquire() as conn: - new_info = await self.get_sid_serial(conn) - self.assertEqual(new_info, info) - with conn.cursor() as cursor: - await cursor.callproc("dbms_tg_dbg.set_session_drainable") - async with pool.acquire() as conn: - self.assertEqual(conn.is_healthy(), True) - new_info = await self.get_sid_serial(conn) - self.assertNotEqual(new_info, info) +async def test_ext_1800(test_env): + "E1800 - test standalone connection is marked unhealthy" + async with test_env.get_connection_async() as conn: + assert conn.is_healthy() + with conn.cursor() as cursor: + await cursor.callproc("dbms_tg_dbg.set_session_drainable") + await cursor.execute("select user from dual") + (user,) = await cursor.fetchone() + assert user == test_env.main_user.upper() + assert not conn.is_healthy() -if __name__ == "__main__": - test_env.run_test_cases() +async def test_ext_1801(test_env): + "E1801 - test pooled connection that is marked unhealthy" + pool = test_env.get_pool_async(min=1, max=1, increment=1) + async with pool.acquire() as conn: + assert conn.is_healthy() + with conn.cursor() as cursor: + await cursor.callproc("dbms_tg_dbg.set_session_drainable") + info = (conn.session_id, conn.serial_num) + assert not conn.is_healthy() + with conn.cursor() as cursor: + await cursor.execute("select user from dual") + (user,) = await cursor.fetchone() + assert user == test_env.main_user.upper() + async with pool.acquire() as conn: + assert conn.is_healthy() + new_info = (conn.session_id, conn.serial_num) + assert new_info != info + await pool.close() + + +async def test_ext_1802(test_env): + "E1802 - test pooled connection is dropped from pool" + pool = test_env.get_pool_async(min=1, max=1, increment=1) + async with pool.acquire() as conn: + assert conn.is_healthy() + info = (conn.session_id, conn.serial_num) + async with pool.acquire() as conn: + new_info = (conn.session_id, conn.serial_num) + assert new_info == info + with conn.cursor() as cursor: + await cursor.callproc("dbms_tg_dbg.set_session_drainable") + async with pool.acquire() as conn: + assert conn.is_healthy() + new_info = (conn.session_id, conn.serial_num) + assert new_info != info diff --git a/tests/ext/test_ext_1900_pool_shrink_async.py b/tests/ext/test_ext_1900_pool_shrink_async.py index 7bfc38cd..66b0af43 100644 --- a/tests/ext/test_ext_1900_pool_shrink_async.py +++ b/tests/ext/test_ext_1900_pool_shrink_async.py @@ -30,91 +30,94 @@ import asyncio -import test_env - - -@test_env.skip_unless_thin_mode() -@test_env.skip_unless_run_long_tests() -class TestCase(test_env.BaseAsyncTestCase): - requires_connection = False - - async def test_ext_1900(self): - "E1900 - test pool timeout with simple acquite after waiting" - pool = test_env.get_pool_async(min=3, max=10, increment=1, timeout=5) - conns = [await pool.acquire() for i in range(7)] - self.assertEqual(pool.opened, 7) - for conn in conns: - await conn.close() - await asyncio.sleep(7) - conn = await pool.acquire() - self.assertEqual(pool.opened, 3) - - async def test_ext_1901(self): - "E1901 - test pool timeout with older connection returned first" - pool = test_env.get_pool_async(min=2, max=5, increment=1, timeout=3) - conns = [await 
pool.acquire() for i in range(3)]
-        await conns[2].close()
-        for i in range(10):
-            async with pool.acquire() as conn:
-                with conn.cursor() as cursor:
-                    await cursor.execute("select 1 from dual")
-            await asyncio.sleep(4)
-        conn = await pool.acquire()
-        self.assertEqual(pool.opened, 3)
-
-    async def test_ext_1902(self):
-        "E1902 - test pool timeout shrinks to min on pool inactivity"
-        pool = test_env.get_pool_async(min=3, max=10, increment=2, timeout=4)
-        conns = [await pool.acquire() for i in range(6)]
-        self.assertEqual(pool.opened, 6)
-        for conn in conns:
-            await conn.close()
-        await asyncio.sleep(6)
-        self.assertEqual(pool.opened, 3)
-
-    async def test_ext_1903(self):
-        "E1902 - test pool timeout eliminates extra connections on inactivity"
-        pool = test_env.get_pool_async(min=4, max=10, increment=4, timeout=3)
-        conns = [await pool.acquire() for i in range(5)]
-        self.assertEqual(pool.opened, 5)
-        await asyncio.sleep(2)
-        self.assertEqual(pool.opened, 8)
-        await asyncio.sleep(3)
-        self.assertEqual(pool.opened, 5)
-        del conns
-
-    async def test_ext_1904(self):
-        "E1904 - test pool max_lifetime_session on release"
-        pool = test_env.get_pool_async(
-            min=4, max=10, increment=4, max_lifetime_session=3
-        )
-        conns = [await pool.acquire() for i in range(5)]
-        self.assertEqual(pool.opened, 5)
-        await asyncio.sleep(2)
-        self.assertEqual(pool.opened, 8)
-        await asyncio.sleep(2)
-        for conn in conns:
-            await conn.close()
-        await asyncio.sleep(2)
-        self.assertEqual(pool.opened, 4)
-
-    async def test_ext_1905(self):
-        "E1905 - test pool max_lifetime_session on acquire"
-        pool = test_env.get_pool_async(
-            min=4, max=10, increment=4, max_lifetime_session=4
-        )
-        conns = [await pool.acquire() for i in range(5)]
-        self.assertEqual(pool.opened, 5)
-        await asyncio.sleep(2)
-        self.assertEqual(pool.opened, 8)
-        for conn in conns:
-            await conn.close()
-        await asyncio.sleep(4)
-        async with pool.acquire():
-            pass
-        await asyncio.sleep(2)
-        self.assertEqual(pool.opened, 4)
-
-
-if __name__ == "__main__":
-    test_env.run_test_cases()
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def module_checks(
+    anyio_backend, skip_unless_thin_mode, skip_unless_run_long_tests
+):
+    pass
+
+
+async def test_ext_1900(test_env):
+    "E1900 - test pool timeout with simple acquire after waiting"
+    pool = test_env.get_pool_async(min=3, max=10, increment=1, timeout=5)
+    conns = [await pool.acquire() for i in range(7)]
+    assert pool.opened == 7
+    for conn in conns:
+        await conn.close()
+    await asyncio.sleep(7)
+    conn = await pool.acquire()
+    assert pool.opened == 3
+
+
+async def test_ext_1901(test_env):
+    "E1901 - test pool timeout with older connection returned first"
+    pool = test_env.get_pool_async(min=2, max=5, increment=1, timeout=3)
+    conns = [await pool.acquire() for i in range(3)]
+    await conns[2].close()
+    for i in range(10):
+        async with pool.acquire() as conn:
+            with conn.cursor() as cursor:
+                await cursor.execute("select 1 from dual")
+        await asyncio.sleep(4)
+    conn = await pool.acquire()
+    assert pool.opened == 3
+
+
+async def test_ext_1902(test_env):
+    "E1902 - test pool timeout shrinks to min on pool inactivity"
+    pool = test_env.get_pool_async(min=3, max=10, increment=2, timeout=4)
+    conns = [await pool.acquire() for i in range(6)]
+    assert pool.opened == 6
+    for conn in conns:
+        await conn.close()
+    await asyncio.sleep(6)
+    assert pool.opened == 3
+
+
+async def test_ext_1903(test_env):
+    "E1903 - test pool timeout eliminates extra connections on inactivity"
+    pool = test_env.get_pool_async(min=4, max=10, 
increment=4, timeout=3) + conns = [await pool.acquire() for i in range(5)] + assert pool.opened == 5 + await asyncio.sleep(2) + assert pool.opened == 8 + await asyncio.sleep(3) + assert pool.opened == 5 + del conns + + +async def test_ext_1904(test_env): + "E1904 - test pool max_lifetime_session on release" + pool = test_env.get_pool_async( + min=4, max=10, increment=4, max_lifetime_session=3 + ) + conns = [await pool.acquire() for i in range(5)] + assert pool.opened == 5 + await asyncio.sleep(2) + assert pool.opened == 8 + await asyncio.sleep(2) + for conn in conns: + await conn.close() + await asyncio.sleep(2) + assert pool.opened == 4 + + +async def test_ext_1905(test_env): + "E1905 - test pool max_lifetime_session on acquire" + pool = test_env.get_pool_async( + min=4, max=10, increment=4, max_lifetime_session=4 + ) + conns = [await pool.acquire() for i in range(5)] + assert pool.opened == 5 + await asyncio.sleep(2) + assert pool.opened == 8 + for conn in conns: + await conn.close() + await asyncio.sleep(4) + async with pool.acquire(): + pass + await asyncio.sleep(2) + assert pool.opened == 4 diff --git a/tests/ext/test_ext_2000_pool_grow_async.py b/tests/ext/test_ext_2000_pool_grow_async.py index 5addc777..b9fd69dc 100644 --- a/tests/ext/test_ext_2000_pool_grow_async.py +++ b/tests/ext/test_ext_2000_pool_grow_async.py @@ -37,34 +37,30 @@ import asyncio -import test_env +import pytest -@test_env.skip_unless_thin_mode() -@test_env.skip_unless_run_long_tests() -class TestCase(test_env.BaseAsyncTestCase): - requires_connection = False - - async def test_ext_2000(self): - "E2000 - test static pool grows back to the min after sessions killed" - pool = test_env.get_pool_async( - min=5, max=5, increment=1, ping_interval=0 - ) - conns = [await pool.acquire() for i in range(5)] - admin_conn = await test_env.get_admin_connection_async() - with admin_conn.cursor() as admin_cursor: - for conn in conns: - sid, serial = await self.get_sid_serial(conn) - kill_sql = f"alter system kill session '{sid},{serial}'" - await admin_cursor.execute(kill_sql) - await admin_conn.close() - for conn in conns: - await conn.close() - conns.clear() - conn = await pool.acquire() - await asyncio.sleep(2) - self.assertEqual(pool.opened, pool.min) +@pytest.fixture(autouse=True) +def module_checks( + anyio_backend, skip_unless_thin_mode, skip_unless_run_long_tests +): + pass -if __name__ == "__main__": - test_env.run_test_cases() +async def test_ext_2000(test_env): + "E2000 - test static pool grows back to the min after sessions killed" + pool = test_env.get_pool_async(min=5, max=5, increment=1, ping_interval=0) + conns = [await pool.acquire() for i in range(5)] + admin_conn = await test_env.get_admin_connection_async() + with admin_conn.cursor() as admin_cursor: + for conn in conns: + sid, serial = (conn.session_id, conn.serial_num) + kill_sql = f"alter system kill session '{sid},{serial}'" + await admin_cursor.execute(kill_sql) + await admin_conn.close() + for conn in conns: + await conn.close() + conns.clear() + conn = await pool.acquire() + await asyncio.sleep(2) + assert pool.opened == pool.min diff --git a/tests/ext/test_ext_2100_bfile_type.py b/tests/ext/test_ext_2100_bfile_type.py index 4c7f4acc..e1a5314c 100644 --- a/tests/ext/test_ext_2100_bfile_type.py +++ b/tests/ext/test_ext_2100_bfile_type.py @@ -31,158 +31,147 @@ import tempfile import oracledb -import test_env +import pytest +DIR_NAME = "EXT_TEST_2100_DIR" -@test_env.skip_unless_local_database() -class TestCase(test_env.BaseTestCase): - dir_name = 
"EXT_TEST_2100_DIR" - def _setup_directory(self, local_name): - """ - Setups the directory using the given local name. - """ - user = test_env.get_main_user() +@pytest.fixture(autouse=True) +def module_checks(skip_unless_local_database): + pass + + +@pytest.fixture +def temp_dir(test_env): + with tempfile.TemporaryDirectory() as temp_dir: + user = test_env.main_user with test_env.get_admin_connection() as conn: with conn.cursor() as cursor: cursor.execute( f""" - create or replace directory {self.dir_name} - as '{local_name}' + create or replace directory {DIR_NAME} + as '{temp_dir}' """ ) - cursor.execute( - f"grant read on directory {self.dir_name} to {user}" - ) + cursor.execute(f"grant read on directory {DIR_NAME} to {user}") + yield temp_dir + + +def test_ext_2100(temp_dir, cursor): + "E2100 - test fileexists() and getfilename()" + file_name = "test_2100.txt" + contents = b"Some arbitrary data for test 2100" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = cursor.fetchone() + assert bfile.read() == contents + assert bfile.fileexists() + assert bfile.getfilename() == (DIR_NAME, file_name) + + +def test_ext_2101(temp_dir, cursor): + "E2101 - test setfilename()" + file_name = "test_2101.txt" + contents = b"Some arbitrary data for test 2101" + new_file_name = "test_2101b.txt" + new_contents = b"Some arbitrary different data for test 2101" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + with open(os.path.join(temp_dir, new_file_name), "wb") as f: + f.write(new_contents) + cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = cursor.fetchone() + assert bfile.read() == contents + bfile.setfilename(DIR_NAME, new_file_name) + assert bfile.read() == new_contents + + +def test_ext_2102(temp_dir, cursor, test_env): + "E2102 - test BFILE with LOB methods" + file_name = "test_2102.txt" + contents = b"Some arbitrary data for test 2102" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = cursor.fetchone() + assert bfile.size() == len(contents) + assert bfile.read(7) == contents[6:] + assert bfile.read(7, 2) == contents[6:8] + assert not bfile.isopen() + bfile.open() + assert bfile.isopen() + bfile.close() + assert not bfile.isopen() + with test_env.assert_raises_full_code("DPY-3025"): + bfile.getchunksize() + with test_env.assert_raises_full_code("DPY-3025"): + bfile.trim(1) + with test_env.assert_raises_full_code("DPY-3025"): + bfile.write("1") + + +def test_ext_2103(temp_dir, conn, cursor): + "E2103 - test binding a BFILE" + file_name = "test_2103.txt" + contents = b"Some arbitrary data for test 2103" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = cursor.fetchone() + cursor.execute("truncate table TestBfiles") + cursor.execute("insert into TestBfiles values (:1, :2)", [1, bfile]) + conn.commit() + + +def test_ext_2104(temp_dir, cursor, test_env): + "E2104 - test reading from a missing file" + file_name = "test_2104.txt" + cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = cursor.fetchone() + assert bfile.getfilename() == (DIR_NAME, file_name) + assert not bfile.fileexists() + with 
test_env.assert_raises_full_code("ORA-22288"): + bfile.read() + - def test_ext_2100(self): - "E2100 - test fileexists() and getfilename()" - with tempfile.TemporaryDirectory() as temp_dir: - self._setup_directory(temp_dir) - file_name = "test_2100.txt" - contents = b"Some arbitrary data for test 2100" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = self.cursor.fetchone() - self.assertEqual(bfile.read(), contents) - self.assertTrue(bfile.fileexists()) - self.assertEqual(bfile.getfilename(), (self.dir_name, file_name)) - - def test_ext_2101(self): - "E2101 - test setfilename()" - with tempfile.TemporaryDirectory() as temp_dir: - self._setup_directory(temp_dir) - file_name = "test_2101.txt" - contents = b"Some arbitrary data for test 2101" - new_file_name = "test_2101b.txt" - new_contents = b"Some arbitrary different data for test 2101" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - with open(os.path.join(temp_dir, new_file_name), "wb") as f: - f.write(new_contents) - self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = self.cursor.fetchone() - self.assertEqual(bfile.read(), contents) - bfile.setfilename(self.dir_name, new_file_name) - self.assertEqual(bfile.read(), new_contents) - - def test_ext_2102(self): - "E2102 - test BFILE with LOB methods" - with tempfile.TemporaryDirectory() as temp_dir: - self._setup_directory(temp_dir) - file_name = "test_2102.txt" - contents = b"Some arbitrary data for test 2102" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = self.cursor.fetchone() - self.assertEqual(bfile.size(), len(contents)) - self.assertEqual(bfile.read(7), contents[6:]) - self.assertEqual(bfile.read(7, 2), contents[6:8]) - self.assertFalse(bfile.isopen()) - bfile.open() - self.assertTrue(bfile.isopen()) - bfile.close() - self.assertFalse(bfile.isopen()) - with self.assertRaisesFullCode("DPY-3025"): - bfile.getchunksize() - with self.assertRaisesFullCode("DPY-3025"): - bfile.trim(1) - with self.assertRaisesFullCode("DPY-3025"): - bfile.write("1") - - def test_ext_2103(self): - "E2103 - test binding a BFILE" - with tempfile.TemporaryDirectory() as temp_dir: - self._setup_directory(temp_dir) - file_name = "test_2103.txt" - contents = b"Some arbitrary data for test 2103" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = self.cursor.fetchone() - self.cursor.execute("truncate table TestBfiles") - self.cursor.execute( - "insert into TestBfiles values (:1, :2)", [1, bfile] - ) - self.conn.commit() - - def test_ext_2104(self): - "E2104 - test reading from a missing file" - with tempfile.TemporaryDirectory() as temp_dir: - self._setup_directory(temp_dir) - file_name = "test_2104.txt" - self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = self.cursor.fetchone() - self.assertEqual(bfile.getfilename(), (self.dir_name, file_name)) - self.assertFalse(bfile.fileexists()) - with self.assertRaisesFullCode("ORA-22288"): - bfile.read() - - def test_ext_2105(self): - "E2105 - test setting and getting BFILE var" - with tempfile.TemporaryDirectory() as 
temp_dir: - self._setup_directory(temp_dir) - file_name1 = "test1.txt" - contents1 = b"extended test 2105 - first file" - with open(os.path.join(temp_dir, file_name1), "wb") as f: - f.write(contents1) - file_name2 = "test2.txt" - contents2 = b"extended test 2105 - second file" - with open(os.path.join(temp_dir, file_name2), "wb") as f: - f.write(contents2) - var1 = self.cursor.var(oracledb.DB_TYPE_BFILE) - var2 = self.cursor.var(oracledb.DB_TYPE_BFILE) - self.cursor.execute( - f""" - begin - :1 := BFILENAME('{self.dir_name}', '{file_name1}'); - :2 := BFILENAME('{self.dir_name}', '{file_name2}'); - end; - """, - [var1, var2], - ) - self.assertEqual(var1.getvalue().read(), contents1) - self.assertEqual(var2.getvalue().read(), contents2) - self.cursor.execute("begin :1 := :2; end;", [var1, var2]) - self.assertEqual(var1.getvalue().read(), contents2) - - -if __name__ == "__main__": - test_env.run_test_cases() +def test_ext_2105(temp_dir, cursor): + "E2105 - test setting and getting BFILE var" + file_name1 = "test1.txt" + contents1 = b"extended test 2105 - first file" + with open(os.path.join(temp_dir, file_name1), "wb") as f: + f.write(contents1) + file_name2 = "test2.txt" + contents2 = b"extended test 2105 - second file" + with open(os.path.join(temp_dir, file_name2), "wb") as f: + f.write(contents2) + var1 = cursor.var(oracledb.DB_TYPE_BFILE) + var2 = cursor.var(oracledb.DB_TYPE_BFILE) + cursor.execute( + f""" + begin + :1 := BFILENAME('{DIR_NAME}', '{file_name1}'); + :2 := BFILENAME('{DIR_NAME}', '{file_name2}'); + end; + """, + [var1, var2], + ) + assert var1.getvalue().read() == contents1 + assert var2.getvalue().read() == contents2 + cursor.execute("begin :1 := :2; end;", [var1, var2]) + assert var1.getvalue().read() == contents2 diff --git a/tests/ext/test_ext_2200_bfile_type_async.py b/tests/ext/test_ext_2200_bfile_type_async.py index 9d793d46..485cd330 100644 --- a/tests/ext/test_ext_2200_bfile_type_async.py +++ b/tests/ext/test_ext_2200_bfile_type_async.py @@ -31,159 +31,151 @@ import tempfile import oracledb -import test_env +import pytest +DIR_NAME = "EXT_TEST_2200_DIR" -@test_env.skip_unless_thin_mode() -@test_env.skip_unless_local_database() -class TestCase(test_env.BaseAsyncTestCase): - dir_name = "EXT_TEST_2200_DIR" - async def _setup_directory(self, local_name): - """ - Setups the directory using the given local name. 
- """ - user = test_env.get_main_user() - async with test_env.get_admin_connection(use_async=True) as conn: +@pytest.fixture(autouse=True) +def module_checks( + anyio_backend, skip_unless_thin_mode, skip_unless_local_database +): + pass + + +@pytest.fixture +def temp_dir(test_env): + with tempfile.TemporaryDirectory() as temp_dir: + user = test_env.main_user + with test_env.get_admin_connection() as conn: with conn.cursor() as cursor: - await cursor.execute( + cursor.execute( f""" - create or replace directory {self.dir_name} - as '{local_name}' + create or replace directory {DIR_NAME} + as '{temp_dir}' """ ) - await cursor.execute( - f"grant read on directory {self.dir_name} to {user}" - ) + cursor.execute(f"grant read on directory {DIR_NAME} to {user}") + yield temp_dir + + +async def test_ext_2200(temp_dir, async_cursor): + "E2200 - test fileexists() and getfilename()" + file_name = "test_2200.txt" + contents = b"Some arbitrary data for test 2200" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + await async_cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = await async_cursor.fetchone() + assert await bfile.read() == contents + assert await bfile.fileexists() + assert bfile.getfilename() == (DIR_NAME, file_name) + + +async def test_ext_2201(temp_dir, async_cursor): + "E2201 - test setfilename()" + file_name = "test_2201.txt" + contents = b"Some arbitrary data for test 2201" + new_file_name = "test_2201b.txt" + new_contents = b"Some arbitrary different data for test 2201" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + with open(os.path.join(temp_dir, new_file_name), "wb") as f: + f.write(new_contents) + await async_cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = await async_cursor.fetchone() + assert await bfile.read() == contents + bfile.setfilename(DIR_NAME, new_file_name) + assert await bfile.read() == new_contents + + +async def test_ext_2202(temp_dir, async_cursor, test_env): + "E2202 - test BFILE with LOB methods" + file_name = "test_2202.txt" + contents = b"Some arbitrary data for test 2202" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + await async_cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = await async_cursor.fetchone() + assert await bfile.size() == len(contents) + assert await bfile.read(7) == contents[6:] + assert await bfile.read(7, 2) == contents[6:8] + assert not await bfile.isopen() + await bfile.open() + assert await bfile.isopen() + await bfile.close() + assert not await bfile.isopen() + with test_env.assert_raises_full_code("DPY-3025"): + await bfile.getchunksize() + with test_env.assert_raises_full_code("DPY-3025"): + await bfile.trim(1) + with test_env.assert_raises_full_code("DPY-3025"): + await bfile.write("1") + + +async def test_ext_2203(temp_dir, async_conn, async_cursor): + "E2203 - test binding a BFILE" + file_name = "test_2203.txt" + contents = b"Some arbitrary data for test 2203" + with open(os.path.join(temp_dir, file_name), "wb") as f: + f.write(contents) + await async_cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = await async_cursor.fetchone() + await async_cursor.execute("truncate table TestBfiles") + await async_cursor.execute( + "insert into TestBfiles values (:1, :2)", [1, bfile] + ) + await async_conn.commit() + + +async def test_ext_2204(temp_dir, 
async_cursor, test_env): + "E2204 - test reading from a missing file" + file_name = "test_2204.txt" + await async_cursor.execute( + "select bfilename(:1, :2) from dual", + [DIR_NAME, file_name], + ) + (bfile,) = await async_cursor.fetchone() + assert bfile.getfilename() == (DIR_NAME, file_name) + assert not await bfile.fileexists() + with test_env.assert_raises_full_code("ORA-22288"): + await bfile.read() + - async def test_ext_2200(self): - "E2200 - test fileexists() and getfilename()" - with tempfile.TemporaryDirectory() as temp_dir: - await self._setup_directory(temp_dir) - file_name = "test_2200.txt" - contents = b"Some arbitrary data for test 2200" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - await self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = await self.cursor.fetchone() - self.assertEqual(await bfile.read(), contents) - self.assertTrue(await bfile.fileexists()) - self.assertEqual(bfile.getfilename(), (self.dir_name, file_name)) - - async def test_ext_2201(self): - "E2201 - test setfilename()" - with tempfile.TemporaryDirectory() as temp_dir: - await self._setup_directory(temp_dir) - file_name = "test_2201.txt" - contents = b"Some arbitrary data for test 2201" - new_file_name = "test_2201b.txt" - new_contents = b"Some arbitrary different data for test 2201" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - with open(os.path.join(temp_dir, new_file_name), "wb") as f: - f.write(new_contents) - await self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = await self.cursor.fetchone() - self.assertEqual(await bfile.read(), contents) - bfile.setfilename(self.dir_name, new_file_name) - self.assertEqual(await bfile.read(), new_contents) - - async def test_ext_2202(self): - "E2202 - test BFILE with LOB methods" - with tempfile.TemporaryDirectory() as temp_dir: - await self._setup_directory(temp_dir) - file_name = "test_2202.txt" - contents = b"Some arbitrary data for test 2202" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - await self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = await self.cursor.fetchone() - self.assertEqual(await bfile.size(), len(contents)) - self.assertEqual(await bfile.read(7), contents[6:]) - self.assertEqual(await bfile.read(7, 2), contents[6:8]) - self.assertFalse(await bfile.isopen()) - await bfile.open() - self.assertTrue(await bfile.isopen()) - await bfile.close() - self.assertFalse(await bfile.isopen()) - with self.assertRaisesFullCode("DPY-3025"): - await bfile.getchunksize() - with self.assertRaisesFullCode("DPY-3025"): - await bfile.trim(1) - with self.assertRaisesFullCode("DPY-3025"): - await bfile.write("1") - - async def test_ext_2203(self): - "E2203 - test binding a BFILE" - with tempfile.TemporaryDirectory() as temp_dir: - await self._setup_directory(temp_dir) - file_name = "test_2203.txt" - contents = b"Some arbitrary data for test 2203" - with open(os.path.join(temp_dir, file_name), "wb") as f: - f.write(contents) - await self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = await self.cursor.fetchone() - await self.cursor.execute("truncate table TestBfiles") - await self.cursor.execute( - "insert into TestBfiles values (:1, :2)", [1, bfile] - ) - await self.conn.commit() - - async def test_ext_2204(self): - "E2204 - test reading 
from a missing file" - with tempfile.TemporaryDirectory() as temp_dir: - await self._setup_directory(temp_dir) - file_name = "test_2204.txt" - await self.cursor.execute( - "select bfilename(:1, :2) from dual", - [self.dir_name, file_name], - ) - (bfile,) = await self.cursor.fetchone() - self.assertEqual(bfile.getfilename(), (self.dir_name, file_name)) - self.assertFalse(await bfile.fileexists()) - with self.assertRaisesFullCode("ORA-22288"): - await bfile.read() - - async def test_ext_2205(self): - "E2205 - test setting and getting BFILE var" - with tempfile.TemporaryDirectory() as temp_dir: - await self._setup_directory(temp_dir) - file_name1 = "test1.txt" - contents1 = b"extended test 2105 - first file" - with open(os.path.join(temp_dir, file_name1), "wb") as f: - f.write(contents1) - file_name2 = "test2.txt" - contents2 = b"extended test 2105 - second file" - with open(os.path.join(temp_dir, file_name2), "wb") as f: - f.write(contents2) - var1 = self.cursor.var(oracledb.DB_TYPE_BFILE) - var2 = self.cursor.var(oracledb.DB_TYPE_BFILE) - await self.cursor.execute( - f""" - begin - :1 := BFILENAME('{self.dir_name}', '{file_name1}'); - :2 := BFILENAME('{self.dir_name}', '{file_name2}'); - end; - """, - [var1, var2], - ) - self.assertEqual(await var1.getvalue().read(), contents1) - self.assertEqual(await var2.getvalue().read(), contents2) - await self.cursor.execute("begin :1 := :2; end;", [var1, var2]) - self.assertEqual(await var1.getvalue().read(), contents2) - - -if __name__ == "__main__": - test_env.run_test_cases() +async def test_ext_2205(temp_dir, async_cursor): + "E2205 - test setting and getting BFILE var" + file_name1 = "test1.txt" + contents1 = b"extended test 2105 - first file" + with open(os.path.join(temp_dir, file_name1), "wb") as f: + f.write(contents1) + file_name2 = "test2.txt" + contents2 = b"extended test 2105 - second file" + with open(os.path.join(temp_dir, file_name2), "wb") as f: + f.write(contents2) + var1 = async_cursor.var(oracledb.DB_TYPE_BFILE) + var2 = async_cursor.var(oracledb.DB_TYPE_BFILE) + await async_cursor.execute( + f""" + begin + :1 := BFILENAME('{DIR_NAME}', '{file_name1}'); + :2 := BFILENAME('{DIR_NAME}', '{file_name2}'); + end; + """, + [var1, var2], + ) + assert await var1.getvalue().read() == contents1 + assert await var2.getvalue().read() == contents2 + await async_cursor.execute("begin :1 := :2; end;", [var1, var2]) + assert await var1.getvalue().read() == contents2 diff --git a/tests/ext/test_ext_2300_tg.py b/tests/ext/test_ext_2300_tg.py index 917c2752..df67904f 100644 --- a/tests/ext/test_ext_2300_tg.py +++ b/tests/ext/test_ext_2300_tg.py @@ -29,18 +29,16 @@ """ import oracledb -import test_env +import pytest +SERVICE_NAME = "oracledb-test-tg" -class TestCase(test_env.BaseTestCase): - service_name = "oracledb-test-tg" - requires_connection = False - @classmethod - def setUpClass(cls): - cls.admin_conn = test_env.get_admin_connection() - user = test_env.get_main_user() - with cls.admin_conn.cursor() as cursor: +@pytest.fixture(scope="module", autouse=True) +def setup_service(test_env): + user = test_env.main_user + with test_env.get_admin_connection() as admin_conn: + with admin_conn.cursor() as cursor: cursor.execute( f""" declare @@ -48,105 +46,98 @@ def setUpClass(cls): begin params('COMMIT_OUTCOME') := 'true'; params('RETENTION_TIMEOUT') := 604800; - dbms_service.create_service('{cls.service_name}', - '{cls.service_name}', params); - dbms_service.start_service('{cls.service_name}'); + dbms_service.create_service('{SERVICE_NAME}', + 
'{SERVICE_NAME}', params); + dbms_service.start_service('{SERVICE_NAME}'); end; """ ) cursor.execute(f"grant execute on dbms_tg_dbg to {user}") cursor.execute(f"grant execute on dbms_app_cont to {user}") - - @classmethod - def tearDownClass(cls): - user = test_env.get_main_user() - with cls.admin_conn.cursor() as cursor: + yield + with admin_conn.cursor() as cursor: cursor.execute(f"revoke execute on dbms_tg_dbg from {user}") cursor.execute(f"revoke execute on dbms_app_cont from {user}") - cursor.callproc("dbms_service.stop_service", [cls.service_name]) - cursor.callproc("dbms_service.delete_service", [cls.service_name]) + cursor.callproc("dbms_service.stop_service", [SERVICE_NAME]) + cursor.callproc("dbms_service.delete_service", [SERVICE_NAME]) - def test_ext_2300(self): - "E2300 - test standalone connection" - params = test_env.get_connect_params().copy() - params.parse_connect_string(test_env.get_connect_string()) - params.set(service_name=self.service_name) - for arg_name in ("pre_commit", "post_commit"): - with self.subTest(arg_name=arg_name): - conn = oracledb.connect(params=params) - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [2300, "String for test 2300"], - ) - full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" - cursor.execute( - f""" - begin - dbms_tg_dbg.set_failpoint({full_arg_name}); - end; - """ - ) - ltxid = conn.ltxid - with self.assertRaisesFullCode("DPY-4011"): - conn.commit() - conn = oracledb.connect(params=params) - cursor = conn.cursor() - committed_var = cursor.var(bool) - completed_var = cursor.var(bool) - cursor.callproc( - "dbms_app_cont.get_ltxid_outcome", - [ltxid, committed_var, completed_var], - ) - expected_value = arg_name == "post_commit" - self.assertEqual(committed_var.getvalue(), expected_value) - self.assertEqual(completed_var.getvalue(), expected_value) - def test_ext_2301(self): - "E2301 - test pooled connection" - params = test_env.get_pool_params().copy() - params.parse_connect_string(test_env.get_connect_string()) - params.set(service_name=self.service_name, max=10) - pool = oracledb.create_pool(params=params) - for arg_name in ("pre_commit", "post_commit"): - with self.subTest(arg_name=arg_name): - conn = pool.acquire() - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [2300, "String for test 2300"], - ) - full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" - cursor.execute( - f""" - begin - dbms_tg_dbg.set_failpoint({full_arg_name}); - end; - """ - ) - ltxid = conn.ltxid - with self.assertRaisesFullCode("DPY-4011"): - conn.commit() - conn = pool.acquire() - cursor = conn.cursor() - committed_var = cursor.var(bool) - completed_var = cursor.var(bool) - cursor.callproc( - "dbms_app_cont.get_ltxid_outcome", - [ltxid, committed_var, completed_var], - ) - expected_value = arg_name == "post_commit" - self.assertEqual(committed_var.getvalue(), expected_value) - self.assertEqual(completed_var.getvalue(), expected_value) +def test_ext_2300(test_env): + "E2300 - test standalone connection" + params = test_env.get_connect_params().copy() + params.parse_connect_string(test_env.connect_string) + params.set(service_name=SERVICE_NAME) + for arg_name in ("pre_commit", "post_commit"): + conn = oracledb.connect(params=params) + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + 
cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [2300, "String for test 2300"], + ) + full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" + cursor.execute( + f""" + begin + dbms_tg_dbg.set_failpoint({full_arg_name}); + end; + """ + ) + ltxid = conn.ltxid + with test_env.assert_raises_full_code("DPY-4011"): + conn.commit() + conn = oracledb.connect(params=params) + cursor = conn.cursor() + committed_var = cursor.var(bool) + completed_var = cursor.var(bool) + cursor.callproc( + "dbms_app_cont.get_ltxid_outcome", + [ltxid, committed_var, completed_var], + ) + expected_value = arg_name == "post_commit" + assert committed_var.getvalue() == expected_value + assert completed_var.getvalue() == expected_value -if __name__ == "__main__": - test_env.run_test_cases() +def test_ext_2301(test_env): + "E2301 - test pooled connection" + params = test_env.get_pool_params().copy() + params.parse_connect_string(test_env.connect_string) + params.set(service_name=SERVICE_NAME, max=10) + pool = oracledb.create_pool(params=params) + for arg_name in ("pre_commit", "post_commit"): + conn = pool.acquire() + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [2300, "String for test 2300"], + ) + full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" + cursor.execute( + f""" + begin + dbms_tg_dbg.set_failpoint({full_arg_name}); + end; + """ + ) + ltxid = conn.ltxid + with test_env.assert_raises_full_code("DPY-4011"): + conn.commit() + conn = pool.acquire() + cursor = conn.cursor() + committed_var = cursor.var(bool) + completed_var = cursor.var(bool) + cursor.callproc( + "dbms_app_cont.get_ltxid_outcome", + [ltxid, committed_var, completed_var], + ) + expected_value = arg_name == "post_commit" + assert committed_var.getvalue() == expected_value + assert completed_var.getvalue() == expected_value diff --git a/tests/ext/test_ext_2400_tg_async.py b/tests/ext/test_ext_2400_tg_async.py index 5ec39454..85075d82 100644 --- a/tests/ext/test_ext_2400_tg_async.py +++ b/tests/ext/test_ext_2400_tg_async.py @@ -29,141 +29,121 @@ """ import oracledb -import test_env +import pytest +SERVICE_NAME = "oracledb-test-tg-async" -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - service_name = "oracledb-test-tg-async" - requires_connection = False - setup_completed = False - async def __perform_setup(self): - """ - Perform setup, if needed. 
- """ - if self.__class__.setup_completed: - return - user = test_env.get_main_user() - async with test_env.get_admin_connection(use_async=True) as conn: - cursor = conn.cursor() - await cursor.execute(f"grant execute on dbms_tg_dbg to {user}") - await cursor.execute(f"grant execute on dbms_app_cont to {user}") - await cursor.execute( - """ - select count(*) from dba_services - where name = :name - """, - name=self.service_name, - ) - (count,) = await cursor.fetchone() - if count > 0: - try: - await cursor.callproc( - "dbms_service.start_service", [self.service_name] - ) - except Exception as e: - if not str(e).startswith("ORA-44305:"): - raise - return - await cursor.execute( +@pytest.fixture(scope="module", autouse=True) +def setup_service(test_env): + user = test_env.main_user + with test_env.get_admin_connection() as admin_conn: + with admin_conn.cursor() as cursor: + cursor.execute( f""" declare params dbms_service.svc_parameter_array; begin params('COMMIT_OUTCOME') := 'true'; params('RETENTION_TIMEOUT') := 604800; - dbms_service.create_service('{self.service_name}', - '{self.service_name}', params); - dbms_service.start_service('{self.service_name}'); + dbms_service.create_service('{SERVICE_NAME}', + '{SERVICE_NAME}', params); + dbms_service.start_service('{SERVICE_NAME}'); end; """ ) + cursor.execute(f"grant execute on dbms_tg_dbg to {user}") + cursor.execute(f"grant execute on dbms_app_cont to {user}") + yield + with admin_conn.cursor() as cursor: + cursor.execute(f"revoke execute on dbms_tg_dbg from {user}") + cursor.execute(f"revoke execute on dbms_app_cont from {user}") + cursor.callproc("dbms_service.stop_service", [SERVICE_NAME]) + cursor.callproc("dbms_service.delete_service", [SERVICE_NAME]) - async def test_ext_2400(self): - "E2400 - test standalone connection" - await self.__perform_setup() - params = test_env.get_connect_params().copy() - params.parse_connect_string(test_env.get_connect_string()) - params.set(service_name=self.service_name) - for arg_name in ("pre_commit", "post_commit"): - with self.subTest(arg_name=arg_name): - conn = await oracledb.connect_async(params=params) - cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [2400, "String for test 2400"], - ) - full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" - await cursor.execute( - f""" - begin - dbms_tg_dbg.set_failpoint({full_arg_name}); - end; - """ - ) - ltxid = conn.ltxid - with self.assertRaisesFullCode("DPY-4011"): - await conn.commit() - conn = await oracledb.connect_async(params=params) - cursor = conn.cursor() - committed_var = cursor.var(bool) - completed_var = cursor.var(bool) - await cursor.callproc( - "dbms_app_cont.get_ltxid_outcome", - [ltxid, committed_var, completed_var], - ) - expected_value = arg_name == "post_commit" - self.assertEqual(committed_var.getvalue(), expected_value) - self.assertEqual(completed_var.getvalue(), expected_value) - async def test_ext_2401(self): - "E2401 - test pooled connection" - await self.__perform_setup() - params = test_env.get_pool_params().copy() - params.parse_connect_string(test_env.get_connect_string()) - params.set(service_name=self.service_name, max=10) - pool = oracledb.create_pool_async(params=params) - for arg_name in ("pre_commit", "post_commit"): - with self.subTest(arg_name=arg_name): - async with pool.acquire() as conn: - cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - await 
cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [2400, "String for test 2400"], - ) - full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" - await cursor.execute( - f""" - begin - dbms_tg_dbg.set_failpoint({full_arg_name}); - end; - """ - ) - ltxid = conn.ltxid - with self.assertRaisesFullCode("DPY-4011"): - await conn.commit() - async with pool.acquire() as conn: - cursor = conn.cursor() - committed_var = cursor.var(bool) - completed_var = cursor.var(bool) - await cursor.callproc( - "dbms_app_cont.get_ltxid_outcome", - [ltxid, committed_var, completed_var], - ) - expected_value = arg_name == "post_commit" - self.assertEqual(committed_var.getvalue(), expected_value) - self.assertEqual(completed_var.getvalue(), expected_value) - await pool.close() +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass -if __name__ == "__main__": - test_env.run_test_cases() +async def test_ext_2400(test_env): + "E2400 - test standalone connection" + params = test_env.get_connect_params().copy() + params.parse_connect_string(test_env.connect_string) + params.set(service_name=SERVICE_NAME) + for arg_name in ("pre_commit", "post_commit"): + conn = await oracledb.connect_async(params=params) + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [2400, "String for test 2400"], + ) + full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" + await cursor.execute( + f""" + begin + dbms_tg_dbg.set_failpoint({full_arg_name}); + end; + """ + ) + ltxid = conn.ltxid + with test_env.assert_raises_full_code("DPY-4011"): + await conn.commit() + conn = await oracledb.connect_async(params=params) + cursor = conn.cursor() + committed_var = cursor.var(bool) + completed_var = cursor.var(bool) + await cursor.callproc( + "dbms_app_cont.get_ltxid_outcome", + [ltxid, committed_var, completed_var], + ) + expected_value = arg_name == "post_commit" + assert committed_var.getvalue() == expected_value + assert completed_var.getvalue() == expected_value + + +async def test_ext_2401(test_env): + "E2401 - test pooled connection" + params = test_env.get_pool_params().copy() + params.parse_connect_string(test_env.connect_string) + params.set(service_name=SERVICE_NAME, max=10) + pool = oracledb.create_pool_async(params=params) + for arg_name in ("pre_commit", "post_commit"): + async with pool.acquire() as conn: + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [2400, "String for test 2400"], + ) + full_arg_name = f"dbms_tg_dbg.tg_failpoint_{arg_name}" + await cursor.execute( + f""" + begin + dbms_tg_dbg.set_failpoint({full_arg_name}); + end; + """ + ) + ltxid = conn.ltxid + with test_env.assert_raises_full_code("DPY-4011"): + await conn.commit() + async with pool.acquire() as conn: + cursor = conn.cursor() + committed_var = cursor.var(bool) + completed_var = cursor.var(bool) + await cursor.callproc( + "dbms_app_cont.get_ltxid_outcome", + [ltxid, committed_var, completed_var], + ) + expected_value = arg_name == "post_commit" + assert committed_var.getvalue() == expected_value + assert completed_var.getvalue() == expected_value + await pool.close() diff --git a/tests/ext/test_ext_2500_config_cache.py b/tests/ext/test_ext_2500_config_cache.py index 229545ab..8eee0370 100644 --- 
a/tests/ext/test_ext_2500_config_cache.py +++ b/tests/ext/test_ext_2500_config_cache.py @@ -31,117 +31,119 @@ import time import oracledb -import test_env - - -@test_env.skip_unless_run_long_tests() -class TestCase(test_env.BaseTestCase): - def test_ext_2500(self): - "E2500 - test config is cached" - sdu = 4096 - protocol = "proto-test" - connect_string = f"{protocol}://test_ext_2500" - config = dict( - connect_descriptor=test_env.get_connect_string(), - pyo=dict(sdu=sdu), - ) - - def hook(passed_protocol, passed_protocol_arg, passed_params): - passed_params.set_from_config(config) - config["pyo"]["sdu"] *= 2 - - oracledb.register_protocol(protocol, hook) - try: - for i in range(2): - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.sdu, sdu) - finally: - oracledb.register_protocol(protocol, None) - - def test_ext_2501(self): - "E2501 - test config cache is disabled with config_time_to_live = 0" - sdu = 4096 - protocol = "proto-test" - connect_string = f"{protocol}://test_ext_2501" - config = dict( - connect_descriptor=test_env.get_connect_string(), - config_time_to_live=0, - pyo=dict(sdu=sdu), - ) - - def hook(passed_protocol, passed_protocol_arg, passed_params): - passed_params.set_from_config(config) - config["pyo"]["sdu"] *= 2 - - oracledb.register_protocol(protocol, hook) - try: - for i in range(2): - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.sdu, sdu + i * sdu) - finally: - oracledb.register_protocol(protocol, None) - - def test_ext_2502(self): - "E2502 - test config cache expiry time" - sdu = 4096 - protocol = "proto-test" - connect_string = f"{protocol}://test_ext_2502" - config = dict( - connect_descriptor=test_env.get_connect_string(), - config_time_to_live=2, - pyo=dict(sdu=sdu), - ) - - def hook(passed_protocol, passed_protocol_arg, passed_params): - passed_params.set_from_config(config) - config["pyo"]["sdu"] *= 2 - - oracledb.register_protocol(protocol, hook) - try: - expected_sdu = sdu - for i in range(7): - if i == 3 or i == 6: - expected_sdu *= 2 - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.sdu, expected_sdu) - time.sleep(0.75) - finally: - oracledb.register_protocol(protocol, None) - - def test_ext_2503(self): - "E2503 - test config cache soft/hard expiry time" - sdu = 4096 - protocol = "proto-test" - connect_string = f"{protocol}://test_ext_2503" - config = dict( - connect_descriptor=test_env.get_connect_string(), - config_time_to_live=2, - config_time_to_live_grace_period=3, - pyo=dict(sdu=sdu), - ) - - def hook(passed_protocol, passed_protocol_arg, passed_params): - if config["pyo"]["sdu"] > sdu: - raise Exception("Arbitrary exception!") - passed_params.set_from_config(config) - config["pyo"]["sdu"] *= 2 - - oracledb.register_protocol(protocol, hook) - try: - for i in range(2): - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.sdu, sdu) - time.sleep(3) - with self.assertRaisesFullCode("DPY-2056"): - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - finally: - oracledb.register_protocol(protocol, None) - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(skip_unless_run_long_tests): + pass + + +def test_ext_2500(test_env): + "E2500 - test config is cached" + sdu = 4096 + protocol = "proto-test" + connect_string = 
f"{protocol}://test_ext_2500" + config = dict( + connect_descriptor=test_env.connect_string, + pyo=dict(sdu=sdu), + ) + + def hook(passed_protocol, passed_protocol_arg, passed_params): + passed_params.set_from_config(config) + config["pyo"]["sdu"] *= 2 + + oracledb.register_protocol(protocol, hook) + try: + for i in range(2): + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.sdu == sdu + finally: + oracledb.register_protocol(protocol, None) + + +def test_ext_2501(test_env): + "E2501 - test config cache is disabled with config_time_to_live = 0" + sdu = 4096 + protocol = "proto-test" + connect_string = f"{protocol}://test_ext_2501" + config = dict( + connect_descriptor=test_env.connect_string, + config_time_to_live=0, + pyo=dict(sdu=sdu), + ) + + def hook(passed_protocol, passed_protocol_arg, passed_params): + passed_params.set_from_config(config) + config["pyo"]["sdu"] *= 2 + + oracledb.register_protocol(protocol, hook) + try: + for i in range(2): + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.sdu == sdu + i * sdu + finally: + oracledb.register_protocol(protocol, None) + + +def test_ext_2502(test_env): + "E2502 - test config cache expiry time" + sdu = 4096 + protocol = "proto-test" + connect_string = f"{protocol}://test_ext_2502" + config = dict( + connect_descriptor=test_env.connect_string, + config_time_to_live=2, + pyo=dict(sdu=sdu), + ) + + def hook(passed_protocol, passed_protocol_arg, passed_params): + passed_params.set_from_config(config) + config["pyo"]["sdu"] *= 2 + + oracledb.register_protocol(protocol, hook) + try: + expected_sdu = sdu + for i in range(7): + if i == 3 or i == 6: + expected_sdu *= 2 + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.sdu == expected_sdu + time.sleep(0.75) + finally: + oracledb.register_protocol(protocol, None) + + +def test_ext_2503(test_env): + "E2503 - test config cache soft/hard expiry time" + sdu = 4096 + protocol = "proto-test" + connect_string = f"{protocol}://test_ext_2503" + config = dict( + connect_descriptor=test_env.connect_string, + config_time_to_live=2, + config_time_to_live_grace_period=3, + pyo=dict(sdu=sdu), + ) + + def hook(passed_protocol, passed_protocol_arg, passed_params): + if config["pyo"]["sdu"] > sdu: + raise Exception("Arbitrary exception!") + passed_params.set_from_config(config) + config["pyo"]["sdu"] *= 2 + + oracledb.register_protocol(protocol, hook) + try: + for i in range(2): + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.sdu == sdu + time.sleep(3) + with test_env.assert_raises_full_code("DPY-2056"): + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + finally: + oracledb.register_protocol(protocol, None) diff --git a/tests/ext/test_ext_2600_sessionless_transaction.py b/tests/ext/test_ext_2600_sessionless_transaction.py index b1aaa9a0..accbb222 100644 --- a/tests/ext/test_ext_2600_sessionless_transaction.py +++ b/tests/ext/test_ext_2600_sessionless_transaction.py @@ -30,64 +30,62 @@ import time -import test_env - - -@test_env.skip_unless_run_long_tests() -@test_env.skip_unless_sessionless_transactions_supported() -class TestCase(test_env.BaseTestCase): - def test_ext_2600(self): - "E2600 - test error conditions with client API" - self.cursor.execute("truncate table TestTempTable") - - transaction_id = "test_2600_transaction_id" - other_transaction_id = "test_2600_different_transaction_id" - 
with test_env.get_connection() as conn: - cursor = conn.cursor() - - # suspending a non-existent transaction will fail only in thin - # mode - if conn.thin: - with self.assertRaisesFullCode("DPY-3036"): - conn.suspend_sessionless_transaction() - - # start first sessionless transaction - conn.begin_sessionless_transaction( - transaction_id=transaction_id, timeout=5 - ) - - # starting another sessionless transaction will fail only in thin - # mode - if conn.thin: - with self.assertRaisesFullCode("DPY-3035"): - conn.begin_sessionless_transaction( - transaction_id=other_transaction_id, timeout=5 - ) - - cursor.execute( - """ - INSERT INTO TestTempTable(IntCol, StringCol1) - VALUES(:1, :2) - """, - (1, "test_row"), - ) - - # suspend using server API should fail - with self.assertRaisesFullCode("DPY-3034"): - cursor.callproc("dbms_transaction.suspend_transaction") - - # suspend using client API should succeed - conn.suspend_sessionless_transaction() - - # wait till it times out - time.sleep(10) - - # attmpting to resume the transaction should fail - with self.assertRaisesFullCode("ORA-26218"): - conn.resume_sessionless_transaction( - transaction_id=transaction_id +import pytest + + +@pytest.fixture(autouse=True) +def module_checks( + skip_unless_sessionless_transactions_supported, skip_unless_run_long_tests +): + pass + + +def test_ext_2600(cursor, test_env): + "E2600 - test error conditions with client API" + cursor.execute("truncate table TestTempTable") + + transaction_id = "test_2600_transaction_id" + other_transaction_id = "test_2600_different_transaction_id" + with test_env.get_connection() as conn: + cursor = conn.cursor() + + # suspending a non-existent transaction will fail only in thin + # mode + if conn.thin: + with test_env.assert_raises_full_code("DPY-3036"): + conn.suspend_sessionless_transaction() + + # start first sessionless transaction + conn.begin_sessionless_transaction( + transaction_id=transaction_id, timeout=5 + ) + + # starting another sessionless transaction will fail only in thin + # mode + if conn.thin: + with test_env.assert_raises_full_code("DPY-3035"): + conn.begin_sessionless_transaction( + transaction_id=other_transaction_id, timeout=5 ) + cursor.execute( + """ + INSERT INTO TestTempTable(IntCol, StringCol1) + VALUES(:1, :2) + """, + (1, "test_row"), + ) -if __name__ == "__main__": - test_env.run_test_cases() + # suspend using server API should fail + with test_env.assert_raises_full_code("DPY-3034"): + cursor.callproc("dbms_transaction.suspend_transaction") + + # suspend using client API should succeed + conn.suspend_sessionless_transaction() + + # wait till it times out + time.sleep(10) + + # attmpting to resume the transaction should fail + with test_env.assert_raises_full_code("ORA-26218"): + conn.resume_sessionless_transaction(transaction_id=transaction_id) diff --git a/tests/ext/test_ext_2700_sessionless_transaction_async.py b/tests/ext/test_ext_2700_sessionless_transaction_async.py index 14927d58..226e914c 100644 --- a/tests/ext/test_ext_2700_sessionless_transaction_async.py +++ b/tests/ext/test_ext_2700_sessionless_transaction_async.py @@ -30,61 +30,63 @@ import asyncio -import test_env +import pytest -@test_env.skip_unless_thin_mode() -@test_env.skip_unless_run_long_tests() -@test_env.skip_unless_sessionless_transactions_supported() -class TestCase(test_env.BaseAsyncTestCase): - async def test_ext_2700(self): - "E2700 - test error conditions with client API" - await self.cursor.execute("truncate table TestTempTable") +@pytest.fixture(autouse=True) +def 
module_checks( + anyio_backend, + skip_unless_thin_mode, + skip_unless_sessionless_transactions_supported, + skip_unless_run_long_tests, +): + pass - transaction_id = "test_2600_transaction_id" - other_transaction_id = "test_2600_different_transaction_id" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - # suspending a non-existent transaction will fail - with self.assertRaisesFullCode("DPY-3036"): - await conn.suspend_sessionless_transaction() +async def test_ext_2700(test_env, async_cursor): + "E2700 - test error conditions with client API" + await async_cursor.execute("truncate table TestTempTable") - # start first sessionless transaction - await conn.begin_sessionless_transaction( - transaction_id=transaction_id, timeout=5 - ) + transaction_id = "test_2600_transaction_id" + other_transaction_id = "test_2600_different_transaction_id" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() - # starting another sessionless transaction will fail - with self.assertRaisesFullCode("DPY-3035"): - await conn.begin_sessionless_transaction( - transaction_id=other_transaction_id, timeout=5 - ) - - await cursor.execute( - """ - INSERT INTO TestTempTable(IntCol, StringCol1) - VALUES(:1, :2) - """, - (1, "test_row"), - ) + # suspending a non-existent transaction will fail + with test_env.assert_raises_full_code("DPY-3036"): + await conn.suspend_sessionless_transaction() - # suspend using server API should fail - with self.assertRaisesFullCode("DPY-3034"): - await cursor.callproc("dbms_transaction.suspend_transaction") + # start first sessionless transaction + await conn.begin_sessionless_transaction( + transaction_id=transaction_id, timeout=5 + ) - # suspend using client API should succeed - await conn.suspend_sessionless_transaction() + # starting another sessionless transaction will fail + with test_env.assert_raises_full_code("DPY-3035"): + await conn.begin_sessionless_transaction( + transaction_id=other_transaction_id, timeout=5 + ) - # wait till it times out - await asyncio.sleep(10) + await cursor.execute( + """ + INSERT INTO TestTempTable(IntCol, StringCol1) + VALUES(:1, :2) + """, + (1, "test_row"), + ) - # attmpting to resume the transaction should fail - with self.assertRaisesFullCode("ORA-26218"): - await conn.resume_sessionless_transaction( - transaction_id=transaction_id - ) + # suspend using server API should fail + with test_env.assert_raises_full_code("DPY-3034"): + await cursor.callproc("dbms_transaction.suspend_transaction") + # suspend using client API should succeed + await conn.suspend_sessionless_transaction() -if __name__ == "__main__": - test_env.run_test_cases() + # wait till it times out + await asyncio.sleep(10) + + # attmpting to resume the transaction should fail + with test_env.assert_raises_full_code("ORA-26218"): + await conn.resume_sessionless_transaction( + transaction_id=transaction_id + ) diff --git a/tests/test_1000_module.py b/tests/test_1000_module.py index dfa8596d..a6888b34 100644 --- a/tests/test_1000_module.py +++ b/tests/test_1000_module.py @@ -29,236 +29,212 @@ import datetime import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - requires_connection = False - - def test_1000(self): - "1000 - test DateFromTicks()" - today = datetime.datetime.today() - timestamp = today.timestamp() - date = oracledb.DateFromTicks(int(timestamp)) - self.assertEqual(date, today.date()) - - def test_1001(self): - "1001 - test management of __future__ object" - 
self.assertIsNone(oracledb.__future__.dummy) - oracledb.__future__.dummy = "Unimportant" - self.assertIsNone(oracledb.__future__.dummy) - - def test_1002(self): - "1002 - test TimestampFromTicks()" - timestamp = datetime.datetime.today().timestamp() - today = datetime.datetime.fromtimestamp(timestamp) - date = oracledb.TimestampFromTicks(timestamp) - self.assertEqual(date, today) - - def test_1003(self): - "1003 - test unsupported time functions" - with self.assertRaisesFullCode("DPY-3000"): - oracledb.Time(12, 0, 0) - with self.assertRaisesFullCode("DPY-3000"): - oracledb.TimeFromTicks(100) - - def test_1004(self): - "1004 - test makedsn() with valid arguments" - for name, value in [ - ("SID", "sid_1004"), - ("SERVICE_NAME", "my_service_1004"), - ]: - host = "host_1004" - port = 1004 - region = "US WEST" - sharding_key = "ShardKey" - super_sharding_key = "SuperShardKey" - args = ( - host, - port, - value if name == "SID" else None, - value if name == "SERVICE_NAME" else None, - region, - sharding_key, - super_sharding_key, - ) - result = oracledb.makedsn(*args) - expected_value = ( - f"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})" - f"(PORT={port}))(CONNECT_DATA=({name}={value})" - f"(REGION={region})(SHARDING_KEY={sharding_key})" - f"(SUPER_SHARDING_KEY={super_sharding_key})))" - ) - self.assertEqual(result, expected_value) - - def test_1005(self): - "1005 - test makedsn() with invalid arguments" - with self.assertRaisesFullCode("DPY-2020"): - oracledb.makedsn(host="(invalid)", port=1521) - with self.assertRaisesFullCode("DPY-2020"): - oracledb.makedsn(host="host", port=1521, sid="(invalid)") - with self.assertRaisesFullCode("DPY-2020"): - oracledb.makedsn(host="host", port=1521, service_name="(invalid)") - with self.assertRaisesFullCode("DPY-2020"): - oracledb.makedsn(host="host", port=1521, region="(invalid)") - with self.assertRaisesFullCode("DPY-2020"): - oracledb.makedsn(host="host", port=1521, sharding_key="(invalid)") - with self.assertRaisesFullCode("DPY-2020"): - oracledb.makedsn( - host="host", port=1521, super_sharding_key="(invalid)" - ) - - def test_1006(self): - "1006 - test aliases match" - - # database type aliases - self.assertIs(oracledb.BFILE, oracledb.DB_TYPE_BFILE) - self.assertIs(oracledb.BLOB, oracledb.DB_TYPE_BLOB) - self.assertIs(oracledb.BOOLEAN, oracledb.DB_TYPE_BOOLEAN) - self.assertIs(oracledb.CLOB, oracledb.DB_TYPE_CLOB) - self.assertIs(oracledb.CURSOR, oracledb.DB_TYPE_CURSOR) - self.assertIs(oracledb.FIXED_CHAR, oracledb.DB_TYPE_CHAR) - self.assertIs(oracledb.FIXED_NCHAR, oracledb.DB_TYPE_NCHAR) - self.assertIs(oracledb.INTERVAL, oracledb.DB_TYPE_INTERVAL_DS) - self.assertIs(oracledb.LONG_BINARY, oracledb.DB_TYPE_LONG_RAW) - self.assertIs(oracledb.LONG_STRING, oracledb.DB_TYPE_LONG) - self.assertIs(oracledb.NATIVE_INT, oracledb.DB_TYPE_BINARY_INTEGER) - self.assertIs(oracledb.NATIVE_FLOAT, oracledb.DB_TYPE_BINARY_DOUBLE) - self.assertIs(oracledb.NCHAR, oracledb.DB_TYPE_NVARCHAR) - self.assertIs(oracledb.NCLOB, oracledb.DB_TYPE_NCLOB) - self.assertIs(oracledb.OBJECT, oracledb.DB_TYPE_OBJECT) - self.assertIs(oracledb.TIMESTAMP, oracledb.DB_TYPE_TIMESTAMP) - - # type aliases - self.assertIs(oracledb.ObjectType, oracledb.DbObjectType) - self.assertIs(oracledb.Object, oracledb.DbObject) - self.assertIs(oracledb.SessionPool, oracledb.ConnectionPool) - - # authentication mode aliases - self.assertIs(oracledb.DEFAULT_AUTH, oracledb.AUTH_MODE_DEFAULT) - self.assertIs(oracledb.SYSASM, oracledb.AUTH_MODE_SYSASM) - self.assertIs(oracledb.SYSBKP, 
oracledb.AUTH_MODE_SYSBKP) - self.assertIs(oracledb.SYSDBA, oracledb.AUTH_MODE_SYSDBA) - self.assertIs(oracledb.SYSDGD, oracledb.AUTH_MODE_SYSDGD) - self.assertIs(oracledb.SYSKMT, oracledb.AUTH_MODE_SYSKMT) - self.assertIs(oracledb.SYSOPER, oracledb.AUTH_MODE_SYSOPER) - self.assertIs(oracledb.SYSRAC, oracledb.AUTH_MODE_SYSRAC) - self.assertIs(oracledb.PRELIM_AUTH, oracledb.AUTH_MODE_PRELIM) - - # pool "get" mode aliases - self.assertIs(oracledb.SPOOL_ATTRVAL_WAIT, oracledb.POOL_GETMODE_WAIT) - self.assertIs( - oracledb.SPOOL_ATTRVAL_NOWAIT, oracledb.POOL_GETMODE_NOWAIT - ) - self.assertIs( - oracledb.SPOOL_ATTRVAL_FORCEGET, oracledb.POOL_GETMODE_FORCEGET - ) - self.assertIs( - oracledb.SPOOL_ATTRVAL_TIMEDWAIT, oracledb.POOL_GETMODE_TIMEDWAIT - ) - - # purity aliases - self.assertIs(oracledb.ATTR_PURITY_DEFAULT, oracledb.PURITY_DEFAULT) - self.assertIs(oracledb.ATTR_PURITY_NEW, oracledb.PURITY_NEW) - self.assertIs(oracledb.ATTR_PURITY_SELF, oracledb.PURITY_SELF) - - # other aliases - self.assertIs( - oracledb.SUBSCR_PROTO_OCI, oracledb.SUBSCR_PROTO_CALLBACK - ) - self.assertIs(oracledb.version, oracledb.__version__) - - @test_env.skip_unless_thin_mode() - def test_1007(self): - "1007 - test clientversion() fails without init_oracle_client()" - with self.assertRaisesFullCode("DPY-2021"): - oracledb.clientversion() - - def test_1008(self): - "1008 - test enumeration aliases match" - - # authentication mode enumeration - self.assertIs(oracledb.AUTH_MODE_DEFAULT, oracledb.AuthMode.DEFAULT) - self.assertIs(oracledb.AUTH_MODE_PRELIM, oracledb.AuthMode.PRELIM) - self.assertIs(oracledb.AUTH_MODE_SYSASM, oracledb.AuthMode.SYSASM) - self.assertIs(oracledb.AUTH_MODE_SYSBKP, oracledb.AuthMode.SYSBKP) - self.assertIs(oracledb.AUTH_MODE_SYSDBA, oracledb.AuthMode.SYSDBA) - self.assertIs(oracledb.AUTH_MODE_SYSDGD, oracledb.AuthMode.SYSDGD) - self.assertIs(oracledb.AUTH_MODE_SYSKMT, oracledb.AuthMode.SYSKMT) - self.assertIs(oracledb.AUTH_MODE_SYSOPER, oracledb.AuthMode.SYSOPER) - self.assertIs(oracledb.AUTH_MODE_SYSRAC, oracledb.AuthMode.SYSRAC) - - # batch operation type enumeration - self.assertIs( - oracledb.PIPELINE_OP_TYPE_CALL_FUNC, - oracledb.PipelineOpType.CALL_FUNC, - ) - self.assertIs( - oracledb.PIPELINE_OP_TYPE_CALL_PROC, - oracledb.PipelineOpType.CALL_PROC, - ) - self.assertIs( - oracledb.PIPELINE_OP_TYPE_COMMIT, oracledb.PipelineOpType.COMMIT - ) - self.assertIs( - oracledb.PIPELINE_OP_TYPE_EXECUTE, oracledb.PipelineOpType.EXECUTE - ) - self.assertIs( - oracledb.PIPELINE_OP_TYPE_EXECUTE_MANY, - oracledb.PipelineOpType.EXECUTE_MANY, - ) - self.assertIs( - oracledb.PIPELINE_OP_TYPE_FETCH_ALL, - oracledb.PipelineOpType.FETCH_ALL, - ) - self.assertIs( - oracledb.PIPELINE_OP_TYPE_FETCH_MANY, - oracledb.PipelineOpType.FETCH_MANY, - ) - self.assertIs( - oracledb.PIPELINE_OP_TYPE_FETCH_ONE, - oracledb.PipelineOpType.FETCH_ONE, - ) - - # pool "get" mode enumeration - self.assertIs( - oracledb.POOL_GETMODE_FORCEGET, oracledb.PoolGetMode.FORCEGET - ) - self.assertIs(oracledb.POOL_GETMODE_WAIT, oracledb.PoolGetMode.WAIT) - self.assertIs( - oracledb.POOL_GETMODE_NOWAIT, oracledb.PoolGetMode.NOWAIT - ) - self.assertIs( - oracledb.POOL_GETMODE_TIMEDWAIT, oracledb.PoolGetMode.TIMEDWAIT - ) - # purity enumeration - self.assertIs(oracledb.PURITY_DEFAULT, oracledb.Purity.DEFAULT) - self.assertIs(oracledb.PURITY_NEW, oracledb.Purity.NEW) - self.assertIs(oracledb.PURITY_SELF, oracledb.Purity.SELF) - # vector format enumeration - self.assertIs( - oracledb.VECTOR_FORMAT_BINARY, oracledb.VectorFormat.BINARY - ) - 
self.assertIs( - oracledb.VECTOR_FORMAT_FLOAT32, oracledb.VectorFormat.FLOAT32 - ) - self.assertIs( - oracledb.VECTOR_FORMAT_FLOAT64, oracledb.VectorFormat.FLOAT64 - ) - self.assertIs(oracledb.VECTOR_FORMAT_INT8, oracledb.VectorFormat.INT8) - - def test_1009(self): - "1009 - test enable_thin_mode()" - if not test_env.run_in_thick_mode(): +def test_1000(): + "1000 - test DateFromTicks()" + today = datetime.datetime.today() + timestamp = today.timestamp() + date = oracledb.DateFromTicks(int(timestamp)) + assert date == today.date() + + +def test_1001(): + "1001 - test management of __future__ object" + assert oracledb.__future__.dummy is None + oracledb.__future__.dummy = "Unimportant" + assert oracledb.__future__.dummy is None + + +def test_1002(): + "1002 - test TimestampFromTicks()" + timestamp = datetime.datetime.today().timestamp() + today = datetime.datetime.fromtimestamp(timestamp) + date = oracledb.TimestampFromTicks(timestamp) + assert date == today + + +def test_1003(test_env): + "1003 - test unsupported time functions" + with test_env.assert_raises_full_code("DPY-3000"): + oracledb.Time(12, 0, 0) + with test_env.assert_raises_full_code("DPY-3000"): + oracledb.TimeFromTicks(100) + + +def test_1004(): + "1004 - test makedsn() with valid arguments" + for name, value in [ + ("SID", "sid_1004"), + ("SERVICE_NAME", "my_service_1004"), + ]: + host = "host_1004" + port = 1004 + region = "US WEST" + sharding_key = "ShardKey" + super_sharding_key = "SuperShardKey" + args = ( + host, + port, + value if name == "SID" else None, + value if name == "SERVICE_NAME" else None, + region, + sharding_key, + super_sharding_key, + ) + result = oracledb.makedsn(*args) + expected_value = ( + f"(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})" + f"(PORT={port}))(CONNECT_DATA=({name}={value})" + f"(REGION={region})(SHARDING_KEY={sharding_key})" + f"(SUPER_SHARDING_KEY={super_sharding_key})))" + ) + assert result == expected_value + + +def test_1005(test_env): + "1005 - test makedsn() with invalid arguments" + with test_env.assert_raises_full_code("DPY-2020"): + oracledb.makedsn(host="(invalid)", port=1521) + with test_env.assert_raises_full_code("DPY-2020"): + oracledb.makedsn(host="host", port=1521, sid="(invalid)") + with test_env.assert_raises_full_code("DPY-2020"): + oracledb.makedsn(host="host", port=1521, service_name="(invalid)") + with test_env.assert_raises_full_code("DPY-2020"): + oracledb.makedsn(host="host", port=1521, region="(invalid)") + with test_env.assert_raises_full_code("DPY-2020"): + oracledb.makedsn(host="host", port=1521, sharding_key="(invalid)") + with test_env.assert_raises_full_code("DPY-2020"): + oracledb.makedsn( + host="host", port=1521, super_sharding_key="(invalid)" + ) + + +def test_1006(): + "1006 - test aliases match" + + # database type aliases + assert oracledb.BFILE is oracledb.DB_TYPE_BFILE + assert oracledb.BLOB is oracledb.DB_TYPE_BLOB + assert oracledb.BOOLEAN is oracledb.DB_TYPE_BOOLEAN + assert oracledb.CLOB is oracledb.DB_TYPE_CLOB + assert oracledb.CURSOR is oracledb.DB_TYPE_CURSOR + assert oracledb.FIXED_CHAR is oracledb.DB_TYPE_CHAR + assert oracledb.FIXED_NCHAR is oracledb.DB_TYPE_NCHAR + assert oracledb.INTERVAL is oracledb.DB_TYPE_INTERVAL_DS + assert oracledb.LONG_BINARY is oracledb.DB_TYPE_LONG_RAW + assert oracledb.LONG_STRING is oracledb.DB_TYPE_LONG + assert oracledb.NATIVE_INT is oracledb.DB_TYPE_BINARY_INTEGER + assert oracledb.NATIVE_FLOAT is oracledb.DB_TYPE_BINARY_DOUBLE + assert oracledb.NCHAR is oracledb.DB_TYPE_NVARCHAR + assert 
oracledb.NCLOB is oracledb.DB_TYPE_NCLOB + assert oracledb.OBJECT is oracledb.DB_TYPE_OBJECT + assert oracledb.TIMESTAMP is oracledb.DB_TYPE_TIMESTAMP + + # type aliases + assert oracledb.ObjectType is oracledb.DbObjectType + assert oracledb.Object is oracledb.DbObject + assert oracledb.SessionPool is oracledb.ConnectionPool + + # authentication mode aliases + assert oracledb.DEFAULT_AUTH is oracledb.AUTH_MODE_DEFAULT + assert oracledb.SYSASM is oracledb.AUTH_MODE_SYSASM + assert oracledb.SYSBKP is oracledb.AUTH_MODE_SYSBKP + assert oracledb.SYSDBA is oracledb.AUTH_MODE_SYSDBA + assert oracledb.SYSDGD is oracledb.AUTH_MODE_SYSDGD + assert oracledb.SYSKMT is oracledb.AUTH_MODE_SYSKMT + assert oracledb.SYSOPER is oracledb.AUTH_MODE_SYSOPER + assert oracledb.SYSRAC is oracledb.AUTH_MODE_SYSRAC + assert oracledb.PRELIM_AUTH is oracledb.AUTH_MODE_PRELIM + + # pool "get" mode aliases + assert oracledb.SPOOL_ATTRVAL_WAIT is oracledb.POOL_GETMODE_WAIT + assert oracledb.SPOOL_ATTRVAL_NOWAIT is oracledb.POOL_GETMODE_NOWAIT + assert oracledb.SPOOL_ATTRVAL_FORCEGET is oracledb.POOL_GETMODE_FORCEGET + assert oracledb.SPOOL_ATTRVAL_TIMEDWAIT is oracledb.POOL_GETMODE_TIMEDWAIT + + # purity aliases + assert oracledb.ATTR_PURITY_DEFAULT is oracledb.PURITY_DEFAULT + assert oracledb.ATTR_PURITY_NEW is oracledb.PURITY_NEW + assert oracledb.ATTR_PURITY_SELF is oracledb.PURITY_SELF + + # other aliases + assert oracledb.SUBSCR_PROTO_OCI is oracledb.SUBSCR_PROTO_CALLBACK + assert oracledb.version is oracledb.__version__ + + +def test_1007(test_env, skip_unless_thin_mode): + "1007 - test clientversion() fails without init_oracle_client()" + with test_env.assert_raises_full_code("DPY-2021"): + oracledb.clientversion() + + +def test_1008(): + "1008 - test enumeration aliases match" + + # authentication mode enumeration + assert oracledb.AUTH_MODE_DEFAULT is oracledb.AuthMode.DEFAULT + assert oracledb.AUTH_MODE_PRELIM is oracledb.AuthMode.PRELIM + assert oracledb.AUTH_MODE_SYSASM is oracledb.AuthMode.SYSASM + assert oracledb.AUTH_MODE_SYSBKP is oracledb.AuthMode.SYSBKP + assert oracledb.AUTH_MODE_SYSDBA is oracledb.AuthMode.SYSDBA + assert oracledb.AUTH_MODE_SYSDGD is oracledb.AuthMode.SYSDGD + assert oracledb.AUTH_MODE_SYSKMT is oracledb.AuthMode.SYSKMT + assert oracledb.AUTH_MODE_SYSOPER is oracledb.AuthMode.SYSOPER + assert oracledb.AUTH_MODE_SYSRAC is oracledb.AuthMode.SYSRAC + + # batch operation type enumeration + assert ( + oracledb.PIPELINE_OP_TYPE_CALL_FUNC + is oracledb.PipelineOpType.CALL_FUNC + ) + assert ( + oracledb.PIPELINE_OP_TYPE_CALL_PROC + is oracledb.PipelineOpType.CALL_PROC + ) + assert oracledb.PIPELINE_OP_TYPE_COMMIT is oracledb.PipelineOpType.COMMIT + assert oracledb.PIPELINE_OP_TYPE_EXECUTE is oracledb.PipelineOpType.EXECUTE + assert ( + oracledb.PIPELINE_OP_TYPE_EXECUTE_MANY + is oracledb.PipelineOpType.EXECUTE_MANY + ) + assert ( + oracledb.PIPELINE_OP_TYPE_FETCH_ALL + is oracledb.PipelineOpType.FETCH_ALL + ) + assert ( + oracledb.PIPELINE_OP_TYPE_FETCH_MANY + is oracledb.PipelineOpType.FETCH_MANY + ) + assert ( + oracledb.PIPELINE_OP_TYPE_FETCH_ONE + is oracledb.PipelineOpType.FETCH_ONE + ) + + # pool "get" mode enumeration + assert oracledb.POOL_GETMODE_FORCEGET is oracledb.PoolGetMode.FORCEGET + assert oracledb.POOL_GETMODE_WAIT is oracledb.PoolGetMode.WAIT + assert oracledb.POOL_GETMODE_NOWAIT is oracledb.PoolGetMode.NOWAIT + assert oracledb.POOL_GETMODE_TIMEDWAIT is oracledb.PoolGetMode.TIMEDWAIT + + # purity enumeration + assert oracledb.PURITY_DEFAULT is oracledb.Purity.DEFAULT + 
assert oracledb.PURITY_NEW is oracledb.Purity.NEW + assert oracledb.PURITY_SELF is oracledb.Purity.SELF + + # vector format enumeration + assert oracledb.VECTOR_FORMAT_BINARY is oracledb.VectorFormat.BINARY + assert oracledb.VECTOR_FORMAT_FLOAT32 is oracledb.VectorFormat.FLOAT32 + assert oracledb.VECTOR_FORMAT_FLOAT64 is oracledb.VectorFormat.FLOAT64 + assert oracledb.VECTOR_FORMAT_INT8 is oracledb.VectorFormat.INT8 + + +def test_1009(test_env, conn): + "1009 - test enable_thin_mode()" + if test_env.use_thick_mode: + with test_env.assert_raises_full_code("DPY-2053"): oracledb.enable_thin_mode() - with self.assertRaisesFullCode("DPY-2019"): - oracledb.init_oracle_client() - else: - with self.assertRaisesFullCode("DPY-2053"): - oracledb.enable_thin_mode() - - -if __name__ == "__main__": - test_env.run_test_cases() + else: + oracledb.enable_thin_mode() + with test_env.assert_raises_full_code("DPY-2019"): + oracledb.init_oracle_client() diff --git a/tests/test_1100_connection.py b/tests/test_1100_connection.py index 8924eeea..6e208f2c 100644 --- a/tests/test_1100_connection.py +++ b/tests/test_1100_connection.py @@ -32,936 +32,896 @@ import time import oracledb -import test_env - +import pytest + + +def _verify_connect_arg(test_env, arg_name, arg_value, sql): + """ + Verifies an argument passed during connect() matches the value actually + used by the connection. + """ + args = {} + args[arg_name] = arg_value + conn = test_env.get_connection(**args) + cursor = conn.cursor() + cursor.execute(sql) + (fetched_value,) = cursor.fetchone() + assert fetched_value == arg_value + + +def test_1100(test_env, conn): + "1100 - simple connection to database" + assert conn.username == test_env.main_user + assert conn.dsn == test_env.connect_string + assert conn.thin == (not test_env.use_thick_mode) + + +def test_1101(skip_if_drcp, test_env): + "1101 - test use of application context" + namespace = "CLIENTCONTEXT" + app_context_entries = [ + (namespace, "ATTR1", "VALUE1"), + (namespace, "ATTR2", "VALUE2"), + (namespace, "ATTR3", "VALUE3"), + ] + conn = test_env.get_connection(appcontext=app_context_entries) + cursor = conn.cursor() + for namespace, name, value in app_context_entries: + cursor.execute( + "select sys_context(:1, :2) from dual", (namespace, name) + ) + (actual_value,) = cursor.fetchone() + assert actual_value == value + + +def test_1102(test_env): + "1102 - test invalid use of application context" + with pytest.raises(TypeError): + test_env.get_connection(appcontext=[("userenv", "action")]) + + +def test_1103(conn, test_env): + "1103 - test connection end-to-end tracing attributes" + + # determine the list of attributes to check + attributes_to_check = [] + if test_env.has_client_version(12, 1): + if not test_env.is_on_oracle_cloud: + sql = """select dbop_name from v$sql_monitor + where sid = sys_context('userenv', 'sid') + and status = 'EXECUTING'""" + attributes_to_check.append(("dbop", "oracledb_dbop", sql)) + sql = "select sys_context('userenv', 'action') from dual" + attributes_to_check.append(("action", "oracledb_Action", sql)) + attributes_to_check.append(("action", None, sql)) + sql = "select sys_context('userenv', 'module') from dual" + attributes_to_check.append(("module", "oracledb_Module", sql)) + attributes_to_check.append(("module", None, sql)) + sql = "select sys_context('userenv', 'client_info') from dual" + attributes_to_check.append(("clientinfo", "oracledb_cinfo", sql)) + attributes_to_check.append(("clientinfo", None, sql)) + sql = "select sys_context('userenv', 
'client_identifier') from dual" + attributes_to_check.append(("client_identifier", "oracledb_cid", sql)) + attributes_to_check.append(("client_identifier", None, sql)) + if not conn.thin: + sql = """select ecid from v$session + where sid = sys_context('userenv', 'sid')""" + attributes_to_check.append(("econtext_id", "oracledb_ecid", sql)) + attributes_to_check.append(("econtext_id", None, sql)) + + # check each of the scenarios + for attr_name, value, sql in attributes_to_check: + setattr(conn, attr_name, value) + with conn.cursor() as cursor: + cursor.execute(sql) + (result,) = cursor.fetchone() + assert result == value + + +def test_1104(test_env): + "1104 - test use of autocommit" + conn = test_env.get_connection() + cursor = conn.cursor() + other_conn = test_env.get_connection() + other_cursor = other_conn.cursor() + cursor.execute("truncate table TestTempTable") + cursor.execute("insert into TestTempTable (IntCol) values (1)") + other_cursor.execute("select IntCol from TestTempTable") + assert other_cursor.fetchall() == [] + conn.autocommit = True + cursor.execute("insert into TestTempTable (IntCol) values (2)") + other_cursor.execute("select IntCol from TestTempTable order by IntCol") + assert other_cursor.fetchall() == [(1,), (2,)] + + +def test_1105(test_env): + "1105 - connection to database with bad connect string" + with test_env.assert_raises_full_code( + "DPY-4000", "DPY-4026", "DPY-4027", "ORA-12154" + ): + oracledb.connect("not a valid connect string!!") + with test_env.assert_raises_full_code("DPY-4000", "DPY-4001"): + dsn = f"{test_env.main_user}@{test_env.connect_string}" + oracledb.connect(dsn) + + +def test_1106(test_env): + "1106 - connection to database with bad password" + with test_env.assert_raises_full_code("ORA-01017"): + test_env.get_connection(password=test_env.main_password + "X") + + +def test_1107(skip_if_drcp, conn, test_env): + "1107 - test changing password" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + sys_random = random.SystemRandom() + new_password = "".join( + sys_random.choice(string.ascii_letters) for i in range(20) + ) + conn.changepassword(test_env.main_password, new_password) + conn = test_env.get_connection(password=new_password) + conn.changepassword(new_password, test_env.main_password) + + +def test_1108(skip_if_drcp, conn, test_env): + "1108 - test changing password to an invalid value" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + new_password = "1" * 1500 + with test_env.assert_raises_full_code("ORA-01017", "ORA-00988"): + conn.changepassword(test_env.main_password, new_password) + with test_env.assert_raises_full_code( + "ORA-01017", "ORA-00988", "ORA-28008" + ): + conn.changepassword("incorrect old password", new_password) + + +def test_1109(skip_if_drcp, conn, test_env): + "1109 - test connecting with password containing / and @ symbols" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + sys_random = random.SystemRandom() + chars = list(sys_random.choice(string.ascii_letters) for i in range(20)) + chars[4] = "/" + chars[8] = "@" + new_password = "".join(chars) + conn.changepassword(test_env.main_password, new_password) + try: + test_env.get_connection(password=new_password) + finally: + conn.changepassword(new_password, test_env.main_password) + + +def test_1110(conn, test_env): + "1110 - confirm an exception is raised after closing a connection" + conn.close() + with 
test_env.assert_raises_full_code("DPY-1001"):
+        conn.rollback()
 
 
-class TestCase(test_env.BaseTestCase):
-    requires_connection = False
 
-    def __connect_and_drop(self):
-        """
-        Connect to the database, perform a query and drop the connection.
+def test_1111(skip_unless_thick_mode, conn, test_env):
+    "1111 - test creating a connection using a handle"
+    cursor = conn.cursor()
+    cursor.execute("truncate table TestTempTable")
+    int_value = random.randint(1, 32768)
+    cursor.execute(
         """
+        insert into TestTempTable (IntCol, StringCol1)
+        values (:val, null)
+        """,
+        val=int_value,
+    )
+    conn2 = oracledb.connect(handle=conn.handle)
+    cursor = conn2.cursor()
+    cursor.execute("select IntCol from TestTempTable")
+    (fetched_int_value,) = cursor.fetchone()
+    assert fetched_int_value == int_value
+
+    cursor.close()
+    with test_env.assert_raises_full_code("DPI-1034"):
+        conn2.close()
+    conn.close()
+
+
+def test_1112(conn):
+    "1112 - connection version is a string"
+    assert isinstance(conn.version, str)
+
+
+def test_1113(test_env):
+    "1113 - connection rolls back before close"
+    conn = test_env.get_connection()
+    cursor = conn.cursor()
+    cursor.execute("truncate table TestTempTable")
+    other_conn = test_env.get_connection()
+    other_cursor = other_conn.cursor()
+    other_cursor.execute("insert into TestTempTable (IntCol) values (1)")
+    other_cursor.close()
+    other_conn.close()
+    cursor.execute("select count(*) from TestTempTable")
+    (count,) = cursor.fetchone()
+    assert count == 0
+
+
+def test_1114(test_env):
+    "1114 - connection rolls back before destruction"
+    conn = test_env.get_connection()
+    cursor = conn.cursor()
+    cursor.execute("truncate table TestTempTable")
+    other_conn = test_env.get_connection()
+    other_cursor = other_conn.cursor()
+    other_cursor.execute("insert into TestTempTable (IntCol) values (1)")
+    del other_cursor
+    del other_conn
+    cursor.execute("select count(*) from TestTempTable")
+    (count,) = cursor.fetchone()
+    assert count == 0
+
+
+def test_1115(test_env):
+    "1115 - multiple connections to database with multiple threads"
+
+    def connect_and_drop():
         with test_env.get_connection() as conn:
             cursor = conn.cursor()
             cursor.execute("select count(*) from TestNumbers")
             (count,) = cursor.fetchone()
-            self.assertEqual(count, 10)
+            assert count == 10
 
-    def __verify_fetched_data(self, connection):
-        expected_data = [f"String {i + 1}" for i in range(10)]
-        sql = "select StringCol from TestStrings order by IntCol"
-        for i in range(5):
-            with connection.cursor() as cursor:
-                fetched_data = [s for s, in cursor.execute(sql)]
-                self.assertEqual(fetched_data, expected_data)
+    threads = []
+    for i in range(20):
+        thread = threading.Thread(None, connect_and_drop)
+        threads.append(thread)
+        thread.start()
+        time.sleep(0.1)
+    for thread in threads:
+        thread.join()
 
-    def __verify_attributes(self, connection, attr_name, value, sql):
-        setattr(connection, attr_name, value)
-        cursor = connection.cursor()
-        cursor.execute(sql)
-        (result,) = cursor.fetchone()
-        self.assertEqual(result, value, f"{attr_name} value mismatch")
 
-    def __verify_connect_arg(self, arg_name, arg_value, sql):
-        args = {}
-        args[arg_name] = arg_value
-        conn = test_env.get_connection(**args)
-        cursor = conn.cursor()
-        cursor.execute(sql)
-        (fetched_value,) = cursor.fetchone()
-        self.assertEqual(fetched_value, arg_value)
+def test_1116(conn, test_env):
+    "1116 - test string format of connection"
+    expected_value = (
+        f"<oracledb.Connection to {test_env.main_user}@{test_env.connect_string}>"
+    )
+    assert str(conn) == expected_value
 
-    def test_1100(self):
-        "1100 - simple connection to database"
-        conn = 
test_env.get_connection() - self.assertEqual( - conn.username, test_env.get_main_user(), "user name differs" - ) - self.assertEqual( - conn.dsn, test_env.get_connect_string(), "dsn differs" - ) - self.assertEqual(conn.thin, not test_env.run_in_thick_mode()) - - @test_env.skip_if_drcp() - def test_1101(self): - "1101 - test use of application context" - namespace = "CLIENTCONTEXT" - app_context_entries = [ - (namespace, "ATTR1", "VALUE1"), - (namespace, "ATTR2", "VALUE2"), - (namespace, "ATTR3", "VALUE3"), - ] - conn = test_env.get_connection(appcontext=app_context_entries) - cursor = conn.cursor() - for namespace, name, value in app_context_entries: - cursor.execute( - "select sys_context(:1, :2) from dual", (namespace, name) - ) - (actual_value,) = cursor.fetchone() - self.assertEqual(actual_value, value) - - def test_1102(self): - "1102 - test invalid use of application context" - self.assertRaises( - TypeError, - test_env.get_connection, - appcontext=[("userenv", "action")], - ) - def test_1103(self): - "1103 - test connection end-to-end tracing attributes" - conn = test_env.get_connection() - if test_env.has_client_version(12, 1): - if not self.is_on_oracle_cloud(conn): - sql = """select dbop_name from v$sql_monitor - where sid = sys_context('userenv', 'sid') - and status = 'EXECUTING'""" - self.__verify_attributes(conn, "dbop", "oracledb_dbop", sql) - sql = "select sys_context('userenv', 'action') from dual" - self.__verify_attributes(conn, "action", "oracledb_Action", sql) - self.__verify_attributes(conn, "action", None, sql) - sql = "select sys_context('userenv', 'module') from dual" - self.__verify_attributes(conn, "module", "oracledb_Module", sql) - self.__verify_attributes(conn, "module", None, sql) - sql = "select sys_context('userenv', 'client_info') from dual" - self.__verify_attributes(conn, "clientinfo", "oracledb_cinfo", sql) - self.__verify_attributes(conn, "clientinfo", None, sql) - sql = "select sys_context('userenv', 'client_identifier') from dual" - self.__verify_attributes( - conn, "client_identifier", "oracledb_cid", sql - ) - self.__verify_attributes(conn, "client_identifier", None, sql) - if not conn.thin: - sql = """select ecid from v$session - where sid = sys_context('userenv', 'sid')""" - self.__verify_attributes(conn, "econtext_id", "oracledb_ecid", sql) - self.__verify_attributes(conn, "econtext_id", None, sql) - - def test_1104(self): - "1104 - test use of autocommit" - conn = test_env.get_connection() +def test_1117(test_env): + "1117 - test context manager - close" + with test_env.get_connection() as conn: cursor = conn.cursor() - other_conn = test_env.get_connection() - other_cursor = other_conn.cursor() cursor.execute("truncate table TestTempTable") cursor.execute("insert into TestTempTable (IntCol) values (1)") - other_cursor.execute("select IntCol from TestTempTable") - self.assertEqual(other_cursor.fetchall(), []) - conn.autocommit = True + conn.commit() cursor.execute("insert into TestTempTable (IntCol) values (2)") - other_cursor.execute( - "select IntCol from TestTempTable order by IntCol" - ) - self.assertEqual(other_cursor.fetchall(), [(1,), (2,)]) - - def test_1105(self): - "1105 - connection to database with bad connect string" - with self.assertRaisesFullCode( - "DPY-4000", "DPY-4026", "DPY-4027", "ORA-12154" - ): - oracledb.connect("not a valid connect string!!") - with self.assertRaisesFullCode("DPY-4000", "DPY-4001"): - dsn = ( - test_env.get_main_user() + "@" + test_env.get_connect_string() - ) - oracledb.connect(dsn) - - def 
test_1106(self): - "1106 - connection to database with bad password" - with self.assertRaisesFullCode("ORA-01017"): - test_env.get_connection( - password=test_env.get_main_password() + "X" - ) - - @test_env.skip_if_drcp() - def test_1107(self): - "1107 - test changing password" - conn = test_env.get_connection() - if self.is_on_oracle_cloud(conn): - self.skipTest("passwords on Oracle Cloud are strictly controlled") - sys_random = random.SystemRandom() - new_password = "".join( - sys_random.choice(string.ascii_letters) for i in range(20) - ) - conn.changepassword(test_env.get_main_password(), new_password) - conn = test_env.get_connection(password=new_password) - conn.changepassword(new_password, test_env.get_main_password()) - - @test_env.skip_if_drcp() - def test_1108(self): - "1108 - test changing password to an invalid value" - conn = test_env.get_connection() - if self.is_on_oracle_cloud(conn): - self.skipTest("passwords on Oracle Cloud are strictly controlled") - new_password = "1" * 1500 - with self.assertRaisesFullCode("ORA-01017", "ORA-00988"): - conn.changepassword(test_env.get_main_password(), new_password) - with self.assertRaisesFullCode("ORA-01017", "ORA-00988", "ORA-28008"): - conn.changepassword("incorrect old password", new_password) - - @test_env.skip_if_drcp() - def test_1109(self): - "1109 - test connecting with password containing / and @ symbols" - conn = test_env.get_connection() - if self.is_on_oracle_cloud(conn): - self.skipTest("passwords on Oracle Cloud are strictly controlled") - sys_random = random.SystemRandom() - chars = list( - sys_random.choice(string.ascii_letters) for i in range(20) - ) - chars[4] = "/" - chars[8] = "@" - new_password = "".join(chars) - conn.changepassword(test_env.get_main_password(), new_password) - try: - test_env.get_connection(password=new_password) - finally: - conn.changepassword(new_password, test_env.get_main_password()) - - def test_1110(self): - "1110 - confirm an exception is raised after closing a connection" - conn = test_env.get_connection() - conn.close() - with self.assertRaisesFullCode("DPY-1001"): - conn.rollback() - - @test_env.skip_unless_thick_mode() - def test_1111(self): - "1111 - test creating a connection using a handle" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - int_value = random.randint(1, 32768) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:val, null) - """, - val=int_value, - ) - conn2 = oracledb.connect(handle=conn.handle) - cursor = conn2.cursor() - cursor.execute("select IntCol from TestTempTable") - (fetched_int_value,) = cursor.fetchone() - self.assertEqual(fetched_int_value, int_value) - - cursor.close() - with self.assertRaisesFullCode("DPI-1034"): - conn2.close() - conn.close() - - def test_1112(self): - "1112 - connection version is a string" - conn = test_env.get_connection() - self.assertIsInstance(conn.version, str) - - def test_1113(self): - "1113 - connection rolls back before close" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - other_conn = test_env.get_connection() - other_cursor = other_conn.cursor() - other_cursor.execute("insert into TestTempTable (IntCol) values (1)") - other_cursor.close() - other_conn.close() - cursor.execute("select count(*) from TestTempTable") - (count,) = cursor.fetchone() - self.assertEqual(count, 0) - - def test_1114(self): - "1114 - connection rolls back before destruction" - conn = 
test_env.get_connection()
-        cursor = conn.cursor()
-        cursor.execute("truncate table TestTempTable")
-        other_conn = test_env.get_connection()
-        other_cursor = other_conn.cursor()
-        other_cursor.execute("insert into TestTempTable (IntCol) values (1)")
-        del other_cursor
-        del other_conn
-        cursor.execute("select count(*) from TestTempTable")
-        (count,) = cursor.fetchone()
-        self.assertEqual(count, 0)
-
-    def test_1115(self):
-        "1115 - multiple connections to database with multiple threads"
-        threads = []
-        for i in range(20):
-            thread = threading.Thread(None, self.__connect_and_drop)
-            threads.append(thread)
-            thread.start()
-            time.sleep(0.1)
-        for thread in threads:
-            thread.join()
-
-    def test_1116(self):
-        "1116 - test string format of connection"
-        conn = test_env.get_connection()
-        expected_value = "<oracledb.Connection to %s@%s>" % (
-            test_env.get_main_user(),
-            test_env.get_connect_string(),
-        )
-        self.assertEqual(str(conn), expected_value)
-
-    def test_1117(self):
-        "1117 - test context manager - close"
-        with test_env.get_connection() as conn:
-            cursor = conn.cursor()
-            cursor.execute("truncate table TestTempTable")
-            cursor.execute("insert into TestTempTable (IntCol) values (1)")
-            conn.commit()
-            cursor.execute("insert into TestTempTable (IntCol) values (2)")
-            with 
self.assertRaisesFullCode("DPY-1001"): - conn.ping() - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("select count(*) from TestTempTable") - (count,) = cursor.fetchone() - self.assertEqual(count, 1) - - def test_1118(self): - "1118 - test connection attribute values" - conn = test_env.get_connection() - if test_env.has_client_version(12, 1): - self.assertEqual(conn.ltxid, b"") - self.assertFalse(conn.autocommit) - conn.autocommit = True - self.assertTrue(conn.autocommit) - self.assertIsNone(conn.current_schema) - conn.current_schema = "test_schema" - self.assertEqual(conn.current_schema, "test_schema") - self.assertIsNone(conn.edition) - conn.external_name = "test_external" - self.assertEqual(conn.external_name, "test_external") - conn.internal_name = "test_internal" - self.assertEqual(conn.internal_name, "test_internal") - if conn.max_identifier_length is not None: - self.assertIsInstance(conn.max_identifier_length, int) - conn.stmtcachesize = 30 - self.assertEqual(conn.stmtcachesize, 30) - self.assertRaises(TypeError, conn.stmtcachesize, 20.5) - self.assertRaises(TypeError, conn.stmtcachesize, "value") - self.assertIsNone(conn.warning) - - def test_1119(self): - "1119 - test closed connection attribute values" - conn = test_env.get_connection() - conn.close() - attr_names = [ - "current_schema", - "edition", - "external_name", - "internal_name", - "stmtcachesize", - "warning", - ] - if test_env.has_client_version(12, 1): - attr_names.append("ltxid") - for name in attr_names: - with self.assertRaisesFullCode("DPY-1001"): - getattr(conn, name) - - def test_1120(self): - "1120 - test connection ping makes a round trip" - self.conn = test_env.get_connection() - self.setup_round_trip_checker() - self.conn.ping() - self.assertRoundTrips(1) - - @test_env.skip_unless_thick_mode() - def test_1121(self): - "1121 - test begin, prepare, cancel transaction" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - conn.begin(10, "trxnId", "branchId") - self.assertFalse(conn.prepare()) - conn.begin(10, "trxnId", "branchId") + id_ = random.randint(0, 2**128) + xid = (0x1234, "%032x" % id_, "%032x" % 9) + conn.begin(*xid) + with conn.cursor() as cursor: cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (1, 'tesName') """ ) - self.assertTrue(conn.prepare()) - conn.cancel() - conn.rollback() - cursor.execute("select count(*) from TestTempTable") - (count,) = cursor.fetchone() - self.assertEqual(count, 0) - - @test_env.skip_unless_thick_mode() - def test_1122(self): - "1122 - test multiple transactions on the same connection" - conn = test_env.get_connection() - with conn.cursor() as cursor: - cursor.execute("truncate table TestTempTable") + assert conn.prepare() + conn.commit() - id_ = random.randint(0, 2**128) - xid = (0x1234, "%032x" % id_, "%032x" % 9) - conn.begin(*xid) + for begin_trans in (True, False): + val = 3 + if begin_trans: + conn.begin() + val = 2 with conn.cursor() as cursor: cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ + values (:int_val, 'tesName') + """, + int_val=val, ) - self.assertTrue(conn.prepare()) conn.commit() - for begin_trans in (True, False): - val = 3 - if begin_trans: - conn.begin() - val = 2 - with conn.cursor() as cursor: - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, 'tesName') - """, - int_val=val, - ) - conn.commit() - - expected_rows = [(1, "tesName"), (2, "tesName"), 
(3, "tesName")] - with conn.cursor() as cursor: - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), expected_rows) + expected_rows = [(1, "tesName"), (2, "tesName"), (3, "tesName")] + with conn.cursor() as cursor: + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == expected_rows - @test_env.skip_unless_thick_mode() - def test_1123(self): - "1123 - test multiple global transactions on the same connection" - conn = test_env.get_connection() - with conn.cursor() as cursor: - cursor.execute("truncate table TestTempTable") - id_ = random.randint(0, 2**128) - xid = (0x1234, "%032x" % id_, "%032x" % 9) - conn.begin(*xid) - with conn.cursor() as cursor: - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ - ) - self.assertTrue(conn.prepare()) - conn.commit() +def test_1123(skip_unless_thick_mode, conn): + "1123 - test multiple global transactions on the same connection" + with conn.cursor() as cursor: + cursor.execute("truncate table TestTempTable") - for begin_trans in (True, False): - val = 3 - if begin_trans: - conn.begin() - val = 2 - with conn.cursor() as cursor: - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, 'tesName') - """, - int_val=val, - ) - conn.commit() - - id2_ = random.randint(0, 2**128) - xid2 = (0x1234, "%032x" % id2_, "%032x" % 9) - conn.begin(*xid2) + id_ = random.randint(0, 2**128) + xid = (0x1234, "%032x" % id_, "%032x" % 9) + conn.begin(*xid) + with conn.cursor() as cursor: + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'tesName') + """ + ) + assert conn.prepare() + conn.commit() + + for begin_trans in (True, False): + val = 3 + if begin_trans: + conn.begin() + val = 2 with conn.cursor() as cursor: cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) - values (4, 'tesName') - """ + values (:int_val, 'tesName') + """, + int_val=val, ) - self.assertTrue(conn.prepare()) conn.commit() - expected_rows = [ - (1, "tesName"), - (2, "tesName"), - (3, "tesName"), - (4, "tesName"), - ] - with conn.cursor() as cursor: - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), expected_rows) + id2_ = random.randint(0, 2**128) + xid2 = (0x1234, "%032x" % id2_, "%032x" % 9) + conn.begin(*xid2) + with conn.cursor() as cursor: + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (4, 'tesName') + """ + ) + assert conn.prepare() + conn.commit() - @test_env.skip_unless_thick_mode() - def test_1124(self): - "1124 - test creating global txn after a local txn" - conn = test_env.get_connection() - with conn.cursor() as cursor: - cursor.execute("truncate table TestTempTable") + expected_rows = [ + (1, "tesName"), + (2, "tesName"), + (3, "tesName"), + (4, "tesName"), + ] + with conn.cursor() as cursor: + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == expected_rows - with conn.cursor() as cursor: - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (2, 'tesName') - """ - ) - id_ = random.randint(0, 2**128) - xid = (0x1234, "%032x" % id_, "%032x" % 9) - with self.assertRaisesFullCode("ORA-24776"): - conn.begin(*xid) +def test_1124(skip_unless_thick_mode, conn, test_env): + "1124 - test creating global txn after a local txn" + with conn.cursor() as cursor: + cursor.execute("truncate table TestTempTable") - def 
test_1125(self): - "1125 - single connection to database with multiple threads" - with test_env.get_connection() as conn: - threads = [ - threading.Thread( - target=self.__verify_fetched_data, args=(conn,) - ) - for i in range(3) - ] - for t in threads: - t.start() - for t in threads: - t.join() - - def test_1126(self): - "1126 - test connection cancel" - conn = test_env.get_connection() - sleep_proc_name = test_env.get_sleep_proc_name() + with conn.cursor() as cursor: + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (2, 'tesName') + """ + ) - def perform_cancel(): - time.sleep(0.1) - conn.cancel() + id_ = random.randint(0, 2**128) + xid = (0x1234, "%032x" % id_, "%032x" % 9) + with test_env.assert_raises_full_code("ORA-24776"): + conn.begin(*xid) - thread = threading.Thread(target=perform_cancel) - thread.start() - try: - with conn.cursor() as cursor: - self.assertRaises( - oracledb.OperationalError, - cursor.callproc, - sleep_proc_name, - [2], - ) - finally: - thread.join() - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) - @test_env.skip_if_drcp() - def test_1127(self): - "1127 - test changing password during connect" - conn = test_env.get_connection() - if self.is_on_oracle_cloud(conn): - self.skipTest("passwords on Oracle Cloud are strictly controlled") - sys_random = random.SystemRandom() - new_password = "".join( - sys_random.choice(string.ascii_letters) for i in range(20) - ) - conn = test_env.get_connection(newpassword=new_password) - conn = test_env.get_connection(password=new_password) - conn.changepassword(new_password, test_env.get_main_password()) - - def test_1128(self): - "1128 - test use of autocommit during reexecute" - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - data_to_insert = [(1, "Test String #1"), (2, "Test String #2")] - conn = test_env.get_connection() - cursor = conn.cursor() - other_conn = test_env.get_connection() - other_cursor = other_conn.cursor() - cursor.execute("truncate table TestTempTable") - cursor.execute(sql, data_to_insert[0]) - other_cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(other_cursor.fetchall(), []) - conn.autocommit = True - cursor.execute(sql, data_to_insert[1]) - other_cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(other_cursor.fetchall(), data_to_insert) - - def test_1129(self): - "1129 - test current_schema is set properly" - conn = test_env.get_connection() - self.assertIsNone(conn.current_schema) +def test_1125(conn): + "1125 - single connection to database with multiple threads" - user = test_env.get_main_user().upper() - proxy_user = test_env.get_proxy_user().upper() - cursor = conn.cursor() - cursor.execute(f"alter session set current_schema={proxy_user}") - self.assertEqual(conn.current_schema, proxy_user) + def verify_fetched_data(): + expected_data = [f"String {i + 1}" for i in range(10)] + sql = "select StringCol from TestStrings order by IntCol" + for i in range(5): + with conn.cursor() as cursor: + fetched_data = [s for s, in cursor.execute(sql)] + assert fetched_data == expected_data - conn.current_schema = user - self.assertEqual(conn.current_schema, user) + threads = [threading.Thread(target=verify_fetched_data) for i in range(3)] + for t in threads: + t.start() + for t in threads: + t.join() - cursor.execute( - "select sys_context('userenv', 'current_schema') from dual" - ) - 
(result,) = cursor.fetchone() - self.assertEqual(result, user) - def test_1130(self): - "1130 - test dbms_output package" - conn = test_env.get_connection() - cursor = conn.cursor() - test_string = "Testing DBMS_OUTPUT package" - cursor.callproc("dbms_output.enable") - cursor.callproc("dbms_output.put_line", [test_string]) - string_var = cursor.var(str) - number_var = cursor.var(int) - cursor.callproc("dbms_output.get_line", (string_var, number_var)) - self.assertEqual(string_var.getvalue(), test_string) - - @test_env.skip_unless_call_timeout_supported() - def test_1131(self): - "1131 - test connection call_timeout" - conn = test_env.get_connection() - conn.call_timeout = 500 # milliseconds - self.assertEqual(conn.call_timeout, 500) - with self.assertRaisesFullCode("DPY-4011", "DPY-4024"): - conn.cursor().callproc(test_env.get_sleep_proc_name(), [2]) +def test_1126(conn, test_env): + "1126 - test connection cancel" - def test_1132(self): - "1132 - test Connection repr()" + def perform_cancel(): + time.sleep(0.1) + conn.cancel() - class MyConnection(oracledb.Connection): - pass + thread = threading.Thread(target=perform_cancel) + thread.start() + try: + with conn.cursor() as cursor: + with pytest.raises(oracledb.OperationalError): + cursor.callproc(test_env.sleep_proc_name, [2]) + finally: + thread.join() + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == test_env.main_user.upper() + + +def test_1127(skip_if_drcp, conn, test_env): + "1127 - test changing password during connect" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + sys_random = random.SystemRandom() + new_password = "".join( + sys_random.choice(string.ascii_letters) for i in range(20) + ) + conn = test_env.get_connection(newpassword=new_password) + conn = test_env.get_connection(password=new_password) + conn.changepassword(new_password, test_env.main_password) + + +def test_1128(test_env): + "1128 - test use of autocommit during reexecute" + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + data_to_insert = [(1, "Test String #1"), (2, "Test String #2")] + conn = test_env.get_connection() + cursor = conn.cursor() + other_conn = test_env.get_connection() + other_cursor = other_conn.cursor() + cursor.execute("truncate table TestTempTable") + cursor.execute(sql, data_to_insert[0]) + other_cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert other_cursor.fetchall() == [] + conn.autocommit = True + cursor.execute(sql, data_to_insert[1]) + other_cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert other_cursor.fetchall() == data_to_insert + + +def test_1129(conn, test_env): + "1129 - test current_schema is set properly" + assert conn.current_schema is None + user = test_env.main_user.upper() + if test_env.proxy_user is None: + pytest.skip("proxy user not defined") + proxy_user = test_env.proxy_user.upper() + cursor = conn.cursor() + cursor.execute(f"alter session set current_schema={proxy_user}") + assert conn.current_schema == proxy_user + conn.current_schema = user + assert conn.current_schema == user + cursor.execute("select sys_context('userenv', 'current_schema') from dual") + (result,) = cursor.fetchone() + assert result == user + + +def test_1130(conn): + "1130 - test dbms_output package" + cursor = conn.cursor() + test_string = "Testing DBMS_OUTPUT package" + cursor.callproc("dbms_output.enable") + cursor.callproc("dbms_output.put_line", 
[test_string]) + string_var = cursor.var(str) + number_var = cursor.var(int) + cursor.callproc("dbms_output.get_line", (string_var, number_var)) + assert string_var.getvalue() == test_string + + +def test_1131(skip_unless_call_timeout_supported, conn, test_env): + "1131 - test connection call_timeout" + conn.call_timeout = 500 # milliseconds + assert conn.call_timeout == 500 + with test_env.assert_raises_full_code("DPY-4011", "DPY-4024"): + conn.cursor().callproc(test_env.sleep_proc_name, [2]) + + +def test_1132(test_env): + "1132 - test Connection repr()" + + class MyConnection(oracledb.Connection): + pass + + conn = test_env.get_connection(conn_class=MyConnection) + qual_name = conn.__class__.__qualname__ + expected_value = f"<{__name__}.{qual_name} to {conn.username}@{conn.dsn}>" + assert repr(conn) == expected_value + + conn.close() + expected_value = f"<{__name__}.{qual_name} disconnected>" + assert repr(conn) == expected_value + + +def test_1133(conn): + "1133 - test getting write-only attributes" + with pytest.raises(AttributeError): + conn.action + with pytest.raises(AttributeError): + conn.dbop + with pytest.raises(AttributeError): + conn.clientinfo + with pytest.raises(AttributeError): + conn.econtext_id + with pytest.raises(AttributeError): + conn.module + with pytest.raises(AttributeError): + conn.client_identifier + + +def test_1134(test_env): + "1134 - test error for invalid type for params and pool" + pool = test_env.get_pool() + pool.close() + with test_env.assert_raises_full_code("DPY-1002"): + test_env.get_connection(pool=pool) + with pytest.raises(TypeError): + test_env.get_connection(pool="This isn't an instance of a pool") + with test_env.assert_raises_full_code("DPY-2025"): + oracledb.connect(params={"number": 7}) + + +def test_1135(conn): + "1135 - test connection instance name" + cursor = conn.cursor() + cursor.execute( + """ + select upper(sys_context('userenv', 'instance_name')) + from dual + """ + ) + (instance_name,) = cursor.fetchone() + assert conn.instance_name.upper() == instance_name - conn = test_env.get_connection(conn_class=MyConnection) - qual_name = conn.__class__.__qualname__ - expected_value = ( - f"<{__name__}.{qual_name} to {conn.username}@{conn.dsn}>" - ) - self.assertEqual(repr(conn), expected_value) - conn.close() - expected_value = f"<{__name__}.{qual_name} disconnected>" - self.assertEqual(repr(conn), expected_value) +def test_1136(skip_unless_call_timeout_supported, conn): + "1136 - test deprecated attributes" + conn.callTimeout = 500 + assert conn.callTimeout == 500 - def test_1133(self): - "1133 - test getting write-only attributes" - conn = test_env.get_connection() - with self.assertRaises(AttributeError): - conn.action - with self.assertRaises(AttributeError): - conn.dbop - with self.assertRaises(AttributeError): - conn.clientinfo - with self.assertRaises(AttributeError): - conn.econtext_id - with self.assertRaises(AttributeError): - conn.module - with self.assertRaises(AttributeError): - conn.client_identifier - - def test_1134(self): - "1134 - test error for invalid type for params and pool" - pool = test_env.get_pool() - pool.close() - with self.assertRaisesFullCode("DPY-1002"): - test_env.get_connection(pool=pool) - self.assertRaises( - TypeError, - test_env.get_connection, - pool="This isn't an instance of a pool", - ) - with self.assertRaisesFullCode("DPY-2025"): - oracledb.connect(params={"number": 7}) - def test_1135(self): - "1135 - test connection instance name" - conn = test_env.get_connection() - cursor = conn.cursor() - 
cursor.execute( - """ - select upper(sys_context('userenv', 'instance_name')) - from dual - """ - ) - (instance_name,) = cursor.fetchone() - self.assertEqual(conn.instance_name.upper(), instance_name) +def test_1137(skip_if_drcp, skip_unless_long_passwords_supported, test_env): + "1137 - test maximum allowed length for password" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") - @test_env.skip_unless_call_timeout_supported() - def test_1136(self): - "1136 - test deprecated attributes" - conn = test_env.get_connection() - conn.callTimeout = 500 - self.assertEqual(conn.callTimeout, 500) + conn = test_env.get_connection() + original_password = test_env.main_password + new_password_32 = "a" * 32 + conn.changepassword(original_password, new_password_32) + conn = test_env.get_connection(password=new_password_32) - @test_env.skip_if_drcp() - @test_env.skip_unless_long_passwords_supported() - def test_1137(self): - "1137 - test maximum allowed length for password" - conn = test_env.get_connection() - if self.is_on_oracle_cloud(conn): - self.skipTest("passwords on Oracle Cloud are strictly controlled") + new_password_1024 = "a" * 1024 + conn.changepassword(new_password_32, new_password_1024) + conn = test_env.get_connection(password=new_password_1024) + conn.changepassword(new_password_1024, original_password) - original_password = test_env.get_main_password() - new_password_32 = "a" * 32 - conn.changepassword(original_password, new_password_32) - conn = test_env.get_connection(password=new_password_32) + new_password_1025 = "a" * 1025 + with test_env.assert_raises_full_code("ORA-28218", "ORA-00972"): + conn.changepassword(original_password, new_password_1025) - new_password_1024 = "a" * 1024 - conn.changepassword(new_password_32, new_password_1024) - conn = test_env.get_connection(password=new_password_1024) - conn.changepassword(new_password_1024, original_password) - new_password_1025 = "a" * 1025 - with self.assertRaisesFullCode("ORA-28218", "ORA-00972"): - conn.changepassword(original_password, new_password_1025) +def test_1138(conn): + "1138 - test getting db_name" + cursor = conn.cursor() + cursor.execute("select name from V$DATABASE") + (db_name,) = cursor.fetchone() + assert conn.db_name.upper() == db_name.upper() - def test_1138(self): - "1138 - test getting db_name" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("select name from V$DATABASE") - (db_name,) = cursor.fetchone() - self.assertEqual(conn.db_name.upper(), db_name.upper()) - def test_1139(self): - "1139 - test getting max_open_cursors" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute( - "select value from V$PARAMETER where name='open_cursors'" - ) - (max_open_cursors,) = cursor.fetchone() - self.assertEqual(conn.max_open_cursors, int(max_open_cursors)) +def test_1139(conn): + "1139 - test getting max_open_cursors" + cursor = conn.cursor() + cursor.execute("select value from V$PARAMETER where name='open_cursors'") + (max_open_cursors,) = cursor.fetchone() + assert conn.max_open_cursors == int(max_open_cursors) - def test_1140(self): - "1140 - test getting service_name" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute( - "select sys_context('userenv', 'service_name') from dual" - ) - (service_name,) = cursor.fetchone() - self.assertEqual(conn.service_name.upper(), service_name.upper()) - def test_1141(self): - "1141 - test transaction_in_progress" - conn = test_env.get_connection() - 
self.assertFalse(conn.transaction_in_progress) +def test_1140(conn): + "1140 - test getting service_name" + cursor = conn.cursor() + cursor.execute("select sys_context('userenv', 'service_name') from dual") + (service_name,) = cursor.fetchone() + assert conn.service_name.upper() == service_name.upper() - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - self.assertFalse(conn.transaction_in_progress) - cursor.execute("insert into TestTempTable (IntCol) values (1)") - self.assertTrue(conn.transaction_in_progress) +def test_1141(conn): + "1141 - test transaction_in_progress" + assert not conn.transaction_in_progress - conn.commit() - self.assertFalse(conn.transaction_in_progress) + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + assert not conn.transaction_in_progress - def test_1142(self): - "1142 - test getting db_domain" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("select value from V$PARAMETER where name='db_domain'") - (db_domain,) = cursor.fetchone() - self.assertEqual(conn.db_domain, db_domain) - - def test_1143(self): - "1143 - test connecting with a proxy user" - proxy_user = test_env.get_proxy_user() - conn = test_env.get_connection(proxy_user=proxy_user) - self.assertEqual(conn.username, test_env.get_main_user()) - self.assertEqual(conn.proxy_user, proxy_user) - - @test_env.skip_unless_thin_mode() - def test_1144(self): - "1144 - test connection.sdu" - conn = test_env.get_connection() - sdu = random.randint(512, conn.sdu) - conn = test_env.get_connection(sdu=sdu) - self.assertEqual(conn.sdu, sdu) - - def test_1145(self): - "1145 - test connection with invalid conn_class" - with self.assertRaisesFullCode("DPY-2023"): - test_env.get_connection(conn_class=oracledb.ConnectionPool) - - @test_env.skip_unless_thin_mode() - def test_1146(self): - "1146 - test passing program when creating a connection" - sql = ( - "select program from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_connect_arg("program", "newprogram", sql) - - @test_env.skip_unless_thin_mode() - def test_1147(self): - "1147 - test passing machine when creating a connection" - sql = ( - "select machine from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_connect_arg("machine", "newmachine", sql) - - @test_env.skip_unless_thin_mode() - def test_1148(self): - "1148 - test passing terminal when creating a connection" - sql = ( - "select terminal from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_connect_arg("terminal", "newterminal", sql) - - @test_env.skip_unless_thin_mode() - def test_1149(self): - "1149 - test passing osuser when creating a connection" - sql = ( - "select osuser from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_connect_arg("osuser", "newosuser", sql) + cursor.execute("insert into TestTempTable (IntCol) values (1)") + assert conn.transaction_in_progress - def test_1150(self): - "1150 - test passing driver_name when creating a connection" - sql = ( - "select distinct client_driver from v$session_connect_info " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_connect_arg("driver_name", "newdriver", sql) + conn.commit() + assert not conn.transaction_in_progress - @test_env.skip_unless_thin_mode() - def test_1151(self): - "1151 - test getting session id" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("select dbms_debug_jdwp.current_session_id from dual") - 
(fetched_value,) = cursor.fetchone() - self.assertEqual(conn.session_id, fetched_value) - @test_env.skip_unless_thin_mode() - def test_1152(self): - "1152 - test getting session serial number" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute( - "select dbms_debug_jdwp.current_session_serial from dual" - ) - (fetched_value,) = cursor.fetchone() - self.assertEqual(conn.serial_num, fetched_value) - - @test_env.skip_unless_thin_mode() - def test_1153(self): - "1153 - test passed params in hook with standalone connection" - sdu = 4096 - params = test_env.get_connect_params() - protocol = "proto-test" - orig_connect_string = test_env.get_connect_string() - connect_string = f"{protocol}://{orig_connect_string}" - - def hook(passed_protocol, passed_protocol_arg, passed_params): - self.assertEqual(passed_protocol, protocol) - self.assertEqual(passed_protocol_arg, orig_connect_string) - passed_params.parse_connect_string(passed_protocol_arg) - passed_params.set(sdu=sdu) - - try: - oracledb.register_protocol(protocol, hook) - oracledb.connect(dsn=connect_string, params=params) - self.assertEqual(params.sdu, sdu) - finally: - oracledb.register_protocol(protocol, None) - - def test_1154(self): - "1154 - test altering connection edition" - conn = test_env.get_connection() - self.assertIsNone(conn.edition) - cursor = conn.cursor() - sql = "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" - default_edition = "ORA$BASE" - test_edition = test_env.get_edition_name() - for edition in [test_edition, default_edition]: - with self.subTest(edition=edition): - cursor.execute(f"alter session set edition = {edition}") - cursor.execute(sql) - self.assertEqual(conn.edition, cursor.fetchone()[0]) - self.assertEqual(conn.edition, edition.upper()) - - def test_1155(self): - "1155 - test connect() with edition" - edition = test_env.get_edition_name() - conn = test_env.get_connection(edition=edition) - cursor = conn.cursor() - cursor.execute( - "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" - ) - self.assertEqual(cursor.fetchone()[0], edition.upper()) - self.assertEqual(conn.edition, edition) +def test_1142(conn): + "1142 - test getting db_domain" + cursor = conn.cursor() + cursor.execute("select value from V$PARAMETER where name='db_domain'") + (db_domain,) = cursor.fetchone() + assert conn.db_domain == db_domain - def test_1156(self): - "1156 - test connect() with parameters hook" - conn = test_env.get_connection() - orig_stmtcachesize = conn.stmtcachesize - stmtcachesize = orig_stmtcachesize + 10 - def hook(params): - params.set(stmtcachesize=stmtcachesize) +def test_1143(test_env): + "1143 - test connecting with a proxy user" + conn = test_env.get_connection(proxy_user=test_env.proxy_user) + assert conn.username == test_env.main_user + assert conn.proxy_user == test_env.proxy_user - try: - oracledb.register_params_hook(hook) - conn = test_env.get_connection() - self.assertEqual(conn.stmtcachesize, stmtcachesize) - finally: - oracledb.unregister_params_hook(hook) - conn = test_env.get_connection() - self.assertEqual(conn.stmtcachesize, orig_stmtcachesize) - - def test_1157(self): - "1157 - test connect() with multiple parameters hooks" - - def hook1(params): - order.append("first") - - def hook2(params): - order.append("second") - - def hook3(params): - order.append("third") - - oracledb.register_params_hook(hook1) - oracledb.register_params_hook(hook2) - oracledb.register_params_hook(hook3) - try: - order = [] - test_env.get_connection() - 
self.assertEqual(order, ["first", "second", "third"]) - finally: - oracledb.unregister_params_hook(hook1) - oracledb.unregister_params_hook(hook2) - oracledb.unregister_params_hook(hook3) - - def test_1158(self): - "1158 - test error in the middle of a database response" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - data = [(i + 1, 2 if i < 1499 else 0) for i in range(1500)] - cursor.executemany( - "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", - data, +def test_1144(skip_unless_thin_mode, conn, test_env): + "1144 - test connection.sdu" + sdu = random.randint(512, conn.sdu) + conn = test_env.get_connection(sdu=sdu) + assert conn.sdu == sdu + + +def test_1145(test_env): + "1145 - test connection with invalid conn_class" + with test_env.assert_raises_full_code("DPY-2023"): + test_env.get_connection(conn_class=oracledb.ConnectionPool) + + +def test_1146(skip_unless_thin_mode, test_env): + "1146 - test passing program when creating a connection" + sql = ( + "select program from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_connect_arg(test_env, "program", "newprogram", sql) + + +def test_1147(skip_unless_thin_mode, test_env): + "1147 - test passing machine when creating a connection" + sql = ( + "select machine from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_connect_arg(test_env, "machine", "newmachine", sql) + + +def test_1148(skip_unless_thin_mode, test_env): + "1148 - test passing terminal when creating a connection" + sql = ( + "select terminal from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_connect_arg(test_env, "terminal", "newterminal", sql) + + +def test_1149(skip_unless_thin_mode, test_env): + "1149 - test passing osuser when creating a connection" + sql = ( + "select osuser from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_connect_arg(test_env, "osuser", "newosuser", sql) + + +def test_1150(test_env): + "1150 - test passing driver_name when creating a connection" + sql = ( + "select distinct client_driver from v$session_connect_info " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_connect_arg(test_env, "driver_name", "newdriver", sql) + + +def test_1151(skip_unless_thin_mode, conn): + "1151 - test getting session id" + cursor = conn.cursor() + cursor.execute("select dbms_debug_jdwp.current_session_id from dual") + (fetched_value,) = cursor.fetchone() + assert conn.session_id == fetched_value + + +def test_1152(skip_unless_thin_mode, conn): + "1152 - test getting session serial number" + cursor = conn.cursor() + cursor.execute("select dbms_debug_jdwp.current_session_serial from dual") + (fetched_value,) = cursor.fetchone() + assert conn.serial_num == fetched_value + + +def test_1153(skip_unless_thin_mode, test_env): + "1153 - test passed params in hook with standalone connection" + sdu = 4096 + params = test_env.get_connect_params() + protocol = "proto-test" + orig_connect_string = test_env.connect_string + connect_string = f"{protocol}://{orig_connect_string}" + + def hook(passed_protocol, passed_protocol_arg, passed_params): + assert passed_protocol == protocol + assert passed_protocol_arg == orig_connect_string + passed_params.parse_connect_string(passed_protocol_arg) + passed_params.set(sdu=sdu) + + try: + oracledb.register_protocol(protocol, hook) + oracledb.connect(dsn=connect_string, params=params) + assert params.sdu == sdu + finally: + oracledb.register_protocol(protocol, 
None) + + +def test_1154(conn, test_env): + "1154 - test altering connection edition" + assert conn.edition is None + cursor = conn.cursor() + sql = "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" + default_edition = "ORA$BASE" + test_edition = test_env.edition_name + for edition in [test_edition, default_edition]: + cursor.execute(f"alter session set edition = {edition}") + cursor.execute(sql) + assert conn.edition == cursor.fetchone()[0] + assert conn.edition == edition.upper() + + +def test_1155(test_env): + "1155 - test connect() with edition" + edition = test_env.edition_name + conn = test_env.get_connection(edition=edition) + cursor = conn.cursor() + cursor.execute( + "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" + ) + assert cursor.fetchone()[0] == edition.upper() + assert conn.edition == edition + + +def test_1156(conn, test_env): + "1156 - test connect() with parameters hook" + orig_stmtcachesize = conn.stmtcachesize + stmtcachesize = orig_stmtcachesize + 10 + + def hook(params): + params.set(stmtcachesize=stmtcachesize) + + try: + oracledb.register_params_hook(hook) + conn = test_env.get_connection() + assert conn.stmtcachesize == stmtcachesize + finally: + oracledb.unregister_params_hook(hook) + + conn = test_env.get_connection() + assert conn.stmtcachesize == orig_stmtcachesize + + +def test_1157(test_env, conn): + "1157 - test connect() with multiple parameters hooks" + + def hook1(params): + order.append("first") + + def hook2(params): + order.append("second") + + def hook3(params): + order.append("third") + + test_env.get_connection() + oracledb.register_params_hook(hook1) + oracledb.register_params_hook(hook2) + oracledb.register_params_hook(hook3) + try: + order = [] + test_env.get_connection() + assert order == ["first", "second", "third"] + finally: + oracledb.unregister_params_hook(hook1) + oracledb.unregister_params_hook(hook2) + oracledb.unregister_params_hook(hook3) + + +def test_1158(conn, test_env): + "1158 - test error in the middle of a database response" + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + data = [(i + 1, 2 if i < 1499 else 0) for i in range(1500)] + cursor.executemany( + "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", + data, + ) + conn.commit() + cursor.arraysize = 1500 + with test_env.assert_raises_full_code("ORA-01476"): + cursor.execute( + """ + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol < 1500 + union all + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol = 1500 + """ ) - conn.commit() - cursor.arraysize = 1500 - with self.assertRaisesFullCode("ORA-01476"): - cursor.execute( - """ - select IntCol, 1 / NumberCol - from TestTempTable - where IntCol < 1500 - union all - select IntCol, 1 / NumberCol - from TestTempTable - where IntCol = 1500 - """ - ) - cursor.fetchall() - - -if __name__ == "__main__": - test_env.run_test_cases() + cursor.fetchall() diff --git a/tests/test_1300_cursor_var.py b/tests/test_1300_cursor_var.py index 3304cc3a..f75b0465 100644 --- a/tests/test_1300_cursor_var.py +++ b/tests/test_1300_cursor_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,479 +27,478 @@ """ import oracledb -import test_env -class TestCase(test_env.BaseTestCase): - def test_1300(self): - "1300 - test binding in a cursor" - cursor = self.conn.cursor() - self.assertIsNone(cursor.description) - self.cursor.execute( - """ - begin - open :cursor for select 'X' StringValue from dual; - end; - """, - cursor=cursor, - ) - varchar_ratio, _ = test_env.get_charset_ratios() - expected_value = [ - ( - "STRINGVALUE", - oracledb.DB_TYPE_CHAR, - 1, - varchar_ratio, - None, - None, - True, - ) - ] - self.assertEqual(cursor.description, expected_value) - self.assertEqual(cursor.fetchall(), [("X",)]) - - def test_1301(self): - "1301 - test binding in a cursor from a package" - cursor = self.conn.cursor() - self.assertIsNone(cursor.description) - self.cursor.callproc("pkg_TestRefCursors.TestOutCursor", (2, cursor)) - varchar_ratio, _ = test_env.get_charset_ratios() - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "STRINGCOL", - oracledb.DB_TYPE_VARCHAR, - 20, - 20 * varchar_ratio, - None, - None, - False, - ), - ] - self.assertEqual(cursor.description, expected_value) - self.assertEqual(cursor.fetchall(), [(1, "String 1"), (2, "String 2")]) - - def test_1302(self): - "1302 - test that binding the cursor itself is not supported" - cursor = self.conn.cursor() - sql = """ - begin - open :pcursor for - select 1 from dual; - end;""" - with self.assertRaisesFullCode("DPY-3009"): - cursor.execute(sql, pcursor=cursor) - - def test_1303(self): - "1303 - test returning a ref cursor after closing it" - out_cursor = self.conn.cursor() - sql = """ - begin - open :pcursor for - select IntCol - from TestNumbers - order by IntCol; - end;""" - self.cursor.execute(sql, pcursor=out_cursor) - rows = out_cursor.fetchall() - out_cursor.close() - out_cursor = self.conn.cursor() - self.cursor.execute(sql, pcursor=out_cursor) - rows2 = out_cursor.fetchall() - self.assertEqual(rows, rows2) - - def test_1304(self): - "1304 - test fetching a cursor" - self.cursor.execute( - """ - select IntCol, cursor(select IntCol + 1 from dual) CursorValue - from TestNumbers - order by IntCol - """ - ) - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "CURSORVALUE", - oracledb.DB_TYPE_CURSOR, - None, - None, - None, - None, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - for i in range(1, 11): - number, cursor = self.cursor.fetchone() - self.assertEqual(number, i) - self.assertEqual(cursor.fetchall(), [(i + 1,)]) - - def test_1305(self): - "1305 - test that ref cursor binds cannot use optimised path" - ref_cursor = self.conn.cursor() - sql = """ - begin - open :rcursor for - select IntCol, StringCol - from TestStrings where IntCol - between :start_value and :end_value; - end;""" - expected_value = [(2, "String 2"), (3, "String 3"), (4, "String 4")] - self.cursor.execute( - sql, rcursor=ref_cursor, start_value=2, end_value=4 +def test_1300(conn, test_env): + "1300 - test binding in a cursor" + ref_cursor = conn.cursor() + assert ref_cursor.description is None + cursor = conn.cursor() + cursor.execute( + """ + begin + open :cursor for select 'X' StringValue from dual; + end; + """, + cursor=ref_cursor, + ) + varchar_ratio, _ = test_env.charset_ratios + expected_value = [ + ( + "STRINGVALUE", + oracledb.DB_TYPE_CHAR, + 1, + varchar_ratio, + None, + 
None, + True, ) - self.assertEqual(ref_cursor.fetchall(), expected_value) - ref_cursor.close() + ] + assert ref_cursor.description == expected_value + assert ref_cursor.fetchall() == [("X",)] + + +def test_1301(conn, test_env): + "1301 - test binding in a cursor from a package" + ref_cursor = conn.cursor() + assert ref_cursor.description is None + cursor = conn.cursor() + cursor.callproc("pkg_TestRefCursors.TestOutCursor", (2, ref_cursor)) + varchar_ratio, _ = test_env.charset_ratios + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "STRINGCOL", + oracledb.DB_TYPE_VARCHAR, + 20, + 20 * varchar_ratio, + None, + None, + False, + ), + ] + assert ref_cursor.description == expected_value + assert ref_cursor.fetchall() == [(1, "String 1"), (2, "String 2")] + + +def test_1302(conn, test_env): + "1302 - test that binding the cursor itself is not supported" + cursor = conn.cursor() + sql = """ + begin + open :pcursor for + select 1 from dual; + end;""" + with test_env.assert_raises_full_code("DPY-3009"): + cursor.execute(sql, pcursor=cursor) + - expected_value = [(5, "String 5"), (6, "String 6")] - ref_cursor = self.conn.cursor() - self.cursor.execute( - sql, rcursor=ref_cursor, start_value=5, end_value=6 +def test_1303(conn): + "1303 - test returning a ref cursor after closing it" + out_cursor = conn.cursor() + sql = """ + begin + open :pcursor for + select IntCol + from TestNumbers + order by IntCol; + end;""" + cursor = conn.cursor() + cursor.execute(sql, pcursor=out_cursor) + rows = out_cursor.fetchall() + out_cursor.close() + out_cursor = conn.cursor() + cursor.execute(sql, pcursor=out_cursor) + rows2 = out_cursor.fetchall() + assert rows == rows2 + + +def test_1304(conn): + "1304 - test fetching a cursor" + cursor = conn.cursor() + cursor.execute( + """ + select IntCol, cursor(select IntCol + 1 from dual) CursorValue + from TestNumbers + order by IntCol + """ + ) + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "CURSORVALUE", + oracledb.DB_TYPE_CURSOR, + None, + None, + None, + None, + True, + ), + ] + assert cursor.description == expected_value + for i in range(1, 11): + number, child_cursor = cursor.fetchone() + assert number == i + assert child_cursor.fetchall() == [(i + 1,)] + + +def test_1305(conn): + "1305 - test that ref cursor binds cannot use optimised path" + ref_cursor = conn.cursor() + sql = """ + begin + open :rcursor for + select IntCol, StringCol + from TestStrings where IntCol + between :start_value and :end_value; + end;""" + expected_value = [(2, "String 2"), (3, "String 3"), (4, "String 4")] + cursor = conn.cursor() + cursor.execute(sql, rcursor=ref_cursor, start_value=2, end_value=4) + assert ref_cursor.fetchall() == expected_value + ref_cursor.close() + + expected_value = [(5, "String 5"), (6, "String 6")] + ref_cursor = conn.cursor() + cursor.execute(sql, rcursor=ref_cursor, start_value=5, end_value=6) + assert ref_cursor.fetchall() == expected_value + + +def test_1306(conn, round_trip_checker): + "1306 - test round trips using a REF cursor" + + # simple DDL only requires a single round trip + with conn.cursor() as cursor: + cursor.execute("truncate table TestTempTable") + assert round_trip_checker.get_value() == 1 + + # array execution only requires a single round trip + num_rows = 590 + with conn.cursor() as cursor: + sql = "insert into TestTempTable (IntCol) values (:1)" + data = [(n + 1,) for n in range(num_rows)] + cursor.executemany(sql, data) + assert round_trip_checker.get_value() == 1 
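+
+    # note: round_trip_checker.get_value() reports the round trips made
+    # since the previous call, so each of the checks below only counts the
+    # work performed by its own block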
+ + # create REF cursor and execute stored procedure + # (array size set before procedure is called) + with conn.cursor() as cursor: + refcursor = conn.cursor() + refcursor.arraysize = 150 + cursor.callproc("myrefcursorproc", [refcursor]) + refcursor.fetchall() + assert round_trip_checker.get_value() == 5 + + # create REF cursor and execute stored procedure + # (array size set after procedure is called) + with conn.cursor() as cursor: + refcursor = conn.cursor() + cursor.callproc("myrefcursorproc", [refcursor]) + refcursor.arraysize = 145 + refcursor.fetchall() + assert round_trip_checker.get_value() == 6 + + +def test_1307(conn): + "1307 - test executing different SQL after getting a REF cursor" + with conn.cursor() as cursor: + refcursor = conn.cursor() + cursor.callproc("myrefcursorproc", [refcursor]) + var = cursor.var(int) + refcursor.execute("begin :1 := 15; end;", [var]) + assert var.getvalue() == 15 + + +def test_1308(conn): + "1308 - test calling a function that returns a REF cursor" + with conn.cursor() as cursor: + ref_cursor = cursor.callfunc( + "pkg_TestRefCursors.TestReturnCursor", + oracledb.DB_TYPE_CURSOR, + [2], ) - self.assertEqual(ref_cursor.fetchall(), expected_value) - - def test_1306(self): - "1306 - test round trips using a REF cursor" - self.setup_round_trip_checker() - - # simple DDL only requires a single round trip - with self.conn.cursor() as cursor: - cursor.execute("truncate table TestTempTable") - self.assertRoundTrips(1) - - # array execution only requires a single round trip - num_rows = 590 - with self.conn.cursor() as cursor: - sql = "insert into TestTempTable (IntCol) values (:1)" - data = [(n + 1,) for n in range(num_rows)] - cursor.executemany(sql, data) - self.assertRoundTrips(1) - - # create REF cursor and execute stored procedure - # (array size set before procedure is called) - with self.conn.cursor() as cursor: - refcursor = self.conn.cursor() - refcursor.arraysize = 150 - cursor.callproc("myrefcursorproc", [refcursor]) - refcursor.fetchall() - self.assertRoundTrips(5) - - # create REF cursor and execute stored procedure - # (array size set after procedure is called) - with self.conn.cursor() as cursor: - refcursor = self.conn.cursor() - cursor.callproc("myrefcursorproc", [refcursor]) - refcursor.arraysize = 145 - refcursor.fetchall() - self.assertRoundTrips(6) - - def test_1307(self): - "1307 - test executing different SQL after getting a REF cursor" - with self.conn.cursor() as cursor: - refcursor = self.conn.cursor() - cursor.callproc("myrefcursorproc", [refcursor]) - var = cursor.var(int) - refcursor.execute("begin :1 := 15; end;", [var]) - self.assertEqual(var.getvalue(), 15) - - def test_1308(self): - "1308 - test calling a function that returns a REF cursor" - with self.conn.cursor() as cursor: - ref_cursor = cursor.callfunc( - "pkg_TestRefCursors.TestReturnCursor", - oracledb.DB_TYPE_CURSOR, - [2], - ) - self.assertEqual( - ref_cursor.fetchall(), [(1, "String 1"), (2, "String 2")] - ) + assert ref_cursor.fetchall() == [(1, "String 1"), (2, "String 2")] - def test_1309(self): - "1309 - test using an output type handler with a REF cursor" - def type_handler(cursor, metadata): - return cursor.var(str, arraysize=cursor.arraysize) +def test_1309(conn): + "1309 - test using an output type handler with a REF cursor" - self.conn.outputtypehandler = type_handler - var = self.cursor.var(oracledb.DB_TYPE_CURSOR) - string_val = "Test String - 1309" - with self.conn.cursor() as cursor: - cursor.callproc( - "pkg_TestRefCursors.TestLobCursor", 
[string_val, var] - ) - ref_cursor = var.getvalue() - self.assertEqual(ref_cursor.fetchall(), [(string_val,)]) - self.assertIs(var.getvalue(), ref_cursor) - - def test_1310(self): - "1310 - bind a REF cursor but never open it" - ref_cursor_var = self.cursor.var(oracledb.DB_TYPE_CURSOR) - self.cursor.execute( - """ - begin - if false then - open :cursor for - select user - from dual; - end if; - end; - """, - cursor=ref_cursor_var, - ) - ref_cursor = ref_cursor_var.getvalue() - if ref_cursor is not None: - with self.assertRaisesFullCode("DPY-4025"): - ref_cursor.fetchall() - - def test_1311(self): - "1311 - test fetching a cursor with a custom class" - - class Counter: - num_cursors_created = 0 - - @classmethod - def cursor_created(cls): - cls.num_cursors_created += 1 - - class MyConnection(oracledb.Connection): - def cursor(self): - Counter.cursor_created() - return super().cursor() - - conn = test_env.get_connection(conn_class=MyConnection) - cursor = conn.cursor() - cursor.execute( - """ - select - cursor(select 1 from dual), - cursor(select 2 from dual) - from dual - """ - ) - cursor.fetchall() - self.assertEqual(Counter.num_cursors_created, 3) + def type_handler(cursor, metadata): + return cursor.var(str, arraysize=cursor.arraysize) - def test_1312(self): - "1312 - test that nested cursors are fetched correctly" - sql = """ - select - 'Level 1 String', - cursor( - select - 'Level 2 String', - cursor( - select - 'Level 3 String', - cursor( - select 1, 'Level 4 String A' from dual - union all - select 2, 'Level 4 String B' from dual - union all - select 3, 'Level 4 String C' from dual - ) as nc3 - from dual - ) as nc2 - from dual - ) as nc1 - from dual""" - self.cursor.execute(sql) + conn.outputtypehandler = type_handler + var = conn.cursor().var(oracledb.DB_TYPE_CURSOR) + string_val = "Test String - 1309" + with conn.cursor() as cursor: + cursor.callproc("pkg_TestRefCursors.TestLobCursor", [string_val, var]) + ref_cursor = var.getvalue() + assert ref_cursor.fetchall() == [(string_val,)] + assert var.getvalue() is ref_cursor - def transform_row(r): - return tuple(transform_fn(v) for v in r) - def transform_fn(v): - if isinstance(v, oracledb.Cursor): - return [transform_row(r) for r in v] - return v +def test_1310(conn, test_env): + "1310 - bind a REF cursor but never open it" + cursor = conn.cursor() + ref_cursor_var = cursor.var(oracledb.DB_TYPE_CURSOR) + cursor.execute( + """ + begin + if false then + open :cursor for + select user + from dual; + end if; + end; + """, + cursor=ref_cursor_var, + ) + ref_cursor = ref_cursor_var.getvalue() + if ref_cursor is not None: + with test_env.assert_raises_full_code("DPY-4025"): + ref_cursor.fetchall() - rows = [transform_row(r) for r in self.cursor] - expected_value = [ - ( - "Level 1 String", - [ - ( - "Level 2 String", - [ - ( - "Level 3 String", - [ - (1, "Level 4 String A"), - (2, "Level 4 String B"), - (3, "Level 4 String C"), - ], - ), - ], - ), - ], - ) - ] - self.assertEqual(rows, expected_value) - def test_1313(self): - "1313 - test fetching nested cursors with more columns than parent" - sql = """ - select - 'Top Level String', - cursor( - select - 'Nested String 1', - 'Nested String 2', - 'Nested String 3' - from dual - ) - from dual""" - self.cursor.execute(sql) - - def transform_row(r): - return tuple(transform_fn(v) for v in r) - - def transform_fn(v): - if isinstance(v, oracledb.Cursor): - return [transform_row(r) for r in v] - return v - - rows = [transform_row(r) for r in self.cursor] - expected_value = [ - ( - "Top Level 
String", - [("Nested String 1", "Nested String 2", "Nested String 3")], - ) - ] - self.assertEqual(rows, expected_value) - - def test_1314(self): - "1314 - test reusing a closed ref cursor for executing different sql" - sql = "select 13141, 'String 13141' from dual" - ref_cursor = self.conn.cursor() - ref_cursor.prefetchrows = 0 - ref_cursor.execute(sql) - plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" - self.cursor.execute(plsql, rcursor=ref_cursor) - sql = "select 13142, 'String 13142' from dual" - ref_cursor.execute(sql) - self.assertEqual( - ref_cursor.fetchall(), - [ - (13142, "String 13142"), - ], - ) +def test_1311(test_env): + "1311 - test fetching a cursor with a custom class" - def test_1315(self): - "1315 - test reusing a closed ref cursor for executing same sql" - sql = "select 1315, 'String 1315' from dual" - ref_cursor = self.conn.cursor() - ref_cursor.prefetchrows = 0 - ref_cursor.execute(sql) - plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" - self.cursor.execute(plsql, rcursor=ref_cursor) - ref_cursor.execute(sql) - self.assertEqual( - ref_cursor.fetchall(), - [ - (1315, "String 1315"), - ], - ) + class Counter: + num_cursors_created = 0 - def test_1316(self): - "1316 - test using a closed ref cursor for OUT bind" - value = "test 1316a" - sql = """ - declare - t_Cursor sys_refcursor; - begin - open t_Cursor for - select :value - from dual; - :cursor := t_Cursor; - end; - """ - var = self.cursor.var(oracledb.DB_TYPE_CURSOR) - self.cursor.execute(sql, [value, var]) - ref_cursor = var.getvalue() - self.assertEqual(ref_cursor.fetchall(), [(value,)]) - ref_cursor.close() - with self.assertRaisesFullCode("DPY-1006"): - self.cursor.execute(sql, [value, var]) - - def test_1317(self): - "1317 - test binding a closed cursor" - ref_cursor = self.conn.cursor() - ref_cursor.close() - with self.assertRaisesFullCode("DPY-1006"): - self.cursor.callfunc( - "pkg_testRefCursors.TestInCursor", str, [ref_cursor] - ) + @classmethod + def cursor_created(cls): + cls.num_cursors_created += 1 - def test_1318(self): - "1318 - test ref cursor doesn't work after connection is closed" - conn = test_env.get_connection() - cursor = conn.cursor() - var = cursor.var(oracledb.DB_TYPE_CURSOR) - cursor.callproc("myrefcursorproc", [var]) - conn.close() - with self.assertRaisesFullCode("DPY-1001"): - ref_cursor = var.getvalue() - ref_cursor.fetchall() + class MyConnection(oracledb.Connection): + def cursor(self): + Counter.cursor_created() + return super().cursor() - def test_1319(self): - "1319 - test binding cursor that is not from the same connection" - sql = """ - declare - t_Cursor sys_refcursor; - begin - open t_Cursor for - select 1319 - from dual; - :cursor := t_Cursor; - end; + conn = test_env.get_connection(conn_class=MyConnection) + cursor = conn.cursor() + cursor.execute( + """ + select + cursor(select 1 from dual), + cursor(select 2 from dual) + from dual """ - conn = test_env.get_connection() - ref_cursor = conn.cursor() - with self.assertRaisesFullCode("DPY-3027"): - self.cursor.execute(sql, [ref_cursor]) - - def test_1320(self): - "1320 - test fetching nested cursors repeatedly" - sql = """ + ) + cursor.fetchall() + assert Counter.num_cursors_created == 3 + + +def test_1312(conn): + "1312 - test that nested cursors are fetched correctly" + sql = """ + select + 'Level 1 String', + cursor( select - s.Description, - cursor(select 'Nested String for ' || s.Description from dual) - from - ( - select 'Top Level String 1' as Description - from dual - union all - 
select 'Top Level String 2' - from dual - union all - select 'Top Level String 3' - from dual + 'Level 2 String', + cursor( + select + 'Level 3 String', + cursor( + select 1, 'Level 4 String A' from dual union all - select 'Top Level String 4' - from dual + select 2, 'Level 4 String B' from dual union all - select 'Top Level String 5' - from dual - ) s""" - - for i in range(3): - with self.conn.cursor() as cursor: - cursor.arraysize = 10 - cursor.execute(sql) - desc, nested1 = cursor.fetchone() - self.assertEqual(desc, "Top Level String 1") - nested_rows = nested1.fetchall() - self.assertEqual( - nested_rows, [("Nested String for Top Level String 1",)] - ) - desc, nested2 = cursor.fetchone() - self.assertEqual(desc, "Top Level String 2") - nested_rows = nested2.fetchall() - self.assertEqual( - nested_rows, [("Nested String for Top Level String 2",)] - ) - - -if __name__ == "__main__": - test_env.run_test_cases() + select 3, 'Level 4 String C' from dual + ) as nc3 + from dual + ) as nc2 + from dual + ) as nc1 + from dual""" + cursor = conn.cursor() + cursor.execute(sql) + + def transform_row(r): + return tuple(transform_fn(v) for v in r) + + def transform_fn(v): + if isinstance(v, oracledb.Cursor): + return [transform_row(r) for r in v] + return v + + rows = [transform_row(r) for r in cursor] + expected_value = [ + ( + "Level 1 String", + [ + ( + "Level 2 String", + [ + ( + "Level 3 String", + [ + (1, "Level 4 String A"), + (2, "Level 4 String B"), + (3, "Level 4 String C"), + ], + ), + ], + ), + ], + ) + ] + assert rows == expected_value + + +def test_1313(conn): + "1313 - test fetching nested cursors with more columns than parent" + sql = """ + select + 'Top Level String', + cursor( + select + 'Nested String 1', + 'Nested String 2', + 'Nested String 3' + from dual + ) + from dual""" + cursor = conn.cursor() + cursor.execute(sql) + + def transform_row(r): + return tuple(transform_fn(v) for v in r) + + def transform_fn(v): + if isinstance(v, oracledb.Cursor): + return [transform_row(r) for r in v] + return v + + rows = [transform_row(r) for r in cursor] + expected_value = [ + ( + "Top Level String", + [("Nested String 1", "Nested String 2", "Nested String 3")], + ) + ] + assert rows == expected_value + + +def test_1314(conn): + "1314 - test reusing a closed ref cursor for executing different sql" + sql = "select 13141, 'String 13141' from dual" + ref_cursor = conn.cursor() + ref_cursor.prefetchrows = 0 + ref_cursor.execute(sql) + plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" + cursor = conn.cursor() + cursor.execute(plsql, rcursor=ref_cursor) + sql = "select 13142, 'String 13142' from dual" + ref_cursor.execute(sql) + assert ref_cursor.fetchall() == [(13142, "String 13142")] + + +def test_1315(conn): + "1315 - test reusing a closed ref cursor for executing same sql" + sql = "select 1315, 'String 1315' from dual" + ref_cursor = conn.cursor() + ref_cursor.prefetchrows = 0 + ref_cursor.execute(sql) + plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" + cursor = conn.cursor() + cursor.execute(plsql, rcursor=ref_cursor) + ref_cursor.execute(sql) + assert ref_cursor.fetchall() == [(1315, "String 1315")] + + +def test_1316(conn, test_env): + "1316 - test using a closed ref cursor for OUT bind" + value = "test 1316a" + sql = """ + declare + t_Cursor sys_refcursor; + begin + open t_Cursor for + select :value + from dual; + :cursor := t_Cursor; + end; + """ + cursor = conn.cursor() + var = cursor.var(oracledb.DB_TYPE_CURSOR) + cursor.execute(sql, [value, var]) + 
ref_cursor = var.getvalue() + assert ref_cursor.fetchall() == [(value,)] + ref_cursor.close() + with test_env.assert_raises_full_code("DPY-1006"): + cursor.execute(sql, [value, var]) + + +def test_1317(conn, cursor, test_env): + "1317 - test binding a closed cursor" + ref_cursor = conn.cursor() + ref_cursor.close() + with test_env.assert_raises_full_code("DPY-1006"): + cursor.callfunc("pkg_testRefCursors.TestInCursor", str, [ref_cursor]) + + +def test_1318(test_env): + "1318 - test ref cursor doesn't work after connection is closed" + conn = test_env.get_connection() + cursor = conn.cursor() + var = cursor.var(oracledb.DB_TYPE_CURSOR) + cursor.callproc("myrefcursorproc", [var]) + conn.close() + with test_env.assert_raises_full_code("DPY-1001"): + ref_cursor = var.getvalue() + ref_cursor.fetchall() + + +def test_1319(cursor, test_env): + "1319 - test binding cursor that is not from the same connection" + sql = """ + declare + t_Cursor sys_refcursor; + begin + open t_Cursor for + select 1319 + from dual; + :cursor := t_Cursor; + end; + """ + ref_cursor = test_env.get_connection().cursor() + with test_env.assert_raises_full_code("DPY-3027"): + cursor.execute(sql, [ref_cursor]) + + +def test_1320(conn): + "1320 - test fetching nested cursors repeatedly" + sql = """ + select + s.Description, + cursor(select 'Nested String for ' || s.Description from dual) + from + ( + select 'Top Level String 1' as Description + from dual + union all + select 'Top Level String 2' + from dual + union all + select 'Top Level String 3' + from dual + union all + select 'Top Level String 4' + from dual + union all + select 'Top Level String 5' + from dual + ) s""" + + for i in range(3): + with conn.cursor() as cursor: + cursor.arraysize = 10 + cursor.execute(sql) + desc, nested1 = cursor.fetchone() + assert desc == "Top Level String 1" + nested_rows = nested1.fetchall() + assert nested_rows == [("Nested String for Top Level String 1",)] + desc, nested2 = cursor.fetchone() + assert desc == "Top Level String 2" + nested_rows = nested2.fetchall() + assert nested_rows == [("Nested String for Top Level String 2",)] diff --git a/tests/test_1400_datetime_var.py b/tests/test_1400_datetime_var.py index 205a8d79..3f9edb03 100644 --- a/tests/test_1400_datetime_var.py +++ b/tests/test_1400_datetime_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,285 +26,296 @@ 1400 - Module for testing date/time variables """ +import pytest import datetime import oracledb -import test_env -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - for i in range(1, 11): - base_date = datetime.datetime(2002, 12, 9) +@pytest.fixture(scope="module") +def module_data(): + data = [] + for i in range(1, 11): + base_date = datetime.datetime(2002, 12, 9) + date_interval = datetime.timedelta(days=i, hours=i * 2, minutes=i * 24) + date_col = base_date + date_interval + if i % 2: date_interval = datetime.timedelta( - days=i, hours=i * 2, minutes=i * 24 + days=i * 2, hours=i * 3, minutes=i * 36 ) - date_col = base_date + date_interval - if i % 2: - date_interval = datetime.timedelta( - days=i * 2, hours=i * 3, minutes=i * 36 - ) - nullable_col = base_date + date_interval - else: - nullable_col = None - data_tuple = (i, date_col, nullable_col) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def test_1400(self): - "1400 - test binding in a date" - self.cursor.execute( - "select * from TestDates where DateCol = :value", - value=datetime.datetime(2002, 12, 13, 9, 36, 0), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[4]]) + nullable_col = base_date + date_interval + else: + nullable_col = None + data_tuple = (i, date_col, nullable_col) + data.append(data_tuple) + return data - def test_1401(self): - "1401 - test binding in a datetime.datetime value" - self.cursor.execute( - "select * from TestDates where DateCol = :value", - value=datetime.datetime(2002, 12, 13, 9, 36, 0), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[4]]) - - def test_1402(self): - "1402 - test binding date in a datetime variable" - var = self.cursor.var(oracledb.DATETIME) - date_val = datetime.date.today() - var.setvalue(0, date_val) - self.cursor.execute("select :1 from dual", [var]) - (result,) = self.cursor.fetchone() - self.assertEqual(result.date(), date_val) - - def test_1403(self): - "1403 - test binding in a date after setting input sizes to a string" - self.cursor.setinputsizes(value=15) - self.cursor.execute( - "select * from TestDates where DateCol = :value", - value=datetime.datetime(2002, 12, 14, 12, 0, 0), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_1404(self): - "1404 - test binding in a null" - self.cursor.setinputsizes(value=oracledb.DATETIME) - self.cursor.execute( - "select * from TestDates where DateCol = :value", - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_1405(self): - "1405 - test binding in a date array" - array = [r[1] for r in self.raw_data] - return_value = self.cursor.callfunc( - "pkg_TestDateArrays.TestInArrays", - oracledb.DB_TYPE_NUMBER, - [5, datetime.date(2002, 12, 12), array], - ) - self.assertEqual(return_value, 35.5) - array += array[:5] - return_value = self.cursor.callfunc( - "pkg_TestDateArrays.TestInArrays", - oracledb.DB_TYPE_NUMBER, - [7, datetime.date(2002, 12, 13), array], - ) - self.assertEqual(return_value, 24.0) - - def test_1406(self): - "1406 - test binding in a date array (with setinputsizes)" - return_value = self.cursor.var(oracledb.NUMBER) - self.cursor.setinputsizes(array=[oracledb.DATETIME, 10]) - array = [r[1] for r in self.raw_data] - self.cursor.execute( - """ - begin - 
:return_value := pkg_TestDateArrays.TestInArrays( - :start_value, :base_date, :array); - end; - """, - return_value=return_value, - start_value=6, - base_date=oracledb.Date(2002, 12, 13), - array=array, - ) - self.assertEqual(return_value.getvalue(), 26.5) - - def test_1407(self): - "1407 - test binding in a date array (with arrayvar)" - return_value = self.cursor.var(oracledb.NUMBER) - array = self.cursor.arrayvar(oracledb.DATETIME, 10, 20) - array.setvalue(0, [r[1] for r in self.raw_data]) - self.cursor.execute( - """ - begin - :return_value := pkg_TestDateArrays.TestInArrays( - :start_value, :base_date, :array); - end; - """, - return_value=return_value, - start_value=7, - base_date=oracledb.Date(2002, 12, 14), - array=array, - ) - self.assertEqual(return_value.getvalue(), 17.5) - - def test_1408(self): - "1408 - test binding in/out a date array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.DATETIME, 10, 100) - original_data = [r[1] for r in self.raw_data] - array.setvalue(0, original_data) - self.cursor.execute( - """ - begin - pkg_TestDateArrays.TestInOutArrays(:num_elems, :array); - end; - """, - num_elems=5, - array=array, - ) - expected_value = [ - datetime.datetime(2002, 12, 17, 2, 24, 0), - datetime.datetime(2002, 12, 18, 4, 48, 0), - datetime.datetime(2002, 12, 19, 7, 12, 0), - datetime.datetime(2002, 12, 20, 9, 36, 0), - datetime.datetime(2002, 12, 21, 12, 0, 0), - ] + original_data[5:] - self.assertEqual(array.getvalue(), expected_value) - - def test_1409(self): - "1409 - test binding out a date array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.DATETIME, 6, 100) - self.cursor.execute( - """ - begin - pkg_TestDateArrays.TestOutArrays(:num_elems, :array); - end; - """, - num_elems=6, - array=array, - ) - expected_value = [ - datetime.datetime(2002, 12, 13, 4, 48, 0), - datetime.datetime(2002, 12, 14, 9, 36, 0), - datetime.datetime(2002, 12, 15, 14, 24, 0), - datetime.datetime(2002, 12, 16, 19, 12, 0), - datetime.datetime(2002, 12, 18, 0, 0, 0), - datetime.datetime(2002, 12, 19, 4, 48, 0), - ] - self.assertEqual(array.getvalue(), expected_value) - - def test_1410(self): - "1410 - test binding out with set input sizes defined" - bind_vars = self.cursor.setinputsizes(value=oracledb.DATETIME) - self.cursor.execute( - """ - begin - :value := to_date(20021209, 'YYYYMMDD'); - end; - """ - ) - self.assertEqual( - bind_vars["value"].getvalue(), datetime.datetime(2002, 12, 9) - ) - def test_1411(self): - "1411 - test binding in/out with set input sizes defined" - bind_vars = self.cursor.setinputsizes(value=oracledb.DATETIME) - self.cursor.execute( - """ - begin - :value := :value + 5.25; - end; - """, - value=datetime.datetime(2002, 12, 12, 10, 0, 0), - ) - self.assertEqual( - bind_vars["value"].getvalue(), - datetime.datetime(2002, 12, 17, 16, 0, 0), - ) +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key - def test_1412(self): - "1412 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.DATETIME) - self.cursor.execute( - """ - begin - :value := to_date('20021231 12:31:00', 'YYYYMMDD HH24:MI:SS'); - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2002, 12, 31, 12, 31, 0) - ) - def test_1413(self): - "1413 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.DATETIME) - var.setvalue(0, datetime.datetime(2002, 12, 9, 6, 0, 0)) - self.cursor.execute( - """ - begin - 
:value := :value + 5.25; - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2002, 12, 14, 12, 0, 0) - ) +def test_1400(cursor, module_data_by_key): + "1400 - test binding in a date" + cursor.execute( + "select * from TestDates where DateCol = :value", + value=datetime.datetime(2002, 12, 13, 9, 36, 0), + ) + assert cursor.fetchall() == [module_data_by_key[4]] - def test_1414(self): - "1414 - test cursor description is accurate" - self.cursor.execute("select * from TestDates") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ("DATECOL", oracledb.DB_TYPE_DATE, 23, None, None, None, False), - ("NULLABLECOL", oracledb.DB_TYPE_DATE, 23, None, None, None, True), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_1415(self): - "1415 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestDates order by IntCol") - self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_1416(self): - "1416 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestDates order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_1417(self): - "1417 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestDates - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertIsNone(self.cursor.fetchone()) - - def test_1418(self): - "1418 - test fetching a date with year < 0" - with self.assertRaises(ValueError): - self.cursor.execute( - "select to_date('-4712-01-01', 'SYYYY-MM-DD') from dual" - ) - self.cursor.fetchone() + +def test_1401(cursor, module_data_by_key): + "1401 - test binding in a datetime.datetime value" + cursor.execute( + "select * from TestDates where DateCol = :value", + value=datetime.datetime(2002, 12, 13, 9, 36, 0), + ) + assert cursor.fetchall() == [module_data_by_key[4]] + + +def test_1402(cursor): + "1402 - test binding date in a datetime variable" + var = cursor.var(oracledb.DATETIME) + date_val = datetime.date.today() + var.setvalue(0, date_val) + cursor.execute("select :1 from dual", [var]) + (result,) = cursor.fetchone() + assert result.date() == date_val + + +def test_1403(cursor, module_data_by_key): + "1403 - test binding in a date after setting input sizes to a string" + cursor.setinputsizes(value=15) + cursor.execute( + "select * from TestDates where DateCol = :value", + value=datetime.datetime(2002, 12, 14, 12, 0, 0), + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_1404(cursor): + "1404 - test binding in a null" + cursor.setinputsizes(value=oracledb.DATETIME) + cursor.execute( + "select * from TestDates where DateCol = :value", + value=None, + ) + assert cursor.fetchall() == [] -if __name__ == "__main__": - test_env.run_test_cases() +def test_1405(cursor, module_data): + "1405 - test binding in a date array" + array = [r[1] for r in module_data] + return_value = cursor.callfunc( + "pkg_TestDateArrays.TestInArrays", + oracledb.DB_TYPE_NUMBER, + [5, 
datetime.date(2002, 12, 12), array], + ) + assert return_value == 35.5 + array += array[:5] + return_value = cursor.callfunc( + "pkg_TestDateArrays.TestInArrays", + oracledb.DB_TYPE_NUMBER, + [7, datetime.date(2002, 12, 13), array], + ) + assert return_value == 24.0 + + +def test_1406(cursor, module_data): + "1406 - test binding in a date array (with setinputsizes)" + return_value = cursor.var(oracledb.NUMBER) + cursor.setinputsizes(array=[oracledb.DATETIME, 10]) + array = [r[1] for r in module_data] + cursor.execute( + """ + begin + :return_value := pkg_TestDateArrays.TestInArrays( + :start_value, :base_date, :array); + end; + """, + return_value=return_value, + start_value=6, + base_date=oracledb.Date(2002, 12, 13), + array=array, + ) + assert return_value.getvalue() == 26.5 + + +def test_1407(cursor, module_data): + "1407 - test binding in a date array (with arrayvar)" + return_value = cursor.var(oracledb.NUMBER) + array = cursor.arrayvar(oracledb.DATETIME, 10, 20) + array.setvalue(0, [r[1] for r in module_data]) + cursor.execute( + """ + begin + :return_value := pkg_TestDateArrays.TestInArrays( + :start_value, :base_date, :array); + end; + """, + return_value=return_value, + start_value=7, + base_date=oracledb.Date(2002, 12, 14), + array=array, + ) + assert return_value.getvalue() == 17.5 + + +def test_1408(cursor, module_data): + "1408 - test binding in/out a date array (with arrayvar)" + array = cursor.arrayvar(oracledb.DATETIME, 10, 100) + original_data = [r[1] for r in module_data] + array.setvalue(0, original_data) + cursor.execute( + """ + begin + pkg_TestDateArrays.TestInOutArrays(:num_elems, :array); + end; + """, + num_elems=5, + array=array, + ) + expected_value = [ + datetime.datetime(2002, 12, 17, 2, 24, 0), + datetime.datetime(2002, 12, 18, 4, 48, 0), + datetime.datetime(2002, 12, 19, 7, 12, 0), + datetime.datetime(2002, 12, 20, 9, 36, 0), + datetime.datetime(2002, 12, 21, 12, 0, 0), + ] + original_data[5:] + assert array.getvalue() == expected_value + + +def test_1409(cursor): + "1409 - test binding out a date array (with arrayvar)" + array = cursor.arrayvar(oracledb.DATETIME, 6, 100) + cursor.execute( + """ + begin + pkg_TestDateArrays.TestOutArrays(:num_elems, :array); + end; + """, + num_elems=6, + array=array, + ) + expected_value = [ + datetime.datetime(2002, 12, 13, 4, 48, 0), + datetime.datetime(2002, 12, 14, 9, 36, 0), + datetime.datetime(2002, 12, 15, 14, 24, 0), + datetime.datetime(2002, 12, 16, 19, 12, 0), + datetime.datetime(2002, 12, 18, 0, 0, 0), + datetime.datetime(2002, 12, 19, 4, 48, 0), + ] + assert array.getvalue() == expected_value + + +def test_1410(cursor): + "1410 - test binding out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DATETIME) + cursor.execute( + """ + begin + :value := to_date(20021209, 'YYYYMMDD'); + end; + """ + ) + assert bind_vars["value"].getvalue() == datetime.datetime(2002, 12, 9) + + +def test_1411(cursor): + "1411 - test binding in/out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DATETIME) + cursor.execute( + """ + begin + :value := :value + 5.25; + end; + """, + value=datetime.datetime(2002, 12, 12, 10, 0, 0), + ) + fetched_value = bind_vars["value"].getvalue() + assert fetched_value == datetime.datetime(2002, 12, 17, 16, 0, 0) + + +def test_1412(cursor): + "1412 - test binding out with cursor.var() method" + var = cursor.var(oracledb.DATETIME) + cursor.execute( + """ + begin + :value := to_date('20021231 12:31:00', 'YYYYMMDD HH24:MI:SS'); + end; + """, + 
value=var, + ) + assert var.getvalue() == datetime.datetime(2002, 12, 31, 12, 31, 0) + + +def test_1413(cursor): + "1413 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.DATETIME) + var.setvalue(0, datetime.datetime(2002, 12, 9, 6, 0, 0)) + cursor.execute( + """ + begin + :value := :value + 5.25; + end; + """, + value=var, + ) + assert var.getvalue() == datetime.datetime(2002, 12, 14, 12, 0, 0) + + +def test_1414(cursor): + "1414 - test cursor description is accurate" + cursor.execute("select * from TestDates") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ("DATECOL", oracledb.DB_TYPE_DATE, 23, None, None, None, False), + ("NULLABLECOL", oracledb.DB_TYPE_DATE, 23, None, None, None, True), + ] + assert cursor.description == expected_value + + +def test_1415(cursor, module_data): + "1415 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestDates order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_1416(cursor, module_data): + "1416 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestDates order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + + +def test_1417(cursor, module_data_by_key): + "1417 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestDates + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None + + +def test_1418(cursor): + "1418 - test fetching a date with year < 0" + with pytest.raises(ValueError): + cursor.execute( + "select to_date('-4712-01-01', 'SYYYY-MM-DD') from dual" + ) + cursor.fetchone() diff --git a/tests/test_1500_types.py b/tests/test_1500_types.py index 5c5a5b8e..82362a0c 100644 --- a/tests/test_1500_types.py +++ b/tests/test_1500_types.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -31,183 +31,208 @@ import pickle import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - requires_connection = False - - def __test_compare(self, db_type, api_type): - self.assertEqual(db_type, db_type) - self.assertEqual(db_type, api_type) - self.assertEqual(api_type, db_type) - self.assertNotEqual(db_type, 5) - self.assertNotEqual(db_type, oracledb.DB_TYPE_OBJECT) - - def __test_pickle(self, typ): - self.assertIs(typ, pickle.loads(pickle.dumps(typ))) - - def test_1500(self): - "1500 - test oracledb.DB_TYPE_BFILE comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_BFILE, oracledb.BFILE) - self.__test_pickle(oracledb.DB_TYPE_BFILE) - - def test_1501(self): - "1501 - test oracledb.DB_TYPE_BINARY_DOUBLE comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_BINARY_DOUBLE, oracledb.NUMBER) - self.assertEqual(oracledb.DB_TYPE_BINARY_DOUBLE, oracledb.NATIVE_FLOAT) - self.__test_pickle(oracledb.DB_TYPE_BINARY_DOUBLE) - - def test_1502(self): - "1502 - test oracledb.DB_TYPE_BINARY_FLOAT comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_BINARY_FLOAT, oracledb.NUMBER) - self.__test_pickle(oracledb.DB_TYPE_BINARY_FLOAT) - - def test_1503(self): - "1503 - test oracledb.DB_TYPE_BINARY_INTEGER comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_BINARY_INTEGER, oracledb.NUMBER) - self.assertEqual(oracledb.DB_TYPE_BINARY_INTEGER, oracledb.NATIVE_INT) - self.__test_pickle(oracledb.DB_TYPE_BINARY_INTEGER) - - def test_1504(self): - "1504 - test oracledb.DB_TYPE_BLOB comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_BLOB, oracledb.BLOB) - self.__test_pickle(oracledb.DB_TYPE_BLOB) - - def test_1505(self): - "1505 - test oracledb.DB_TYPE_BOOLEAN comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_BOOLEAN, oracledb.BOOLEAN) - self.__test_pickle(oracledb.DB_TYPE_BOOLEAN) - - def test_1506(self): - "1506 - test oracledb.DB_TYPE_CHAR comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_CHAR, oracledb.STRING) - self.assertEqual(oracledb.DB_TYPE_CHAR, oracledb.FIXED_CHAR) - self.__test_pickle(oracledb.DB_TYPE_CHAR) - - def test_1507(self): - "1507 - test oracledb.DB_TYPE_CLOB comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_CLOB, oracledb.CLOB) - self.__test_pickle(oracledb.DB_TYPE_CLOB) - - def test_1508(self): - "1508 - test oracledb.DB_TYPE_CURSOR comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_CURSOR, oracledb.CURSOR) - self.__test_pickle(oracledb.DB_TYPE_CURSOR) - - def test_1509(self): - "1509 - test oracledb.DB_TYPE_DATE comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_DATE, oracledb.DATETIME) - self.__test_pickle(oracledb.DB_TYPE_DATE) - - def test_1510(self): - "1510 - test oracledb.DB_TYPE_INTERVAL_DS comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_INTERVAL_DS, oracledb.INTERVAL) - self.__test_pickle(oracledb.DB_TYPE_INTERVAL_DS) - - def test_1511(self): - "1511 - test oracledb.DB_TYPE_LONG comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_LONG, oracledb.STRING) - self.assertEqual(oracledb.DB_TYPE_LONG, oracledb.LONG_STRING) - self.__test_pickle(oracledb.DB_TYPE_LONG) - - def test_1512(self): - "1512 - test oracledb.DB_TYPE_LONG_RAW comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_LONG_RAW, oracledb.BINARY) - self.assertEqual(oracledb.DB_TYPE_LONG_RAW, 
oracledb.LONG_BINARY) - self.__test_pickle(oracledb.DB_TYPE_LONG_RAW) - - def test_1513(self): - "1513 - test oracledb.DB_TYPE_NCHAR comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_NCHAR, oracledb.STRING) - self.assertEqual(oracledb.DB_TYPE_NCHAR, oracledb.FIXED_NCHAR) - self.__test_pickle(oracledb.DB_TYPE_NCHAR) - - def test_1514(self): - "1514 - test oracledb.DB_TYPE_NCLOB comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_NCLOB, oracledb.NCLOB) - self.__test_pickle(oracledb.DB_TYPE_NCLOB) - - def test_1515(self): - "1515 - test oracledb.DB_TYPE_NUMBER comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_NUMBER, oracledb.NUMBER) - self.__test_pickle(oracledb.DB_TYPE_NUMBER) - - def test_1516(self): - "1516 - test oracledb.DB_TYPE_NVARCHAR comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_NVARCHAR, oracledb.STRING) - self.assertEqual(oracledb.DB_TYPE_NVARCHAR, oracledb.NCHAR) - self.__test_pickle(oracledb.DB_TYPE_NVARCHAR) - - def test_1517(self): - "1517 - test oracledb.DB_TYPE_OBJECT comparisons and pickling" - self.assertEqual(oracledb.DB_TYPE_OBJECT, oracledb.OBJECT) - self.__test_pickle(oracledb.DB_TYPE_OBJECT) - - def test_1518(self): - "1518 - test oracledb.DB_TYPE_RAW comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_RAW, oracledb.BINARY) - self.__test_pickle(oracledb.DB_TYPE_RAW) - - def test_1519(self): - "1519 - test oracledb.DB_TYPE_ROWID comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_ROWID, oracledb.ROWID) - self.__test_pickle(oracledb.DB_TYPE_ROWID) - - def test_1520(self): - "1520 - test oracledb.DB_TYPE_TIMESTAMP comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_TIMESTAMP, oracledb.DATETIME) - self.assertEqual(oracledb.DB_TYPE_TIMESTAMP, oracledb.TIMESTAMP) - self.__test_pickle(oracledb.DB_TYPE_TIMESTAMP) - - def test_1521(self): - "1521 - test oracledb.DB_TYPE_TIMESTAMP_LTZ comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_TIMESTAMP_LTZ, oracledb.DATETIME) - self.__test_pickle(oracledb.DB_TYPE_TIMESTAMP_LTZ) - - def test_1522(self): - "1522 - test oracledb.DB_TYPE_TIMESTAMP_TZ comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_TIMESTAMP_TZ, oracledb.DATETIME) - self.__test_pickle(oracledb.DB_TYPE_TIMESTAMP_TZ) - - def test_1523(self): - "1523 - test oracledb.DB_TYPE_VARCHAR comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_VARCHAR, oracledb.STRING) - self.__test_pickle(oracledb.DB_TYPE_VARCHAR) - - def test_1524(self): - "1524 - test oracledb.NUMBER pickling" - self.__test_pickle(oracledb.NUMBER) - - def test_1525(self): - "1525 - test oracledb.STRING pickling" - self.__test_pickle(oracledb.STRING) - - def test_1526(self): - "1526 - test oracledb.DATETIME pickling" - self.__test_pickle(oracledb.DATETIME) - - def test_1527(self): - "1527 - test oracledb.BINARY pickling" - self.__test_pickle(oracledb.BINARY) - - def test_1528(self): - "1528 - test oracledb.ROWID pickling" - self.__test_pickle(oracledb.ROWID) - - def test_1529(self): - "1529 - test oracledb.DB_TYPE_UROWID comparisons and pickling" - self.__test_compare(oracledb.DB_TYPE_UROWID, oracledb.ROWID) - self.__test_pickle(oracledb.DB_TYPE_UROWID) - - def test_1530(self): - "1530 - test oracledb.DB_TYPE_JSON pickling" - self.__test_pickle(oracledb.DB_TYPE_JSON) - - def test_1531(self): - "1531 - test oracledb.DB_TYPE_INTERVAL_YM pickling" - self.__test_pickle(oracledb.DB_TYPE_INTERVAL_YM) - - -if __name__ == "__main__": - test_env.run_test_cases() + + +def 
_test_compare(db_type, api_type): + assert db_type == db_type + assert db_type == api_type + assert api_type == db_type + assert db_type != 5 + assert db_type != oracledb.DB_TYPE_OBJECT + + +def _test_pickle(typ): + assert typ is pickle.loads(pickle.dumps(typ)) + + +def test_1500(): + "1500 - test oracledb.DB_TYPE_BFILE comparisons and pickling" + assert oracledb.DB_TYPE_BFILE == oracledb.BFILE + _test_pickle(oracledb.DB_TYPE_BFILE) + + +def test_1501(): + "1501 - test oracledb.DB_TYPE_BINARY_DOUBLE comparisons and pickling" + _test_compare(oracledb.DB_TYPE_BINARY_DOUBLE, oracledb.NUMBER) + assert oracledb.DB_TYPE_BINARY_DOUBLE == oracledb.NATIVE_FLOAT + _test_pickle(oracledb.DB_TYPE_BINARY_DOUBLE) + + +def test_1502(): + "1502 - test oracledb.DB_TYPE_BINARY_FLOAT comparisons and pickling" + _test_compare(oracledb.DB_TYPE_BINARY_FLOAT, oracledb.NUMBER) + _test_pickle(oracledb.DB_TYPE_BINARY_FLOAT) + + +def test_1503(): + "1503 - test oracledb.DB_TYPE_BINARY_INTEGER comparisons and pickling" + _test_compare(oracledb.DB_TYPE_BINARY_INTEGER, oracledb.NUMBER) + assert oracledb.DB_TYPE_BINARY_INTEGER == oracledb.NATIVE_INT + _test_pickle(oracledb.DB_TYPE_BINARY_INTEGER) + + +def test_1504(): + "1504 - test oracledb.DB_TYPE_BLOB comparisons and pickling" + assert oracledb.DB_TYPE_BLOB == oracledb.BLOB + _test_pickle(oracledb.DB_TYPE_BLOB) + + +def test_1505(): + "1505 - test oracledb.DB_TYPE_BOOLEAN comparisons and pickling" + assert oracledb.DB_TYPE_BOOLEAN == oracledb.BOOLEAN + _test_pickle(oracledb.DB_TYPE_BOOLEAN) + + +def test_1506(): + "1506 - test oracledb.DB_TYPE_CHAR comparisons and pickling" + _test_compare(oracledb.DB_TYPE_CHAR, oracledb.STRING) + assert oracledb.DB_TYPE_CHAR == oracledb.FIXED_CHAR + _test_pickle(oracledb.DB_TYPE_CHAR) + + +def test_1507(): + "1507 - test oracledb.DB_TYPE_CLOB comparisons and pickling" + assert oracledb.DB_TYPE_CLOB == oracledb.CLOB + _test_pickle(oracledb.DB_TYPE_CLOB) + + +def test_1508(): + "1508 - test oracledb.DB_TYPE_CURSOR comparisons and pickling" + assert oracledb.DB_TYPE_CURSOR == oracledb.CURSOR + _test_pickle(oracledb.DB_TYPE_CURSOR) + + +def test_1509(): + "1509 - test oracledb.DB_TYPE_DATE comparisons and pickling" + _test_compare(oracledb.DB_TYPE_DATE, oracledb.DATETIME) + _test_pickle(oracledb.DB_TYPE_DATE) + + +def test_1510(): + "1510 - test oracledb.DB_TYPE_INTERVAL_DS comparisons and pickling" + assert oracledb.DB_TYPE_INTERVAL_DS == oracledb.INTERVAL + _test_pickle(oracledb.DB_TYPE_INTERVAL_DS) + + +def test_1511(): + "1511 - test oracledb.DB_TYPE_LONG comparisons and pickling" + _test_compare(oracledb.DB_TYPE_LONG, oracledb.STRING) + assert oracledb.DB_TYPE_LONG == oracledb.LONG_STRING + _test_pickle(oracledb.DB_TYPE_LONG) + + +def test_1512(): + "1512 - test oracledb.DB_TYPE_LONG_RAW comparisons and pickling" + _test_compare(oracledb.DB_TYPE_LONG_RAW, oracledb.BINARY) + assert oracledb.DB_TYPE_LONG_RAW == oracledb.LONG_BINARY + _test_pickle(oracledb.DB_TYPE_LONG_RAW) + + +def test_1513(): + "1513 - test oracledb.DB_TYPE_NCHAR comparisons and pickling" + _test_compare(oracledb.DB_TYPE_NCHAR, oracledb.STRING) + assert oracledb.DB_TYPE_NCHAR == oracledb.FIXED_NCHAR + _test_pickle(oracledb.DB_TYPE_NCHAR) + + +def test_1514(): + "1514 - test oracledb.DB_TYPE_NCLOB comparisons and pickling" + assert oracledb.DB_TYPE_NCLOB == oracledb.NCLOB + _test_pickle(oracledb.DB_TYPE_NCLOB) + + +def test_1515(): + "1515 - test oracledb.DB_TYPE_NUMBER comparisons and pickling" + _test_compare(oracledb.DB_TYPE_NUMBER, oracledb.NUMBER) + 
_test_pickle(oracledb.DB_TYPE_NUMBER) + + +def test_1516(): + "1516 - test oracledb.DB_TYPE_NVARCHAR comparisons and pickling" + _test_compare(oracledb.DB_TYPE_NVARCHAR, oracledb.STRING) + assert oracledb.DB_TYPE_NVARCHAR == oracledb.NCHAR + _test_pickle(oracledb.DB_TYPE_NVARCHAR) + + +def test_1517(): + "1517 - test oracledb.DB_TYPE_OBJECT comparisons and pickling" + assert oracledb.DB_TYPE_OBJECT == oracledb.OBJECT + _test_pickle(oracledb.DB_TYPE_OBJECT) + + +def test_1518(): + "1518 - test oracledb.DB_TYPE_RAW comparisons and pickling" + _test_compare(oracledb.DB_TYPE_RAW, oracledb.BINARY) + _test_pickle(oracledb.DB_TYPE_RAW) + + +def test_1519(): + "1519 - test oracledb.DB_TYPE_ROWID comparisons and pickling" + _test_compare(oracledb.DB_TYPE_ROWID, oracledb.ROWID) + _test_pickle(oracledb.DB_TYPE_ROWID) + + +def test_1520(): + "1520 - test oracledb.DB_TYPE_TIMESTAMP comparisons and pickling" + _test_compare(oracledb.DB_TYPE_TIMESTAMP, oracledb.DATETIME) + assert oracledb.DB_TYPE_TIMESTAMP == oracledb.TIMESTAMP + _test_pickle(oracledb.DB_TYPE_TIMESTAMP) + + +def test_1521(): + "1521 - test oracledb.DB_TYPE_TIMESTAMP_LTZ comparisons and pickling" + _test_compare(oracledb.DB_TYPE_TIMESTAMP_LTZ, oracledb.DATETIME) + _test_pickle(oracledb.DB_TYPE_TIMESTAMP_LTZ) + + +def test_1522(): + "1522 - test oracledb.DB_TYPE_TIMESTAMP_TZ comparisons and pickling" + _test_compare(oracledb.DB_TYPE_TIMESTAMP_TZ, oracledb.DATETIME) + _test_pickle(oracledb.DB_TYPE_TIMESTAMP_TZ) + + +def test_1523(): + "1523 - test oracledb.DB_TYPE_VARCHAR comparisons and pickling" + _test_compare(oracledb.DB_TYPE_VARCHAR, oracledb.STRING) + _test_pickle(oracledb.DB_TYPE_VARCHAR) + + +def test_1524(): + "1524 - test oracledb.NUMBER pickling" + _test_pickle(oracledb.NUMBER) + + +def test_1525(): + "1525 - test oracledb.STRING pickling" + _test_pickle(oracledb.STRING) + + +def test_1526(): + "1526 - test oracledb.DATETIME pickling" + _test_pickle(oracledb.DATETIME) + + +def test_1527(): + "1527 - test oracledb.BINARY pickling" + _test_pickle(oracledb.BINARY) + + +def test_1528(): + "1528 - test oracledb.ROWID pickling" + _test_pickle(oracledb.ROWID) + + +def test_1529(): + "1529 - test oracledb.DB_TYPE_UROWID comparisons and pickling" + _test_compare(oracledb.DB_TYPE_UROWID, oracledb.ROWID) + _test_pickle(oracledb.DB_TYPE_UROWID) + + +def test_1530(): + "1530 - test oracledb.DB_TYPE_JSON pickling" + _test_pickle(oracledb.DB_TYPE_JSON) + + +def test_1531(): + "1531 - test oracledb.DB_TYPE_INTERVAL_YM pickling" + _test_pickle(oracledb.DB_TYPE_INTERVAL_YM) diff --git a/tests/test_1600_dml_returning.py b/tests/test_1600_dml_returning.py index 0a1e2c6d..47e2b720 100644 --- a/tests/test_1600_dml_returning.py +++ b/tests/test_1600_dml_returning.py @@ -27,595 +27,609 @@ """ import datetime -import unittest import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_1600(self): - "1600 - test insert (single row) with DML returning" - self.cursor.execute("truncate table TestTempTable") - int_val = 5 - str_val = "A test string" - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_val=int_val, - str_val=str_val, - int_var=int_var, - str_var=str_var, - ) - self.assertEqual(int_var.values, [[int_val]]) - self.assertEqual(str_var.values, [[str_val]]) - - def test_1601(self): - "1601 - test insert (multiple rows) with 
DML returning" - self.cursor.execute("truncate table TestTempTable") - int_values = [5, 8, 17, 24, 6] - str_values = ["Test 5", "Test 8", "Test 17", "Test 24", "Test 6"] - int_var = self.cursor.var(oracledb.NUMBER, arraysize=len(int_values)) - str_var = self.cursor.var(str, arraysize=len(int_values)) - self.cursor.setinputsizes(None, None, int_var, str_var) - data = list(zip(int_values, str_values)) - self.cursor.executemany( + + +def test_1600(cursor): + "1600 - test insert (single row) with DML returning" + cursor.execute("truncate table TestTempTable") + int_val = 5 + str_val = "A test string" + int_var = cursor.var(oracledb.NUMBER) + str_var = cursor.var(str) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_val=int_val, + str_val=str_val, + int_var=int_var, + str_var=str_var, + ) + assert int_var.values == [[int_val]] + assert str_var.values == [[str_val]] + + +def test_1601(cursor): + "1601 - test insert (multiple rows) with DML returning" + cursor.execute("truncate table TestTempTable") + int_values = [5, 8, 17, 24, 6] + str_values = ["Test 5", "Test 8", "Test 17", "Test 24", "Test 6"] + int_var = cursor.var(oracledb.NUMBER, arraysize=len(int_values)) + str_var = cursor.var(str, arraysize=len(int_values)) + cursor.setinputsizes(None, None, int_var, str_var) + data = list(zip(int_values, str_values)) + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol, StringCol1 into :int_var, :str_var + """, + data, + ) + assert int_var.values == [[v] for v in int_values] + assert str_var.values == [[v] for v in str_values] + + +def test_1602(cursor, test_env): + "1602 - test insert with DML returning into too small a variable" + cursor.execute("truncate table TestTempTable") + int_val = 6 + str_val = "A different test string" + int_var = cursor.var(oracledb.NUMBER) + str_var = cursor.var(str, 2) + parameters = dict( + int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var + ) + with test_env.assert_raises_full_code("DPY-4002", "DPI-1037"): + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:int_val, :str_val) returning IntCol, StringCol1 into :int_var, :str_var """, - data, - ) - self.assertEqual(int_var.values, [[v] for v in int_values]) - self.assertEqual(str_var.values, [[v] for v in str_values]) - - def test_1602(self): - "1602 - test insert with DML returning into too small a variable" - self.cursor.execute("truncate table TestTempTable") - int_val = 6 - str_val = "A different test string" - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str, 2) - parameters = dict( - int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var - ) - with self.assertRaisesFullCode("DPY-4002", "DPI-1037"): - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :int_var, :str_var - """, - parameters, - ) - - def test_1603(self): - "1603 - test update single row with DML returning" - int_val = 7 - str_val = "The updated value of the string" - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (int_val, "The initial value of the string"), - ) - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - self.cursor.execute( - """ - update 
TestTempTable set - StringCol1 = :str_val - where IntCol = :int_val - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_val=int_val, - str_val=str_val, - int_var=int_var, - str_var=str_var, - ) - self.assertEqual(int_var.values, [[int_val]]) - self.assertEqual(str_var.values, [[str_val]]) - - def test_1604(self): - "1604 - test update no rows with DML returning" - int_val = 8 - str_val = "The updated value of the string" - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (int_val, "The initial value of the string"), - ) - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - self.cursor.execute( - """ - update TestTempTable set - StringCol1 = :str_val - where IntCol = :int_val - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_val=int_val + 1, - str_val=str_val, - int_var=int_var, - str_var=str_var, - ) - self.assertEqual(int_var.values, [[]]) - self.assertEqual(str_var.values, [[]]) - self.assertEqual(int_var.getvalue(), []) - self.assertEqual(str_var.getvalue(), []) - - def test_1605(self): - "1605 - test update multiple rows with DML returning" - self.cursor.execute("truncate table TestTempTable") - for i in (8, 9, 10): - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) values (:1, :2) - """, - (i, f"The initial value of string {i}"), - ) - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - self.cursor.execute( - """ - update TestTempTable set - IntCol = IntCol + 15, - StringCol1 = 'The final value of string ' || to_char(IntCol) - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_var=int_var, - str_var=str_var, - ) - self.assertEqual(self.cursor.rowcount, 3) - self.assertEqual(int_var.values, [[23, 24, 25]]) - expected_values = [ - [ - "The final value of string 8", - "The final value of string 9", - "The final value of string 10", - ] - ] - self.assertEqual(str_var.values, expected_values) - - def test_1606(self): - "1606 - test update multiple rows with DML returning (executemany)" - data = [(i, f"The initial value of string {i}") for i in range(1, 11)] - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - data, - ) - int_var = self.cursor.var(oracledb.NUMBER, arraysize=3) - str_var = self.cursor.var(str, arraysize=3) - self.cursor.setinputsizes(None, int_var, str_var) - self.cursor.executemany( - """ - update TestTempTable set - IntCol = IntCol + 25, - StringCol1 = 'Updated value of string ' || to_char(IntCol) - where IntCol < :inVal - returning IntCol, StringCol1 into :int_var, :str_var - """, - [[3], [8], [11]], - ) - expected_values = [[26, 27], [28, 29, 30, 31, 32], [33, 34, 35]] - self.assertEqual(int_var.values, expected_values) - expected_values = [ - ["Updated value of string 1", "Updated value of string 2"], - [ - "Updated value of string 3", - "Updated value of string 4", - "Updated value of string 5", - "Updated value of string 6", - "Updated value of string 7", - ], - [ - "Updated value of string 8", - "Updated value of string 9", - "Updated value of string 10", - ], - ] - self.assertEqual(str_var.values, expected_values) - - def test_1607(self): - "1607 - test inserting an object with DML returning" - type_obj = self.conn.gettype("UDT_OBJECT") - string_value = "The string that will be verified" - obj = type_obj.newobject() - obj.STRINGVALUE = 
string_value - out_var = self.cursor.var( - oracledb.DB_TYPE_OBJECT, typename="UDT_OBJECT" - ) - self.cursor.execute( - """ - insert into TestObjects (IntCol, ObjectCol) - values (4, :obj)returning ObjectCol into :outObj - """, - obj=obj, - outObj=out_var, - ) - (result,) = out_var.getvalue() - self.assertEqual(result.STRINGVALUE, string_value) - self.conn.rollback() - - def test_1608(self): - "1608 - test inserting a row and returning a rowid" - self.cursor.execute("truncate table TestTempTable") - var = self.cursor.var(oracledb.ROWID) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (278, 'String 278') - returning rowid into :1 - """, - [var], - ) - (rowid,) = var.getvalue() - self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - where rowid = :1 - """, - [rowid], - ) - self.assertEqual(self.cursor.fetchall(), [(278, "String 278")]) - - def test_1609(self): - "1609 - test inserting with a REF cursor and returning a rowid" - self.cursor.execute("truncate table TestTempTable") - var = self.cursor.var(oracledb.ROWID) - in_cursor = self.conn.cursor() - in_cursor.execute( - """ - select StringCol - from TestStrings - where IntCol >= 5 - order by IntCol - """ - ) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (187, pkg_TestRefCursors.TestInCursor(:1)) - returning rowid into :2 - """, - (in_cursor, var), - ) - (rowid,) = var.getvalue() - self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - where rowid = :1 - """, - [rowid], - ) - self.assertEqual( - self.cursor.fetchall(), [(187, "String 7 (Modified)")] + parameters, ) - def test_1610(self): - "1610 - test delete returning decreasing number of rows" - data = [(i, f"Test String {i}") for i in range(1, 11)] - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data, - ) - results = [] - int_var = self.cursor.var(int) - self.cursor.setinputsizes(None, int_var) - for int_val in (5, 8, 10): - self.cursor.execute( - """ - delete from TestTempTable - where IntCol < :1 - returning IntCol into :2 - """, - [int_val], - ) - results.append(int_var.getvalue()) - self.assertEqual(results, [[1, 2, 3, 4], [5, 6, 7], [8, 9]]) - - def test_1611(self): - "1611 - test delete returning no rows after returning many rows" - data = [(i, f"Test String {i}") for i in range(1, 11)] - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( + +def test_1603(cursor): + "1603 - test update single row with DML returning" + int_val = 7 + str_val = "The updated value of the string" + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (int_val, "The initial value of the string"), + ) + int_var = cursor.var(oracledb.NUMBER) + str_var = cursor.var(str) + cursor.execute( + """ + update TestTempTable set + StringCol1 = :str_val + where IntCol = :int_val + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_val=int_val, + str_val=str_val, + int_var=int_var, + str_var=str_var, + ) + assert int_var.values == [[int_val]] + assert str_var.values == [[str_val]] + + +def test_1604(cursor): + "1604 - test update no rows with DML returning" + int_val = 8 + str_val = "The updated value of the string" + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values 
(:1, :2) + """, + (int_val, "The initial value of the string"), + ) + int_var = cursor.var(oracledb.NUMBER) + str_var = cursor.var(str) + cursor.execute( + """ + update TestTempTable set + StringCol1 = :str_val + where IntCol = :int_val + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_val=int_val + 1, + str_val=str_val, + int_var=int_var, + str_var=str_var, + ) + assert int_var.values == [[]] + assert str_var.values == [[]] + assert int_var.getvalue() == [] + assert str_var.getvalue() == [] + + +def test_1605(cursor): + "1605 - test update multiple rows with DML returning" + cursor.execute("truncate table TestTempTable") + for i in (8, 9, 10): + cursor.execute( """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) + insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, - data, + (i, f"The initial value of string {i}"), ) - int_var = self.cursor.var(int) - self.cursor.execute( + int_var = cursor.var(oracledb.NUMBER) + str_var = cursor.var(str) + cursor.execute( + """ + update TestTempTable set + IntCol = IntCol + 15, + StringCol1 = 'The final value of string ' || to_char(IntCol) + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_var=int_var, + str_var=str_var, + ) + assert cursor.rowcount == 3 + assert int_var.values == [[23, 24, 25]] + expected_values = [ + [ + "The final value of string 8", + "The final value of string 9", + "The final value of string 10", + ] + ] + assert str_var.values == expected_values + + +def test_1606(cursor): + "1606 - test update multiple rows with DML returning (executemany)" + data = [(i, f"The initial value of string {i}") for i in range(1, 11)] + cursor.execute("truncate table TestTempTable") + cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + data, + ) + int_var = cursor.var(oracledb.NUMBER, arraysize=3) + str_var = cursor.var(str, arraysize=3) + cursor.setinputsizes(None, int_var, str_var) + cursor.executemany( + """ + update TestTempTable set + IntCol = IntCol + 25, + StringCol1 = 'Updated value of string ' || to_char(IntCol) + where IntCol < :inVal + returning IntCol, StringCol1 into :int_var, :str_var + """, + [[3], [8], [11]], + ) + expected_values = [[26, 27], [28, 29, 30, 31, 32], [33, 34, 35]] + assert int_var.values == expected_values + expected_values = [ + ["Updated value of string 1", "Updated value of string 2"], + [ + "Updated value of string 3", + "Updated value of string 4", + "Updated value of string 5", + "Updated value of string 6", + "Updated value of string 7", + ], + [ + "Updated value of string 8", + "Updated value of string 9", + "Updated value of string 10", + ], + ] + assert str_var.values == expected_values + + +def test_1607(conn, cursor): + "1607 - test inserting an object with DML returning" + type_obj = conn.gettype("UDT_OBJECT") + string_value = "The string that will be verified" + obj = type_obj.newobject() + obj.STRINGVALUE = string_value + out_var = cursor.var(oracledb.DB_TYPE_OBJECT, typename="UDT_OBJECT") + cursor.execute( + """ + insert into TestObjects (IntCol, ObjectCol) + values (4, :obj)returning ObjectCol into :outObj + """, + obj=obj, + outObj=out_var, + ) + (result,) = out_var.getvalue() + assert result.STRINGVALUE == string_value + conn.rollback() + + +def test_1608(cursor): + "1608 - test inserting a row and returning a rowid" + cursor.execute("truncate table TestTempTable") + var = cursor.var(oracledb.ROWID) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (278, 'String 
278') + returning rowid into :1 + """, + [var], + ) + (rowid,) = var.getvalue() + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + where rowid = :1 + """, + [rowid], + ) + assert cursor.fetchall() == [(278, "String 278")] + + +def test_1609(conn, cursor): + "1609 - test inserting with a REF cursor and returning a rowid" + cursor.execute("truncate table TestTempTable") + var = cursor.var(oracledb.ROWID) + in_cursor = conn.cursor() + in_cursor.execute( + """ + select StringCol + from TestStrings + where IntCol >= 5 + order by IntCol + """ + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (187, pkg_TestRefCursors.TestInCursor(:1)) + returning rowid into :2 + """, + (in_cursor, var), + ) + (rowid,) = var.getvalue() + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + where rowid = :1 + """, + [rowid], + ) + assert cursor.fetchall() == [(187, "String 7 (Modified)")] + + +def test_1610(cursor): + "1610 - test delete returning decreasing number of rows" + data = [(i, f"Test String {i}") for i in range(1, 11)] + cursor.execute("truncate table TestTempTable") + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + results = [] + int_var = cursor.var(int) + cursor.setinputsizes(None, int_var) + for int_val in (5, 8, 10): + cursor.execute( """ delete from TestTempTable where IntCol < :1 returning IntCol into :2 """, - [5, int_var], - ) - self.assertEqual(int_var.getvalue(), [1, 2, 3, 4]) - self.cursor.execute(None, [4, int_var]) - self.assertEqual(int_var.getvalue(), []) - - def test_1612(self): - "1612 - test DML returning when an error occurs" - self.cursor.execute("truncate table TestTempTable") - int_val = 7 - str_val = "A" * 401 - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - sql = """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :int_var, :str_var""" - parameters = dict( - int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var + [int_val], ) - with self.assertRaisesFullCode("ORA-12899"): - self.cursor.execute(sql, parameters) - - def test_1613(self): - "1613 - test DML returning with no input variables, multiple iters" - self.cursor.execute("truncate table TestTempTable") - sql = """ - insert into TestTempTable (IntCol) - values ((select count(*) + 1 from TestTempTable)) - returning IntCol into :1""" - var = self.cursor.var(int) - self.cursor.execute(sql, [var]) - self.assertEqual(var.getvalue(), [1]) - self.cursor.execute(sql, [var]) - self.assertEqual(var.getvalue(), [2]) - - def test_1614(self): - "1614 - test DML returning with a quoted bind name" - sql = """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :"_val1" , :"VaL_2" """ - self.cursor.parse(sql) - expected_bind_names = ["INT_VAL", "STR_VAL", "_val1", "VaL_2"] - self.assertEqual(self.cursor.bindnames(), expected_bind_names) - - def test_1615(self): - "1615 - test DML returning with an invalid bind name" - sql = """ - insert into TestTempTable (IntCol) - values (:int_val) - returning IntCol, StringCol1 into :ROWID""" - with self.assertRaisesFullCode("ORA-01745"): - self.cursor.parse(sql) - - def test_1616(self): - "1616 - test DML returning with a non-ascii bind name" - sql = """ - insert into TestTempTable (IntCol) - values (:int_val) - returning IntCol, StringCol1 into :méil""" - self.cursor.prepare(sql) - 
self.assertEqual(self.cursor.bindnames(), ["INT_VAL", "MÉIL"]) - - def test_1617(self): - "1617 - test DML returning with input bind variable data" - self.cursor.execute("truncate table TestTempTable") - out_var = self.cursor.var(int) - self.cursor.execute( - """ + results.append(int_var.getvalue()) + assert results == [[1, 2, 3, 4], [5, 6, 7], [8, 9]] + + +def test_1611(cursor): + "1611 - test delete returning no rows after returning many rows" + data = [(i, f"Test String {i}") for i in range(1, 11)] + cursor.execute("truncate table TestTempTable") + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + int_var = cursor.var(int) + cursor.execute( + """ + delete from TestTempTable + where IntCol < :1 + returning IntCol into :2 + """, + [5, int_var], + ) + assert int_var.getvalue() == [1, 2, 3, 4] + cursor.execute(None, [4, int_var]) + assert int_var.getvalue() == [] + + +def test_1612(cursor, test_env): + "1612 - test DML returning when an error occurs" + cursor.execute("truncate table TestTempTable") + int_val = 7 + str_val = "A" * 401 + int_var = cursor.var(oracledb.NUMBER) + str_var = cursor.var(str) + sql = """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol, StringCol1 into :int_var, :str_var""" + parameters = dict( + int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var + ) + with test_env.assert_raises_full_code("ORA-12899"): + cursor.execute(sql, parameters) + + +def test_1613(cursor): + "1613 - test DML returning with no input variables, multiple iters" + cursor.execute("truncate table TestTempTable") + sql = """ insert into TestTempTable (IntCol) - values (:int_val) - returning IntCol + :add_val into :out_val - """, - int_val=5, - add_val=18, - out_val=out_var, - ) - self.conn.commit() - self.assertEqual(out_var.getvalue(), [23]) - - def test_1618(self): - "1618 - test DML returning with LOBs and an output converter" - self.cursor.execute("delete from TestCLOBs") - out_var = self.cursor.var( - oracledb.DB_TYPE_CLOB, outconverter=lambda value: value.read() - ) - lob_value = "A short CLOB - 1618" - self.cursor.execute( - """ - insert into TestCLOBs (IntCol, ClobCol) - values (1, :in_val) - returning CLOBCol into :out_val - """, - in_val=lob_value, - out_val=out_var, - ) - self.conn.commit() - self.assertEqual(out_var.getvalue(), [lob_value]) - - def test_1619(self): - "1619 - test DML returning with CLOB converted to LONG" - self.cursor.execute("delete from TestCLOBs") - out_var = self.cursor.var(oracledb.DB_TYPE_LONG) - lob_value = "A short CLOB - 1619" - self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (1, :in_val) - returning CLOBCol into :out_val - """, - in_val=lob_value, - out_val=out_var, - ) - self.conn.commit() - self.assertEqual(out_var.getvalue(), [lob_value]) - - def test_1620(self): - "1620 - test dml returning with an index organized table" - self.cursor.execute("truncate table TestUniversalRowids") - rowid_var = self.cursor.var(oracledb.ROWID) - data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) - sql = """ - insert into TestUniversalRowids values (:1, :2, :3) - returning rowid into :4""" - self.cursor.execute(sql, data) - (rowid_value,) = rowid_var.getvalue() - self.cursor.execute( - """ - select * - from TestUniversalRowids - where rowid = :1 - """, - [rowid_value], - ) - (row,) = self.cursor.fetchall() - self.assertEqual(row, data[:3]) - - def test_1621(self): - "1621 - test plsql returning rowids with 
index organized table" - self.cursor.execute("truncate table TestUniversalRowids") - rowid_var = self.cursor.var(oracledb.ROWID) - data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) - self.cursor.execute( - """ - begin - insert into TestUniversalRowids values (:1, :2, :3) - returning rowid into :4; - end; - """, - data, - ) - rowid_value = rowid_var.values[0] - self.cursor.execute( - """ - select * - from TestUniversalRowids - where rowid = :1 - """, - [rowid_value], - ) - (row,) = self.cursor.fetchall() - self.assertEqual(row, data[:3]) - - def test_1622(self): - "1622 - parse DML returning with no spaces" - self.cursor.execute("truncate table TestTempTable") - sql = ( - "insert into TestTempTable (IntCol) values (:in_val)" - "returning(IntCol)into :out_val" - ) - out_val = self.cursor.var(int, arraysize=5) - self.cursor.execute(sql, in_val=25, out_val=out_val) - self.assertEqual(out_val.getvalue(), [25]) - - @test_env.skip_unless_thin_mode() - def test_1623(self): - "1623 - execute DML returning with duplicated binds" - self.cursor.execute("truncate table TestTempTable") - str_val = self.cursor.var(str) - str_val.setvalue(0, "Test Data") - sql = """ + values ((select count(*) + 1 from TestTempTable)) + returning IntCol into :1""" + var = cursor.var(int) + cursor.execute(sql, [var]) + assert var.getvalue() == [1] + cursor.execute(sql, [var]) + assert var.getvalue() == [2] + + +def test_1614(cursor): + "1614 - test DML returning with a quoted bind name" + sql = """ insert into TestTempTable (IntCol, StringCol1) - values (:id_val, :str_val || ' (Additional String)') - returning StringCol1 into :str_val - """ - with self.assertRaisesFullCode("DPY-2048"): - self.cursor.execute(sql, id_val=1, str_val=str_val) - - def test_1624(self): - "1624 - use bind variable in new statement after RETURNING statement" - self.cursor.execute("truncate table TestTempTable") - sql = ( - "insert into TestTempTable (IntCol) values (:in_val)" - "returning IntCol + 15 into :out_val" - ) - out_val = self.cursor.var(int, arraysize=5) - self.cursor.execute(sql, in_val=25, out_val=out_val) - self.assertEqual(out_val.getvalue(), [40]) - sql = "begin :out_val := :in_val + 35; end;" - self.cursor.execute(sql, in_val=35, out_val=out_val) - self.assertEqual(out_val.getvalue(), 70) - - def test_1625(self): - "1625 - test DML returning with multiple LOBs returned" - lob_data = [ - "Short CLOB - 1625a", - "Short CLOB - 1625b", - "Short CLOB - 1625c", - "Short CLOB - 1625d", - ] - all_data = [(i + 1, d) for i, d in enumerate(lob_data)] - self.cursor.execute("delete from TestCLOBs") - self.cursor.executemany( - "insert into TestCLOBs (IntCol, ClobCol) values (:1, :2)", all_data - ) - ret_val = self.cursor.var(oracledb.DB_TYPE_CLOB) - self.cursor.execute( - """ - update TestCLOBs set - ExtraNumCol1 = 1 - where ExtraNumCol1 is null - returning ClobCol into :ret_val - """, - [ret_val], - ) - self.conn.commit() - ret_lob_data = [v.read() for v in ret_val.getvalue()] - ret_lob_data.sort() - self.assertEqual(ret_lob_data, lob_data) - - @unittest.skipIf(test_env.run_in_thick_mode(), "blocked by bug 37741324") - def test_1626(self): - "1626 - test DML returning with multiple DbObjects returned" - arrays = [ - (1626, 1627, 1628), - (1629, 1630, 1631), - (1632, 1633, 1634), - (1635, 1636, 1637), - ] - all_data = [(i + 4, v[0], v[1], v[2]) for i, v in enumerate(arrays)] - self.cursor.execute("delete from TestObjects where IntCol > 3") - self.cursor.executemany( - """ - insert into TestObjects (IntCol, ArrayCol) - values (:1, 
udt_Array(:1, :2, :3)) - """, - all_data, - ) - typ = self.conn.gettype("UDT_ARRAY") - ret_val = self.cursor.var(typ) - self.cursor.execute( - """ - update TestObjects set - ObjectCol = null - where IntCol > 3 - returning ArrayCol into :ret_val - """, - [ret_val], - ) - self.conn.commit() - ret_obj_data = [tuple(v) for v in ret_val.getvalue()] - ret_obj_data.sort() - self.assertEqual(ret_obj_data, arrays) + values (:int_val, :str_val) + returning IntCol, StringCol1 into :"_val1" , :"VaL_2" """ + cursor.parse(sql) + expected_bind_names = ["INT_VAL", "STR_VAL", "_val1", "VaL_2"] + assert cursor.bindnames() == expected_bind_names -if __name__ == "__main__": - test_env.run_test_cases() +def test_1615(cursor, test_env): + "1615 - test DML returning with an invalid bind name" + sql = """ + insert into TestTempTable (IntCol) + values (:int_val) + returning IntCol, StringCol1 into :ROWID""" + with test_env.assert_raises_full_code("ORA-01745"): + cursor.parse(sql) + + +def test_1616(cursor): + "1616 - test DML returning with a non-ascii bind name" + sql = """ + insert into TestTempTable (IntCol) + values (:int_val) + returning IntCol, StringCol1 into :méil""" + cursor.prepare(sql) + assert cursor.bindnames() == ["INT_VAL", "MÉIL"] + + +def test_1617(conn, cursor): + "1617 - test DML returning with input bind variable data" + cursor.execute("truncate table TestTempTable") + out_var = cursor.var(int) + cursor.execute( + """ + insert into TestTempTable (IntCol) + values (:int_val) + returning IntCol + :add_val into :out_val + """, + int_val=5, + add_val=18, + out_val=out_var, + ) + conn.commit() + assert out_var.getvalue() == [23] + + +def test_1618(conn, cursor): + "1618 - test DML returning with LOBs and an output converter" + cursor.execute("delete from TestCLOBs") + out_var = cursor.var( + oracledb.DB_TYPE_CLOB, outconverter=lambda value: value.read() + ) + lob_value = "A short CLOB - 1618" + cursor.execute( + """ + insert into TestCLOBs (IntCol, ClobCol) + values (1, :in_val) + returning CLOBCol into :out_val + """, + in_val=lob_value, + out_val=out_var, + ) + conn.commit() + assert out_var.getvalue() == [lob_value] + + +def test_1619(conn, cursor): + "1619 - test DML returning with CLOB converted to LONG" + cursor.execute("delete from TestCLOBs") + out_var = cursor.var(oracledb.DB_TYPE_LONG) + lob_value = "A short CLOB - 1619" + cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (1, :in_val) + returning CLOBCol into :out_val + """, + in_val=lob_value, + out_val=out_var, + ) + conn.commit() + assert out_var.getvalue() == [lob_value] + + +def test_1620(cursor): + "1620 - test dml returning with an index organized table" + cursor.execute("truncate table TestUniversalRowids") + rowid_var = cursor.var(oracledb.ROWID) + data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) + sql = """ + insert into TestUniversalRowids values (:1, :2, :3) + returning rowid into :4""" + cursor.execute(sql, data) + (rowid_value,) = rowid_var.getvalue() + cursor.execute( + """ + select * + from TestUniversalRowids + where rowid = :1 + """, + [rowid_value], + ) + (row,) = cursor.fetchall() + assert row == data[:3] + + +def test_1621(cursor): + "1621 - test plsql returning rowids with index organized table" + cursor.execute("truncate table TestUniversalRowids") + rowid_var = cursor.var(oracledb.ROWID) + data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) + cursor.execute( + """ + begin + insert into TestUniversalRowids values (:1, :2, :3) + returning rowid into :4; + end; + """, + 
data, + ) + rowid_value = rowid_var.values[0] + cursor.execute( + """ + select * + from TestUniversalRowids + where rowid = :1 + """, + [rowid_value], + ) + (row,) = cursor.fetchall() + assert row == data[:3] + + +def test_1622(cursor): + "1622 - parse DML returning with no spaces" + cursor.execute("truncate table TestTempTable") + sql = ( + "insert into TestTempTable (IntCol) values (:in_val)" + "returning(IntCol)into :out_val" + ) + out_val = cursor.var(int, arraysize=5) + cursor.execute(sql, in_val=25, out_val=out_val) + assert out_val.getvalue() == [25] + + +def test_1623(skip_unless_thin_mode, cursor, test_env): + "1623 - execute DML returning with duplicated binds" + cursor.execute("truncate table TestTempTable") + str_val = cursor.var(str) + str_val.setvalue(0, "Test Data") + sql = """ + insert into TestTempTable (IntCol, StringCol1) + values (:id_val, :str_val || ' (Additional String)') + returning StringCol1 into :str_val + """ + with test_env.assert_raises_full_code("DPY-2048"): + cursor.execute(sql, id_val=1, str_val=str_val) + + +def test_1624(cursor): + "1624 - use bind variable in new statement after RETURNING statement" + cursor.execute("truncate table TestTempTable") + sql = ( + "insert into TestTempTable (IntCol) values (:in_val)" + "returning IntCol + 15 into :out_val" + ) + out_val = cursor.var(int, arraysize=5) + cursor.execute(sql, in_val=25, out_val=out_val) + assert out_val.getvalue() == [40] + sql = "begin :out_val := :in_val + 35; end;" + cursor.execute(sql, in_val=35, out_val=out_val) + assert out_val.getvalue() == 70 + + +def test_1625(conn, cursor): + "1625 - test DML returning with multiple LOBs returned" + lob_data = [ + "Short CLOB - 1625a", + "Short CLOB - 1625b", + "Short CLOB - 1625c", + "Short CLOB - 1625d", + ] + all_data = [(i + 1, d) for i, d in enumerate(lob_data)] + cursor.execute("delete from TestCLOBs") + cursor.executemany( + "insert into TestCLOBs (IntCol, ClobCol) values (:1, :2)", all_data + ) + ret_val = cursor.var(oracledb.DB_TYPE_CLOB) + cursor.execute( + """ + update TestCLOBs set + ExtraNumCol1 = 1 + where ExtraNumCol1 is null + returning ClobCol into :ret_val + """, + [ret_val], + ) + conn.commit() + ret_lob_data = [v.read() for v in ret_val.getvalue()] + ret_lob_data.sort() + assert ret_lob_data == lob_data + + +# blocked by bug 37741324 in thick mode +def test_1626(skip_unless_thin_mode, conn, cursor): + "1626 - test DML returning with multiple DbObjects returned" + arrays = [ + (1626, 1627, 1628), + (1629, 1630, 1631), + (1632, 1633, 1634), + (1635, 1636, 1637), + ] + all_data = [(i + 4, v[0], v[1], v[2]) for i, v in enumerate(arrays)] + cursor.execute("delete from TestObjects where IntCol > 3") + cursor.executemany( + """ + insert into TestObjects (IntCol, ArrayCol) + values (:1, udt_Array(:1, :2, :3)) + """, + all_data, + ) + typ = conn.gettype("UDT_ARRAY") + ret_val = cursor.var(typ) + cursor.execute( + """ + update TestObjects set + ObjectCol = null + where IntCol > 3 + returning ArrayCol into :ret_val + """, + [ret_val], + ) + conn.commit() + ret_obj_data = [tuple(v) for v in ret_val.getvalue()] + ret_obj_data.sort() + assert ret_obj_data == arrays diff --git a/tests/test_1700_error.py b/tests/test_1700_error.py index 4791aa72..aea7873a 100644 --- a/tests/test_1700_error.py +++ b/tests/test_1700_error.py @@ -26,191 +26,194 @@ 1700 - Module for testing error objects """ +import pytest import pickle -import unittest import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_1700(self): - "1700 - test 
parse error returns offset correctly" - with self.assertRaises(oracledb.Error) as cm: - self.cursor.execute("begin t_Missing := 5; end;") - (error_obj,) = cm.exception.args - self.assertEqual(error_obj.full_code, "ORA-06550") - self.assertEqual(error_obj.offset, 6) - - def test_1701(self): - "1701 - test picking/unpickling an error object" - with self.assertRaises(oracledb.Error) as cm: - self.cursor.execute( - """ - begin - raise_application_error(-20101, 'Test!'); - end; - """ - ) - (error_obj,) = cm.exception.args - self.assertIsInstance(error_obj, oracledb._Error) - self.assertIn("Test!", error_obj.message) - self.assertEqual(error_obj.code, 20101) - self.assertEqual(error_obj.offset, 0) - self.assertIsInstance(error_obj.isrecoverable, bool) - self.assertFalse(error_obj.isrecoverable) - new_error_obj = pickle.loads(pickle.dumps(error_obj)) - self.assertIsInstance(new_error_obj, oracledb._Error) - self.assertEqual(new_error_obj.message, error_obj.message) - self.assertEqual(new_error_obj.code, error_obj.code) - self.assertEqual(new_error_obj.offset, error_obj.offset) - self.assertEqual(new_error_obj.context, error_obj.context) - self.assertEqual(new_error_obj.isrecoverable, error_obj.isrecoverable) - - def test_1702(self): - "1702 - test generation of full_code for ORA, DPI and DPY errors" - cursor = self.conn.cursor() - with self.assertRaises(oracledb.Error) as cm: - cursor.execute(None) - (error_obj,) = cm.exception.args - self.assertEqual(error_obj.full_code, "DPY-2001") - if not self.conn.thin: - with self.assertRaises(oracledb.Error) as cm: - cursor.execute("truncate table TestTempTable") - int_var = cursor.var(int) - str_var = cursor.var(str, 2) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'Longer than two chars') - returning IntCol, StringCol1 - into :int_var, :str_var - """, - int_var=int_var, - str_var=str_var, - ) - (error_obj,) = cm.exception.args - self.assertEqual(error_obj.full_code, "DPI-1037") - - @unittest.skipUnless(test_env.has_client_version(23), "unsupported client") - def test_1703(self): - "1703 - test generation of error help portal URL" - cursor = self.conn.cursor() - with self.assertRaises(oracledb.Error) as cm: - cursor.execute("select 1 / 0 from dual") - (error_obj,) = cm.exception.args - to_check = "Help: https://docs.oracle.com/error-help/db/ora-01476/" - self.assertIn(to_check, error_obj.message) - - def test_1704(self): - "1704 - verify warning is generated when creating a procedure" - proc_name = "bad_proc_1704" - self.assertIsNone(self.cursor.warning) - self.cursor.execute( - f""" - create or replace procedure {proc_name} as - begin - null - end; - """ - ) - self.assertEqual(self.cursor.warning.full_code, "DPY-7000") - self.cursor.execute( - f""" - create or replace procedure {proc_name} as - begin - null; - end; - """ - ) - self.assertIsNone(self.cursor.warning) - self.cursor.execute(f"drop procedure {proc_name}") - - def test_1705(self): - "1705 - verify warning is generated when creating a function" - func_name = "bad_func_1705" - self.cursor.execute( - f""" - create or replace function {func_name} - return number as - begin - return null - end; - """ - ) - self.assertEqual(self.cursor.warning.full_code, "DPY-7000") - self.cursor.execute(f"drop function {func_name}") - self.assertIsNone(self.cursor.warning) - - def test_1706(self): - "1706 - verify warning is generated when creating a type" - type_name = "bad_type_1706" - self.cursor.execute( - f""" - create or replace type {type_name} as object ( - x bad_type 
- ); + + +def test_1700(cursor): + "1700 - test parse error returns offset correctly" + with pytest.raises(oracledb.Error) as excinfo: + cursor.execute("begin t_Missing := 5; end;") + (error_obj,) = excinfo.value.args + assert error_obj.full_code == "ORA-06550" + assert error_obj.offset == 6 + + +def test_1701(cursor): + "1701 - test picking/unpickling an error object" + with pytest.raises(oracledb.Error) as excinfo: + cursor.execute( """ - ) - self.assertEqual(self.cursor.warning.full_code, "DPY-7000") - self.cursor.execute(f"drop type {type_name}") - self.assertIsNone(self.cursor.warning) - - def test_1707(self): - "1707 - verify warning is generated with executemany()" - proc_name = "bad_proc_1707" - self.assertIsNone(self.cursor.warning) - self.cursor.executemany( - f""" - create or replace procedure {proc_name} as begin - null - end; - """, - 1, - ) - self.assertEqual(self.cursor.warning.full_code, "DPY-7000") - self.cursor.execute( - f""" - create or replace procedure {proc_name} as - begin - null; + raise_application_error(-20101, 'Test!'); end; """ ) - self.assertIsNone(self.cursor.warning) - self.cursor.execute(f"drop procedure {proc_name}") - - def test_1708(self): - "1708 - user defined errors do not generate error help portal URL" - for code in (20000, 20500, 20999): - with self.assertRaises(oracledb.Error) as cm: - self.cursor.execute( - f""" - begin - raise_application_error(-{code}, 'User defined error'); - end; - """ - ) - error_obj = cm.exception.args[0] - self.assertEqual(error_obj.code, code) - self.assertEqual(error_obj.full_code, f"ORA-{code}") - self.assertTrue("Help:" not in error_obj.message) - - @test_env.skip_if_drcp() - def test_1709(self): - "1709 - error from killed connection is deemed recoverable" - admin_conn = test_env.get_admin_connection() - conn = test_env.get_connection() - sid, serial = self.get_sid_serial(conn) - with admin_conn.cursor() as admin_cursor: - sql = f"alter system kill session '{sid},{serial}'" - admin_cursor.execute(sql) - with self.assertRaisesFullCode("DPY-4011") as cm: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - self.assertTrue(cm.error_obj.isrecoverable) - - -if __name__ == "__main__": - test_env.run_test_cases() + (error_obj,) = excinfo.value.args + assert isinstance(error_obj, oracledb._Error) + assert "Test!" 
in error_obj.message + assert error_obj.code == 20101 + assert error_obj.offset == 0 + assert isinstance(error_obj.isrecoverable, bool) + assert not error_obj.isrecoverable + new_error_obj = pickle.loads(pickle.dumps(error_obj)) + assert isinstance(new_error_obj, oracledb._Error) + assert new_error_obj.message == error_obj.message + assert new_error_obj.code == error_obj.code + assert new_error_obj.offset == error_obj.offset + assert new_error_obj.context == error_obj.context + assert new_error_obj.isrecoverable == error_obj.isrecoverable + + +def test_1702(conn): + "1702 - test generation of full_code for ORA, DPI and DPY errors" + cursor = conn.cursor() + with pytest.raises(oracledb.Error) as excinfo: + cursor.execute(None) + (error_obj,) = excinfo.value.args + assert error_obj.full_code == "DPY-2001" + if not conn.thin: + with pytest.raises(oracledb.Error) as excinfo: + cursor.execute("truncate table TestTempTable") + int_var = cursor.var(int) + str_var = cursor.var(str, 2) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'Longer than two chars') + returning IntCol, StringCol1 + into :int_var, :str_var + """, + int_var=int_var, + str_var=str_var, + ) + (error_obj,) = excinfo.value.args + assert error_obj.full_code == "DPI-1037" + + +def test_1703(conn, test_env): + "1703 - test generation of error help portal URL" + if not test_env.has_client_version(23): + pytest.skip("unsupported client") + cursor = conn.cursor() + with pytest.raises(oracledb.Error) as excinfo: + cursor.execute("select 1 / 0 from dual") + (error_obj,) = excinfo.value.args + to_check = "Help: https://docs.oracle.com/error-help/db/ora-01476/" + assert to_check in error_obj.message + + +def test_1704(cursor): + "1704 - verify warning is generated when creating a procedure" + proc_name = "bad_proc_1704" + assert cursor.warning is None + cursor.execute( + f""" + create or replace procedure {proc_name} as + begin + null + end; + """ + ) + assert cursor.warning.full_code == "DPY-7000" + cursor.execute( + f""" + create or replace procedure {proc_name} as + begin + null; + end; + """ + ) + assert cursor.warning is None + cursor.execute(f"drop procedure {proc_name}") + + +def test_1705(cursor): + "1705 - verify warning is generated when creating a function" + func_name = "bad_func_1705" + cursor.execute( + f""" + create or replace function {func_name} + return number as + begin + return null + end; + """ + ) + assert cursor.warning.full_code == "DPY-7000" + cursor.execute(f"drop function {func_name}") + assert cursor.warning is None + + +def test_1706(cursor): + "1706 - verify warning is generated when creating a type" + type_name = "bad_type_1706" + cursor.execute( + f""" + create or replace type {type_name} as object ( + x bad_type + ); + """ + ) + assert cursor.warning.full_code == "DPY-7000" + cursor.execute(f"drop type {type_name}") + assert cursor.warning is None + + +def test_1707(cursor): + "1707 - verify warning is generated with executemany()" + proc_name = "bad_proc_1707" + assert cursor.warning is None + cursor.executemany( + f""" + create or replace procedure {proc_name} as + begin + null + end; + """, + 1, + ) + assert cursor.warning.full_code == "DPY-7000" + cursor.execute( + f""" + create or replace procedure {proc_name} as + begin + null; + end; + """ + ) + assert cursor.warning is None + cursor.execute(f"drop procedure {proc_name}") + + +def test_1708(cursor): + "1708 - user defined errors do not generate error help portal URL" + for code in (20000, 20500, 20999): + with 
pytest.raises(oracledb.Error) as excinfo: + cursor.execute( + f""" + begin + raise_application_error(-{code}, 'User defined error'); + end; + """ + ) + error_obj = excinfo.value.args[0] + assert error_obj.code == code + assert error_obj.full_code == f"ORA-{code}" + assert "Help:" not in error_obj.message + + +def test_1709(skip_if_drcp, conn, test_env): + "1709 - error from killed connection is deemed recoverable" + admin_conn = test_env.get_admin_connection() + conn = test_env.get_connection() + sid, serial = test_env.get_sid_serial(conn) + with admin_conn.cursor() as admin_cursor: + sql = f"alter system kill session '{sid},{serial}'" + admin_cursor.execute(sql) + with test_env.assert_raises_full_code("DPY-4011") as excinfo: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + assert excinfo.error_obj.isrecoverable diff --git a/tests/test_1800_interval_var.py b/tests/test_1800_interval_var.py index 1a9b38a2..4b0eded1 100644 --- a/tests/test_1800_interval_var.py +++ b/tests/test_1800_interval_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,230 +29,238 @@ import datetime import oracledb -import test_env +import pytest -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - for i in range(1, 11): - delta = datetime.timedelta( - days=i, hours=i, minutes=i * 2, seconds=i * 3 - ) - if i % 2 == 0: - nullable_delta = None - else: - nullable_delta = datetime.timedelta( - days=i + 5, - hours=i + 2, - minutes=i * 2 + 5, - seconds=i * 3 + 5, - ) - precision_col = datetime.timedelta( - days=8, - hours=5, - minutes=15, - seconds=0, - ) - precision_scale_col = datetime.timedelta( - days=10, - hours=12, - minutes=15, - seconds=15, - ) - data_tuple = ( - i, - delta, - nullable_delta, - precision_col, - precision_scale_col, - ) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def test_1800(self): - "1800 - test binding in an interval" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_DS) - value = datetime.timedelta(days=5, hours=5, minutes=10, seconds=15) - self.cursor.execute( - "select * from TestIntervals where IntervalCol = :value", - value=value, +@pytest.fixture(scope="module") +def module_data(): + data = [] + for i in range(1, 11): + delta = datetime.timedelta( + days=i, hours=i, minutes=i * 2, seconds=i * 3 ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_1801(self): - "1801 - test binding in a null" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_DS) - self.cursor.execute( - "select * from TestIntervals where IntervalCol = :value", - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_1802(self): - "1802 - test binding out with set input sizes defined" - bind_vars = self.cursor.setinputsizes( - value=oracledb.DB_TYPE_INTERVAL_DS + if i % 2 == 0: + nullable_delta = None + else: + nullable_delta = datetime.timedelta( + days=i + 5, + hours=i + 2, + minutes=i * 2 + 5, + seconds=i * 3 + 5, + ) + precision_col = datetime.timedelta( + days=8, + hours=5, + minutes=15, + seconds=0, ) - self.cursor.execute( - """ - begin - :value := to_dsinterval('8 09:24:18.123789'); - end; - """ 
+ precision_scale_col = datetime.timedelta( + days=10, + hours=12, + minutes=15, + seconds=15, ) - expected_value = datetime.timedelta( - days=8, hours=9, minutes=24, seconds=18, microseconds=123789 + data_tuple = ( + i, + delta, + nullable_delta, + precision_col, + precision_scale_col, ) - self.assertEqual(bind_vars["value"].getvalue(), expected_value) + data.append(data_tuple) + return data - def test_1803(self): - "1803 - test binding in/out with set input sizes defined" - bind_vars = self.cursor.setinputsizes( - value=oracledb.DB_TYPE_INTERVAL_DS - ) - self.cursor.execute( - """ - begin - :value := :value + to_dsinterval('5 08:30:00'); - end; - """, - value=datetime.timedelta(days=5, hours=2, minutes=15), - ) - expected_value = datetime.timedelta(days=10, hours=10, minutes=45) - self.assertEqual(bind_vars["value"].getvalue(), expected_value) - def test_1804(self): - "1804 - test binding in/out with set input sizes defined" - bind_vars = self.cursor.setinputsizes( - value=oracledb.DB_TYPE_INTERVAL_DS - ) - self.cursor.execute( - """ - begin - :value := :value + to_dsinterval('5 08:30:00'); - end; - """, - value=datetime.timedelta(days=5, seconds=12.123789), - ) - expected_value = datetime.timedelta( - days=10, hours=8, minutes=30, seconds=12, microseconds=123789 - ) - self.assertEqual(bind_vars["value"].getvalue(), expected_value) +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key - def test_1805(self): - "1805 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_INTERVAL_DS) - self.cursor.execute( - """ - begin - :value := to_dsinterval('15 18:35:45.586'); - end; - """, - value=var, - ) - expected_value = datetime.timedelta( - days=15, hours=18, minutes=35, seconds=45, milliseconds=586 - ) - self.assertEqual(var.getvalue(), expected_value) - def test_1806(self): - "1806 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_INTERVAL_DS) - var.setvalue(0, datetime.timedelta(days=1, minutes=50)) - self.cursor.execute( - """ - begin - :value := :value + to_dsinterval('8 05:15:00'); - end; - """, - value=var, - ) - expected_value = datetime.timedelta(days=9, hours=6, minutes=5) - self.assertEqual(var.getvalue(), expected_value) +def test_1800(cursor, module_data_by_key): + "1800 - test binding in an interval" + cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_DS) + value = datetime.timedelta(days=5, hours=5, minutes=10, seconds=15) + cursor.execute( + "select * from TestIntervals where IntervalCol = :value", + value=value, + ) + assert cursor.fetchall() == [module_data_by_key[5]] - def test_1807(self): - "1807 - test cursor description is accurate" - self.cursor.execute("select * from TestIntervals") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "INTERVALCOL", - oracledb.DB_TYPE_INTERVAL_DS, - None, - None, - 2, - 6, - False, - ), - ( - "NULLABLECOL", - oracledb.DB_TYPE_INTERVAL_DS, - None, - None, - 2, - 6, - True, - ), - ( - "INTERVALPRECISIONCOL", - oracledb.DB_TYPE_INTERVAL_DS, - None, - None, - 7, - 6, - True, - ), - ( - "INTERVALPRECISIONSCALECOL", - oracledb.DB_TYPE_INTERVAL_DS, - None, - None, - 8, - 9, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - def test_1808(self): - "1808 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestIntervals order by IntCol") - 
self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) +def test_1801(cursor): + "1801 - test binding in a null" + cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_DS) + cursor.execute( + "select * from TestIntervals where IntervalCol = :value", + value=None, + ) + assert cursor.fetchall() == [] - def test_1809(self): - "1809 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestIntervals order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - def test_1810(self): - "1810 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestIntervals - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertIsNone(self.cursor.fetchone()) +def test_1802(cursor): + "1802 - test binding out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_DS) + cursor.execute( + """ + begin + :value := to_dsinterval('8 09:24:18.123789'); + end; + """ + ) + expected_value = datetime.timedelta( + days=8, hours=9, minutes=24, seconds=18, microseconds=123789 + ) + assert bind_vars["value"].getvalue() == expected_value + + +def test_1803(cursor): + "1803 - test binding in/out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_DS) + cursor.execute( + """ + begin + :value := :value + to_dsinterval('5 08:30:00'); + end; + """, + value=datetime.timedelta(days=5, hours=2, minutes=15), + ) + expected_value = datetime.timedelta(days=10, hours=10, minutes=45) + assert bind_vars["value"].getvalue() == expected_value + + +def test_1804(cursor): + "1804 - test binding in/out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_DS) + cursor.execute( + """ + begin + :value := :value + to_dsinterval('5 08:30:00'); + end; + """, + value=datetime.timedelta(days=5, seconds=12.123789), + ) + expected_value = datetime.timedelta( + days=10, hours=8, minutes=30, seconds=12, microseconds=123789 + ) + assert bind_vars["value"].getvalue() == expected_value + + +def test_1805(cursor): + "1805 - test binding out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_INTERVAL_DS) + cursor.execute( + """ + begin + :value := to_dsinterval('15 18:35:45.586'); + end; + """, + value=var, + ) + expected_value = datetime.timedelta( + days=15, hours=18, minutes=35, seconds=45, milliseconds=586 + ) + assert var.getvalue() == expected_value + + +def test_1806(cursor): + "1806 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_INTERVAL_DS) + var.setvalue(0, datetime.timedelta(days=1, minutes=50)) + cursor.execute( + """ + begin + :value := :value + to_dsinterval('8 05:15:00'); + end; + """, + value=var, + ) + expected_value = datetime.timedelta(days=9, hours=6, minutes=5) + assert var.getvalue() == expected_value + + +def test_1807(cursor): + "1807 - test cursor description is accurate" + cursor.execute("select * from TestIntervals") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + 
"INTERVALCOL", + oracledb.DB_TYPE_INTERVAL_DS, + None, + None, + 2, + 6, + False, + ), + ( + "NULLABLECOL", + oracledb.DB_TYPE_INTERVAL_DS, + None, + None, + 2, + 6, + True, + ), + ( + "INTERVALPRECISIONCOL", + oracledb.DB_TYPE_INTERVAL_DS, + None, + None, + 7, + 6, + True, + ), + ( + "INTERVALPRECISIONSCALECOL", + oracledb.DB_TYPE_INTERVAL_DS, + None, + None, + 8, + 9, + True, + ), + ] + assert cursor.description == expected_value + + +def test_1808(cursor, module_data): + "1808 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestIntervals order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_1809(cursor, module_data): + "1809 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestIntervals order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + - def test_1811(self): - "1811 - test binding and fetching a negative interval" - value = datetime.timedelta(days=-1, seconds=86314, microseconds=431152) - self.cursor.execute("select :1 from dual", [value]) - (result,) = self.cursor.fetchone() - self.assertEqual(result, value) +def test_1810(cursor, module_data_by_key): + "1810 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestIntervals + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None -if __name__ == "__main__": - test_env.run_test_cases() +def test_1811(cursor): + "1811 - test binding and fetching a negative interval" + value = datetime.timedelta(days=-1, seconds=86314, microseconds=431152) + cursor.execute("select :1 from dual", [value]) + (result,) = cursor.fetchone() + assert result == value diff --git a/tests/test_1900_lob_var.py b/tests/test_1900_lob_var.py index ed8e6731..9bdce6f7 100644 --- a/tests/test_1900_lob_var.py +++ b/tests/test_1900_lob_var.py @@ -27,615 +27,645 @@ """ import pickle -import unittest import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def __get_temp_lobs(self, sid): - cursor = self.conn.cursor() - cursor.execute( - """ - select cache_lobs + nocache_lobs + abstract_lobs - from v$temporary_lobs - where sid = :sid - """, - sid=sid, - ) - row = cursor.fetchone() - if row is None: - return 0 - return int(row[0]) - - def __perform_test(self, lob_type, input_type): - long_string = "" - db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") - self.cursor.execute(f"delete from Test{lob_type}s") - self.conn.commit() - for i in range(11): - if i > 0: - char = chr(ord("A") + i - 1) - long_string += char * 25000 - elif input_type is not db_type: - continue - self.cursor.setinputsizes(long_string=input_type) - if lob_type == "BLOB": - bind_value = long_string.encode() - else: - bind_value = long_string - self.cursor.execute( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col) - values (:integer_value, :long_string) - """, - integer_value=i, - long_string=bind_value, - ) - self.conn.commit() - self.cursor.execute( - f""" - select IntCol, {lob_type}Col - from Test{lob_type}s - order by IntCol - """ - ) - self.__validate_query(self.cursor, lob_type) - - def __test_bind_ordering(self, lob_type): - main_col = "A" * 
32768 - extra_col_1 = "B" * 65536 - extra_col_2 = "C" * 131072 - if lob_type == "BLOB": - main_col = main_col.encode() - extra_col_1 = extra_col_1.encode() - extra_col_2 = extra_col_2.encode() - self.conn.stmtcachesize = 0 - self.cursor.execute(f"delete from Test{lob_type}s") - self.conn.commit() - data = (1, main_col, 8, extra_col_1, 15, extra_col_2) - self.cursor.execute( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col, - ExtraNumCol1, Extra{lob_type}Col1, ExtraNumCol2, - Extra{lob_type}Col2) - values (:1, :2, :3, :4, :5, :6) - """, - data, - ) - self.cursor.execute(f"select * from Test{lob_type}s", fetch_lobs=False) - self.assertEqual(self.cursor.fetchone(), data) - - def __test_fetch_lobs_direct(self, lob_type): - self.cursor.execute(f"delete from Test{lob_type}s") - self.conn.commit() - data = [] - long_string = "" - for i in range(1, 11): - if i > 0: - char = chr(ord("A") + i - 1) - long_string += char * 25000 - if lob_type == "BLOB": - data.append((i, long_string.encode())) - else: - data.append((i, long_string)) - self.cursor.executemany( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col) - values (:1, :2) - """, - data, - ) - with test_env.DefaultsContextManager("fetch_lobs", False): - self.cursor.execute( - f""" - select IntCol, {lob_type}Col - from Test{lob_type}s - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchall(), data) - - def __test_lob_operations(self, lob_type): - self.cursor.execute(f"delete from Test{lob_type}s") - self.conn.commit() - self.cursor.setinputsizes(long_string=getattr(oracledb, lob_type)) - long_string = "X" * 75000 - write_value = "TEST" +import pytest + + +def _get_temp_lobs(conn, sid): + cursor = conn.cursor() + cursor.execute( + """ + select cache_lobs + nocache_lobs + abstract_lobs + from v$temporary_lobs + where sid = :sid + """, + sid=sid, + ) + row = cursor.fetchone() + if row is None: + return 0 + return int(row[0]) + + +def _perform_test(cursor, lob_type, input_type): + long_string = "" + db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") + cursor.execute(f"delete from Test{lob_type}s") + cursor.connection.commit() + for i in range(11): + if i > 0: + char = chr(ord("A") + i - 1) + long_string += char * 25000 + elif input_type is not db_type: + continue + cursor.setinputsizes(long_string=input_type) if lob_type == "BLOB": - long_string = long_string.encode("ascii") - write_value = write_value.encode("ascii") - self.cursor.execute( + bind_value = long_string.encode() + else: + bind_value = long_string + cursor.execute( f""" insert into Test{lob_type}s (IntCol, {lob_type}Col) values (:integer_value, :long_string) """, - integer_value=1, - long_string=long_string, + integer_value=i, + long_string=bind_value, ) - self.cursor.execute( - f""" - select {lob_type}Col - from Test{lob_type}s - where IntCol = 1 - """ - ) - (lob,) = self.cursor.fetchone() - self.assertFalse(lob.isopen()) + cursor.connection.commit() + cursor.execute( + f""" + select IntCol, {lob_type}Col + from Test{lob_type}s + order by IntCol + """ + ) + _validate_query(cursor, lob_type) + + +def _test_bind_ordering(conn, cursor, lob_type): + main_col = "A" * 32768 + extra_col_1 = "B" * 65536 + extra_col_2 = "C" * 131072 + if lob_type == "BLOB": + main_col = main_col.encode() + extra_col_1 = extra_col_1.encode() + extra_col_2 = extra_col_2.encode() + conn.stmtcachesize = 0 + cursor.execute(f"delete from Test{lob_type}s") + conn.commit() + data = (1, main_col, 8, extra_col_1, 15, extra_col_2) + cursor.execute( + f""" + insert into Test{lob_type}s 
(IntCol, {lob_type}Col, + ExtraNumCol1, Extra{lob_type}Col1, ExtraNumCol2, + Extra{lob_type}Col2) + values (:1, :2, :3, :4, :5, :6) + """, + data, + ) + cursor.execute(f"select * from Test{lob_type}s", fetch_lobs=False) + assert cursor.fetchone() == data + + +def _test_fetch_lobs_direct(cursor, lob_type): + cursor.execute(f"delete from Test{lob_type}s") + cursor.connection.commit() + data = [] + long_string = "" + for i in range(1, 11): + if i > 0: + char = chr(ord("A") + i - 1) + long_string += char * 25000 + if lob_type == "BLOB": + data.append((i, long_string.encode())) + else: + data.append((i, long_string)) + cursor.executemany( + f""" + insert into Test{lob_type}s (IntCol, {lob_type}Col) + values (:1, :2) + """, + data, + ) + cursor.execute( + f""" + select IntCol, {lob_type}Col + from Test{lob_type}s + order by IntCol + """ + ) + assert cursor.fetchall() == data + + +def _test_lob_operations(cursor, test_env, lob_type): + cursor.execute(f"delete from Test{lob_type}s") + cursor.connection.commit() + cursor.setinputsizes(long_string=getattr(oracledb, lob_type)) + long_string = "X" * 75000 + write_value = "TEST" + if lob_type == "BLOB": + long_string = long_string.encode("ascii") + write_value = write_value.encode("ascii") + cursor.execute( + f""" + insert into Test{lob_type}s (IntCol, {lob_type}Col) + values (:integer_value, :long_string) + """, + integer_value=1, + long_string=long_string, + ) + cursor.execute( + f""" + select {lob_type}Col + from Test{lob_type}s + where IntCol = 1 + """ + ) + (lob,) = cursor.fetchone() + assert not lob.isopen() + lob.open() + with test_env.assert_raises_full_code("ORA-22293"): lob.open() - with self.assertRaisesFullCode("ORA-22293"): - lob.open() - self.assertTrue(lob.isopen()) + assert lob.isopen() + lob.close() + with test_env.assert_raises_full_code("ORA-22289"): lob.close() - with self.assertRaisesFullCode("ORA-22289"): - lob.close() - self.assertFalse(lob.isopen()) - self.assertEqual(lob.size(), 75000) - lob.write(write_value, 75001) - self.assertRaises(TypeError, lob.write, 1000, 1) - self.assertRaises(TypeError, lob.write, "data", "1") - self.assertEqual(lob.size(), 75000 + len(write_value)) - with self.assertRaisesFullCode("DPY-2030"): - lob.read(0) - with self.assertRaisesFullCode("DPY-2030"): - lob.read(-25) - with self.assertRaisesFullCode("DPY-2047"): - lob.read(amount=0) - with self.assertRaisesFullCode("DPY-2047"): - lob.read(amount=-5) - self.assertEqual(lob.read(), long_string + write_value) - lob.write(write_value, 1) - self.assertEqual( - lob.read(), write_value + long_string[4:] + write_value - ) - lob.trim(25000) - self.assertEqual(lob.size(), 25000) - lob.trim(newSize=10000) - self.assertEqual(lob.size(), 10000) - with self.assertRaisesFullCode("DPY-2014"): - lob.trim(new_size=50, newSize=60) - self.assertRaises(TypeError, lob.trim, new_size="10000") - self.assertRaises(TypeError, lob.trim, newSize="10000") - lob.trim(new_size=40) - self.assertEqual(lob.size(), 40) - lob.trim() - self.assertEqual(lob.size(), 0) - self.assertIsInstance(lob.getchunksize(), int) - - def __test_pickle(self, lob_type): - value = "A test string value for pickling" - if lob_type == "BLOB": - value = value.encode("ascii") - db_type = getattr(oracledb, "DB_TYPE_" + lob_type) - lob = self.conn.createlob(db_type, value) - pickled_data = pickle.dumps(lob) - unpickled_value = pickle.loads(pickled_data) - self.assertEqual(unpickled_value, value) - - def __test_temporary_lob(self, lob_type): - self.cursor.execute(f"delete from Test{lob_type}s") - 
self.conn.commit() - value = "A test string value" - if lob_type == "BLOB": - value = value.encode("ascii") - db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") - lob = self.conn.createlob(db_type, value) - self.cursor.execute( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col) - values (:int_val, :lob_val) - """, - int_val=1, - lob_val=lob, - ) - self.cursor.execute(f"select {lob_type}Col from Test{lob_type}s") - (lob,) = self.cursor.fetchone() - self.assertEqual(lob.read(), value) - - def __validate_query(self, rows, lob_type): - long_string = "" - db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") - for row in rows: - integer_value, lob = row - self.assertEqual(lob.type, db_type) - if integer_value == 0: - self.assertEqual(lob.size(), 0) - expected_value = "" - if lob_type == "BLOB": - expected_value = expected_value.encode() - self.assertEqual(lob.read(), expected_value) + assert not lob.isopen() + assert lob.size() == 75000 + lob.write(write_value, 75001) + pytest.raises(TypeError, lob.write, 1000, 1) + pytest.raises(TypeError, lob.write, "data", "1") + assert lob.size() == 75000 + len(write_value) + with test_env.assert_raises_full_code("DPY-2030"): + lob.read(0) + with test_env.assert_raises_full_code("DPY-2030"): + lob.read(-25) + with test_env.assert_raises_full_code("DPY-2047"): + lob.read(amount=0) + with test_env.assert_raises_full_code("DPY-2047"): + lob.read(amount=-5) + assert lob.read() == long_string + write_value + lob.write(write_value, 1) + assert lob.read() == write_value + long_string[4:] + write_value + lob.trim(25000) + assert lob.size() == 25000 + lob.trim(newSize=10000) + assert lob.size() == 10000 + with test_env.assert_raises_full_code("DPY-2014"): + lob.trim(new_size=50, newSize=60) + pytest.raises(TypeError, lob.trim, new_size="10000") + pytest.raises(TypeError, lob.trim, newSize="10000") + lob.trim(new_size=40) + assert lob.size() == 40 + lob.trim() + assert lob.size() == 0 + assert isinstance(lob.getchunksize(), int) + + +def _test_pickle(conn, lob_type): + value = "A test string value for pickling" + if lob_type == "BLOB": + value = value.encode("ascii") + db_type = getattr(oracledb, "DB_TYPE_" + lob_type) + lob = conn.createlob(db_type, value) + pickled_data = pickle.dumps(lob) + unpickled_value = pickle.loads(pickled_data) + assert unpickled_value == value + + +def _test_temporary_lob(conn, cursor, lob_type): + cursor.execute(f"delete from Test{lob_type}s") + conn.commit() + value = "A test string value" + if lob_type == "BLOB": + value = value.encode("ascii") + db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") + lob = conn.createlob(db_type, value) + cursor.execute( + f""" + insert into Test{lob_type}s (IntCol, {lob_type}Col) + values (:int_val, :lob_val) + """, + int_val=1, + lob_val=lob, + ) + cursor.execute(f"select {lob_type}Col from Test{lob_type}s") + (lob,) = cursor.fetchone() + assert lob.read() == value + + +def _validate_query(rows, lob_type): + long_string = "" + db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") + for row in rows: + integer_value, lob = row + assert lob.type == db_type + if integer_value == 0: + assert lob.size() == 0 + expected_value = "" + if lob_type == "BLOB": + expected_value = expected_value.encode() + assert lob.read() == expected_value + else: + char = chr(ord("A") + integer_value - 1) + prev_char = chr(ord("A") + integer_value - 2) + long_string += char * 25000 + if lob_type == "BLOB": + expected_value = long_string.encode("ascii") + char = char.encode("ascii") + prev_char = prev_char.encode("ascii") else: 
- char = chr(ord("A") + integer_value - 1) - prev_char = chr(ord("A") + integer_value - 2) - long_string += char * 25000 - if lob_type == "BLOB": - expected_value = long_string.encode("ascii") - char = char.encode("ascii") - prev_char = prev_char.encode("ascii") - else: - expected_value = long_string - self.assertEqual(lob.size(), len(expected_value)) - self.assertEqual(lob.read(), expected_value) - if lob_type == "CLOB": - self.assertEqual(str(lob), expected_value) - self.assertEqual(lob.read(len(expected_value)), char) - if integer_value > 1: - offset = (integer_value - 1) * 25000 - 4 - string = prev_char * 5 + char * 5 - self.assertEqual(lob.read(offset, 10), string) - - def test_1900(self): - "1900 - test binding a LOB value directly" - self.cursor.execute("delete from TestCLOBs") - self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (1, 'Short value') - """ - ) - self.conn.commit() - self.cursor.execute("select ClobCol from TestCLOBs") - (lob,) = self.cursor.fetchone() - self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (2, :value) - """, - value=lob, - ) + expected_value = long_string + assert lob.size() == len(expected_value) + assert lob.read() == expected_value + if lob_type == "CLOB": + assert str(lob) == expected_value + assert lob.read(len(expected_value)) == char + if integer_value > 1: + offset = (integer_value - 1) * 25000 - 4 + string = prev_char * 5 + char * 5 + assert lob.read(offset, 10) == string + + +def test_1900(conn, cursor): + "1900 - test binding a LOB value directly" + cursor.execute("delete from TestCLOBs") + cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (1, 'Short value') + """ + ) + conn.commit() + cursor.execute("select ClobCol from TestCLOBs") + (lob,) = cursor.fetchone() + cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (2, :value) + """, + value=lob, + ) + + +def test_1901(cursor): + "1901 - test cursor description is accurate for BLOBs" + cursor.execute("select IntCol, BlobCol from TestBLOBs") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0), + ("BLOBCOL", oracledb.DB_TYPE_BLOB, None, None, None, None, 0), + ] + assert cursor.description == expected_value + + +def test_1902(cursor): + "1902 - test binding and fetching BLOB data (directly)" + _perform_test(cursor, "BLOB", oracledb.DB_TYPE_BLOB) + + +def test_1903(cursor): + "1903 - test binding and fetching BLOB data (indirectly)" + _perform_test(cursor, "BLOB", oracledb.DB_TYPE_LONG_RAW) + + +def test_1904(cursor, test_env): + "1904 - test operations on BLOBs" + _test_lob_operations(cursor, test_env, "BLOB") + + +def test_1905(cursor): + "1905 - test cursor description is accurate for CLOBs" + cursor.execute("select IntCol, ClobCol from TestCLOBs") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ("CLOBCOL", oracledb.DB_TYPE_CLOB, None, None, None, None, False), + ] + assert cursor.description == expected_value - def test_1901(self): - "1901 - test cursor description is accurate for BLOBs" - self.cursor.execute("select IntCol, BlobCol from TestBLOBs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0), - ("BLOBCOL", oracledb.DB_TYPE_BLOB, None, None, None, None, 0), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_1902(self): - "1902 - test binding and fetching BLOB data (directly)" - self.__perform_test("BLOB", oracledb.DB_TYPE_BLOB) - - def test_1903(self): - "1903 - test 
binding and fetching BLOB data (indirectly)" - self.__perform_test("BLOB", oracledb.DB_TYPE_LONG_RAW) - - def test_1904(self): - "1904 - test operations on BLOBs" - self.__test_lob_operations("BLOB") - - def test_1905(self): - "1905 - test cursor description is accurate for CLOBs" - self.cursor.execute("select IntCol, ClobCol from TestCLOBs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ("CLOBCOL", oracledb.DB_TYPE_CLOB, None, None, None, None, False), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_1906(self): - "1906 - test binding and fetching CLOB data (directly)" - self.__perform_test("CLOB", oracledb.DB_TYPE_CLOB) - - def test_1907(self): - "1907 - test binding and fetching CLOB data (indirectly)" - self.__perform_test("CLOB", oracledb.DB_TYPE_LONG) - - def test_1908(self): - "1908 - test operations on CLOBs" - self.__test_lob_operations("CLOB") - - def test_1909(self): - "1909 - test creating a temporary BLOB" - self.__test_temporary_lob("BLOB") - - def test_1910(self): - "1910 - test creating a temporary CLOB" - self.__test_temporary_lob("CLOB") - - def test_1911(self): - "1911 - test creating a temporary NCLOB" - self.__test_temporary_lob("NCLOB") - - def test_1912(self): - "1912 - test retrieving data from a CLOB after multiple fetches" - self.cursor.arraysize = 1 - self.__perform_test("CLOB", oracledb.DB_TYPE_CLOB) - - def test_1913(self): - "1913 - test cursor description is accurate for NCLOBs" - self.cursor.execute("select IntCol, NClobCol from TestNCLOBs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0), - ("NCLOBCOL", oracledb.DB_TYPE_NCLOB, None, None, None, None, 0), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_1914(self): - "1914 - test binding and fetching NCLOB data (directly)" - self.__perform_test("NCLOB", oracledb.DB_TYPE_NCLOB) - - def test_1915(self): - "1915 - test binding and fetching NCLOB data (with non-ASCII chars)" - value = "\u03b4\u4e2a" - self.cursor.execute("delete from TestNCLOBs") - self.cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR) - self.cursor.execute( - """ - insert into TestNCLOBs (IntCol, NClobCol) - values (1, :val) - """, - val=value, - ) - self.conn.commit() - self.cursor.execute("select NCLOBCol from TestNCLOBs") - (nclob,) = self.cursor.fetchone() - self.cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR) - self.cursor.execute( - "update TestNCLOBs set NCLOBCol = :val", val=nclob.read() + value - ) - self.cursor.execute("select NCLOBCol from TestNCLOBs") - (nclob,) = self.cursor.fetchone() - self.assertEqual(nclob.read(), value + value) - def test_1916(self): - "1916 - test binding and fetching NCLOB data (indirectly)" - self.__perform_test("NCLOB", oracledb.DB_TYPE_LONG) +def test_1906(cursor): + "1906 - test binding and fetching CLOB data (directly)" + _perform_test(cursor, "CLOB", oracledb.DB_TYPE_CLOB) - def test_1917(self): - "1917 - test operations on NCLOBs" - self.__test_lob_operations("NCLOB") - @unittest.skipIf( - test_env.get_is_implicit_pooling(), - "sessions can change with implicit pooling", +def test_1907(cursor): + "1907 - test binding and fetching CLOB data (indirectly)" + _perform_test(cursor, "CLOB", oracledb.DB_TYPE_LONG) + + +def test_1908(cursor, test_env): + "1908 - test operations on CLOBs" + _test_lob_operations(cursor, test_env, "CLOB") + + +def test_1909(conn, cursor): + "1909 - test creating a temporary BLOB" + _test_temporary_lob(conn, cursor, "BLOB") + + +def test_1910(conn, 
cursor): + "1910 - test creating a temporary CLOB" + _test_temporary_lob(conn, cursor, "CLOB") + + +def test_1911(conn, cursor): + "1911 - test creating a temporary NCLOB" + _test_temporary_lob(conn, cursor, "NCLOB") + + +def test_1912(cursor): + "1912 - test retrieving data from a CLOB after multiple fetches" + cursor.arraysize = 1 + _perform_test(cursor, "CLOB", oracledb.DB_TYPE_CLOB) + + +def test_1913(cursor): + "1913 - test cursor description is accurate for NCLOBs" + cursor.execute("select IntCol, NClobCol from TestNCLOBs") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0), + ("NCLOBCOL", oracledb.DB_TYPE_NCLOB, None, None, None, None, 0), + ] + assert cursor.description == expected_value + + +def test_1914(cursor): + "1914 - test binding and fetching NCLOB data (directly)" + _perform_test(cursor, "NCLOB", oracledb.DB_TYPE_NCLOB) + + +def test_1915(conn, cursor): + "1915 - test binding and fetching NCLOB data (with non-ASCII chars)" + value = "\u03b4\u4e2a" + cursor.execute("delete from TestNCLOBs") + cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR) + cursor.execute( + """ + insert into TestNCLOBs (IntCol, NClobCol) + values (1, :val) + """, + val=value, ) - def test_1918(self): - "1918 - test temporary LOBs" - self.cursor.execute("select sys_context('USERENV', 'SID') from dual") - (sid,) = self.cursor.fetchone() - temp_lobs = self.__get_temp_lobs(sid) - with self.conn.cursor() as cursor: - cursor.arraysize = 27 - self.assertEqual(temp_lobs, 0) - cursor.execute( - "select extract(xmlcol, '/').getclobval() from TestXML" - ) - for (lob,) in cursor: - lob.read() - del lob - temp_lobs = self.__get_temp_lobs(sid) - self.assertEqual(temp_lobs, 0) - - def test_1919(self): - "1919 - test assign string to NCLOB beyond array size" - nclobVar = self.cursor.var(oracledb.DB_TYPE_NCLOB) - self.assertRaises(IndexError, nclobVar.setvalue, 1, "test char") - - def test_1920(self): - "1920 - test read/write temporary LOBs using supplemental characters" - if test_env.get_charset() != "AL32UTF8": - self.skipTest("Database character set must be AL32UTF8") - supplemental_chars = ( - "𠜎 𠜱 𠝹 𠱓 𠱸 𠲖 𠳏 𠳕 𠴕 𠵼 𠵿 𠸎 𠸏 𠹷 𠺝 𠺢 𠻗 𠻹 𠻺 𠼭 𠼮 " - "𠽌 𠾴 𠾼 𠿪 𡁜 𡁯 𡁵 𡁶 𡁻 𡃁 𡃉 𡇙 𢃇 𢞵 𢫕 𢭃 𢯊 𢱑 𢱕 𢳂 𢴈 " - "𢵌 𢵧 𢺳 𣲷 𤓓 𤶸 𤷪 𥄫 𦉘 𦟌 𦧲 𦧺 𧨾 𨅝 𨈇 𨋢 𨳊 𨳍 𨳒 𩶘" - ) - self.cursor.execute("delete from TestCLOBs") - lob = self.conn.createlob(oracledb.DB_TYPE_CLOB, supplemental_chars) - self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (1, :val) - """, - [lob], - ) - self.conn.commit() - self.cursor.execute("select ClobCol from TestCLOBs") - (lob,) = self.cursor.fetchone() - self.assertEqual(lob.read(), supplemental_chars) - - def test_1921(self): - "1921 - test automatic conversion to CLOB for PL/SQL" - var = self.cursor.var(str, outconverter=lambda v: v[-15:]) - var.setvalue(0, "A" * 50000) - self.cursor.execute( - """ - declare - t_Clob clob; - begin - t_Clob := :data; - dbms_lob.copy(:data, t_Clob, 50000); - dbms_lob.writeappend(:data, 5, 'BBBBB'); - end; - """, - data=var, - ) - self.assertEqual(var.getvalue(), "A" * 10 + "B" * 5) + conn.commit() + cursor.execute("select NCLOBCol from TestNCLOBs") + (nclob,) = cursor.fetchone() + cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR) + cursor.execute( + "update TestNCLOBs set NCLOBCol = :val", val=nclob.read() + value + ) + cursor.execute("select NCLOBCol from TestNCLOBs") + (nclob,) = cursor.fetchone() + assert nclob.read() == value + value + + +def test_1916(cursor): + "1916 - test binding and fetching NCLOB data (indirectly)" + 
_perform_test(cursor, "NCLOB", oracledb.DB_TYPE_LONG) + + +def test_1917(cursor, test_env): + "1917 - test operations on NCLOBs" + _test_lob_operations(cursor, test_env, "NCLOB") + + +def test_1918(skip_if_implicit_pooling, conn, cursor): + "1918 - test temporary LOBs" + cursor.execute("select sys_context('USERENV', 'SID') from dual") + (sid,) = cursor.fetchone() + temp_lobs = _get_temp_lobs(conn, sid) + with conn.cursor() as cursor: + cursor.arraysize = 27 + assert temp_lobs == 0 + cursor.execute("select extract(xmlcol, '/').getclobval() from TestXML") + for (lob,) in cursor: + lob.read() + del lob + temp_lobs = _get_temp_lobs(conn, sid) + assert temp_lobs == 0 + + +def test_1919(cursor): + "1919 - test assign string to NCLOB beyond array size" + nclobVar = cursor.var(oracledb.DB_TYPE_NCLOB) + pytest.raises(IndexError, nclobVar.setvalue, 1, "test char") + + +def test_1920(conn, cursor, test_env): + "1920 - test read/write temporary LOBs using supplemental characters" + if test_env.charset != "AL32UTF8": + pytest.skip("Database character set must be AL32UTF8") + supplemental_chars = ( + "𠜎 𠜱 𠝹 𠱓 𠱸 𠲖 𠳏 𠳕 𠴕 𠵼 𠵿 𠸎 𠸏 𠹷 𠺝 𠺢 𠻗 𠻹 𠻺 𠼭 𠼮 " + "𠽌 𠾴 𠾼 𠿪 𡁜 𡁯 𡁵 𡁶 𡁻 𡃁 𡃉 𡇙 𢃇 𢞵 𢫕 𢭃 𢯊 𢱑 𢱕 𢳂 𢴈 " + "𢵌 𢵧 𢺳 𣲷 𤓓 𤶸 𤷪 𥄫 𦉘 𦟌 𦧲 𦧺 𧨾 𨅝 𨈇 𨋢 𨳊 𨳍 𨳒 𩶘" + ) + cursor.execute("delete from TestCLOBs") + lob = conn.createlob(oracledb.DB_TYPE_CLOB, supplemental_chars) + cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (1, :val) + """, + [lob], + ) + conn.commit() + cursor.execute("select ClobCol from TestCLOBs") + (lob,) = cursor.fetchone() + assert lob.read() == supplemental_chars + + +def test_1921(cursor): + "1921 - test automatic conversion to CLOB for PL/SQL" + var = cursor.var(str, outconverter=lambda v: v[-15:]) + var.setvalue(0, "A" * 50000) + cursor.execute( + """ + declare + t_Clob clob; + begin + t_Clob := :data; + dbms_lob.copy(:data, t_Clob, 50000); + dbms_lob.writeappend(:data, 5, 'BBBBB'); + end; + """, + data=var, + ) + assert var.getvalue() == "A" * 10 + "B" * 5 + + +def test_1922(cursor): + "1922 - test automatic conversion to NCLOB for PL/SQL" + var = cursor.var(oracledb.DB_TYPE_NCHAR, outconverter=lambda v: v[-12:]) + var.setvalue(0, "N" * 51234) + cursor.execute( + """ + declare + t_Clob nclob; + begin + t_Clob := :data; + dbms_lob.copy(:data, t_Clob, 51234); + dbms_lob.writeappend(:data, 7, 'PPPPPPP'); + end; + """, + data=var, + ) + assert var.getvalue() == "N" * 5 + "P" * 7 + + +def test_1923(cursor): + "1923 - test automatic conversion to BLOB for PL/SQL" + var = cursor.var(bytes, outconverter=lambda v: v[-14:]) + var.setvalue(0, b"L" * 52345) + cursor.execute( + """ + declare + t_Blob blob; + begin + t_Blob := :data; + dbms_lob.copy(:data, t_Blob, 52345); + dbms_lob.writeappend(:data, 6, '515151515151'); + end; + """, + data=var, + ) + assert var.getvalue() == b"L" * 8 + b"Q" * 6 - def test_1922(self): - "1922 - test automatic conversion to NCLOB for PL/SQL" - var = self.cursor.var( - oracledb.DB_TYPE_NCHAR, outconverter=lambda v: v[-12:] - ) - var.setvalue(0, "N" * 51234) - self.cursor.execute( - """ - declare - t_Clob nclob; - begin - t_Clob := :data; - dbms_lob.copy(:data, t_Clob, 51234); - dbms_lob.writeappend(:data, 7, 'PPPPPPP'); - end; - """, - data=var, - ) - self.assertEqual(var.getvalue(), "N" * 5 + "P" * 7) - - def test_1923(self): - "1923 - test automatic conversion to BLOB for PL/SQL" - var = self.cursor.var(bytes, outconverter=lambda v: v[-14:]) - var.setvalue(0, b"L" * 52345) - self.cursor.execute( - """ - declare - t_Blob blob; - begin - t_Blob := :data; - 
dbms_lob.copy(:data, t_Blob, 52345); - dbms_lob.writeappend(:data, 6, '515151515151'); - end; - """, - data=var, - ) - self.assertEqual(var.getvalue(), b"L" * 8 + b"Q" * 6) - - def test_1924(self): - "1924 - test pickling of BLOB" - self.__test_pickle("BLOB") - - def test_1925(self): - "1925 - test pickling of CLOB" - self.__test_pickle("CLOB") - - def test_1926(self): - "1925 - test pickling of NCLOB" - self.__test_pickle("NCLOB") - - def test_1927(self): - "1927 - test fetching BLOB as bytes" - self.__test_fetch_lobs_direct("BLOB") - - def test_1928(self): - "1928 - test fetching CLOB as str" - self.__test_fetch_lobs_direct("CLOB") - - def test_1929(self): - "1929 - test fetching NCLOB as str" - self.__test_fetch_lobs_direct("NCLOB") - - def test_1930(self): - "1930 - test bind ordering with BLOB" - self.__test_bind_ordering("BLOB") - - def test_1931(self): - "1931 - test bind ordering with CLOB" - self.__test_bind_ordering("CLOB") - - def test_1932(self): - "1932 - test bind ordering with NCLOB" - self.__test_bind_ordering("NCLOB") - - def test_1933(self): - "1933 - test creating a lob with an invalid type" - with self.assertRaises(TypeError): - self.conn.createlob(oracledb.DB_TYPE_NUMBER) - with self.assertRaises(TypeError): - self.conn.createlob(oracledb.DB_TYPE_BFILE) - - def test_1934(self): - "1934 - test creation of temporary LOBs with varying data" - cases = [ - (oracledb.DB_TYPE_BLOB, b"test_1934A", b"!", b"test_1934A!"), - (oracledb.DB_TYPE_BLOB, "test_1934B", "!", b"test_1934B!"), - (oracledb.DB_TYPE_CLOB, b"test_1934C", b"!", "test_1934C!"), - (oracledb.DB_TYPE_CLOB, "test_1934D", "!", "test_1934D!"), - (oracledb.DB_TYPE_NCLOB, b"test_1934E", b"!", "test_1934E!"), - (oracledb.DB_TYPE_NCLOB, "test_1934F", "!", "test_1934F!"), - ] - for typ, initial_data, additional_data, expected_result in cases: - with self.subTest(): - lob = self.conn.createlob(typ, initial_data) - lob.write(additional_data, len(initial_data) + 1) - self.assertEqual(lob.read(), expected_result) - - def test_1935(self): - "1935 - test reading and writing a LOB with a closed connection" - types = [ - oracledb.DB_TYPE_BLOB, - oracledb.DB_TYPE_CLOB, - oracledb.DB_TYPE_NCLOB, - ] - for typ in types: - conn = test_env.get_connection() - lob = conn.createlob(typ, "Temp LOB") - conn.close() - with self.assertRaisesFullCode("DPY-1001"): - lob.read() - with self.assertRaisesFullCode("DPY-1001"): - lob.write("x") - - def test_1936(self): - "1936 - test reading a non-existent directory" - directory_name = "TEST_1936_MISSING_DIR" - file_name = "test_1936_missing_file.txt" - self.cursor.execute( - "select BFILENAME(:1, :2) from dual", [directory_name, file_name] - ) - (bfile,) = self.cursor.fetchone() - self.assertEqual(bfile.getfilename(), (directory_name, file_name)) - with self.assertRaisesFullCode("ORA-22285"): - bfile.fileexists() - with self.assertRaisesFullCode("ORA-22285"): - bfile.read() - - def test_1937(self): - "1937 - test using BFILE methods on non-BFILE LOBs" - types = [ - oracledb.DB_TYPE_BLOB, - oracledb.DB_TYPE_CLOB, - oracledb.DB_TYPE_NCLOB, - ] - for typ in types: - lob = self.conn.createlob(typ) - with self.assertRaisesFullCode("DPY-3026"): - lob.getfilename() - with self.assertRaisesFullCode("DPY-3026"): - lob.setfilename("not_relevant", "not_relevant") - with self.assertRaisesFullCode("DPY-3026"): - lob.fileexists() - - def test_1938(self): - "1938 - confirm that LOB objects are retained across getvalue() calls" - for typ in ( - oracledb.DB_TYPE_BLOB, - oracledb.DB_TYPE_CLOB, - 
oracledb.DB_TYPE_NCLOB, - ): - var = self.cursor.var(typ) - lob = self.conn.createlob(typ, "Some data for test 1938") - var.setvalue(0, lob) - self.assertIs(var.getvalue(), lob) - - def test_1939(self): - "1939 - temporary LOB in/out without modification" - value = "test - 1939" - var = self.cursor.var(oracledb.DB_TYPE_CLOB) - var.setvalue(0, value) - self.assertEqual(var.getvalue().read(), value) - self.cursor.callproc("pkg_TestLOBs.TestInOut", [var, None, None]) - self.assertEqual(var.getvalue().read(), value) - - def test_1940(self): - "1940 - temporary LOB in/out with modification" - search_value = "test" - replace_value = "replaced" - initial_value = f"{search_value} - 1939" - final_value = f"{replace_value} - 1939" - var = self.cursor.var(oracledb.DB_TYPE_CLOB) - var.setvalue(0, initial_value) - self.assertEqual(var.getvalue().read(), initial_value) - self.cursor.callproc( - "pkg_TestLOBs.TestInOut", [var, search_value, replace_value] - ) - self.assertEqual(var.getvalue().read(), final_value) + +def test_1924(conn): + "1924 - test pickling of BLOB" + _test_pickle(conn, "BLOB") + + +def test_1925(conn): + "1925 - test pickling of CLOB" + _test_pickle(conn, "CLOB") + + +def test_1926(conn): + "1925 - test pickling of NCLOB" + _test_pickle(conn, "NCLOB") + + +def test_1927(cursor, disable_fetch_lobs): + "1927 - test fetching BLOB as bytes" + _test_fetch_lobs_direct(cursor, "BLOB") + + +def test_1928(cursor, disable_fetch_lobs): + "1928 - test fetching CLOB as str" + _test_fetch_lobs_direct(cursor, "CLOB") + + +def test_1929(cursor, disable_fetch_lobs): + "1929 - test fetching NCLOB as str" + _test_fetch_lobs_direct(cursor, "NCLOB") -if __name__ == "__main__": - test_env.run_test_cases() +def test_1930(conn, cursor): + "1930 - test bind ordering with BLOB" + _test_bind_ordering(conn, cursor, "BLOB") + + +def test_1931(conn, cursor): + "1931 - test bind ordering with CLOB" + _test_bind_ordering(conn, cursor, "CLOB") + + +def test_1932(conn, cursor): + "1932 - test bind ordering with NCLOB" + _test_bind_ordering(conn, cursor, "NCLOB") + + +def test_1933(conn): + "1933 - test creating a lob with an invalid type" + with pytest.raises(TypeError): + conn.createlob(oracledb.DB_TYPE_NUMBER) + with pytest.raises(TypeError): + conn.createlob(oracledb.DB_TYPE_BFILE) + + +def test_1934(conn): + "1934 - test creation of temporary LOBs with varying data" + cases = [ + (oracledb.DB_TYPE_BLOB, b"test_1934A", b"!", b"test_1934A!"), + (oracledb.DB_TYPE_BLOB, "test_1934B", "!", b"test_1934B!"), + (oracledb.DB_TYPE_CLOB, b"test_1934C", b"!", "test_1934C!"), + (oracledb.DB_TYPE_CLOB, "test_1934D", "!", "test_1934D!"), + (oracledb.DB_TYPE_NCLOB, b"test_1934E", b"!", "test_1934E!"), + (oracledb.DB_TYPE_NCLOB, "test_1934F", "!", "test_1934F!"), + ] + for typ, initial_data, additional_data, expected_result in cases: + lob = conn.createlob(typ, initial_data) + lob.write(additional_data, len(initial_data) + 1) + assert lob.read() == expected_result + + +def test_1935(test_env): + "1935 - test reading and writing a LOB with a closed connection" + types = [ + oracledb.DB_TYPE_BLOB, + oracledb.DB_TYPE_CLOB, + oracledb.DB_TYPE_NCLOB, + ] + for typ in types: + conn = test_env.get_connection() + lob = conn.createlob(typ, "Temp LOB") + conn.close() + with test_env.assert_raises_full_code("DPY-1001"): + lob.read() + with test_env.assert_raises_full_code("DPY-1001"): + lob.write("x") + + +def test_1936(cursor, test_env): + "1936 - test reading a non-existent directory" + directory_name = "TEST_1936_MISSING_DIR" + file_name 
= "test_1936_missing_file.txt" + cursor.execute( + "select BFILENAME(:1, :2) from dual", [directory_name, file_name] + ) + (bfile,) = cursor.fetchone() + assert bfile.getfilename() == (directory_name, file_name) + with test_env.assert_raises_full_code("ORA-22285"): + bfile.fileexists() + with test_env.assert_raises_full_code("ORA-22285"): + bfile.read() + + +def test_1937(conn, test_env): + "1937 - test using BFILE methods on non-BFILE LOBs" + types = [ + oracledb.DB_TYPE_BLOB, + oracledb.DB_TYPE_CLOB, + oracledb.DB_TYPE_NCLOB, + ] + for typ in types: + lob = conn.createlob(typ) + with test_env.assert_raises_full_code("DPY-3026"): + lob.getfilename() + with test_env.assert_raises_full_code("DPY-3026"): + lob.setfilename("not_relevant", "not_relevant") + with test_env.assert_raises_full_code("DPY-3026"): + lob.fileexists() + + +def test_1938(conn, cursor): + "1938 - confirm that LOB objects are retained across getvalue() calls" + for typ in ( + oracledb.DB_TYPE_BLOB, + oracledb.DB_TYPE_CLOB, + oracledb.DB_TYPE_NCLOB, + ): + var = cursor.var(typ) + lob = conn.createlob(typ, "Some data for test 1938") + var.setvalue(0, lob) + assert var.getvalue() is lob + + +def test_1939(cursor): + "1939 - temporary LOB in/out without modification" + value = "test - 1939" + var = cursor.var(oracledb.DB_TYPE_CLOB) + var.setvalue(0, value) + assert var.getvalue().read() == value + cursor.callproc("pkg_TestLOBs.TestInOut", [var, None, None]) + assert var.getvalue().read() == value + + +def test_1940(cursor): + "1940 - temporary LOB in/out with modification" + search_value = "test" + replace_value = "replaced" + initial_value = f"{search_value} - 1939" + final_value = f"{replace_value} - 1939" + var = cursor.var(oracledb.DB_TYPE_CLOB) + var.setvalue(0, initial_value) + assert var.getvalue().read() == initial_value + cursor.callproc( + "pkg_TestLOBs.TestInOut", [var, search_value, replace_value] + ) + assert var.getvalue().read() == final_value diff --git a/tests/test_2000_long_var.py b/tests/test_2000_long_var.py index 612c64f5..887d84e4 100644 --- a/tests/test_2000_long_var.py +++ b/tests/test_2000_long_var.py @@ -27,105 +27,104 @@ """ import oracledb -import test_env -class TestCase(test_env.BaseTestCase): - def __perform_test(self, typ): - name_part = "Long" if typ is oracledb.DB_TYPE_LONG else "LongRaw" +def _perform_test(cursor, typ): + name_part = "Long" if typ is oracledb.DB_TYPE_LONG else "LongRaw" - self.cursor.execute(f"truncate table Test{name_part}s") - self.cursor.setinputsizes(long_string=typ) - long_string = "" - for i in range(1, 11): - char = chr(ord("A") + i - 1) - long_string += char * 25000 - if i % 3 == 1: - bind_value = None + cursor.execute(f"truncate table Test{name_part}s") + cursor.setinputsizes(long_string=typ) + long_string = "" + for i in range(1, 11): + char = chr(ord("A") + i - 1) + long_string += char * 25000 + if i % 3 == 1: + bind_value = None + else: + if typ is oracledb.DB_TYPE_LONG_RAW: + bind_value = long_string.encode() else: - if typ is oracledb.DB_TYPE_LONG_RAW: - bind_value = long_string.encode() - else: - bind_value = long_string - self.cursor.execute( - f""" - insert into Test{name_part}s (IntCol, {name_part}Col) - values (:integer_value, :long_string) - """, - integer_value=i, - long_string=bind_value, - ) - self.conn.commit() - self.cursor.execute(f"select * from Test{name_part}s order by IntCol") - long_string = "" - for integer_value, fetched_value in self.cursor: - char = chr(ord("A") + integer_value - 1) - long_string += char * 25000 - if integer_value % 3 == 1: 
- expected_value = None + bind_value = long_string + cursor.execute( + f""" + insert into Test{name_part}s (IntCol, {name_part}Col) + values (:integer_value, :long_string) + """, + integer_value=i, + long_string=bind_value, + ) + cursor.connection.commit() + cursor.execute(f"select * from Test{name_part}s order by IntCol") + long_string = "" + for integer_value, fetched_value in cursor: + char = chr(ord("A") + integer_value - 1) + long_string += char * 25000 + if integer_value % 3 == 1: + expected_value = None + else: + if typ is oracledb.DB_TYPE_LONG_RAW: + expected_value = long_string.encode() else: - if typ is oracledb.DB_TYPE_LONG_RAW: - expected_value = long_string.encode() - else: - expected_value = long_string - if fetched_value is not None: - self.assertEqual(len(fetched_value), integer_value * 25000) - self.assertEqual(fetched_value, expected_value) - - def test_2000(self): - "2000 - test binding and fetching long data" - self.__perform_test(oracledb.DB_TYPE_LONG) - - def test_2001(self): - "2001 - test binding long data with executemany()" - data = [] - self.cursor.execute("truncate table TestLongs") - for i in range(5): - char = chr(ord("A") + i) - long_str = char * (32768 * (i + 1)) - data.append((i + 1, long_str)) - self.cursor.executemany("insert into TestLongs values (:1, :2)", data) - self.conn.commit() - self.cursor.execute("select * from TestLongs order by IntCol") - self.assertEqual(self.cursor.fetchall(), data) - - def test_2002(self): - "2002 - test binding and fetching long raw data" - self.__perform_test(oracledb.DB_TYPE_LONG_RAW) - - def test_2003(self): - "2003 - test cursor description is accurate for longs" - self.cursor.execute("select * from TestLongs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ("LONGCOL", oracledb.DB_TYPE_LONG, None, None, None, None, True), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_2004(self): - "2004 - test cursor description is accurate for long raws" - self.cursor.execute("select * from TestLongRaws") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "LONGRAWCOL", - oracledb.DB_TYPE_LONG_RAW, - None, - None, - None, - None, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - - @test_env.skip_unless_thick_mode() - def test_2005(self): - "2005 - test array size too large generates an exception" - self.cursor.arraysize = 268435456 - with self.assertRaisesFullCode("DPI-1015"): - self.cursor.execute("select * from TestLongRaws") - - -if __name__ == "__main__": - test_env.run_test_cases() + expected_value = long_string + if fetched_value is not None: + assert len(fetched_value) == integer_value * 25000 + assert fetched_value == expected_value + + +def test_2000(cursor): + "2000 - test binding and fetching long data" + _perform_test(cursor, oracledb.DB_TYPE_LONG) + + +def test_2001(conn, cursor): + "2001 - test binding long data with executemany()" + data = [] + cursor.execute("truncate table TestLongs") + for i in range(5): + char = chr(ord("A") + i) + long_str = char * (32768 * (i + 1)) + data.append((i + 1, long_str)) + cursor.executemany("insert into TestLongs values (:1, :2)", data) + conn.commit() + cursor.execute("select * from TestLongs order by IntCol") + assert cursor.fetchall() == data + + +def test_2002(cursor): + "2002 - test binding and fetching long raw data" + _perform_test(cursor, oracledb.DB_TYPE_LONG_RAW) + + +def test_2003(cursor): + "2003 - test cursor description is accurate for 
longs" + cursor.execute("select * from TestLongs") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ("LONGCOL", oracledb.DB_TYPE_LONG, None, None, None, None, True), + ] + assert cursor.description == expected_value + + +def test_2004(cursor): + "2004 - test cursor description is accurate for long raws" + cursor.execute("select * from TestLongRaws") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "LONGRAWCOL", + oracledb.DB_TYPE_LONG_RAW, + None, + None, + None, + None, + True, + ), + ] + assert cursor.description == expected_value + + +def test_2005(skip_unless_thick_mode, cursor, test_env): + "2005 - test array size too large generates an exception" + cursor.arraysize = 268435456 + with test_env.assert_raises_full_code("DPI-1015"): + cursor.execute("select * from TestLongRaws") diff --git a/tests/test_2100_nchar_var.py b/tests/test_2100_nchar_var.py index aeffcd3e..66a0d223 100644 --- a/tests/test_2100_nchar_var.py +++ b/tests/test_2100_nchar_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,288 +27,306 @@ """ import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - for i in range(1, 11): - unicode_col = f"Unicode \u3042 {i}" - fixed_char_col = f"Fixed Unicode {i}".ljust(40) - if i % 2: - nullable_col = f"Nullable {i}" - else: - nullable_col = None - data_tuple = (i, unicode_col, fixed_char_col, nullable_col) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def test_2100(self): - "2100 - test value length" - return_value = self.cursor.var(int) - self.cursor.execute( - """ - begin - :retval := LENGTH(:value); - end; - """, - value="InVal \u3042", - retval=return_value, - ) - self.assertEqual(return_value.getvalue(), 7) - - def test_2101(self): - "2101 - test binding in a unicode" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_NVARCHAR) - self.cursor.execute( - "select * from TestUnicodes where UnicodeCol = :value", - value="Unicode \u3042 5", - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_2102(self): - "2102 - test binding a different variable on second execution" - retval_1 = self.cursor.var(oracledb.DB_TYPE_NVARCHAR, 30) - retval_2 = self.cursor.var(oracledb.DB_TYPE_NVARCHAR, 30) - self.cursor.execute( - r"begin :retval := unistr('Called \3042'); end;", retval=retval_1 - ) - self.assertEqual(retval_1.getvalue(), "Called \u3042") - self.cursor.execute("begin :retval := 'Called'; end;", retval=retval_2) - self.assertEqual(retval_2.getvalue(), "Called") - - def test_2103(self): - "2103 - test binding in a string after setting input sizes to a number" - unicode_val = self.cursor.var(oracledb.DB_TYPE_NVARCHAR) - unicode_val.setvalue(0, "Unicode \u3042 6") - self.cursor.setinputsizes(value=oracledb.NUMBER) - self.cursor.execute( - "select * from TestUnicodes where UnicodeCol = :value", - value=unicode_val, - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[6]]) - - def test_2104(self): - "2104 - test binding in a unicode array" - return_value = self.cursor.var(oracledb.NUMBER) - array = [r[1] for r in 
self.raw_data] - array_var = self.cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, array) - statement = """ - begin - :retval := pkg_TestUnicodeArrays.TestInArrays( - :integer_value, :array); - end;""" - self.cursor.execute( - statement, retval=return_value, integer_value=5, array=array_var - ) - self.assertEqual(return_value.getvalue(), 116) - array = [f"Unicode - \u3042 {i}" for i in range(15)] - array_var = self.cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, array) - self.cursor.execute(statement, integer_value=8, array=array_var) - self.assertEqual(return_value.getvalue(), 208) - - def test_2105(self): - "2105 - test binding in a unicode array (with setinputsizes)" - return_value = self.cursor.var(oracledb.NUMBER) - self.cursor.setinputsizes(array=[oracledb.DB_TYPE_NVARCHAR, 10]) - array = [r[1] for r in self.raw_data] - self.cursor.execute( - """ - begin - :retval := pkg_TestUnicodeArrays.TestInArrays( - :integer_value, :array); - end; - """, - retval=return_value, - integer_value=6, - array=array, - ) - self.assertEqual(return_value.getvalue(), 117) - - def test_2106(self): - "2106 - test binding in a unicode array (with arrayvar)" - return_value = self.cursor.var(oracledb.NUMBER) - array = self.cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, 10, 20) - array.setvalue(0, [r[1] for r in self.raw_data]) - self.cursor.execute( - """ +import pytest + + +@pytest.fixture(scope="module") +def module_data(): + data = [] + for i in range(1, 11): + unicode_col = f"Unicode \u3042 {i}" + fixed_char_col = f"Fixed Unicode {i}".ljust(40) + if i % 2: + nullable_col = f"Nullable {i}" + else: + nullable_col = None + data_tuple = (i, unicode_col, fixed_char_col, nullable_col) + data.append(data_tuple) + return data + + +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key + + +def test_2100(cursor): + "2100 - test value length" + return_value = cursor.var(int) + cursor.execute( + """ + begin + :retval := LENGTH(:value); + end; + """, + value="InVal \u3042", + retval=return_value, + ) + assert return_value.getvalue() == 7 + + +def test_2101(cursor, module_data_by_key): + "2101 - test binding in a unicode" + cursor.setinputsizes(value=oracledb.DB_TYPE_NVARCHAR) + cursor.execute( + "select * from TestUnicodes where UnicodeCol = :value", + value="Unicode \u3042 5", + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_2102(cursor): + "2102 - test binding a different variable on second execution" + retval_1 = cursor.var(oracledb.DB_TYPE_NVARCHAR, 30) + retval_2 = cursor.var(oracledb.DB_TYPE_NVARCHAR, 30) + cursor.execute( + r"begin :retval := unistr('Called \3042'); end;", retval=retval_1 + ) + assert retval_1.getvalue() == "Called \u3042" + cursor.execute("begin :retval := 'Called'; end;", retval=retval_2) + assert retval_2.getvalue() == "Called" + + +def test_2103(cursor, module_data_by_key): + "2103 - test binding in a string after setting input sizes to a number" + unicode_val = cursor.var(oracledb.DB_TYPE_NVARCHAR) + unicode_val.setvalue(0, "Unicode \u3042 6") + cursor.setinputsizes(value=oracledb.NUMBER) + cursor.execute( + "select * from TestUnicodes where UnicodeCol = :value", + value=unicode_val, + ) + assert cursor.fetchall() == [module_data_by_key[6]] + + +def test_2104(cursor, module_data): + "2104 - test binding in a unicode array" + return_value = cursor.var(oracledb.NUMBER) + array = [r[1] for r in module_data] + array_var = cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, array) + 
statement = """ begin :retval := pkg_TestUnicodeArrays.TestInArrays( :integer_value, :array); - end; - """, - retval=return_value, - integer_value=7, - array=array, - ) - self.assertEqual(return_value.getvalue(), 118) - - def test_2107(self): - "2107 - test binding in/out a unicode array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, 10, 100) - original_data = [r[1] for r in self.raw_data] - fmt = "Converted element \u3042 # %d originally had length %d" - expected_data = [ - fmt % (i, len(original_data[i - 1])) for i in range(1, 6) - ] + original_data[5:] - array.setvalue(0, original_data) - self.cursor.execute( - """ - begin - pkg_TestUnicodeArrays.TestInOutArrays(:numElems, :array); - end; - """, - numElems=5, - array=array, - ) - self.assertEqual(array.getvalue(), expected_data) - - def test_2108(self): - "2108 - test binding out a unicode array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, 6, 100) - fmt = "Test out element \u3042 # %d" - expected_data = [fmt % i for i in range(1, 7)] - self.cursor.execute( - """ - begin - pkg_TestUnicodeArrays.TestOutArrays(:numElems, :array); - end; - """, - numElems=6, - array=array, - ) - self.assertEqual(array.getvalue(), expected_data) - - def test_2109(self): - "2109 - test binding in a null" - self.cursor.execute( - "select * from TestUnicodes where UnicodeCol = :value", - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_2110(self): - "2110 - test binding out with set input sizes defined (by type)" - bind_vars = self.cursor.setinputsizes(value=oracledb.DB_TYPE_NVARCHAR) - self.cursor.execute( - r""" - begin - :value := unistr('TSI \3042'); - end; - """ - ) - self.assertEqual(bind_vars["value"].getvalue(), "TSI \u3042") - - def test_2111(self): - "2111 - test binding in/out with set input sizes defined (by type)" - bind_vars = self.cursor.setinputsizes(value=oracledb.DB_TYPE_NVARCHAR) - self.cursor.execute( - r""" - begin - :value := :value || unistr(' TSI \3042'); - end; - """, - value="InVal \u3041", - ) - self.assertEqual( - bind_vars["value"].getvalue(), "InVal \u3041 TSI \u3042" - ) - - def test_2112(self): - "2112 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_NVARCHAR) - self.cursor.execute( - r""" - begin - :value := unistr('TSI (VAR) \3042'); - end; - """, - value=var, - ) - self.assertEqual(var.getvalue(), "TSI (VAR) \u3042") - - def test_2113(self): - "2113 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_NVARCHAR) - var.setvalue(0, "InVal \u3041") - self.cursor.execute( - r""" - begin - :value := :value || unistr(' TSI (VAR) \3042'); - end; - """, - value=var, - ) - self.assertEqual(var.getvalue(), "InVal \u3041 TSI (VAR) \u3042") - - def test_2114(self): - "2114 - test cursor description is accurate" - self.cursor.execute("select * from TestUnicodes") - varchar_ratio, nvarchar_ratio = test_env.get_charset_ratios() - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "UNICODECOL", - oracledb.DB_TYPE_NVARCHAR, - 20, - 20 * nvarchar_ratio, - None, - None, - False, - ), - ( - "FIXEDUNICODECOL", - oracledb.DB_TYPE_NCHAR, - 40, - 40 * nvarchar_ratio, - None, - None, - False, - ), - ( - "NULLABLECOL", - oracledb.DB_TYPE_NVARCHAR, - 50, - 50 * nvarchar_ratio, - None, - None, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_2115(self): - "2115 - test that fetching all of the data returns the correct 
results" - self.cursor.execute("select * From TestUnicodes order by IntCol") - self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_2116(self): - "2116 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestUnicodes order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_2117(self): - "2117 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestUnicodes - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertIsNone(self.cursor.fetchone()) - - -if __name__ == "__main__": - test_env.run_test_cases() + end;""" + cursor.execute( + statement, retval=return_value, integer_value=5, array=array_var + ) + assert return_value.getvalue() == 116 + array = [f"Unicode - \u3042 {i}" for i in range(15)] + array_var = cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, array) + cursor.execute(statement, integer_value=8, array=array_var) + assert return_value.getvalue() == 208 + + +def test_2105(cursor, module_data): + "2105 - test binding in a unicode array (with setinputsizes)" + return_value = cursor.var(oracledb.NUMBER) + cursor.setinputsizes(array=[oracledb.DB_TYPE_NVARCHAR, 10]) + array = [r[1] for r in module_data] + cursor.execute( + """ + begin + :retval := pkg_TestUnicodeArrays.TestInArrays( + :integer_value, :array); + end; + """, + retval=return_value, + integer_value=6, + array=array, + ) + assert return_value.getvalue() == 117 + + +def test_2106(cursor, module_data): + "2106 - test binding in a unicode array (with arrayvar)" + return_value = cursor.var(oracledb.NUMBER) + array = cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, 10, 20) + array.setvalue(0, [r[1] for r in module_data]) + cursor.execute( + """ + begin + :retval := pkg_TestUnicodeArrays.TestInArrays( + :integer_value, :array); + end; + """, + retval=return_value, + integer_value=7, + array=array, + ) + assert return_value.getvalue() == 118 + + +def test_2107(cursor, module_data): + "2107 - test binding in/out a unicode array (with arrayvar)" + array = cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, 10, 100) + original_data = [r[1] for r in module_data] + fmt = "Converted element \u3042 # %d originally had length %d" + expected_data = [ + fmt % (i, len(original_data[i - 1])) for i in range(1, 6) + ] + original_data[5:] + array.setvalue(0, original_data) + cursor.execute( + """ + begin + pkg_TestUnicodeArrays.TestInOutArrays(:numElems, :array); + end; + """, + numElems=5, + array=array, + ) + assert array.getvalue() == expected_data + + +def test_2108(cursor): + "2108 - test binding out a unicode array (with arrayvar)" + array = cursor.arrayvar(oracledb.DB_TYPE_NVARCHAR, 6, 100) + fmt = "Test out element \u3042 # %d" + expected_data = [fmt % i for i in range(1, 7)] + cursor.execute( + """ + begin + pkg_TestUnicodeArrays.TestOutArrays(:numElems, :array); + end; + """, + numElems=6, + array=array, + ) + assert array.getvalue() == expected_data + + +def test_2109(cursor): + "2109 - test binding in a null" + cursor.execute( + "select * from TestUnicodes where UnicodeCol = 
:value", + value=None, + ) + assert cursor.fetchall() == [] + + +def test_2110(cursor): + "2110 - test binding out with set input sizes defined (by type)" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_NVARCHAR) + cursor.execute( + r""" + begin + :value := unistr('TSI \3042'); + end; + """ + ) + assert bind_vars["value"].getvalue() == "TSI \u3042" + + +def test_2111(cursor): + "2111 - test binding in/out with set input sizes defined (by type)" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_NVARCHAR) + cursor.execute( + r""" + begin + :value := :value || unistr(' TSI \3042'); + end; + """, + value="InVal \u3041", + ) + assert bind_vars["value"].getvalue() == "InVal \u3041 TSI \u3042" + + +def test_2112(cursor): + "2112 - test binding out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_NVARCHAR) + cursor.execute( + r""" + begin + :value := unistr('TSI (VAR) \3042'); + end; + """, + value=var, + ) + assert var.getvalue() == "TSI (VAR) \u3042" + + +def test_2113(cursor): + "2113 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_NVARCHAR) + var.setvalue(0, "InVal \u3041") + cursor.execute( + r""" + begin + :value := :value || unistr(' TSI (VAR) \3042'); + end; + """, + value=var, + ) + assert var.getvalue() == "InVal \u3041 TSI (VAR) \u3042" + + +def test_2114(cursor, test_env): + "2114 - test cursor description is accurate" + cursor.execute("select * from TestUnicodes") + varchar_ratio, nvarchar_ratio = test_env.charset_ratios + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "UNICODECOL", + oracledb.DB_TYPE_NVARCHAR, + 20, + 20 * nvarchar_ratio, + None, + None, + False, + ), + ( + "FIXEDUNICODECOL", + oracledb.DB_TYPE_NCHAR, + 40, + 40 * nvarchar_ratio, + None, + None, + False, + ), + ( + "NULLABLECOL", + oracledb.DB_TYPE_NVARCHAR, + 50, + 50 * nvarchar_ratio, + None, + None, + True, + ), + ] + assert cursor.description == expected_value + + +def test_2115(cursor, module_data): + "2115 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestUnicodes order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_2116(cursor, module_data): + "2116 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestUnicodes order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + + +def test_2117(cursor, module_data_by_key): + "2117 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestUnicodes + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None diff --git a/tests/test_2200_number_var.py b/tests/test_2200_number_var.py index 0c0438c4..bd15d320 100644 --- a/tests/test_2200_number_var.py +++ b/tests/test_2200_number_var.py @@ -29,550 +29,573 @@ import decimal import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def output_type_handler_binary_int(self, cursor, metadata): - return cursor.var( - oracledb.DB_TYPE_BINARY_INTEGER, arraysize=cursor.arraysize +import pytest + + +@pytest.fixture(scope="module") +def module_data(): + data = [] + for i in range(1, 11): + 
number_col = i + i * 0.25 + float_col = i + i * 0.75 + unconstrained_col = i**3 + i * 0.5 + if i % 2: + nullable_col = 143**i + else: + nullable_col = None + data_tuple = ( + i, + 38**i, + number_col, + float_col, + unconstrained_col, + nullable_col, ) + data.append(data_tuple) + return data - def output_type_handler_decimal(self, cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_NUMBER: - return cursor.var( - str, - 255, - outconverter=decimal.Decimal, - arraysize=cursor.arraysize, - ) - - def output_type_handler_str(self, cursor, metadata): - return cursor.var(str, 255, arraysize=cursor.arraysize) - - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - for i in range(1, 11): - number_col = i + i * 0.25 - float_col = i + i * 0.75 - unconstrained_col = i**3 + i * 0.5 - if i % 2: - nullable_col = 143**i - else: - nullable_col = None - data_tuple = ( - i, - 38**i, - number_col, - float_col, - unconstrained_col, - nullable_col, - ) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - @test_env.skip_unless_plsql_boolean_supported() - def test_2200(self): - "2200 - test binding in a boolean" - result = self.cursor.callfunc( - "pkg_TestBooleans.GetStringRep", str, [True] - ) - self.assertEqual(result, "TRUE") - - def test_2201(self): - "2201 - test binding in a boolean as a number" - var = self.cursor.var(oracledb.NUMBER) - var.setvalue(0, True) - self.cursor.execute("select :1 from dual", [var]) - (result,) = self.cursor.fetchone() - self.assertEqual(result, 1) - var.setvalue(0, False) - self.cursor.execute("select :1 from dual", [var]) - (result,) = self.cursor.fetchone() - self.assertEqual(result, 0) - - def test_2202(self): - "2202 - test binding in a decimal.Decimal" - self.cursor.execute( - """ - select * - from TestNumbers - where NumberCol - :value1 - :value2 = trunc(NumberCol) - """, - value1=decimal.Decimal("0.20"), - value2=decimal.Decimal("0.05"), - ) - expected_data = [ - self.data_by_key[1], - self.data_by_key[5], - self.data_by_key[9], - ] - self.assertEqual(self.cursor.fetchall(), expected_data) - - def test_2203(self): - "2203 - test binding in a float" - self.cursor.execute( - """ - select * - from TestNumbers - where NumberCol - :value = trunc(NumberCol) - """, - value=0.25, - ) - expected_data = [ - self.data_by_key[1], - self.data_by_key[5], - self.data_by_key[9], - ] - self.assertEqual(self.cursor.fetchall(), expected_data) - - def test_2204(self): - "2204 - test binding in an integer" - self.cursor.execute( - "select * from TestNumbers where IntCol = :value", - value=2, - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[2]]) - - def test_2205(self): - "2205 - test binding in a large long integer as Oracle number" - in_val = 6088343244 - value_var = self.cursor.var(oracledb.NUMBER) - value_var.setvalue(0, in_val) - self.cursor.execute( - """ - begin - :value := :value + 5; - end; - """, - value=value_var, - ) - self.assertEqual(value_var.getvalue(), in_val + 5) - - def test_2206(self): - "2206 - test binding in a large long integer as Python integer" - long_value = -9999999999999999999 - self.cursor.execute("select :value from dual", value=long_value) - (result,) = self.cursor.fetchone() - self.assertEqual(result, long_value) - - def test_2207(self): - "2207 - test binding in an integer after setting input sizes to string" - self.cursor.setinputsizes(value=15) - self.cursor.execute( - "select * from TestNumbers where IntCol = :value", - value=3, - ) - self.assertEqual(self.cursor.fetchall(), 
[self.data_by_key[3]]) - - def test_2208(self): - "2208 - test binding in a decimal after setting input sizes to number" - cursor = self.conn.cursor() - value = decimal.Decimal("319438950232418390.273596") - cursor.setinputsizes(value=oracledb.NUMBER) - cursor.outputtypehandler = self.output_type_handler_decimal - cursor.execute("select :value from dual", value=value) - (out_value,) = cursor.fetchone() - self.assertEqual(out_value, value) - - def test_2209(self): - "2209 - test binding in a null" - self.cursor.execute( - "select * from TestNumbers where IntCol = :value", - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_2210(self): - "2210 - test binding in a number array" - return_value = self.cursor.var(oracledb.NUMBER) - array = [r[2] for r in self.raw_data] - statement = """ - begin - :return_value := pkg_TestNumberArrays.TestInArrays( - :start_value, :array); - end;""" - self.cursor.execute( - statement, return_value=return_value, start_value=5, array=array - ) - self.assertEqual(return_value.getvalue(), 73.75) - array = list(range(15)) - self.cursor.execute(statement, start_value=10, array=array) - self.assertEqual(return_value.getvalue(), 115.0) - - def test_2211(self): - "2211 - test binding in a number array (with setinputsizes)" - return_value = self.cursor.var(oracledb.NUMBER) - self.cursor.setinputsizes(array=[oracledb.NUMBER, 10]) - array = [r[2] for r in self.raw_data] - self.cursor.execute( - """ - begin - :return_value := pkg_TestNumberArrays.TestInArrays( - :start_value, :array); - end; - """, - return_value=return_value, - start_value=6, - array=array, - ) - self.assertEqual(return_value.getvalue(), 74.75) - def test_2212(self): - "2212 - test binding in a number array (with arrayvar)" - return_value = self.cursor.var(oracledb.NUMBER) - array = self.cursor.arrayvar( - oracledb.NUMBER, [r[2] for r in self.raw_data] - ) - self.cursor.execute( - """ - begin - :return_value := pkg_TestNumberArrays.TestInArrays( - :integer_value, :array); - end; - """, - return_value=return_value, - integer_value=7, - array=array, - ) - self.assertEqual(return_value.getvalue(), 75.75) - - def test_2213(self): - "2213 - test binding in a zero length number array (with arrayvar)" - return_value = self.cursor.var(oracledb.NUMBER) - array = self.cursor.arrayvar(oracledb.NUMBER, 0) - self.cursor.execute( - """ - begin - :return_value := pkg_TestNumberArrays.TestInArrays( - :integer_value, :array); - end; - """, - return_value=return_value, - integer_value=8, - array=array, - ) - self.assertEqual(return_value.getvalue(), 8.0) - self.assertEqual(array.getvalue(), []) - - def test_2214(self): - "2214 - test binding in/out a number array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.NUMBER, 10) - original_data = [r[2] for r in self.raw_data] - expected_data = [ - original_data[i - 1] * 10 for i in range(1, 6) - ] + original_data[5:] - array.setvalue(0, original_data) - self.cursor.execute( - """ - begin - pkg_TestNumberArrays.TestInOutArrays(:num_elems, :array); - end; - """, - num_elems=5, - array=array, - ) - self.assertEqual(array.getvalue(), expected_data) - - def test_2215(self): - "2215 - test binding out a Number array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.NUMBER, 6) - expected_data = [i * 100 for i in range(1, 7)] - self.cursor.execute( - """ - begin - pkg_TestNumberArrays.TestOutArrays(:num_elems, :array); - end; - """, - num_elems=6, - array=array, - ) - self.assertEqual(array.getvalue(), expected_data) +@pytest.fixture(scope="module") 
+def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key - def test_2216(self): - "2216 - test binding out with set input sizes defined" - bind_vars = self.cursor.setinputsizes(value=oracledb.NUMBER) - self.cursor.execute( - """ - begin - :value := 5; - end; - """ - ) - self.assertEqual(bind_vars["value"].getvalue(), 5) - def test_2217(self): - "2217 - test binding in/out with set input sizes defined" - bind_vars = self.cursor.setinputsizes(value=oracledb.NUMBER) - self.cursor.execute( - """ - begin - :value := :value + 5; - end; - """, - value=1.25, - ) - self.assertEqual(bind_vars["value"].getvalue(), 6.25) +def output_type_handler_binary_int(cursor, metadata): + return cursor.var( + oracledb.DB_TYPE_BINARY_INTEGER, arraysize=cursor.arraysize + ) - def test_2218(self): - "2218 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.NUMBER) - self.cursor.execute( - """ - begin - :value := 5; - end; - """, - value=var, - ) - self.assertEqual(var.getvalue(), 5) - - def test_2219(self): - "2219 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.NUMBER) - var.setvalue(0, 2.25) - self.cursor.execute( - """ - begin - :value := :value + 5; - end; - """, - value=var, - ) - self.assertEqual(var.getvalue(), 7.25) - - def test_2220(self): - "2220 - test cursor description is accurate" - self.cursor.execute("select * from TestNumbers") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ("LONGINTCOL", oracledb.DB_TYPE_NUMBER, 17, None, 16, 0, False), - ("NUMBERCOL", oracledb.DB_TYPE_NUMBER, 13, None, 9, 2, False), - ("FLOATCOL", oracledb.DB_TYPE_NUMBER, 127, None, 126, -127, False), - ( - "UNCONSTRAINEDCOL", - oracledb.DB_TYPE_NUMBER, - 127, - None, - 0, - -127, - False, - ), - ("NULLABLECOL", oracledb.DB_TYPE_NUMBER, 39, None, 38, 0, True), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_2221(self): - "2221 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestNumbers order by IntCol") - self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_2222(self): - "2222 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestNumbers order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_2223(self): - "2223 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestNumbers - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertIsNone(self.cursor.fetchone()) - - def test_2224(self): - "2224 - test that fetching a long integer returns such in Python" - self.cursor.execute( - """ - select NullableCol - from TestNumbers - where IntCol = 9 - """ - ) - (col,) = self.cursor.fetchone() - self.assertEqual(col, 25004854810776297743) - - def test_2225(self): - "2225 - test fetching a floating point number returns such in Python" - self.cursor.execute("select 1.25 from dual") - (result,) = 
self.cursor.fetchone() - self.assertEqual(result, 1.25) - - def test_2226(self): - "2226 - test that fetching an integer returns such in Python" - self.cursor.execute("select 148 from dual") - (result,) = self.cursor.fetchone() - self.assertEqual(result, 148) - self.assertIsInstance(result, int, "integer not returned") - - def test_2227(self): - "2227 - test that acceptable boundary numbers are handled properly" - in_values = [ - decimal.Decimal("9.99999999999999e+125"), - decimal.Decimal("-9.99999999999999e+125"), - 0.0, - 1e-130, - -1e-130, - ] - out_values = [ - int("9" * 15 + "0" * 111), - -int("9" * 15 + "0" * 111), - 0, - 1e-130, - -1e-130, - ] - for in_value, out_value in zip(in_values, out_values): - self.cursor.execute("select :1 from dual", [in_value]) - (result,) = self.cursor.fetchone() - self.assertEqual(result, out_value) - - def test_2228(self): - "2228 - test that unacceptable boundary numbers are rejected" - test_values = [ - (1e126, "DPY-4003"), - (-1e126, "DPY-4003"), - (float("inf"), "DPY-4004"), - (float("-inf"), "DPY-4004"), - (float("NaN"), "DPY-4004"), - (decimal.Decimal("1e126"), "DPY-4003"), - (decimal.Decimal("-1e126"), "DPY-4003"), - (decimal.Decimal("inf"), "DPY-4004"), - (decimal.Decimal("-inf"), "DPY-4004"), - (decimal.Decimal("NaN"), "DPY-4004"), - ] - for value, error in test_values: - with self.assertRaisesFullCode(error): - self.cursor.execute("select :1 from dual", [value]) - - def test_2229(self): - "2229 - test that fetching the result of division returns a float" - self.cursor.execute( - """ - select IntCol / 7 - from TestNumbers - where IntCol = 1 - """ - ) - (result,) = self.cursor.fetchone() - self.assertEqual(result, 1.0 / 7.0) - self.assertIsInstance(result, float, "float not returned") - - def test_2230(self): - "2230 - test that string format is returned properly" - var = self.cursor.var(oracledb.NUMBER) - self.assertIs(var.type, oracledb.DB_TYPE_NUMBER) - self.assertEqual( - str(var), "" - ) - var.setvalue(0, 4.5) - self.assertEqual( - str(var), "" - ) - - def test_2231(self): - "2231 - test that binding binary double is possible" - statement = "select :1 from dual" - self.cursor.setinputsizes(oracledb.DB_TYPE_BINARY_DOUBLE) - self.cursor.execute(statement, (5,)) - self.assertEqual( - self.cursor.bindvars[0].type, oracledb.DB_TYPE_BINARY_DOUBLE - ) - (value,) = self.cursor.fetchone() - self.assertEqual(value, 5) - self.cursor.execute(statement, (1.5,)) - self.assertEqual( - self.cursor.bindvars[0].type, oracledb.DB_TYPE_BINARY_DOUBLE +def output_type_handler_decimal(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_NUMBER: + return cursor.var( + str, + 255, + outconverter=decimal.Decimal, + arraysize=cursor.arraysize, ) - (value,) = self.cursor.fetchone() - self.assertEqual(value, 1.5) - self.cursor.execute(statement, [decimal.Decimal("NaN")]) - self.assertEqual( - self.cursor.bindvars[0].type, oracledb.DB_TYPE_BINARY_DOUBLE - ) - (value,) = self.cursor.fetchone() - self.assertEqual(str(value), str(float("NaN"))) - - def test_2232(self): - "2232 - test fetching numbers as binary integers" - self.cursor.outputtypehandler = self.output_type_handler_binary_int - for value in (1, 2**31, 2**63 - 1, -1, -(2**31), -(2**63) + 1): - self.cursor.execute("select :1 from dual", [str(value)]) - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(value, fetched_value) - - def test_2233(self): - "2233 - test binding native integer as an out bind" - simple_var = self.cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) - 
self.cursor.execute("begin :value := 2.9; end;", [simple_var]) - self.assertEqual(simple_var.getvalue(), 2) - - simple_var = self.cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) - self.cursor.execute("begin :value := 1.5; end;", [simple_var]) - self.assertEqual(simple_var.getvalue(), 1) - - def test_2234(self): - "2234 - test binding in a native integer" - statement = "begin :value := :value + 2.5; end;" - simple_var = self.cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) - simple_var.setvalue(0, 0) - self.cursor.execute(statement, [simple_var]) - self.assertEqual(simple_var.getvalue(), 2) - - simple_var.setvalue(0, -5) - self.cursor.execute(statement, [simple_var]) - self.assertEqual(simple_var.getvalue(), -2) - - def test_2235(self): - "2235 - test setting decimal value for binary int" - simple_var = self.cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) - simple_var.setvalue(0, 2.5) - self.cursor.execute("begin :value := :value + 2.5; end;", [simple_var]) - self.assertEqual(simple_var.getvalue(), 4) - - def test_2236(self): - "2236 - bind a large value to binary int" - simple_var = self.cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) - self.cursor.execute( - "begin :value := POWER(2, 31) - 1; end;", [simple_var] - ) - self.assertEqual(simple_var.getvalue(), 2**31 - 1) - self.cursor.execute( - "begin :value := POWER(-2, 31) - 1; end;", [simple_var] - ) - self.assertEqual(simple_var.getvalue(), -(2**31) - 1) - - def test_2237(self): - "2237 - fetch a number with oracledb.defaults.fetch_lobs = False" - with test_env.DefaultsContextManager("fetch_lobs", False): - self.cursor.execute("select 1 from dual") - (result,) = self.cursor.fetchone() - self.assertIsInstance(result, int) - - def test_2238(self): - "2238 - fetch a small constant with a decimal point" - self.cursor.outputtypehandler = self.output_type_handler_str - self.cursor.execute("select 3 / 2 from dual") - (result,) = self.cursor.fetchone() - self.assertEqual(len(result), 3) - self.assertEqual(result[0], "1") - self.assertEqual(result[2], "5") - - -if __name__ == "__main__": - test_env.run_test_cases() +def output_type_handler_str(cursor, metadata): + return cursor.var(str, 255, arraysize=cursor.arraysize) + + +def test_2200(skip_unless_plsql_boolean_supported, cursor): + "2200 - test binding in a boolean" + result = cursor.callfunc("pkg_TestBooleans.GetStringRep", str, [True]) + assert result == "TRUE" + + +def test_2201(cursor): + "2201 - test binding in a boolean as a number" + var = cursor.var(oracledb.NUMBER) + var.setvalue(0, True) + cursor.execute("select :1 from dual", [var]) + (result,) = cursor.fetchone() + assert result == 1 + var.setvalue(0, False) + cursor.execute("select :1 from dual", [var]) + (result,) = cursor.fetchone() + assert result == 0 + + +def test_2202(cursor, module_data_by_key): + "2202 - test binding in a decimal.Decimal" + cursor.execute( + """ + select * + from TestNumbers + where NumberCol - :value1 - :value2 = trunc(NumberCol) + """, + value1=decimal.Decimal("0.20"), + value2=decimal.Decimal("0.05"), + ) + expected_data = [ + module_data_by_key[1], + module_data_by_key[5], + module_data_by_key[9], + ] + assert cursor.fetchall() == expected_data + + +def test_2203(cursor, module_data_by_key): + "2203 - test binding in a float" + cursor.execute( + """ + select * + from TestNumbers + where NumberCol - :value = trunc(NumberCol) + """, + value=0.25, + ) + expected_data = [ + module_data_by_key[1], + module_data_by_key[5], + module_data_by_key[9], + ] + assert cursor.fetchall() == expected_data + + +def test_2204(cursor, 
module_data_by_key): + "2204 - test binding in an integer" + cursor.execute( + "select * from TestNumbers where IntCol = :value", + value=2, + ) + assert cursor.fetchall() == [module_data_by_key[2]] + + +def test_2205(cursor): + "2205 - test binding in a large long integer as Oracle number" + in_val = 6088343244 + value_var = cursor.var(oracledb.NUMBER) + value_var.setvalue(0, in_val) + cursor.execute( + """ + begin + :value := :value + 5; + end; + """, + value=value_var, + ) + assert value_var.getvalue() == in_val + 5 + + +def test_2206(cursor): + "2206 - test binding in a large long integer as Python integer" + long_value = -9999999999999999999 + cursor.execute("select :value from dual", value=long_value) + (result,) = cursor.fetchone() + assert result == long_value + + +def test_2207(cursor, module_data_by_key): + "2207 - test binding in an integer after setting input sizes to string" + cursor.setinputsizes(value=15) + cursor.execute( + "select * from TestNumbers where IntCol = :value", + value=3, + ) + assert cursor.fetchall() == [module_data_by_key[3]] + + +def test_2208(cursor): + "2208 - test binding in a decimal after setting input sizes to number" + value = decimal.Decimal("319438950232418390.273596") + cursor.setinputsizes(value=oracledb.NUMBER) + cursor.outputtypehandler = output_type_handler_decimal + cursor.execute("select :value from dual", value=value) + (out_value,) = cursor.fetchone() + assert out_value == value + + +def test_2209(cursor): + "2209 - test binding in a null" + cursor.execute( + "select * from TestNumbers where IntCol = :value", + value=None, + ) + assert cursor.fetchall() == [] + + +def test_2210(cursor, module_data): + "2210 - test binding in a number array" + return_value = cursor.var(oracledb.NUMBER) + array = [r[2] for r in module_data] + statement = """ + begin + :return_value := pkg_TestNumberArrays.TestInArrays( + :start_value, :array); + end;""" + cursor.execute( + statement, return_value=return_value, start_value=5, array=array + ) + assert return_value.getvalue() == 73.75 + array = list(range(15)) + cursor.execute(statement, start_value=10, array=array) + assert return_value.getvalue() == 115.0 + + +def test_2211(cursor, module_data): + "2211 - test binding in a number array (with setinputsizes)" + return_value = cursor.var(oracledb.NUMBER) + cursor.setinputsizes(array=[oracledb.NUMBER, 10]) + array = [r[2] for r in module_data] + cursor.execute( + """ + begin + :return_value := pkg_TestNumberArrays.TestInArrays( + :start_value, :array); + end; + """, + return_value=return_value, + start_value=6, + array=array, + ) + assert return_value.getvalue() == 74.75 + + +def test_2212(cursor, module_data): + "2212 - test binding in a number array (with arrayvar)" + return_value = cursor.var(oracledb.NUMBER) + array = cursor.arrayvar(oracledb.NUMBER, [r[2] for r in module_data]) + cursor.execute( + """ + begin + :return_value := pkg_TestNumberArrays.TestInArrays( + :integer_value, :array); + end; + """, + return_value=return_value, + integer_value=7, + array=array, + ) + assert return_value.getvalue() == 75.75 + + +def test_2213(cursor): + "2213 - test binding in a zero length number array (with arrayvar)" + return_value = cursor.var(oracledb.NUMBER) + array = cursor.arrayvar(oracledb.NUMBER, 0) + cursor.execute( + """ + begin + :return_value := pkg_TestNumberArrays.TestInArrays( + :integer_value, :array); + end; + """, + return_value=return_value, + integer_value=8, + array=array, + ) + assert return_value.getvalue() == 8.0 + assert array.getvalue() == [] + 
+ +def test_2214(cursor, module_data): + "2214 - test binding in/out a number array (with arrayvar)" + array = cursor.arrayvar(oracledb.NUMBER, 10) + original_data = [r[2] for r in module_data] + expected_data = [ + original_data[i - 1] * 10 for i in range(1, 6) + ] + original_data[5:] + array.setvalue(0, original_data) + cursor.execute( + """ + begin + pkg_TestNumberArrays.TestInOutArrays(:num_elems, :array); + end; + """, + num_elems=5, + array=array, + ) + assert array.getvalue() == expected_data + + +def test_2215(cursor): + "2215 - test binding out a Number array (with arrayvar)" + array = cursor.arrayvar(oracledb.NUMBER, 6) + expected_data = [i * 100 for i in range(1, 7)] + cursor.execute( + """ + begin + pkg_TestNumberArrays.TestOutArrays(:num_elems, :array); + end; + """, + num_elems=6, + array=array, + ) + assert array.getvalue() == expected_data + + +def test_2216(cursor): + "2216 - test binding out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.NUMBER) + cursor.execute( + """ + begin + :value := 5; + end; + """ + ) + assert bind_vars["value"].getvalue() == 5 + + +def test_2217(cursor): + "2217 - test binding in/out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.NUMBER) + cursor.execute( + """ + begin + :value := :value + 5; + end; + """, + value=1.25, + ) + assert bind_vars["value"].getvalue() == 6.25 + + +def test_2218(cursor): + "2218 - test binding out with cursor.var() method" + var = cursor.var(oracledb.NUMBER) + cursor.execute( + """ + begin + :value := 5; + end; + """, + value=var, + ) + assert var.getvalue() == 5 + + +def test_2219(cursor): + "2219 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.NUMBER) + var.setvalue(0, 2.25) + cursor.execute( + """ + begin + :value := :value + 5; + end; + """, + value=var, + ) + assert var.getvalue() == 7.25 + + +def test_2220(cursor): + "2220 - test cursor description is accurate" + cursor.execute("select * from TestNumbers") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ("LONGINTCOL", oracledb.DB_TYPE_NUMBER, 17, None, 16, 0, False), + ("NUMBERCOL", oracledb.DB_TYPE_NUMBER, 13, None, 9, 2, False), + ("FLOATCOL", oracledb.DB_TYPE_NUMBER, 127, None, 126, -127, False), + ( + "UNCONSTRAINEDCOL", + oracledb.DB_TYPE_NUMBER, + 127, + None, + 0, + -127, + False, + ), + ("NULLABLECOL", oracledb.DB_TYPE_NUMBER, 39, None, 38, 0, True), + ] + assert cursor.description == expected_value + + +def test_2221(cursor, module_data): + "2221 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestNumbers order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_2222(cursor, module_data): + "2222 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestNumbers order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + + +def test_2223(cursor, module_data_by_key): + "2223 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestNumbers + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None + + +def 
test_2224(cursor): + "2224 - test that fetching a long integer returns such in Python" + cursor.execute( + """ + select NullableCol + from TestNumbers + where IntCol = 9 + """ + ) + (col,) = cursor.fetchone() + assert col == 25004854810776297743 + + +def test_2225(cursor): + "2225 - test fetching a floating point number returns such in Python" + cursor.execute("select 1.25 from dual") + (result,) = cursor.fetchone() + assert result == 1.25 + + +def test_2226(cursor): + "2226 - test that fetching an integer returns such in Python" + cursor.execute("select 148 from dual") + (result,) = cursor.fetchone() + assert result == 148 + assert isinstance(result, int) + + +def test_2227(cursor): + "2227 - test that acceptable boundary numbers are handled properly" + in_values = [ + decimal.Decimal("9.99999999999999e+125"), + decimal.Decimal("-9.99999999999999e+125"), + 0.0, + 1e-130, + -1e-130, + ] + out_values = [ + int("9" * 15 + "0" * 111), + -int("9" * 15 + "0" * 111), + 0, + 1e-130, + -1e-130, + ] + for in_value, out_value in zip(in_values, out_values): + cursor.execute("select :1 from dual", [in_value]) + (result,) = cursor.fetchone() + assert result == out_value + + +def test_2228(cursor, test_env): + "2228 - test that unacceptable boundary numbers are rejected" + test_values = [ + (1e126, "DPY-4003"), + (-1e126, "DPY-4003"), + (float("inf"), "DPY-4004"), + (float("-inf"), "DPY-4004"), + (float("NaN"), "DPY-4004"), + (decimal.Decimal("1e126"), "DPY-4003"), + (decimal.Decimal("-1e126"), "DPY-4003"), + (decimal.Decimal("inf"), "DPY-4004"), + (decimal.Decimal("-inf"), "DPY-4004"), + (decimal.Decimal("NaN"), "DPY-4004"), + ] + for value, error in test_values: + with test_env.assert_raises_full_code(error): + cursor.execute("select :1 from dual", [value]) + + +def test_2229(cursor): + "2229 - test that fetching the result of division returns a float" + cursor.execute( + """ + select IntCol / 7 + from TestNumbers + where IntCol = 1 + """ + ) + (result,) = cursor.fetchone() + assert result == 1.0 / 7.0 + assert isinstance(result, float) + + +def test_2230(cursor): + "2230 - test that string format is returned properly" + var = cursor.var(oracledb.NUMBER) + assert var.type is oracledb.DB_TYPE_NUMBER + assert str(var) == "" + var.setvalue(0, 4.5) + assert str(var) == "" + + +def test_2231(cursor): + "2231 - test that binding binary double is possible" + statement = "select :1 from dual" + cursor.setinputsizes(oracledb.DB_TYPE_BINARY_DOUBLE) + cursor.execute(statement, (5,)) + assert cursor.bindvars[0].type == oracledb.DB_TYPE_BINARY_DOUBLE + (value,) = cursor.fetchone() + assert value == 5 + + cursor.execute(statement, (1.5,)) + assert cursor.bindvars[0].type == oracledb.DB_TYPE_BINARY_DOUBLE + (value,) = cursor.fetchone() + assert value == 1.5 + + cursor.execute(statement, [decimal.Decimal("NaN")]) + assert cursor.bindvars[0].type == oracledb.DB_TYPE_BINARY_DOUBLE + (value,) = cursor.fetchone() + assert str(value) == str(float("NaN")) + + +def test_2232(cursor): + "2232 - test fetching numbers as binary integers" + cursor.outputtypehandler = output_type_handler_binary_int + for value in (1, 2**31, 2**63 - 1, -1, -(2**31), -(2**63) + 1): + cursor.execute("select :1 from dual", [str(value)]) + (fetched_value,) = cursor.fetchone() + assert value == fetched_value + + +def test_2233(cursor): + "2233 - test binding native integer as an out bind" + simple_var = cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) + cursor.execute("begin :value := 2.9; end;", [simple_var]) + assert simple_var.getvalue() == 2 + + 
simple_var = cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) + cursor.execute("begin :value := 1.5; end;", [simple_var]) + assert simple_var.getvalue() == 1 + + +def test_2234(cursor): + "2234 - test binding in a native integer" + statement = "begin :value := :value + 2.5; end;" + simple_var = cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) + simple_var.setvalue(0, 0) + cursor.execute(statement, [simple_var]) + assert simple_var.getvalue() == 2 + + simple_var.setvalue(0, -5) + cursor.execute(statement, [simple_var]) + assert simple_var.getvalue() == -2 + + +def test_2235(cursor): + "2235 - test setting decimal value for binary int" + simple_var = cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) + simple_var.setvalue(0, 2.5) + cursor.execute("begin :value := :value + 2.5; end;", [simple_var]) + assert simple_var.getvalue() == 4 + + +def test_2236(cursor): + "2236 - bind a large value to binary int" + simple_var = cursor.var(oracledb.DB_TYPE_BINARY_INTEGER) + cursor.execute("begin :value := POWER(2, 31) - 1; end;", [simple_var]) + assert simple_var.getvalue() == 2**31 - 1 + + cursor.execute("begin :value := POWER(-2, 31) - 1; end;", [simple_var]) + assert simple_var.getvalue() == -(2**31) - 1 + + +def test_2237(cursor, disable_fetch_lobs): + "2237 - fetch a number with oracledb.defaults.fetch_lobs = False" + cursor.execute("select 1 from dual") + (result,) = cursor.fetchone() + assert isinstance(result, int) + + +def test_2238(cursor): + "2238 - fetch a small constant with a decimal point" + cursor.outputtypehandler = output_type_handler_str + cursor.execute("select 3 / 2 from dual") + (result,) = cursor.fetchone() + assert len(result) == 3 + assert result[0] == "1" + assert result[2] == "5" diff --git a/tests/test_2300_object_var.py b/tests/test_2300_object_var.py index da7e6d69..0e736b44 100644 --- a/tests/test_2300_object_var.py +++ b/tests/test_2300_object_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -28,950 +28,968 @@ import datetime import decimal +import re import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def __test_data( - self, expected_int_value, expected_obj_value, expected_array_value - ): - int_value, object_value, array_value = self.cursor.fetchone() - if object_value is not None: - self.assertIsInstance(object_value.INTVALUE, int) - self.assertIsInstance(object_value.SMALLINTVALUE, int) - self.assertIsInstance(object_value.FLOATVALUE, float) - if object_value is not None: - object_value = self.get_db_object_as_plain_object(object_value) - if array_value is not None: - array_value = array_value.aslist() - self.assertEqual(int_value, expected_int_value) - self.assertEqual(object_value, expected_obj_value) - self.assertEqual(array_value, expected_array_value) - - def test_2300(self): - "2300 - test binding a null value (IN)" - var = self.cursor.var(oracledb.DB_TYPE_OBJECT, typename="UDT_OBJECT") - result = self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, [var] - ) - self.assertEqual(result, "null") - - def test_2301(self): - "2301 - test binding an object (IN)" - type_obj = self.conn.gettype("UDT_OBJECT") - obj = type_obj.newobject() - obj.NUMBERVALUE = 13 - obj.STRINGVALUE = "Test String" - result = self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, [obj] - ) - exp = "udt_Object(13, 'Test String', null, null, null, null, null)" - self.assertEqual(result, exp) - obj.NUMBERVALUE = None - obj.STRINGVALUE = "Test With Dates" - obj.DATEVALUE = datetime.datetime(2016, 2, 10) - obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 10, 14, 13, 50) - result = self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, [obj] - ) - expected_value = ( - "udt_Object(null, 'Test With Dates', null, " - "to_date('2016-02-10', 'YYYY-MM-DD'), " - "to_timestamp('2016-02-10 14:13:50', " - "'YYYY-MM-DD HH24:MI:SS'), " - "null, null)" - ) - self.assertEqual(result, expected_value) - obj.DATEVALUE = None - obj.TIMESTAMPVALUE = None - sub_type_obj = self.conn.gettype("UDT_SUBOBJECT") - sub_obj = sub_type_obj.newobject() - sub_obj.SUBNUMBERVALUE = decimal.Decimal("18.25") - sub_obj.SUBSTRINGVALUE = "Sub String" - obj.SUBOBJECTVALUE = sub_obj - result = self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, [obj] - ) - expected_value = ( - "udt_Object(null, 'Test With Dates', null, null, " - "null, udt_SubObject(18.25, 'Sub String'), null)" - ) - self.assertEqual(result, expected_value) - - def test_2302(self): - "2302 - test copying an object" - type_obj = self.conn.gettype("UDT_OBJECT") - obj = type_obj() - obj.NUMBERVALUE = 5124 - obj.STRINGVALUE = "A test string" - obj.DATEVALUE = datetime.datetime(2016, 2, 24) - obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 24, 13, 39, 10) - copied_obj = obj.copy() - self.assertEqual(obj.NUMBERVALUE, copied_obj.NUMBERVALUE) - self.assertEqual(obj.STRINGVALUE, copied_obj.STRINGVALUE) - self.assertEqual(obj.DATEVALUE, copied_obj.DATEVALUE) - self.assertEqual(obj.TIMESTAMPVALUE, copied_obj.TIMESTAMPVALUE) - - def test_2303(self): - "2303 - test getting an empty collection as a list" - type_obj = self.conn.gettype("UDT_ARRAY") - obj = type_obj.newobject() - self.assertEqual(obj.aslist(), []) - - def test_2304(self): - "2304 - test fetching objects" - self.cursor.execute( - """ - select IntCol, ObjectCol, ArrayCol - from TestObjects - order by 
IntCol - """ - ) - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "OBJECTCOL", - oracledb.DB_TYPE_OBJECT, - None, - None, - None, - None, - True, - ), - ( - "ARRAYCOL", - oracledb.DB_TYPE_OBJECT, - None, - None, - None, - None, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - expected_value = ( - 1, - "First row", - "First ", - "N First Row", - "N First ", - b"Raw Data 1", - 2, - 5, - 12.125, - 0.5, - 12.5, - 25.25, - 50.125, - datetime.datetime(2007, 3, 6, 0, 0, 0), - datetime.datetime(2008, 9, 12, 16, 40), - datetime.datetime(2009, 10, 13, 17, 50), - oracledb.Timestamp(2010, 11, 14, 18, 55), - "Short CLOB value", - "Short NCLOB Value", - b"Short BLOB value", - (11, "Sub object 1"), - [(5, "first element"), (6, "second element")], - ) - self.__test_data(1, expected_value, [5, 10, None, 20]) - self.__test_data(2, None, [3, None, 9, 12, 15]) - expected_value = ( - 3, - "Third row", - "Third ", - "N Third Row", - "N Third ", - b"Raw Data 3", - 4, - 10, - 6.5, - 0.75, - 43.25, - 86.5, - 192.125, - datetime.datetime(2007, 6, 21, 0, 0, 0), - datetime.datetime(2007, 12, 13, 7, 30, 45), - datetime.datetime(2017, 6, 21, 23, 18, 45), - oracledb.Timestamp(2017, 7, 21, 8, 27, 13), - "Another short CLOB value", - "Another short NCLOB Value", - b"Yet another short BLOB value", - (13, "Sub object 3"), - [ - (10, "element #1"), - (20, "element #2"), - (30, "element #3"), - (40, "element #4"), - ], - ) - self.__test_data(3, expected_value, None) - - def test_2305(self): - "2305 - test getting object type" - type_obj = self.conn.gettype("UDT_OBJECT") - self.assertFalse(type_obj.iscollection) - self.assertEqual(type_obj.schema, self.conn.username.upper()) - self.assertEqual(type_obj.name, "UDT_OBJECT") - sub_object_value_type = self.conn.gettype("UDT_SUBOBJECT") - sub_object_array_type = self.conn.gettype("UDT_OBJECTARRAY") - expected_metadata = [ - ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), - ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), - ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), - ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), - ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), - ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), - ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), - ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ( - "BINARYFLOATVALUE", - oracledb.DB_TYPE_BINARY_FLOAT, - None, - None, - None, - ), - ( - "BINARYDOUBLEVALUE", - oracledb.DB_TYPE_BINARY_DOUBLE, - None, - None, - None, - ), - ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), - ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), - ( - "TIMESTAMPTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_TZ, - None, - None, - None, - ), - ( - "TIMESTAMPLTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_LTZ, - None, - None, - None, - ), - ("CLOBVALUE", oracledb.DB_TYPE_CLOB, None, None, None), - ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), - ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), - ("SUBOBJECTVALUE", sub_object_value_type, None, None, None), - ("SUBOBJECTARRAY", sub_object_array_type, None, None, None), - ] - actual_metadata = [ - (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) - for attr in type_obj.attributes - ] - self.assertEqual(actual_metadata, 
expected_metadata) - self.assertEqual(sub_object_array_type.iscollection, True) - self.assertEqual(sub_object_array_type.attributes, []) - - def test_2306(self): - "2306 - test object type data" - self.cursor.execute( - """ - select ObjectCol - from TestObjects - where ObjectCol is not null - and rownum <= 1 - """ - ) - (obj,) = self.cursor.fetchone() - self.assertEqual(obj.type.schema, self.conn.username.upper()) - self.assertEqual(obj.type.name, "UDT_OBJECT") - self.assertEqual(obj.type.attributes[0].name, "NUMBERVALUE") - - def test_2307(self): - "2307 - test inserting and then querying object with all data types" - self.cursor.execute("delete from TestClobs") - self.cursor.execute("delete from TestNClobs") - self.cursor.execute("delete from TestBlobs") - self.cursor.execute("delete from TestObjects where IntCol > 3") - self.cursor.execute( - """ - insert into TestClobs (IntCol, ClobCol) - values (1, 'A short CLOB') - """ - ) - self.cursor.execute( - """ - insert into TestNClobs (IntCol, NClobCol) - values (1, 'A short NCLOB') - """ - ) - self.cursor.execute( - """ - insert into TestBlobs (IntCol, BlobCol) - values (1, utl_raw.cast_to_raw('A short BLOB')) - """ - ) - self.conn.commit() - self.cursor.execute("select CLOBCol from TestClobs") - (clob,) = self.cursor.fetchone() - self.cursor.execute("select NCLOBCol from TestNClobs") - (nclob,) = self.cursor.fetchone() - self.cursor.execute("select BLOBCol from TestBlobs") - (blob,) = self.cursor.fetchone() - type_obj = self.conn.gettype("UDT_OBJECT") - obj = type_obj.newobject() - obj.NUMBERVALUE = 5 - obj.STRINGVALUE = "A string" - obj.FIXEDCHARVALUE = "Fixed str" - obj.NSTRINGVALUE = "A NCHAR string" - obj.NFIXEDCHARVALUE = "Fixed N" - obj.RAWVALUE = b"Raw Value" - obj.INTVALUE = 27 - obj.SMALLINTVALUE = 13 - obj.REALVALUE = 184.875 - obj.DOUBLEPRECISIONVALUE = 1.375 - obj.FLOATVALUE = 23.0 - obj.DATEVALUE = datetime.date(2017, 5, 9) - obj.TIMESTAMPVALUE = datetime.datetime(2017, 5, 9, 9, 41, 13) - obj.TIMESTAMPTZVALUE = datetime.datetime(1986, 8, 2, 15, 27, 38) - obj.TIMESTAMPLTZVALUE = datetime.datetime(1999, 11, 12, 23, 5, 2) - obj.BINARYFLOATVALUE = 14.25 - obj.BINARYDOUBLEVALUE = 29.1625 - obj.CLOBVALUE = clob - obj.NCLOBVALUE = nclob - obj.BLOBVALUE = blob - sub_type_obj = self.conn.gettype("UDT_SUBOBJECT") - sub_obj = sub_type_obj.newobject() - sub_obj.SUBNUMBERVALUE = 23 - sub_obj.SUBSTRINGVALUE = "Substring value" - obj.SUBOBJECTVALUE = sub_obj - self.cursor.execute( - """ - insert into TestObjects (IntCol, ObjectCol) - values (4, :obj) - """, - obj=obj, - ) - self.cursor.execute( - """ - select IntCol, ObjectCol, ArrayCol - from TestObjects - where IntCol = 4 - """ - ) - expected_value = ( - 5, - "A string", - "Fixed str ", - "A NCHAR string", - "Fixed N ", - b"Raw Value", - 27, - 13, - 184.875, - 1.375, - 23.0, - 14.25, - 29.1625, - datetime.datetime(2017, 5, 9, 0, 0, 0), - datetime.datetime(2017, 5, 9, 9, 41, 13), - datetime.datetime(1986, 8, 2, 15, 27, 38), - oracledb.Timestamp(1999, 11, 12, 23, 5, 2), - "A short CLOB", - "A short NCLOB", - b"A short BLOB", - (23, "Substring value"), +import pytest + + +def _test_data( + cursor, + test_env, + expected_int_value, + expected_obj_value, + expected_array_value, +): + int_value, object_value, array_value = cursor.fetchone() + if object_value is not None: + assert isinstance(object_value.INTVALUE, int) + assert isinstance(object_value.SMALLINTVALUE, int) + assert isinstance(object_value.FLOATVALUE, float) + if object_value is not None: + object_value = 
test_env.get_db_object_as_plain_object(object_value) + if array_value is not None: + array_value = array_value.aslist() + assert int_value == expected_int_value + assert object_value == expected_obj_value + assert array_value == expected_array_value + + +def test_2300(cursor): + "2300 - test binding a null value (IN)" + var = cursor.var(oracledb.DB_TYPE_OBJECT, typename="UDT_OBJECT") + result = cursor.callfunc("pkg_TestBindObject.GetStringRep", str, [var]) + assert result == "null" + + +def test_2301(conn, cursor): + "2301 - test binding an object (IN)" + type_obj = conn.gettype("UDT_OBJECT") + obj = type_obj.newobject() + obj.NUMBERVALUE = 13 + obj.STRINGVALUE = "Test String" + result = cursor.callfunc("pkg_TestBindObject.GetStringRep", str, [obj]) + exp = "udt_Object(13, 'Test String', null, null, null, null, null)" + assert result == exp + obj.NUMBERVALUE = None + obj.STRINGVALUE = "Test With Dates" + obj.DATEVALUE = datetime.datetime(2016, 2, 10) + obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 10, 14, 13, 50) + result = cursor.callfunc("pkg_TestBindObject.GetStringRep", str, [obj]) + expected_value = ( + "udt_Object(null, 'Test With Dates', null, " + "to_date('2016-02-10', 'YYYY-MM-DD'), " + "to_timestamp('2016-02-10 14:13:50', " + "'YYYY-MM-DD HH24:MI:SS'), " + "null, null)" + ) + assert result == expected_value + obj.DATEVALUE = None + obj.TIMESTAMPVALUE = None + sub_type_obj = conn.gettype("UDT_SUBOBJECT") + sub_obj = sub_type_obj.newobject() + sub_obj.SUBNUMBERVALUE = decimal.Decimal("18.25") + sub_obj.SUBSTRINGVALUE = "Sub String" + obj.SUBOBJECTVALUE = sub_obj + result = cursor.callfunc("pkg_TestBindObject.GetStringRep", str, [obj]) + expected_value = ( + "udt_Object(null, 'Test With Dates', null, null, " + "null, udt_SubObject(18.25, 'Sub String'), null)" + ) + assert result == expected_value + + +def test_2302(conn): + "2302 - test copying an object" + type_obj = conn.gettype("UDT_OBJECT") + obj = type_obj() + obj.NUMBERVALUE = 5124 + obj.STRINGVALUE = "A test string" + obj.DATEVALUE = datetime.datetime(2016, 2, 24) + obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 24, 13, 39, 10) + copied_obj = obj.copy() + assert obj.NUMBERVALUE == copied_obj.NUMBERVALUE + assert obj.STRINGVALUE == copied_obj.STRINGVALUE + assert obj.DATEVALUE == copied_obj.DATEVALUE + assert obj.TIMESTAMPVALUE == copied_obj.TIMESTAMPVALUE + + +def test_2303(conn): + "2303 - test getting an empty collection as a list" + type_obj = conn.gettype("UDT_ARRAY") + obj = type_obj.newobject() + assert obj.aslist() == [] + + +def test_2304(cursor, test_env): + "2304 - test fetching objects" + cursor.execute( + """ + select IntCol, ObjectCol, ArrayCol + from TestObjects + order by IntCol + """ + ) + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "OBJECTCOL", + oracledb.DB_TYPE_OBJECT, None, - ) - self.__test_data(4, expected_value, None) - obj.CLOBVALUE = "A short CLOB (modified)" - obj.NCLOBVALUE = "A short NCLOB (modified)" - obj.BLOBVALUE = "A short BLOB (modified)" - self.cursor.execute( - """ - insert into TestObjects (IntCol, ObjectCol) - values (5, :obj) - """, - obj=obj, - ) - self.cursor.execute( - """ - select IntCol, ObjectCol, ArrayCol - from TestObjects - where IntCol = 5 - """ - ) - expected_value = ( - 5, - "A string", - "Fixed str ", - "A NCHAR string", - "Fixed N ", - b"Raw Value", - 27, - 13, - 184.875, - 1.375, - 23.0, - 14.25, - 29.1625, - datetime.datetime(2017, 5, 9, 0, 0, 0), - datetime.datetime(2017, 5, 9, 9, 41, 13), - datetime.datetime(1986, 8, 2, 15, 
27, 38), - oracledb.Timestamp(1999, 11, 12, 23, 5, 2), - "A short CLOB (modified)", - "A short NCLOB (modified)", - b"A short BLOB (modified)", - (23, "Substring value"), None, - ) - self.__test_data(5, expected_value, None) - self.conn.rollback() - - def test_2308(self): - "2308 - test trying to find an object type that does not exist" - self.assertRaises(TypeError, self.conn.gettype, 2) - with self.assertRaisesFullCode("DPY-2035"): - self.conn.gettype("A TYPE THAT DOES NOT EXIST") - - def test_2309(self): - "2309 - test appending an object of the wrong type to a collection" - collection_obj_type = self.conn.gettype("UDT_OBJECTARRAY") - collection_obj = collection_obj_type.newobject() - array_obj_type = self.conn.gettype("UDT_ARRAY") - array_obj = array_obj_type.newobject() - with self.assertRaisesFullCode("DPY-2008"): - collection_obj.append(array_obj) - - def test_2310(self): - "2310 - test that referencing a sub object affects the parent object" - obj_type = self.conn.gettype("UDT_OBJECT") - sub_obj_type = self.conn.gettype("UDT_SUBOBJECT") - obj = obj_type.newobject() - obj.SUBOBJECTVALUE = sub_obj_type.newobject() - obj.SUBOBJECTVALUE.SUBNUMBERVALUE = 5 - obj.SUBOBJECTVALUE.SUBSTRINGVALUE = "Substring" - self.assertEqual(obj.SUBOBJECTVALUE.SUBNUMBERVALUE, 5) - self.assertEqual(obj.SUBOBJECTVALUE.SUBSTRINGVALUE, "Substring") - - def test_2311(self): - "2311 - test accessing sub object after parent object destroyed" - obj_type = self.conn.gettype("UDT_OBJECT") - sub_obj_type = self.conn.gettype("UDT_SUBOBJECT") - array_type = self.conn.gettype("UDT_OBJECTARRAY") - sub_obj1 = sub_obj_type.newobject() - sub_obj1.SUBNUMBERVALUE = 2 - sub_obj1.SUBSTRINGVALUE = "AB" - sub_obj2 = sub_obj_type.newobject() - sub_obj2.SUBNUMBERVALUE = 3 - sub_obj2.SUBSTRINGVALUE = "CDE" - obj = obj_type.newobject() - obj.SUBOBJECTARRAY = array_type.newobject([sub_obj1, sub_obj2]) - sub_obj_array = obj.SUBOBJECTARRAY - del obj - self.assertEqual( - self.get_db_object_as_plain_object(sub_obj_array), - [(2, "AB"), (3, "CDE")], - ) - - def test_2312(self): - "2312 - test assigning an object of wrong type to an object attribute" - obj_type = self.conn.gettype("UDT_OBJECT") - obj = obj_type.newobject() - wrong_obj_type = self.conn.gettype("UDT_OBJECTARRAY") - wrong_obj = wrong_obj_type.newobject() - with self.assertRaisesFullCode("DPY-2008"): - setattr(obj, "SUBOBJECTVALUE", wrong_obj) - - def test_2313(self): - "2313 - test setting value of object variable to wrong object type" - wrong_obj_type = self.conn.gettype("UDT_OBJECTARRAY") - wrong_obj = wrong_obj_type.newobject() - var = self.cursor.var(oracledb.DB_TYPE_OBJECT, typename="UDT_OBJECT") - with self.assertRaisesFullCode("DPY-2008"): - var.setvalue(0, wrong_obj) - - def test_2314(self): - "2314 - test object string format" - obj_type = self.conn.gettype("UDT_OBJECT") - user = test_env.get_main_user() - self.assertEqual( - str(obj_type), f"" - ) - self.assertEqual( - str(obj_type.attributes[0]), "" - ) - - def test_2315(self): - "2315 - test Trim number of elements from collection" - sub_obj_type = self.conn.gettype("UDT_SUBOBJECT") - array_type = self.conn.gettype("UDT_OBJECTARRAY") - data = [(1, "AB"), (2, "CDE"), (3, "FGH"), (4, "IJK"), (5, "LMN")] - array_obj = array_type() - for num_val, str_val in data: - subObj = sub_obj_type() - subObj.SUBNUMBERVALUE = num_val - subObj.SUBSTRINGVALUE = str_val - array_obj.append(subObj) - self.assertEqual(self.get_db_object_as_plain_object(array_obj), data) - array_obj.trim(2) - self.assertEqual( - 
self.get_db_object_as_plain_object(array_obj), data[:3] - ) - array_obj.trim(1) - self.assertEqual( - self.get_db_object_as_plain_object(array_obj), data[:2] - ) - array_obj.trim(0) - self.assertEqual( - self.get_db_object_as_plain_object(array_obj), data[:2] - ) - array_obj.trim(2) - self.assertEqual(self.get_db_object_as_plain_object(array_obj), []) - - def test_2316(self): - "2316 - test the metadata of a SQL type" - user = test_env.get_main_user() - typ = self.conn.gettype("UDT_OBJECTARRAY") - self.assertEqual(typ.schema, user.upper()) - self.assertEqual(typ.name, "UDT_OBJECTARRAY") - self.assertIsNone(typ.package_name) - self.assertEqual(typ.element_type.schema, user.upper()) - self.assertEqual(typ.element_type.name, "UDT_SUBOBJECT") - self.assertIsNone(typ.element_type.package_name) - self.assertEqual(typ.attributes, []) - self.assertTrue(typ.iscollection) - - def test_2317(self): - "2317 - test the metadata of a PL/SQL type" - user = test_env.get_main_user() - typ = self.conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") - self.assertEqual(typ.schema, user.upper()) - self.assertEqual(typ.name, "UDT_STRINGLIST") - self.assertEqual(typ.package_name, "PKG_TESTSTRINGARRAYS") - self.assertEqual(typ.element_type, oracledb.DB_TYPE_VARCHAR) - self.assertEqual(typ.attributes, []) - self.assertTrue(typ.iscollection) - - def test_2318(self): - "2318 - test creating an object variable without a type name" - with self.assertRaisesFullCode("DPY-2037"): - self.cursor.var(oracledb.DB_TYPE_OBJECT) - - def test_2319(self): - "2319 - test getting an empty collection as a dictionary" - type_obj = self.conn.gettype("UDT_ARRAY") - obj = type_obj.newobject() - self.assertEqual(obj.asdict(), {}) - - def test_2320(self): - "2320 - test if an element exists in a collection" - array_type = self.conn.gettype("UDT_ARRAY") - array_obj = array_type.newobject() - self.assertFalse(array_obj.exists(0)) - array_obj.append(40) - self.assertTrue(array_obj.exists(0)) - self.assertFalse(array_obj.exists(1)) - - def test_2321(self): - "2321 - test first and last methods" - array_type = self.conn.gettype("UDT_ARRAY") - array_obj = array_type.newobject() - self.assertIsNone(array_obj.first()) - self.assertIsNone(array_obj.last()) - for i in range(7): - array_obj.append(i) - self.assertEqual(array_obj.first(), 0) - self.assertEqual(array_obj.last(), 6) - - def test_2322(self): - "2322 - test getting the size of a collections" - array_type = self.conn.gettype("UDT_ARRAY") - array_obj = array_type.newobject() - self.assertEqual(array_obj.size(), 0) - for i in range(5): - array_obj.append(i) - self.assertEqual(array_obj.size(), 5) - - def test_2323(self): - "2323 - test prev and next methods" - array_type = self.conn.gettype("UDT_ARRAY") - array_obj = array_type.newobject() - self.assertIsNone(array_obj.prev(0)) - self.assertIsNone(array_obj.next(0)) - for i in range(2): - array_obj.append(i) - self.assertIsNone(array_obj.prev(0)) - self.assertEqual(array_obj.prev(1), 0) - self.assertEqual(array_obj.next(0), 1) - self.assertIsNone(array_obj.next(1)) - - def test_2324(self): - "2324 - test setting and getting elements from a collection" - array_type = self.conn.gettype("UDT_ARRAY") - array_obj = array_type.newobject() - with self.assertRaisesFullCode("DPY-2038"): - array_obj.getelement(0) - with self.assertRaisesFullCode("DPY-2039"): - array_obj.setelement(0, 7) - array_obj.append(7) - self.assertEqual(array_obj.getelement(0), 7) - array_obj.setelement(0, 10) - self.assertEqual(array_obj.getelement(0), 10) - with 
self.assertRaisesFullCode("DPY-2039"): - array_obj.setelement(3, 4) - - def test_2325(self): - "2325 - test appending too many elements to a collection" - array_type = self.conn.gettype("UDT_ARRAY") - numbers = [i for i in range(11)] - with self.assertRaisesFullCode("DPY-2039"): - array_type.newobject(numbers) - - array_obj = array_type.newobject() - with self.assertRaisesFullCode("DPY-2039"): - array_obj.extend(numbers) - - array_obj = array_type.newobject() - for elem in numbers[:10]: - array_obj.append(elem) - with self.assertRaisesFullCode("DPY-2039"): - array_obj.append(numbers[10]) - - def test_2326(self): - "2326 - test appending elements to an unconstrained table" - data = [1, 3, 6, 10, 15, 21] - typ = self.conn.gettype("UDT_UNCONSTRAINEDTABLE") - obj = typ.newobject(data) - self.cursor.execute("select :1 from dual", [obj]) - (output_obj,) = self.cursor.fetchone() - self.assertEqual(output_obj.aslist(), data) - - def test_2327(self): - "2327 - test collection with thousands of entries" - typ = self.conn.gettype("PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST") - obj = typ.newobject() - obj.setelement(1, 1) - running_total = 1 - for i in range(1, 35000): - running_total += i + 1 - obj.append(running_total) - result = self.cursor.callfunc( - "pkg_TestNumberArrays.TestInArrays", int, (2327, obj) - ) - self.assertEqual(result, 7146445847327) - - @test_env.skip_unless_thick_mode() - def test_2328(self): - "2328 - test object with unknown type in one of its attributes" - typ = self.conn.gettype("UDT_OBJECTWITHXMLTYPE") - self.assertEqual(typ.attributes[1].type, oracledb.DB_TYPE_UNKNOWN) - - @test_env.skip_unless_thick_mode() - def test_2329(self): - "2329 - test object with unknown type as the element type" - typ = self.conn.gettype("UDT_XMLTYPEARRAY") - self.assertEqual(typ.element_type, oracledb.DB_TYPE_UNKNOWN) - - def test_2330(self): - "2330 - test DB Object repr()" - typ = self.conn.gettype("UDT_ARRAY") - obj = typ.newobject() - fqn = f"{typ.schema}.{typ.name}" - expected_str = f"^$" - self.assertRegex(repr(obj), expected_str) - - # object of a package - typ = self.conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") - obj = typ.newobject() - fqn = f"{typ.schema}.{typ.package_name}.{typ.name}" - expected_str = f"^$" - self.assertRegex(repr(obj), expected_str) - - def test_2331(self): - "2331 - test creating an object with invalid data type" - type_obj = self.conn.gettype("UDT_ARRAY") - with self.assertRaisesFullCode("DPY-3013"): - type_obj.newobject([490, "not a number"]) - with self.assertRaisesFullCode("DPY-3013"): - type_obj([71, "not a number"]) - - def test_2332(self): - "2332 - test getting an invalid attribute name from an object" - typ = self.conn.gettype("UDT_OBJECT") - obj = typ.newobject() - with self.assertRaises(AttributeError): - obj.MISSING - - def test_2333(self): - "2333 - test validating a string attribute" - typ = self.conn.gettype("UDT_OBJECT") - obj = typ.newobject() - for attr_name, max_size in [ - ("STRINGVALUE", 60), - ("FIXEDCHARVALUE", 10), - ("NSTRINGVALUE", 120), - ("NFIXEDCHARVALUE", 20), - ("RAWVALUE", 16), - ]: - with self.subTest(attr_name=attr_name, max_size=max_size): - value = "A" * max_size - setattr(obj, attr_name, value) - value += "X" - with self.assertRaisesFullCode("DPY-2043"): - setattr(obj, attr_name, value) - - def test_2334(self): - "2334 - test validating a string element value" - typ = self.conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") - obj = typ.newobject() - obj.append("A" * 100) - with self.assertRaisesFullCode("DPY-2044"): - 
obj.append("A" * 101) - obj.append("B" * 100) - with self.assertRaisesFullCode("DPY-2044"): - obj.setelement(2, "C" * 101) - - def test_2335(self): - "2335 - test validating a string attribute with null value" - typ = self.conn.gettype("UDT_OBJECT") - obj = typ.newobject() - obj.STRINGVALUE = None - - def test_2336(self): - "2336 - test initializing (with a sequence) a non collection obj" - obj_type = self.conn.gettype("UDT_OBJECT") - with self.assertRaisesFullCode("DPY-2036"): - obj_type.newobject([1, 2]) - with self.assertRaisesFullCode("DPY-2036"): - obj_type([3, 4]) - - def test_2337(self): - "2337 - test %ROWTYPE with all types" - sub_obj_type = self.conn.gettype("UDT_SUBOBJECT") - sub_arr_type = self.conn.gettype("UDT_OBJECTARRAY") - expected_metadata = [ - ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), - ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), - ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), - ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), - ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), - ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), - ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), - ("DECIMALVALUE", oracledb.DB_TYPE_NUMBER, 20, 6, None), - ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ( - "BINARYFLOATVALUE", - oracledb.DB_TYPE_BINARY_FLOAT, - None, - None, - None, - ), - ( - "BINARYDOUBLEVALUE", - oracledb.DB_TYPE_BINARY_DOUBLE, - None, - None, - None, - ), - ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), - ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), - ( - "TIMESTAMPTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_TZ, - None, - None, - None, - ), - ( - "TIMESTAMPLTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_LTZ, - None, - None, - None, - ), - ("CLOBVALUE", oracledb.DB_TYPE_CLOB, None, None, None), - ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), - ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), - ("SUBOBJECTVALUE", sub_obj_type, None, None, None), - ("SUBOBJECTARRAY", sub_arr_type, None, None, None), - ] - obj_type = self.conn.gettype("TESTALLTYPES%ROWTYPE") - actual_metadata = [ - (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) - for attr in obj_type.attributes - ] - self.assertEqual(actual_metadata, expected_metadata) - - def test_2338(self): - "2338 - test collection iteration" - self.cursor.execute("select udt_array(5, 10, 15) from dual") - (obj,) = self.cursor.fetchone() - result = [i for i in obj] - self.assertEqual(result, [5, 10, 15]) - - @test_env.skip_unless_thin_mode() - def test_2339(self): - "2339 - test fetching an object containing an XmlType instance" - num_val = 2339 - xml_val = "test_2339" - str_val = "A string for test 2339" - self.cursor.execute( - f""" - select udt_ObjectWithXmlType({num_val}, sys.xmltype('{xml_val}'), - '{str_val}') from dual - """ - ) - (obj,) = self.cursor.fetchone() - self.assertEqual(obj.NUMBERVALUE, num_val) - self.assertEqual(obj.XMLVALUE, xml_val) - self.assertEqual(obj.STRINGVALUE, str_val) - - def test_2340(self): - "2340 - test DbObject instances are retained across getvalue() calls" - typ = self.conn.gettype("UDT_OBJECT") - obj = typ.newobject() - var = self.cursor.var(typ) - var.setvalue(0, obj) - self.assertIs(var.getvalue(), obj) - - def test_2341(self): - "2341 - test insufficient privileges for 
gettype()" - user = test_env.get_proxy_user() - password = test_env.get_proxy_password() - main_user = test_env.get_main_user().upper() - conn = test_env.get_connection(user=user, password=password) - with self.assertRaisesFullCode("DPY-2035"): - conn.gettype(f"{main_user}.UDT_OBJECTARRAY") - - def test_2342(self): - "2342 - test nested records" - options = [(None, None), (1, None), (None, 2), (1, 2)] - typ = self.conn.gettype("PKG_TESTNESTEDRECORDS.UDT_OUTER") - for option in options: - with self.subTest(option=option): - value1, value2 = option - obj = self.cursor.callfunc( - "pkg_TestNestedRecords.GetOuter", typ, (value1, value2) - ) - self.assertIsNotNone(obj.INNER1) - self.assertIsNone(obj.INNER1.ATTR1) - self.assertEqual(obj.INNER1.ATTR2, value1) - self.assertIsNotNone(obj.INNER2) - self.assertIsNone(obj.INNER2.ATTR1) - self.assertEqual(obj.INNER2.ATTR2, value2) - - def test_2343(self): - "2343 - test varray of numbers" - obj_type = self.conn.gettype("UDT_VARRAYOFNUMBER") - obj = self.cursor.callfunc( - "pkg_NestedTable.GetVarrayOfNumber", obj_type - ) - self.assertEqual(obj.aslist(), [10, 20, 30]) - - def test_2344(self): - "2344 - test table of numbers" - obj_type = self.conn.gettype("UDT_TABLEOFNUMBER") - obj = self.cursor.callfunc( - "pkg_NestedTable.GetTableOfNumber", obj_type - ) - self.assertEqual(obj.aslist(), [15, 25, 35, 45]) - - def test_2345(self): - "2345 - test table of varray of numbers" - obj_type = self.conn.gettype("UDT_TABLEOFVARRAYOFNUMBER") - obj = self.cursor.callfunc( - "pkg_NestedTable.GetTableOfVarrayOfNumber", obj_type - ) - plain_obj = self.get_db_object_as_plain_object(obj) - self.assertEqual(plain_obj, [[10, 20], [30, 40]]) - - def test_2346(self): - "2346 - test nested table of nested tables" - num_tab_type = self.conn.gettype("UDT_TABLEOFNUMBER") - tab_num_tab_type = self.conn.gettype("UDT_TABLEOFTABLEOFNUMBER") - - num_tab_1 = num_tab_type.newobject([1, 2]) - num_tab_2 = num_tab_type.newobject([3, 4, 5]) - num_tab_3 = num_tab_type.newobject([6, 7, 8, 9, 10]) - tab_num_tab = tab_num_tab_type.newobject( - [num_tab_1, None, num_tab_2, None, num_tab_3] - ) - - self.cursor.execute( - """ - insert into NestedCollectionTests (Id, TableCol) - values (:1, :2) - """, - [1, tab_num_tab], - ) - self.cursor.execute("select TableCol from NestedCollectionTests") - (obj,) = self.cursor.fetchone() - plain_obj = self.get_db_object_as_plain_object(obj) - expected_data = [[1, 2], None, [3, 4, 5], None, [6, 7, 8, 9, 10]] - self.assertEqual(plain_obj, expected_data) - - def test_2347(self): - "2347 - test nested table of varrays" - num_tab_type = self.conn.gettype("UDT_TABLEOFNUMBER") - arr_num_tab_type = self.conn.gettype("UDT_VARRAYOFTABLEOFNUMBER") - - num_tab_1 = num_tab_type.newobject([4, 8]) - num_tab_2 = num_tab_type.newobject([1, 3, 5]) - num_tab_3 = num_tab_type.newobject([2, 6, 10, 7, 9]) - tab_num_tab = arr_num_tab_type.newobject( - [num_tab_1, None, num_tab_2, None, num_tab_3] - ) - - self.cursor.execute( - """ - insert into NestedCollectionTests (Id, VarrayCol) - values (:1, :2) - """, - [1, tab_num_tab], - ) - self.cursor.execute("select VarrayCol from NestedCollectionTests") - (obj,) = self.cursor.fetchone() - plain_obj = self.get_db_object_as_plain_object(obj) - expected_data = [[4, 8], None, [1, 3, 5], None, [2, 6, 10, 7, 9]] - self.assertEqual(plain_obj, expected_data) - - def test_2348(self): - "2348 - test using collection methods on an object that is not one" - obj_type = self.conn.gettype("UDT_OBJECT") - obj = obj_type.newobject() - with 
self.assertRaisesFullCode("DPY-2036"): - obj.append(5) - with self.assertRaisesFullCode("DPY-2036"): - obj.asdict() - with self.assertRaisesFullCode("DPY-2036"): - obj.aslist() - with self.assertRaisesFullCode("DPY-2036"): - obj.delete(5) - with self.assertRaisesFullCode("DPY-2036"): - obj.exists(5) - with self.assertRaisesFullCode("DPY-2036"): - obj.extend([5]) - with self.assertRaisesFullCode("DPY-2036"): - obj.first() - with self.assertRaisesFullCode("DPY-2036"): - obj.getelement(5) - with self.assertRaisesFullCode("DPY-2036"): - obj.last() - with self.assertRaisesFullCode("DPY-2036"): - obj.next(5) - with self.assertRaisesFullCode("DPY-2036"): - obj.prev(5) - with self.assertRaisesFullCode("DPY-2036"): - obj.setelement(5, None) - with self.assertRaisesFullCode("DPY-2036"): - obj.size() - with self.assertRaisesFullCode("DPY-2036"): - obj.trim(0) - - -if __name__ == "__main__": - test_env.run_test_cases() + None, + None, + True, + ), + ( + "ARRAYCOL", + oracledb.DB_TYPE_OBJECT, + None, + None, + None, + None, + True, + ), + ] + assert cursor.description == expected_value + expected_value = ( + 1, + "First row", + "First ", + "N First Row", + "N First ", + b"Raw Data 1", + 2, + 5, + 12.125, + 0.5, + 12.5, + 25.25, + 50.125, + datetime.datetime(2007, 3, 6, 0, 0, 0), + datetime.datetime(2008, 9, 12, 16, 40), + datetime.datetime(2009, 10, 13, 17, 50), + oracledb.Timestamp(2010, 11, 14, 18, 55), + "Short CLOB value", + "Short NCLOB Value", + b"Short BLOB value", + (11, "Sub object 1"), + [(5, "first element"), (6, "second element")], + ) + _test_data(cursor, test_env, 1, expected_value, [5, 10, None, 20]) + _test_data(cursor, test_env, 2, None, [3, None, 9, 12, 15]) + expected_value = ( + 3, + "Third row", + "Third ", + "N Third Row", + "N Third ", + b"Raw Data 3", + 4, + 10, + 6.5, + 0.75, + 43.25, + 86.5, + 192.125, + datetime.datetime(2007, 6, 21, 0, 0, 0), + datetime.datetime(2007, 12, 13, 7, 30, 45), + datetime.datetime(2017, 6, 21, 23, 18, 45), + oracledb.Timestamp(2017, 7, 21, 8, 27, 13), + "Another short CLOB value", + "Another short NCLOB Value", + b"Yet another short BLOB value", + (13, "Sub object 3"), + [ + (10, "element #1"), + (20, "element #2"), + (30, "element #3"), + (40, "element #4"), + ], + ) + _test_data(cursor, test_env, 3, expected_value, None) + + +def test_2305(conn): + "2305 - test getting object type" + type_obj = conn.gettype("UDT_OBJECT") + assert not type_obj.iscollection + assert type_obj.schema == conn.username.upper() + assert type_obj.name == "UDT_OBJECT" + sub_object_value_type = conn.gettype("UDT_SUBOBJECT") + sub_object_array_type = conn.gettype("UDT_OBJECTARRAY") + expected_metadata = [ + ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), + ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), + ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), + ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), + ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), + ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), + ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), + ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ( + "BINARYFLOATVALUE", + oracledb.DB_TYPE_BINARY_FLOAT, + None, + None, + None, + ), + ( + "BINARYDOUBLEVALUE", + oracledb.DB_TYPE_BINARY_DOUBLE, + None, + None, + None, + ), + ("DATEVALUE", oracledb.DB_TYPE_DATE, None, 
None, None), + ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), + ( + "TIMESTAMPTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_TZ, + None, + None, + None, + ), + ( + "TIMESTAMPLTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_LTZ, + None, + None, + None, + ), + ("CLOBVALUE", oracledb.DB_TYPE_CLOB, None, None, None), + ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), + ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), + ("SUBOBJECTVALUE", sub_object_value_type, None, None, None), + ("SUBOBJECTARRAY", sub_object_array_type, None, None, None), + ] + actual_metadata = [ + (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) + for attr in type_obj.attributes + ] + assert actual_metadata == expected_metadata + assert sub_object_array_type.iscollection + assert sub_object_array_type.attributes == [] + + +def test_2306(conn, cursor): + "2306 - test object type data" + cursor.execute( + """ + select ObjectCol + from TestObjects + where ObjectCol is not null + and rownum <= 1 + """ + ) + (obj,) = cursor.fetchone() + assert obj.type.schema == conn.username.upper() + assert obj.type.name == "UDT_OBJECT" + assert obj.type.attributes[0].name == "NUMBERVALUE" + + +def test_2307(conn, cursor, test_env): + "2307 - test inserting and then querying object with all data types" + cursor.execute("delete from TestClobs") + cursor.execute("delete from TestNClobs") + cursor.execute("delete from TestBlobs") + cursor.execute("delete from TestObjects where IntCol > 3") + cursor.execute( + """ + insert into TestClobs (IntCol, ClobCol) + values (1, 'A short CLOB') + """ + ) + cursor.execute( + """ + insert into TestNClobs (IntCol, NClobCol) + values (1, 'A short NCLOB') + """ + ) + cursor.execute( + """ + insert into TestBlobs (IntCol, BlobCol) + values (1, utl_raw.cast_to_raw('A short BLOB')) + """ + ) + conn.commit() + cursor.execute("select CLOBCol from TestClobs") + (clob,) = cursor.fetchone() + cursor.execute("select NCLOBCol from TestNClobs") + (nclob,) = cursor.fetchone() + cursor.execute("select BLOBCol from TestBlobs") + (blob,) = cursor.fetchone() + type_obj = conn.gettype("UDT_OBJECT") + obj = type_obj.newobject() + obj.NUMBERVALUE = 5 + obj.STRINGVALUE = "A string" + obj.FIXEDCHARVALUE = "Fixed str" + obj.NSTRINGVALUE = "A NCHAR string" + obj.NFIXEDCHARVALUE = "Fixed N" + obj.RAWVALUE = b"Raw Value" + obj.INTVALUE = 27 + obj.SMALLINTVALUE = 13 + obj.REALVALUE = 184.875 + obj.DOUBLEPRECISIONVALUE = 1.375 + obj.FLOATVALUE = 23.0 + obj.DATEVALUE = datetime.date(2017, 5, 9) + obj.TIMESTAMPVALUE = datetime.datetime(2017, 5, 9, 9, 41, 13) + obj.TIMESTAMPTZVALUE = datetime.datetime(1986, 8, 2, 15, 27, 38) + obj.TIMESTAMPLTZVALUE = datetime.datetime(1999, 11, 12, 23, 5, 2) + obj.BINARYFLOATVALUE = 14.25 + obj.BINARYDOUBLEVALUE = 29.1625 + obj.CLOBVALUE = clob + obj.NCLOBVALUE = nclob + obj.BLOBVALUE = blob + sub_type_obj = conn.gettype("UDT_SUBOBJECT") + sub_obj = sub_type_obj.newobject() + sub_obj.SUBNUMBERVALUE = 23 + sub_obj.SUBSTRINGVALUE = "Substring value" + obj.SUBOBJECTVALUE = sub_obj + cursor.execute( + """ + insert into TestObjects (IntCol, ObjectCol) + values (4, :obj) + """, + obj=obj, + ) + cursor.execute( + """ + select IntCol, ObjectCol, ArrayCol + from TestObjects + where IntCol = 4 + """ + ) + expected_value = ( + 5, + "A string", + "Fixed str ", + "A NCHAR string", + "Fixed N ", + b"Raw Value", + 27, + 13, + 184.875, + 1.375, + 23.0, + 14.25, + 29.1625, + datetime.datetime(2017, 5, 9, 0, 0, 0), + datetime.datetime(2017, 5, 9, 9, 41, 13), + datetime.datetime(1986, 8, 2, 
15, 27, 38), + oracledb.Timestamp(1999, 11, 12, 23, 5, 2), + "A short CLOB", + "A short NCLOB", + b"A short BLOB", + (23, "Substring value"), + None, + ) + _test_data(cursor, test_env, 4, expected_value, None) + obj.CLOBVALUE = "A short CLOB (modified)" + obj.NCLOBVALUE = "A short NCLOB (modified)" + obj.BLOBVALUE = "A short BLOB (modified)" + cursor.execute( + """ + insert into TestObjects (IntCol, ObjectCol) + values (5, :obj) + """, + obj=obj, + ) + cursor.execute( + """ + select IntCol, ObjectCol, ArrayCol + from TestObjects + where IntCol = 5 + """ + ) + expected_value = ( + 5, + "A string", + "Fixed str ", + "A NCHAR string", + "Fixed N ", + b"Raw Value", + 27, + 13, + 184.875, + 1.375, + 23.0, + 14.25, + 29.1625, + datetime.datetime(2017, 5, 9, 0, 0, 0), + datetime.datetime(2017, 5, 9, 9, 41, 13), + datetime.datetime(1986, 8, 2, 15, 27, 38), + oracledb.Timestamp(1999, 11, 12, 23, 5, 2), + "A short CLOB (modified)", + "A short NCLOB (modified)", + b"A short BLOB (modified)", + (23, "Substring value"), + None, + ) + _test_data(cursor, test_env, 5, expected_value, None) + conn.rollback() + + +def test_2308(conn, test_env): + "2308 - test trying to find an object type that does not exist" + pytest.raises(TypeError, conn.gettype, 2) + with test_env.assert_raises_full_code("DPY-2035"): + conn.gettype("A TYPE THAT DOES NOT EXIST") + + +def test_2309(conn, test_env): + "2309 - test appending an object of the wrong type to a collection" + collection_obj_type = conn.gettype("UDT_OBJECTARRAY") + collection_obj = collection_obj_type.newobject() + array_obj_type = conn.gettype("UDT_ARRAY") + array_obj = array_obj_type.newobject() + with test_env.assert_raises_full_code("DPY-2008"): + collection_obj.append(array_obj) + + +def test_2310(conn): + "2310 - test that referencing a sub object affects the parent object" + obj_type = conn.gettype("UDT_OBJECT") + sub_obj_type = conn.gettype("UDT_SUBOBJECT") + obj = obj_type.newobject() + obj.SUBOBJECTVALUE = sub_obj_type.newobject() + obj.SUBOBJECTVALUE.SUBNUMBERVALUE = 5 + obj.SUBOBJECTVALUE.SUBSTRINGVALUE = "Substring" + assert obj.SUBOBJECTVALUE.SUBNUMBERVALUE == 5 + assert obj.SUBOBJECTVALUE.SUBSTRINGVALUE == "Substring" + + +def test_2311(conn, test_env): + "2311 - test accessing sub object after parent object destroyed" + obj_type = conn.gettype("UDT_OBJECT") + sub_obj_type = conn.gettype("UDT_SUBOBJECT") + array_type = conn.gettype("UDT_OBJECTARRAY") + sub_obj1 = sub_obj_type.newobject() + sub_obj1.SUBNUMBERVALUE = 2 + sub_obj1.SUBSTRINGVALUE = "AB" + sub_obj2 = sub_obj_type.newobject() + sub_obj2.SUBNUMBERVALUE = 3 + sub_obj2.SUBSTRINGVALUE = "CDE" + obj = obj_type.newobject() + obj.SUBOBJECTARRAY = array_type.newobject([sub_obj1, sub_obj2]) + sub_obj_array = obj.SUBOBJECTARRAY + del obj + expected = [(2, "AB"), (3, "CDE")] + assert test_env.get_db_object_as_plain_object(sub_obj_array) == expected + + +def test_2312(conn, test_env): + "2312 - test assigning an object of wrong type to an object attribute" + obj_type = conn.gettype("UDT_OBJECT") + obj = obj_type.newobject() + wrong_obj_type = conn.gettype("UDT_OBJECTARRAY") + wrong_obj = wrong_obj_type.newobject() + with test_env.assert_raises_full_code("DPY-2008"): + setattr(obj, "SUBOBJECTVALUE", wrong_obj) + + +def test_2313(conn, cursor, test_env): + "2313 - test setting value of object variable to wrong object type" + wrong_obj_type = conn.gettype("UDT_OBJECTARRAY") + wrong_obj = wrong_obj_type.newobject() + var = cursor.var(oracledb.DB_TYPE_OBJECT, typename="UDT_OBJECT") + with 
test_env.assert_raises_full_code("DPY-2008"): + var.setvalue(0, wrong_obj) + + +def test_2314(conn, test_env): + "2314 - test object string format" + obj_type = conn.gettype("UDT_OBJECT") + user = test_env.main_user.upper() + assert str(obj_type) == f"" + assert str(obj_type.attributes[0]) == "" + + +def test_2315(conn, test_env): + "2315 - test Trim number of elements from collection" + sub_obj_type = conn.gettype("UDT_SUBOBJECT") + array_type = conn.gettype("UDT_OBJECTARRAY") + data = [(1, "AB"), (2, "CDE"), (3, "FGH"), (4, "IJK"), (5, "LMN")] + array_obj = array_type() + for num_val, str_val in data: + subObj = sub_obj_type() + subObj.SUBNUMBERVALUE = num_val + subObj.SUBSTRINGVALUE = str_val + array_obj.append(subObj) + assert test_env.get_db_object_as_plain_object(array_obj) == data + array_obj.trim(2) + assert test_env.get_db_object_as_plain_object(array_obj) == data[:3] + array_obj.trim(1) + assert test_env.get_db_object_as_plain_object(array_obj) == data[:2] + array_obj.trim(0) + assert test_env.get_db_object_as_plain_object(array_obj) == data[:2] + array_obj.trim(2) + assert test_env.get_db_object_as_plain_object(array_obj) == [] + + +def test_2316(conn, test_env): + "2316 - test the metadata of a SQL type" + user = test_env.main_user + typ = conn.gettype("UDT_OBJECTARRAY") + assert typ.schema == user.upper() + assert typ.name == "UDT_OBJECTARRAY" + assert typ.package_name is None + assert typ.element_type.schema == user.upper() + assert typ.element_type.name == "UDT_SUBOBJECT" + assert typ.element_type.package_name is None + assert typ.attributes == [] + assert typ.iscollection + + +def test_2317(conn, test_env): + "2317 - test the metadata of a PL/SQL type" + user = test_env.main_user + typ = conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") + assert typ.schema == user.upper() + assert typ.name == "UDT_STRINGLIST" + assert typ.package_name == "PKG_TESTSTRINGARRAYS" + assert typ.element_type == oracledb.DB_TYPE_VARCHAR + assert typ.attributes == [] + assert typ.iscollection + + +def test_2318(cursor, test_env): + "2318 - test creating an object variable without a type name" + with test_env.assert_raises_full_code("DPY-2037"): + cursor.var(oracledb.DB_TYPE_OBJECT) + + +def test_2319(conn): + "2319 - test getting an empty collection as a dictionary" + type_obj = conn.gettype("UDT_ARRAY") + obj = type_obj.newobject() + assert obj.asdict() == {} + + +def test_2320(conn): + "2320 - test if an element exists in a collection" + array_type = conn.gettype("UDT_ARRAY") + array_obj = array_type.newobject() + assert not array_obj.exists(0) + array_obj.append(40) + assert array_obj.exists(0) + assert not array_obj.exists(1) + + +def test_2321(conn): + "2321 - test first and last methods" + array_type = conn.gettype("UDT_ARRAY") + array_obj = array_type.newobject() + assert array_obj.first() is None + assert array_obj.last() is None + for i in range(7): + array_obj.append(i) + assert array_obj.first() == 0 + assert array_obj.last() == 6 + + +def test_2322(conn): + "2322 - test getting the size of a collections" + array_type = conn.gettype("UDT_ARRAY") + array_obj = array_type.newobject() + assert array_obj.size() == 0 + for i in range(5): + array_obj.append(i) + assert array_obj.size() == 5 + + +def test_2323(conn): + "2323 - test prev and next methods" + array_type = conn.gettype("UDT_ARRAY") + array_obj = array_type.newobject() + assert array_obj.prev(0) is None + assert array_obj.next(0) is None + for i in range(2): + array_obj.append(i) + assert array_obj.prev(0) is None + assert 
array_obj.prev(1) == 0 + assert array_obj.next(0) == 1 + assert array_obj.next(1) is None + + +def test_2324(conn, test_env): + "2324 - test setting and getting elements from a collection" + array_type = conn.gettype("UDT_ARRAY") + array_obj = array_type.newobject() + with test_env.assert_raises_full_code("DPY-2038"): + array_obj.getelement(0) + with test_env.assert_raises_full_code("DPY-2039"): + array_obj.setelement(0, 7) + array_obj.append(7) + assert array_obj.getelement(0) == 7 + array_obj.setelement(0, 10) + assert array_obj.getelement(0) == 10 + with test_env.assert_raises_full_code("DPY-2039"): + array_obj.setelement(3, 4) + + +def test_2325(conn, test_env): + "2325 - test appending too many elements to a collection" + array_type = conn.gettype("UDT_ARRAY") + numbers = [i for i in range(11)] + with test_env.assert_raises_full_code("DPY-2039"): + array_type.newobject(numbers) + + array_obj = array_type.newobject() + with test_env.assert_raises_full_code("DPY-2039"): + array_obj.extend(numbers) + + array_obj = array_type.newobject() + for elem in numbers[:10]: + array_obj.append(elem) + with test_env.assert_raises_full_code("DPY-2039"): + array_obj.append(numbers[10]) + + +def test_2326(conn, cursor): + "2326 - test appending elements to an unconstrained table" + data = [1, 3, 6, 10, 15, 21] + typ = conn.gettype("UDT_UNCONSTRAINEDTABLE") + obj = typ.newobject(data) + cursor.execute("select :1 from dual", [obj]) + (output_obj,) = cursor.fetchone() + assert output_obj.aslist() == data + + +def test_2327(conn, cursor): + "2327 - test collection with thousands of entries" + typ = conn.gettype("PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST") + obj = typ.newobject() + obj.setelement(1, 1) + running_total = 1 + for i in range(1, 35000): + running_total += i + 1 + obj.append(running_total) + result = cursor.callfunc( + "pkg_TestNumberArrays.TestInArrays", int, (2327, obj) + ) + assert result == 7146445847327 + + +def test_2328(skip_unless_thick_mode, conn): + "2328 - test object with unknown type in one of its attributes" + typ = conn.gettype("UDT_OBJECTWITHXMLTYPE") + assert typ.attributes[1].type == oracledb.DB_TYPE_UNKNOWN + + +def test_2329(skip_unless_thick_mode, conn): + "2329 - test object with unknown type as the element type" + typ = conn.gettype("UDT_XMLTYPEARRAY") + assert typ.element_type == oracledb.DB_TYPE_UNKNOWN + + +def test_2330(conn): + "2330 - test DB Object repr()" + typ = conn.gettype("UDT_ARRAY") + obj = typ.newobject() + fqn = f"{typ.schema}.{typ.name}" + expected_str = f"^$" + assert re.fullmatch(expected_str, repr(obj)) is not None + + # object of a package + typ = conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") + obj = typ.newobject() + fqn = f"{typ.schema}.{typ.package_name}.{typ.name}" + expected_str = f"^$" + assert re.fullmatch(expected_str, repr(obj)) is not None + + +def test_2331(conn, test_env): + "2331 - test creating an object with invalid data type" + type_obj = conn.gettype("UDT_ARRAY") + with test_env.assert_raises_full_code("DPY-3013"): + type_obj.newobject([490, "not a number"]) + with test_env.assert_raises_full_code("DPY-3013"): + type_obj([71, "not a number"]) + + +def test_2332(conn): + "2332 - test getting an invalid attribute name from an object" + typ = conn.gettype("UDT_OBJECT") + obj = typ.newobject() + with pytest.raises(AttributeError): + obj.MISSING + + +def test_2333(conn, test_env): + "2333 - test validating a string attribute" + typ = conn.gettype("UDT_OBJECT") + obj = typ.newobject() + for attr_name, max_size in [ + ("STRINGVALUE", 60), + 
("FIXEDCHARVALUE", 10), + ("NSTRINGVALUE", 120), + ("NFIXEDCHARVALUE", 20), + ("RAWVALUE", 16), + ]: + value = "A" * max_size + setattr(obj, attr_name, value) + value += "X" + with test_env.assert_raises_full_code("DPY-2043"): + setattr(obj, attr_name, value) + + +def test_2334(conn, test_env): + "2334 - test validating a string element value" + typ = conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") + obj = typ.newobject() + obj.append("A" * 100) + with test_env.assert_raises_full_code("DPY-2044"): + obj.append("A" * 101) + obj.append("B" * 100) + with test_env.assert_raises_full_code("DPY-2044"): + obj.setelement(2, "C" * 101) + + +def test_2335(conn): + "2335 - test validating a string attribute with null value" + typ = conn.gettype("UDT_OBJECT") + obj = typ.newobject() + obj.STRINGVALUE = None + + +def test_2336(conn, test_env): + "2336 - test initializing (with a sequence) a non collection obj" + obj_type = conn.gettype("UDT_OBJECT") + with test_env.assert_raises_full_code("DPY-2036"): + obj_type.newobject([1, 2]) + with test_env.assert_raises_full_code("DPY-2036"): + obj_type([3, 4]) + + +def test_2337(conn): + "2337 - test %ROWTYPE with all types" + sub_obj_type = conn.gettype("UDT_SUBOBJECT") + sub_arr_type = conn.gettype("UDT_OBJECTARRAY") + expected_metadata = [ + ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), + ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), + ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), + ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), + ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), + ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), + ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), + ("DECIMALVALUE", oracledb.DB_TYPE_NUMBER, 20, 6, None), + ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ( + "BINARYFLOATVALUE", + oracledb.DB_TYPE_BINARY_FLOAT, + None, + None, + None, + ), + ( + "BINARYDOUBLEVALUE", + oracledb.DB_TYPE_BINARY_DOUBLE, + None, + None, + None, + ), + ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), + ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), + ( + "TIMESTAMPTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_TZ, + None, + None, + None, + ), + ( + "TIMESTAMPLTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_LTZ, + None, + None, + None, + ), + ("CLOBVALUE", oracledb.DB_TYPE_CLOB, None, None, None), + ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), + ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), + ("SUBOBJECTVALUE", sub_obj_type, None, None, None), + ("SUBOBJECTARRAY", sub_arr_type, None, None, None), + ] + obj_type = conn.gettype("TESTALLTYPES%ROWTYPE") + actual_metadata = [ + (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) + for attr in obj_type.attributes + ] + assert actual_metadata == expected_metadata + + +def test_2338(cursor): + "2338 - test collection iteration" + cursor.execute("select udt_array(5, 10, 15) from dual") + (obj,) = cursor.fetchone() + result = [i for i in obj] + assert result == [5, 10, 15] + + +def test_2339(skip_unless_thin_mode, cursor): + "2339 - test fetching an object containing an XmlType instance" + num_val = 2339 + xml_val = "test_2339" + str_val = "A string for test 2339" + cursor.execute( + f""" + select udt_ObjectWithXmlType({num_val}, sys.xmltype('{xml_val}'), + '{str_val}') from dual + """ + ) + (obj,) = 
cursor.fetchone() + assert obj.NUMBERVALUE == num_val + assert obj.XMLVALUE == xml_val + assert obj.STRINGVALUE == str_val + + +def test_2340(conn, cursor): + "2340 - test DbObject instances are retained across getvalue() calls" + typ = conn.gettype("UDT_OBJECT") + obj = typ.newobject() + var = cursor.var(typ) + var.setvalue(0, obj) + assert var.getvalue() is obj + + +def test_2341(test_env): + "2341 - test insufficient privileges for gettype()" + main_user = test_env.main_user.upper() + conn = test_env.get_connection( + user=test_env.proxy_user, password=test_env.proxy_password + ) + with test_env.assert_raises_full_code("DPY-2035"): + conn.gettype(f"{main_user}.UDT_OBJECTARRAY") + + +def test_2342(conn, cursor): + "2342 - test nested records" + options = [(None, None), (1, None), (None, 2), (1, 2)] + typ = conn.gettype("PKG_TESTNESTEDRECORDS.UDT_OUTER") + for option in options: + value1, value2 = option + obj = cursor.callfunc( + "pkg_TestNestedRecords.GetOuter", typ, (value1, value2) + ) + assert obj.INNER1 is not None + assert obj.INNER1.ATTR1 is None + assert obj.INNER1.ATTR2 == value1 + assert obj.INNER2 is not None + assert obj.INNER2.ATTR1 is None + assert obj.INNER2.ATTR2 == value2 + + +def test_2343(conn, cursor): + "2343 - test varray of numbers" + obj_type = conn.gettype("UDT_VARRAYOFNUMBER") + obj = cursor.callfunc("pkg_NestedTable.GetVarrayOfNumber", obj_type) + assert obj.aslist() == [10, 20, 30] + + +def test_2344(conn, cursor): + "2344 - test table of numbers" + obj_type = conn.gettype("UDT_TABLEOFNUMBER") + obj = cursor.callfunc("pkg_NestedTable.GetTableOfNumber", obj_type) + assert obj.aslist() == [15, 25, 35, 45] + + +def test_2345(conn, cursor, test_env): + "2345 - test table of varray of numbers" + obj_type = conn.gettype("UDT_TABLEOFVARRAYOFNUMBER") + obj = cursor.callfunc("pkg_NestedTable.GetTableOfVarrayOfNumber", obj_type) + plain_obj = test_env.get_db_object_as_plain_object(obj) + assert plain_obj == [[10, 20], [30, 40]] + + +def test_2346(conn, cursor, test_env): + "2346 - test nested table of nested tables" + num_tab_type = conn.gettype("UDT_TABLEOFNUMBER") + tab_num_tab_type = conn.gettype("UDT_TABLEOFTABLEOFNUMBER") + + num_tab_1 = num_tab_type.newobject([1, 2]) + num_tab_2 = num_tab_type.newobject([3, 4, 5]) + num_tab_3 = num_tab_type.newobject([6, 7, 8, 9, 10]) + tab_num_tab = tab_num_tab_type.newobject( + [num_tab_1, None, num_tab_2, None, num_tab_3] + ) + + cursor.execute( + """ + insert into NestedCollectionTests (Id, TableCol) + values (:1, :2) + """, + [1, tab_num_tab], + ) + cursor.execute("select TableCol from NestedCollectionTests") + (obj,) = cursor.fetchone() + plain_obj = test_env.get_db_object_as_plain_object(obj) + expected_data = [[1, 2], None, [3, 4, 5], None, [6, 7, 8, 9, 10]] + assert plain_obj == expected_data + + +def test_2347(conn, cursor, test_env): + "2347 - test nested table of varrays" + num_tab_type = conn.gettype("UDT_TABLEOFNUMBER") + arr_num_tab_type = conn.gettype("UDT_VARRAYOFTABLEOFNUMBER") + + num_tab_1 = num_tab_type.newobject([4, 8]) + num_tab_2 = num_tab_type.newobject([1, 3, 5]) + num_tab_3 = num_tab_type.newobject([2, 6, 10, 7, 9]) + tab_num_tab = arr_num_tab_type.newobject( + [num_tab_1, None, num_tab_2, None, num_tab_3] + ) + + cursor.execute( + """ + insert into NestedCollectionTests (Id, VarrayCol) + values (:1, :2) + """, + [1, tab_num_tab], + ) + cursor.execute("select VarrayCol from NestedCollectionTests") + (obj,) = cursor.fetchone() + plain_obj = test_env.get_db_object_as_plain_object(obj) + expected_data = 
[[4, 8], None, [1, 3, 5], None, [2, 6, 10, 7, 9]] + assert plain_obj == expected_data + + +def test_2348(conn, test_env): + "2348 - test using collection methods on an object that is not one" + obj_type = conn.gettype("UDT_OBJECT") + obj = obj_type.newobject() + with test_env.assert_raises_full_code("DPY-2036"): + obj.append(5) + with test_env.assert_raises_full_code("DPY-2036"): + obj.asdict() + with test_env.assert_raises_full_code("DPY-2036"): + obj.aslist() + with test_env.assert_raises_full_code("DPY-2036"): + obj.delete(5) + with test_env.assert_raises_full_code("DPY-2036"): + obj.exists(5) + with test_env.assert_raises_full_code("DPY-2036"): + obj.extend([5]) + with test_env.assert_raises_full_code("DPY-2036"): + obj.first() + with test_env.assert_raises_full_code("DPY-2036"): + obj.getelement(5) + with test_env.assert_raises_full_code("DPY-2036"): + obj.last() + with test_env.assert_raises_full_code("DPY-2036"): + obj.next(5) + with test_env.assert_raises_full_code("DPY-2036"): + obj.prev(5) + with test_env.assert_raises_full_code("DPY-2036"): + obj.setelement(5, None) + with test_env.assert_raises_full_code("DPY-2036"): + obj.size() + with test_env.assert_raises_full_code("DPY-2036"): + obj.trim(0) diff --git a/tests/test_2400_pool.py b/tests/test_2400_pool.py index c2244def..e30ab576 100644 --- a/tests/test_2400_pool.py +++ b/tests/test_2400_pool.py @@ -26,30 +26,19 @@ 2400 - Module for testing pools """ +import re import threading -import unittest import oracledb -import test_env +import pytest -class TestCase(test_env.BaseTestCase): - require_connection = False +class CallableSessionCallback: - def __connect_and_drop(self): - with self.pool.acquire() as conn: - cursor = conn.cursor() - cursor.execute("select count(*) from TestNumbers") - (count,) = cursor.fetchone() - self.assertEqual(count, 10) - - def __connect_and_generate_error(self): - with self.pool.acquire() as conn: - cursor = conn.cursor() - with self.assertRaisesFullCode("ORA-01476"): - cursor.execute("select 1 / 0 from dual") + def __init__(self): + self.session_called = False - def __callable_session_callback(self, conn, requested_tag): + def __call__(self, conn, requested_tag): self.session_called = True supported_formats = { @@ -88,962 +77,986 @@ def __callable_session_callback(self, conn, requested_tag): cursor.execute(sql) conn.tag = requested_tag - def __perform_reconfigure_test( - self, - parameter_name, - parameter_value, - min=3, - max=30, - increment=4, - timeout=5, - wait_timeout=5000, - stmtcachesize=25, - max_lifetime_session=1000, - max_sessions_per_shard=3, - ping_interval=30, + +def _connect_and_drop(pool): + with pool.acquire() as conn: + cursor = conn.cursor() + cursor.execute("select count(*) from TestNumbers") + (count,) = cursor.fetchone() + assert count == 10 + + +def _connect_and_generate_error(pool, test_env): + with pool.acquire() as conn: + cursor = conn.cursor() + with test_env.assert_raises_full_code("ORA-01476"): + cursor.execute("select 1 / 0 from dual") + + +def _perform_reconfigure_test( + test_env, + parameter_name, + parameter_value, + min=3, + max=30, + increment=4, + timeout=5, + wait_timeout=5000, + stmtcachesize=25, + max_lifetime_session=1000, + max_sessions_per_shard=3, + ping_interval=30, + getmode=oracledb.POOL_GETMODE_WAIT, + soda_metadata_cache=False, +): + creation_args = dict( + min=min, + max=max, + increment=increment, + timeout=timeout, + stmtcachesize=stmtcachesize, + ping_interval=ping_interval, + getmode=getmode, + ) + if test_env.has_client_version(12, 1): + 
creation_args["max_lifetime_session"] = max_lifetime_session + if test_env.has_client_version(12, 2): + creation_args["wait_timeout"] = wait_timeout + if test_env.has_client_version(18, 3): + creation_args["max_sessions_per_shard"] = max_sessions_per_shard + if test_env.has_client_version(19, 11): + creation_args["soda_metadata_cache"] = soda_metadata_cache + + pool = test_env.get_pool(**creation_args) + conn = pool.acquire() + + reconfigure_args = {} + reconfigure_args[parameter_name] = parameter_value + pool.reconfigure(**reconfigure_args) + conn.close() + + actual_args = {} + for name in creation_args: + actual_args[name] = getattr(pool, name) + expected_args = creation_args.copy() + expected_args.update(reconfigure_args) + assert actual_args == expected_args + + +def _verify_connection(conn, expected_user, expected_proxy_user=None): + cursor = conn.cursor() + cursor.execute( + """ + select + sys_context('userenv', 'session_user'), + sys_context('userenv', 'proxy_user') + from dual + """ + ) + actual_user, actual_proxy_user = cursor.fetchone() + assert actual_user == expected_user.upper() + if expected_proxy_user is not None: + expected_proxy_user = expected_proxy_user.upper() + assert actual_proxy_user == expected_proxy_user + + +def _verify_create_arg(test_env, arg_name, arg_value, sql): + args = {} + args[arg_name] = arg_value + pool = test_env.get_pool(**args) + with pool.acquire() as conn: + cursor = conn.cursor() + cursor.execute(sql) + (fetched_value,) = cursor.fetchone() + assert fetched_value == arg_value + pool.close() + + +def test_2400(test_env): + "2400 - test getting default pool parameters" + pool = test_env.get_pool() + assert pool.busy == 0 + assert pool.dsn == test_env.connect_string + assert pool.tnsentry == pool.dsn + if test_env.has_client_version(12, 2): + assert pool.getmode == oracledb.POOL_GETMODE_WAIT + assert pool.getmode is oracledb.PoolGetMode.WAIT + assert pool.homogeneous + assert pool.increment == 1 + assert pool.max == 2 + if test_env.has_client_version(12, 1): + assert pool.max_lifetime_session == 0 + if not pool.thin and test_env.has_client_version(18, 3): + assert pool.max_sessions_per_shard == 0 + assert pool.min == 1 + if pool.thin: + assert pool.name is None + else: + assert re.search("^OCI:SP:.+", pool.name) is not None + assert pool.ping_interval == 60 + assert pool.stmtcachesize == oracledb.defaults.stmtcachesize + if not pool.thin and test_env.has_client_version(19, 11): + assert not pool.soda_metadata_cache + assert pool.thin == (not test_env.use_thick_mode) + assert pool.timeout == 0 + assert pool.username == test_env.main_user + + +def test_2401(skip_unless_thick_mode, test_env): + "2401 - test that proxy authentication is possible" + pool = test_env.get_pool( + min=2, max=8, increment=3, getmode=oracledb.POOL_GETMODE_WAIT + ) + assert pool.homogeneous, "homogeneous should be True by default" + with test_env.assert_raises_full_code("DPI-1012"): + pool.acquire(user="missing_proxyuser") + pool = test_env.get_pool( + min=2, + max=8, + increment=3, getmode=oracledb.POOL_GETMODE_WAIT, - soda_metadata_cache=False, - ): - creation_args = dict( - min=min, - max=max, - increment=increment, - timeout=timeout, - stmtcachesize=stmtcachesize, - ping_interval=ping_interval, - getmode=getmode, + homogeneous=False, + ) + assert not pool.homogeneous + conn = pool.acquire(user=test_env.proxy_user) + cursor = conn.cursor() + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == test_env.proxy_user.upper() + conn.close() + + 
+def test_2402(test_env): + "2402 - test setting pool attributes" + pool = test_env.get_pool() + test_values = [ + ((11, 2), "ping_interval", 30), + ((11, 2), "stmtcachesize", 100), + ((11, 2), "timeout", 10), + ((12, 2), "getmode", oracledb.POOL_GETMODE_TIMEDWAIT), + ((12, 1), "max_lifetime_session", 3), + ] + for version, attr_name, value in test_values: + if test_env.has_client_version(*version): + setattr(pool, attr_name, value) + assert getattr(pool, attr_name) == value + pytest.raises(TypeError, setattr, pool, attr_name, "invalid value") + + if not pool.thin and test_env.has_client_version(18, 3): + assert pool.max_sessions_per_shard == 0 + pytest.raises( + TypeError, setattr, pool, "max_sessions_per_shard", "bad_val" ) - if test_env.has_client_version(12, 1): - creation_args["max_lifetime_session"] = max_lifetime_session - if test_env.has_client_version(12, 2): - creation_args["wait_timeout"] = wait_timeout - if test_env.has_client_version(18, 3): - creation_args["max_sessions_per_shard"] = max_sessions_per_shard - if test_env.has_client_version(19, 11): - creation_args["soda_metadata_cache"] = soda_metadata_cache - - pool = test_env.get_pool(**creation_args) - conn = pool.acquire() - - reconfigure_args = {} - reconfigure_args[parameter_name] = parameter_value - pool.reconfigure(**reconfigure_args) - conn.close() - actual_args = {} - for name in creation_args: - actual_args[name] = getattr(pool, name) - expected_args = creation_args.copy() - expected_args.update(reconfigure_args) - self.assertEqual(actual_args, expected_args) - - def __verify_connection( - self, connection, expected_user, expected_proxy_user=None - ): - cursor = connection.cursor() - cursor.execute( - """ - select - sys_context('userenv', 'session_user'), - sys_context('userenv', 'proxy_user') - from dual - """ - ) - actual_user, actual_proxy_user = cursor.fetchone() - self.assertEqual(actual_user, expected_user.upper()) - self.assertEqual( - actual_proxy_user, - expected_proxy_user and expected_proxy_user.upper(), + if not pool.thin and test_env.has_client_version(19, 11): + pool.soda_metadata_cache = True + assert pool.soda_metadata_cache + pytest.raises(TypeError, setattr, pool, "soda_metadata_cache", 22) + + +def test_2403(test_env): + "2403 - connection rolls back before released back to the pool" + pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_WAIT) + conn = pool.acquire() + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + cursor.execute("insert into TestTempTable (IntCol) values (1)") + cursor.close() + pool.release(conn) + pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_WAIT) + conn = pool.acquire() + cursor = conn.cursor() + cursor.execute("select count(*) from TestTempTable") + (count,) = cursor.fetchone() + assert count == 0 + conn.close() + + +def test_2404(test_env): + "2404 - test session pool with multiple threads" + pool = test_env.get_pool( + min=5, max=20, increment=2, getmode=oracledb.POOL_GETMODE_WAIT + ) + threads = [] + for i in range(20): + thread = threading.Thread(None, _connect_and_drop, args=(pool,)) + threads.append(thread) + thread.start() + for thread in threads: + thread.join() + + +def test_2405(test_env): + "2405 - test session pool with multiple threads (with errors)" + pool = test_env.get_pool( + min=5, max=20, increment=2, getmode=oracledb.POOL_GETMODE_WAIT + ) + threads = [] + for i in range(20): + thread = threading.Thread( + None, _connect_and_generate_error, args=(pool, test_env) ) + threads.append(thread) + thread.start() + for thread 
in threads: + thread.join() - def __verify_create_arg(self, arg_name, arg_value, sql): - args = {} - args[arg_name] = arg_value - pool = test_env.get_pool(**args) - with pool.acquire() as conn: - cursor = conn.cursor() - cursor.execute(sql) - (fetched_value,) = cursor.fetchone() - self.assertEqual(fetched_value, arg_value) - pool.close() - def test_2400(self): - "2400 - test getting default pool parameters" - pool = test_env.get_pool() - self.assertEqual(pool.busy, 0) - self.assertEqual(pool.dsn, test_env.get_connect_string()) - self.assertEqual(pool.tnsentry, pool.dsn) - if test_env.has_client_version(12, 2): - self.assertEqual(pool.getmode, oracledb.POOL_GETMODE_WAIT) - self.assertIs(pool.getmode, oracledb.PoolGetMode.WAIT) - self.assertTrue(pool.homogeneous) - self.assertEqual(pool.increment, 1) - self.assertEqual(pool.max, 2) - if test_env.has_client_version(12, 1): - self.assertEqual(pool.max_lifetime_session, 0) - if not pool.thin and test_env.has_client_version(18, 3): - self.assertEqual(pool.max_sessions_per_shard, 0) - self.assertEqual(pool.min, 1) - if pool.thin: - self.assertIsNone(pool.name) - else: - self.assertRegex(pool.name, "^OCI:SP:.+") - self.assertEqual(pool.ping_interval, 60) - self.assertEqual(pool.stmtcachesize, oracledb.defaults.stmtcachesize) - if not pool.thin and test_env.has_client_version(19, 11): - self.assertFalse(pool.soda_metadata_cache) - self.assertEqual(pool.thin, not test_env.run_in_thick_mode()) - self.assertEqual(pool.timeout, 0) - self.assertEqual(pool.username, test_env.get_main_user()) - - @test_env.skip_unless_thick_mode() - def test_2401(self): - "2401 - test that proxy authentication is possible" - pool = test_env.get_pool( - min=2, max=8, increment=3, getmode=oracledb.POOL_GETMODE_WAIT - ) - self.assertTrue( - pool.homogeneous, "homogeneous should be True by default" - ) - with self.assertRaisesFullCode("DPI-1012"): - pool.acquire(user="missing_proxyuser") - pool = test_env.get_pool( - min=2, - max=8, - increment=3, - getmode=oracledb.POOL_GETMODE_WAIT, - homogeneous=False, - ) - msg = "homogeneous should be False after setting it in the constructor" - self.assertFalse(pool.homogeneous, msg) - conn = pool.acquire(user=test_env.get_proxy_user()) - cursor = conn.cursor() - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, test_env.get_proxy_user().upper()) +def test_2406(skip_if_drcp, test_env): + "2406 - test session pool with various types of purity" + pool = test_env.get_pool( + min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT + ) + + # get connection and set the action + action = "TEST_ACTION" + conn = pool.acquire() + conn.action = action + cursor = conn.cursor() + cursor.execute("select 1 from dual") + cursor.close() + pool.release(conn) + assert pool.opened == 1, "opened (1)" + + # verify that the connection still has the action set on it + conn = pool.acquire() + cursor = conn.cursor() + cursor.execute("select sys_context('userenv', 'action') from dual") + (result,) = cursor.fetchone() + assert result == action + cursor.close() + pool.release(conn) + assert pool.opened == 1, "opened (2)" + + # get a new connection with new purity (should not have state) + conn = pool.acquire(purity=oracledb.ATTR_PURITY_NEW) + cursor = conn.cursor() + cursor.execute("select sys_context('userenv', 'action') from dual") + (result,) = cursor.fetchone() + assert result is None + cursor.close() + pool.release(conn) + + +def test_2407(skip_if_drcp, skip_unless_thick_mode, test_env): + "2407 - test 
heterogeneous pool with user and password specified" + pool = test_env.get_pool( + min=2, + max=8, + increment=3, + homogeneous=False, + getmode=oracledb.POOL_GETMODE_WAIT, + ) + assert pool.homogeneous == 0 + conn = pool.acquire() + _verify_connection(pool.acquire(), test_env.main_user) + conn.close() + conn = pool.acquire(test_env.main_user, test_env.main_password) + _verify_connection(conn, test_env.main_user) + conn.close() + conn = pool.acquire(test_env.proxy_user, test_env.proxy_password) + _verify_connection(conn, test_env.proxy_user) + conn.close() + user_str = f"{test_env.main_user}[{test_env.proxy_user}]" + conn = pool.acquire(user_str, test_env.main_password) + assert conn.username == test_env.main_user + assert conn.proxy_user == test_env.proxy_user + _verify_connection(conn, test_env.proxy_user, test_env.main_user) + conn.close() + + +def test_2408(skip_if_drcp, skip_unless_thick_mode, test_env): + "2408 - test heterogeneous pool without user and password specified" + pool = test_env.get_pool( + user="", + password="", + min=2, + max=8, + increment=3, + getmode=oracledb.POOL_GETMODE_WAIT, + homogeneous=False, + ) + conn = pool.acquire(test_env.main_user, test_env.main_password) + _verify_connection(conn, test_env.main_user) + conn.close() + conn = pool.acquire(test_env.proxy_user, test_env.proxy_password) + _verify_connection(conn, test_env.proxy_user) + conn.close() + user_str = f"{test_env.main_user}[{test_env.proxy_user}]" + conn = pool.acquire(user_str, test_env.main_password) + _verify_connection(conn, test_env.proxy_user, test_env.main_user) + + +def test_2409(skip_unless_thick_mode, test_env): + "2409 - test heterogeneous pool with wrong password specified" + pool = test_env.get_pool( + min=2, + max=8, + increment=3, + getmode=oracledb.POOL_GETMODE_WAIT, + homogeneous=False, + ) + with test_env.assert_raises_full_code("ORA-01017"): + pool.acquire(test_env.proxy_user, "this is the wrong password") + + +def test_2410(skip_unless_thick_mode, test_env): + "2410 - test tagging a session" + pool = test_env.get_pool( + min=2, max=8, increment=3, getmode=oracledb.POOL_GETMODE_NOWAIT + ) + tag_mst = "TIME_ZONE=MST" + tag_utc = "TIME_ZONE=UTC" + + conn = pool.acquire() + assert conn.tag is None + pool.release(conn, tag=tag_mst) + + conn = pool.acquire() + assert conn.tag is None + conn.tag = tag_utc + conn.close() + + conn = pool.acquire(tag=tag_mst) + assert conn.tag == tag_mst + conn.close() + + conn = pool.acquire(tag=tag_utc) + assert conn.tag == tag_utc + conn.close() + + +def test_2411(skip_unless_thick_mode, test_env): + "2411 - test PL/SQL session callbacks" + if not test_env.has_client_version(12, 2): + pytest.skip("PL/SQL session callbacks not supported before 12.2") + callback = "pkg_SessionCallback.TheCallback" + pool = test_env.get_pool( + min=2, + max=8, + increment=3, + getmode=oracledb.POOL_GETMODE_NOWAIT, + session_callback=callback, + ) + tags = [ + "NLS_DATE_FORMAT=SIMPLE", + "NLS_DATE_FORMAT=FULL;TIME_ZONE=UTC", + "NLS_DATE_FORMAT=FULL;TIME_ZONE=MST", + ] + actual_tags = [None, None, "NLS_DATE_FORMAT=FULL;TIME_ZONE=UTC"] + + # truncate PL/SQL session callback log + conn = pool.acquire() + cursor = conn.cursor() + cursor.execute("truncate table PLSQLSessionCallbacks") + conn.close() + + # request sessions with each of the first two tags + for tag in tags[:2]: + conn = pool.acquire(tag=tag) conn.close() - def test_2402(self): - "2402 - test setting pool attributes" - pool = test_env.get_pool() - test_values = [ - ((11, 2), "ping_interval", 30), - ((11, 2), 
"stmtcachesize", 100), - ((11, 2), "timeout", 10), - ((12, 2), "getmode", oracledb.POOL_GETMODE_TIMEDWAIT), - ((12, 1), "max_lifetime_session", 3), - ] - for version, attr_name, value in test_values: - if test_env.has_client_version(*version): - setattr(pool, attr_name, value) - self.assertEqual(getattr(pool, attr_name), value) - self.assertRaises( - TypeError, setattr, pool, attr_name, "invalid value" - ) - - if not pool.thin and test_env.has_client_version(18, 3): - self.assertEqual(pool.max_sessions_per_shard, 0) - self.assertRaises( - TypeError, setattr, pool, "max_sessions_per_shard", "bad_val" - ) - - if not pool.thin and test_env.has_client_version(19, 11): - pool.soda_metadata_cache = True - self.assertTrue(pool.soda_metadata_cache) - self.assertRaises( - TypeError, setattr, pool, "soda_metadata_cache", 22 - ) - - def test_2403(self): - "2403 - connection rolls back before released back to the pool" - pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_WAIT) - conn = pool.acquire() - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - cursor.execute("insert into TestTempTable (IntCol) values (1)") - cursor.close() - pool.release(conn) - pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_WAIT) - conn = pool.acquire() - cursor = conn.cursor() - cursor.execute("select count(*) from TestTempTable") - (count,) = cursor.fetchone() - self.assertEqual(count, 0) + # for the last tag, use the matchanytag flag + conn = pool.acquire(tag=tags[2], matchanytag=True) + conn.close() + + # verify the PL/SQL session callback log is accurate + conn = pool.acquire() + cursor = conn.cursor() + cursor.execute( + """ + select RequestedTag, ActualTag + from PLSQLSessionCallbacks + order by FixupTimestamp + """ + ) + results = cursor.fetchall() + expected_results = list(zip(tags, actual_tags)) + assert results == expected_results + conn.close() + + +def test_2412(skip_unless_thick_mode, test_env): + "2412 - testTagging with Invalid key" + pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_NOWAIT) + conn = pool.acquire() + pytest.raises(TypeError, pool.release, conn, tag=12345) + if test_env.has_client_version(12, 2): + with test_env.assert_raises_full_code("ORA-24488"): + pool.release(conn, tag="INVALID_TAG") + + +def test_2413(test_env): + "2413 - test dropping/closing a connection from the pool" + pool = test_env.get_pool(min=1, max=5, increment=2) + conns1 = [pool.acquire() for _ in range(2)] + conns2 = [oracledb.connect(pool=pool) for _ in range(3)] + assert pool.busy == 5 + assert pool.opened == 5 + + for conn in conns1: + pool.drop(conn) + assert pool.busy == 3 + assert pool.opened == 3 + + for conn in conns2: conn.close() + assert pool.busy == 0 + assert pool.opened == 3 - def test_2404(self): - "2404 - test session pool with multiple threads" - self.pool = test_env.get_pool( - min=5, max=20, increment=2, getmode=oracledb.POOL_GETMODE_WAIT - ) - threads = [] - for i in range(20): - thread = threading.Thread(None, self.__connect_and_drop) - threads.append(thread) - thread.start() - for thread in threads: - thread.join() - - def test_2405(self): - "2405 - test session pool with multiple threads (with errors)" - self.pool = test_env.get_pool( - min=5, max=20, increment=2, getmode=oracledb.POOL_GETMODE_WAIT - ) - threads = [] - for i in range(20): - thread = threading.Thread(None, self.__connect_and_generate_error) - threads.append(thread) - thread.start() - for thread in threads: - thread.join() - - @test_env.skip_if_drcp() - def test_2406(self): - "2406 - test session 
pool with various types of purity" - pool = test_env.get_pool( - min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - # get connection and set the action - action = "TEST_ACTION" - conn = pool.acquire() - conn.action = action +def test_2414(test_env): + "2414 - test to ensure pure connections are being created correctly" + pool = test_env.get_pool( + min=1, max=2, increment=1, getmode=oracledb.POOL_GETMODE_WAIT + ) + conn1 = pool.acquire() + conn2 = pool.acquire() + assert pool.opened == 2, "opened (1)" + pool.release(conn1) + pool.release(conn2) + conn3 = pool.acquire(purity=oracledb.ATTR_PURITY_NEW) + assert pool.opened == 2, "opened (2)" + pool.release(conn3) + + +def test_2415(skip_unless_thick_mode, test_env): + "2415 - test the reconfigure values are changed and rest unchanged" + _perform_reconfigure_test(test_env, "min", 5) + _perform_reconfigure_test(test_env, "max", 20) + _perform_reconfigure_test(test_env, "increment", 5) + _perform_reconfigure_test(test_env, "timeout", 10) + _perform_reconfigure_test(test_env, "stmtcachesize", 40) + _perform_reconfigure_test(test_env, "ping_interval", 50) + _perform_reconfigure_test( + test_env, "getmode", oracledb.POOL_GETMODE_NOWAIT + ) + if test_env.has_client_version(12, 1): + _perform_reconfigure_test(test_env, "max_lifetime_session", 2000) + if test_env.has_client_version(12, 2): + _perform_reconfigure_test(test_env, "wait_timeout", 8000) + if test_env.has_client_version(18, 3): + _perform_reconfigure_test(test_env, "max_sessions_per_shard", 5) + if test_env.has_client_version(19, 11): + _perform_reconfigure_test(test_env, "soda_metadata_cache", True) + + +def test_2417(skip_unless_thick_mode, test_env): + "2417 - test that session callbacks are being called correctly" + callback_obj = CallableSessionCallback() + + pool = test_env.get_pool( + min=2, + max=5, + increment=1, + session_callback=callback_obj, + ) + + # new connection with a tag should invoke the session callback + with pool.acquire(tag="NLS_DATE_FORMAT=SIMPLE") as conn: cursor = conn.cursor() - cursor.execute("select 1 from dual") - cursor.close() - pool.release(conn) - self.assertEqual(pool.opened, 1, "opened (1)") + cursor.execute("select to_char(2021-05-20) from dual") + (result,) = cursor.fetchone() + assert callback_obj.session_called - # verify that the connection still has the action set on it - conn = pool.acquire() + # acquiring a connection with the same tag should not invoke the + # session callback + callback_obj.session_called = False + with pool.acquire(tag="NLS_DATE_FORMAT=SIMPLE") as conn: cursor = conn.cursor() - cursor.execute("select sys_context('userenv', 'action') from dual") + cursor.execute("select to_char(2021-05-20) from dual") (result,) = cursor.fetchone() - self.assertEqual(result, action) - cursor.close() - pool.release(conn) - self.assertEqual(pool.opened, 1, "opened (2)") + assert not callback_obj.session_called - # get a new connection with new purity (should not have state) - conn = pool.acquire(purity=oracledb.ATTR_PURITY_NEW) + # acquiring a connection with a new tag should invoke the session + # callback + callback_obj.session_called = False + with pool.acquire(tag="NLS_DATE_FORMAT=FULL;TIME_ZONE=UTC") as conn: cursor = conn.cursor() - cursor.execute("select sys_context('userenv', 'action') from dual") + cursor.execute("select to_char(current_date) from dual") (result,) = cursor.fetchone() - self.assertIsNone(result) - cursor.close() - pool.release(conn) - - @test_env.skip_if_drcp() - @test_env.skip_unless_thick_mode() - def 
test_2407(self): - "2407 - test heterogeneous pool with user and password specified" - pool = test_env.get_pool( - min=2, - max=8, - increment=3, - homogeneous=False, - getmode=oracledb.POOL_GETMODE_WAIT, - ) - self.assertEqual(pool.homogeneous, 0) - conn = pool.acquire() - self.__verify_connection(pool.acquire(), test_env.get_main_user()) - conn.close() - conn = pool.acquire( - test_env.get_main_user(), test_env.get_main_password() - ) - self.__verify_connection(conn, test_env.get_main_user()) - conn.close() - conn = pool.acquire( - test_env.get_proxy_user(), test_env.get_proxy_password() - ) - self.__verify_connection(conn, test_env.get_proxy_user()) - conn.close() - user_str = f"{test_env.get_main_user()}[{test_env.get_proxy_user()}]" - conn = pool.acquire(user_str, test_env.get_main_password()) - self.assertEqual(conn.username, test_env.get_main_user()) - self.assertEqual(conn.proxy_user, test_env.get_proxy_user()) - self.__verify_connection( - conn, test_env.get_proxy_user(), test_env.get_main_user() - ) - conn.close() + assert callback_obj.session_called + + # acquiring a connection with a new tag and specifying that a + # connection with any tag can be acquired should invoke the session + # callback + callback_obj.session_called = False + with pool.acquire( + tag="NLS_DATE_FORMAT=FULL;TIME_ZONE=MST", matchanytag=True + ) as conn: + cursor = conn.cursor() + cursor.execute("select to_char(current_date) from dual") + (result,) = cursor.fetchone() + assert callback_obj.session_called - @test_env.skip_if_drcp() - @test_env.skip_unless_thick_mode() - def test_2408(self): - "2408 - test heterogeneous pool without user and password specified" - pool = test_env.get_pool( - user="", - password="", - min=2, - max=8, - increment=3, - getmode=oracledb.POOL_GETMODE_WAIT, - homogeneous=False, - ) - conn = pool.acquire( - test_env.get_main_user(), test_env.get_main_password() - ) - self.__verify_connection(conn, test_env.get_main_user()) - conn.close() - conn = pool.acquire( - test_env.get_proxy_user(), test_env.get_proxy_password() - ) - self.__verify_connection(conn, test_env.get_proxy_user()) - conn.close() - user_str = f"{test_env.get_main_user()}[{test_env.get_proxy_user()}]" - conn = pool.acquire(user_str, test_env.get_main_password()) - self.__verify_connection( - conn, test_env.get_proxy_user(), test_env.get_main_user() - ) + # new connection with no tag should invoke the session callback + callback_obj.session_called = False + with pool.acquire() as conn: + cursor = conn.cursor() + cursor.execute("select to_char(current_date) from dual") + (result,) = cursor.fetchone() + assert callback_obj.session_called - @test_env.skip_unless_thick_mode() - def test_2409(self): - "2409 - test heterogeneous pool with wrong password specified" - pool = test_env.get_pool( - min=2, - max=8, - increment=3, - getmode=oracledb.POOL_GETMODE_WAIT, - homogeneous=False, - ) - with self.assertRaisesFullCode("ORA-01017"): - pool.acquire( - test_env.get_proxy_user(), "this is the wrong password" - ) - - @test_env.skip_unless_thick_mode() - def test_2410(self): - "2410 - test tagging a session" - pool = test_env.get_pool( - min=2, max=8, increment=3, getmode=oracledb.POOL_GETMODE_NOWAIT - ) - tag_mst = "TIME_ZONE=MST" - tag_utc = "TIME_ZONE=UTC" - conn = pool.acquire() - self.assertIsNone(conn.tag) - pool.release(conn, tag=tag_mst) +def test_2418(test_env): + "2418 - test closing a pool normally with no connections checked out" + pool = test_env.get_pool( + min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT 
+ ) + pool.close() - conn = pool.acquire() - self.assertIsNone(conn.tag) - conn.tag = tag_utc - conn.close() - conn = pool.acquire(tag=tag_mst) - self.assertEqual(conn.tag, tag_mst) - conn.close() +def test_2419(test_env): + "2419 - test closing a pool normally with connections checked out" + pool = test_env.get_pool( + min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT + ) + with pool.acquire(): + with test_env.assert_raises_full_code("DPY-1005"): + pool.close() - conn = pool.acquire(tag=tag_utc) - self.assertEqual(conn.tag, tag_utc) - conn.close() - @test_env.skip_unless_thick_mode() - def test_2411(self): - "2411 - test PL/SQL session callbacks" - if not test_env.has_client_version(12, 2): - self.skipTest("PL/SQL session callbacks not supported before 12.2") - callback = "pkg_SessionCallback.TheCallback" - pool = test_env.get_pool( - min=2, - max=8, - increment=3, - getmode=oracledb.POOL_GETMODE_NOWAIT, - session_callback=callback, - ) - tags = [ - "NLS_DATE_FORMAT=SIMPLE", - "NLS_DATE_FORMAT=FULL;TIME_ZONE=UTC", - "NLS_DATE_FORMAT=FULL;TIME_ZONE=MST", - ] - actual_tags = [None, None, "NLS_DATE_FORMAT=FULL;TIME_ZONE=UTC"] - - # truncate PL/SQL session callback log - conn = pool.acquire() - cursor = conn.cursor() - cursor.execute("truncate table PLSQLSessionCallbacks") - conn.close() +def test_2420(test_env): + "2420 - test closing a pool forcibly" + pool = test_env.get_pool( + min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT + ) + with pool.acquire(): + pool.close(force=True) + + +def test_2421(test_env): + "2421 - using the pool after it is closed raises an exception" + pool = test_env.get_pool( + min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT + ) + pool.close() + with test_env.assert_raises_full_code("DPY-1002"): + pool.acquire() + + +def test_2422(test_env): + "2422 - using the pool beyond max limit raises an error" + if not test_env.has_client_version(19): + pytest.skip("not supported on this client") + pool = test_env.get_pool( + min=1, max=2, increment=1, getmode=oracledb.POOL_GETMODE_WAIT + ) + with pool.acquire(), pool.acquire(): + pool.getmode = oracledb.POOL_GETMODE_NOWAIT + with test_env.assert_raises_full_code("DPY-4005"): + pool.acquire() + + +def test_2423(test_env): + "2423 - callable session callback is executed for new connections" + + class Counter: + num_calls = 0 + + @classmethod + def session_callback(cls, conn, requested_tag): + cls.num_calls += 1 - # request sessions with each of the first two tags - for tag in tags[:2]: - conn = pool.acquire(tag=tag) + pool = test_env.get_pool( + min=1, + max=2, + increment=1, + session_callback=Counter.session_callback, + ) + with pool.acquire(), pool.acquire(): + pass + with pool.acquire(), pool.acquire(): + pass + assert Counter.num_calls == 2 + + +def test_2424(skip_if_drcp, admin_conn, test_env): + "2424 - drop the pooled connection on receiving dead connection error" + pool = test_env.get_pool(min=2, max=2, increment=2) + + # acquire connections from the pool and kill all the sessions + with admin_conn.cursor() as admin_cursor: + for conn in [pool.acquire() for i in range(2)]: + sid, serial = test_env.get_sid_serial(conn) + sql = f"alter system kill session '{sid},{serial}'" + admin_cursor.execute(sql) conn.close() + assert pool.opened == 2 - # for the last tag, use the matchanytag flag - conn = pool.acquire(tag=tags[2], matchanytag=True) + # when try to re-use the killed sessions error will be raised; + # release all such connections + for conn in [pool.acquire() for i in range(2)]: + 
with conn.cursor() as cursor: + with test_env.assert_raises_full_code("DPY-4011"): + cursor.execute("select user from dual") conn.close() - # verify the PL/SQL session callback log is accurate - conn = pool.acquire() - cursor = conn.cursor() - cursor.execute( - """ - select RequestedTag, ActualTag - from PLSQLSessionCallbacks - order by FixupTimestamp - """ - ) - results = cursor.fetchall() - expected_results = list(zip(tags, actual_tags)) - self.assertEqual(results, expected_results) + # if a free connection is available, it can be used; otherwise a new + # connection will be created + for conn in [pool.acquire() for i in range(2)]: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == test_env.main_user.upper() conn.close() + assert pool.opened == 2 - @test_env.skip_unless_thick_mode() - def test_2412(self): - "2412 - testTagging with Invalid key" - pool = test_env.get_pool(getmode=oracledb.POOL_GETMODE_NOWAIT) - conn = pool.acquire() - self.assertRaises(TypeError, pool.release, conn, tag=12345) - if test_env.has_client_version(12, 2): - with self.assertRaisesFullCode("ORA-24488"): - pool.release(conn, tag="INVALID_TAG") - - def test_2413(self): - "2413 - test dropping/closing a connection from the pool" - pool = test_env.get_pool(min=1, max=5, increment=2) - conns1 = [pool.acquire() for _ in range(2)] - conns2 = [oracledb.connect(pool=pool) for _ in range(3)] - self.assertEqual(pool.busy, 5) - self.assertEqual(pool.opened, 5) - - for conn in conns1: - pool.drop(conn) - self.assertEqual(pool.busy, 3) - self.assertEqual(pool.opened, 3) - - for conn in conns2: - conn.close() - self.assertEqual(pool.busy, 0) - self.assertEqual(pool.opened, 3) - - def test_2414(self): - "2414 - test to ensure pure connections are being created correctly" - pool = test_env.get_pool( - min=1, max=2, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - conn1 = pool.acquire() - conn2 = pool.acquire() - self.assertEqual(pool.opened, 2, "opened (1)") - pool.release(conn1) - pool.release(conn2) - conn3 = pool.acquire(purity=oracledb.ATTR_PURITY_NEW) - self.assertEqual(pool.opened, 2, "opened (2)") - pool.release(conn3) - - @test_env.skip_unless_thick_mode() - def test_2415(self): - "2415 - test the reconfigure values are changed and rest unchanged" - self.__perform_reconfigure_test("min", 5) - self.__perform_reconfigure_test("max", 20) - self.__perform_reconfigure_test("increment", 5) - self.__perform_reconfigure_test("timeout", 10) - self.__perform_reconfigure_test("stmtcachesize", 40) - self.__perform_reconfigure_test("ping_interval", 50) - self.__perform_reconfigure_test( - "getmode", oracledb.POOL_GETMODE_NOWAIT - ) - if test_env.has_client_version(12, 1): - self.__perform_reconfigure_test("max_lifetime_session", 2000) - if test_env.has_client_version(12, 2): - self.__perform_reconfigure_test("wait_timeout", 8000) - if test_env.has_client_version(18, 3): - self.__perform_reconfigure_test("max_sessions_per_shard", 5) - if test_env.has_client_version(19, 11): - self.__perform_reconfigure_test("soda_metadata_cache", True) - - @test_env.skip_unless_thick_mode() - def test_2417(self): - "2417 - test that session callbacks are being called correctly" - pool = test_env.get_pool( - min=2, - max=5, - increment=1, - session_callback=self.__callable_session_callback, - ) - # new connection with a tag should invoke the session callback - with pool.acquire(tag="NLS_DATE_FORMAT=SIMPLE") as conn: - cursor = conn.cursor() - cursor.execute("select 
to_char(2021-05-20) from dual") +def test_2425(test_env): + "2425 - acquire a connection from an empty pool (min=0)" + pool = test_env.get_pool(min=0, max=2, increment=2) + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") (result,) = cursor.fetchone() - self.assertTrue(self.session_called) + assert result == test_env.main_user.upper() - # acquiring a connection with the same tag should not invoke the - # session callback - self.session_called = False - with pool.acquire(tag="NLS_DATE_FORMAT=SIMPLE") as conn: - cursor = conn.cursor() - cursor.execute("select to_char(2021-05-20) from dual") - (result,) = cursor.fetchone() - self.assertFalse(self.session_called) - # acquiring a connection with a new tag should invoke the session - # callback - self.session_called = False - with pool.acquire(tag="NLS_DATE_FORMAT=FULL;TIME_ZONE=UTC") as conn: - cursor = conn.cursor() - cursor.execute("select to_char(current_date) from dual") - (result,) = cursor.fetchone() - self.assertTrue(self.session_called) +def test_2426(test_env): + "2426 - get different object types from different connections" + pool = test_env.get_pool(min=1, max=2, increment=1) + with pool.acquire() as conn: + typ = conn.gettype("UDT_SUBOBJECT") + assert typ.name == "UDT_SUBOBJECT" + with pool.acquire() as conn: + typ = conn.gettype("UDT_OBJECTARRAY") + assert typ.name == "UDT_OBJECTARRAY" - # acquiring a connection with a new tag and specifying that a - # connection with any tag can be acquired should invoke the session - # callback - self.session_called = False - with pool.acquire( - tag="NLS_DATE_FORMAT=FULL;TIME_ZONE=MST", matchanytag=True - ) as conn: - cursor = conn.cursor() - cursor.execute("select to_char(current_date) from dual") - (result,) = cursor.fetchone() - self.assertTrue(self.session_called) - # new connection with no tag should invoke the session callback - self.session_called = False - with pool.acquire() as conn: - cursor = conn.cursor() - cursor.execute("select to_char(current_date) from dual") - (result,) = cursor.fetchone() - self.assertTrue(self.session_called) +def test_2427(test_env): + "2427 - test creating a pool using a proxy user" + user_str = f"{test_env.main_user}[{test_env.proxy_user}]" + pool = test_env.get_pool(user=user_str) + _verify_connection(pool.acquire(), test_env.proxy_user, test_env.main_user) - def test_2418(self): - "2418 - test closing a pool normally with no connections checked out" - pool = test_env.get_pool( - min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - pool.close() - def test_2419(self): - "2419 - test closing a pool normally with connections checked out" - pool = test_env.get_pool( - min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - with pool.acquire(): - with self.assertRaisesFullCode("DPY-1005"): - pool.close() +def test_2428(skip_if_drcp, test_env): + "2428 - test acquiring conn from pool in LIFO order" + pool = test_env.get_pool( + min=5, max=10, increment=1, getmode=oracledb.POOL_GETMODE_WAIT + ) + sql = "select sys_context('userenv', 'sid') from dual" + conns = [pool.acquire() for i in range(3)] + sids = [conn.cursor().execute(sql).fetchone()[0] for conn in conns] - def test_2420(self): - "2420 - test closing a pool forcibly" - pool = test_env.get_pool( - min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - with pool.acquire(): - pool.close(force=True) + conns[1].close() + conns[2].close() + conns[0].close() + + conn = pool.acquire() + sid = 
conn.cursor().execute(sql).fetchone()[0] + assert sid == sids[0], "not LIFO" + + +def test_2429(test_env): + "2429 - verify that dynamic pool cannot have an increment of zero" + pool = test_env.get_pool(min=1, max=3, increment=0) + assert pool.increment == 1 + with pool.acquire(), pool.acquire(): + pass - def test_2421(self): - "2421 - using the pool after it is closed raises an exception" - pool = test_env.get_pool( - min=1, max=8, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - pool.close() - with self.assertRaisesFullCode("DPY-1002"): - pool.acquire() - @unittest.skipUnless( - test_env.has_client_version(19), "not supported on this client" +def test_2430(test_env): + "2430 - verify that static pool can have an increment of zero" + pool = test_env.get_pool(min=1, max=1, increment=0) + assert pool.increment == 0 + with pool.acquire(): + pass + + +def test_2431(test_env): + "2431 - verify that connection with different cclass is reused" + cclass = "cclass2431" + pool = test_env.get_pool(min=1, max=1) + # ignore the first acquire which, depending on the speed with which the + # minimum connections are created, may create a connection that is + # discarded; instead, use the second acquire which should remain in the + # pool + with pool.acquire(cclass=cclass) as conn: + pass + with pool.acquire(cclass=cclass) as conn: + sid_serial = test_env.get_sid_serial(conn) + with pool.acquire(cclass=cclass) as conn: + next_sid_serial = test_env.get_sid_serial(conn) + assert next_sid_serial == sid_serial + assert pool.opened == 1 + + +def test_2432(test_env): + "2432 - test creating a pool invalid params" + with test_env.assert_raises_full_code("DPY-2027"): + oracledb.create_pool(params="bad params") + + +def test_2433(test_env): + "2433 - test releasing and dropping an invalid connection" + pool = test_env.get_pool() + pytest.raises(TypeError, pool.release, ["invalid connection"]) + pytest.raises(TypeError, pool.drop, ["invalid connection"]) + + +def test_2434(test_env): + "2434 - test creating a pool with invalid pool_class" + with test_env.assert_raises_full_code("DPY-2026"): + oracledb.create_pool(pool_class=int) + + +def test_2435(test_env): + "2435 - test creating a pool with a subclassed connection type" + + class MyConnection(oracledb.Connection): + pass + + pool = test_env.get_pool(connectiontype=MyConnection) + with pool.acquire() as conn: + assert isinstance(conn, MyConnection) + + +def test_2436(test_env): + "2436 - test creating a pool with a subclassed pool type" + + class MyPool(oracledb.ConnectionPool): + pass + + pool = test_env.get_pool(pool_class=MyPool) + assert isinstance(pool, MyPool) + + +def test_2437(test_env): + "2437 - test connectiontype with an invalid connection class" + with test_env.assert_raises_full_code("DPY-2023"): + test_env.get_pool(connectiontype=oracledb.AsyncConnection) + with test_env.assert_raises_full_code("DPY-2023"): + test_env.get_pool(connectiontype=int) + + +def test_2438(skip_unless_pool_timed_wait_supported, test_env): + "2438 - ensure that timed wait times out with appropriate exception" + pool = test_env.get_pool( + getmode=oracledb.POOL_GETMODE_TIMEDWAIT, min=0, wait_timeout=1 ) - def test_2422(self): - "2422 - using the pool beyond max limit raises an error" - pool = test_env.get_pool( - min=1, max=2, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - with pool.acquire(), pool.acquire(): - pool.getmode = oracledb.POOL_GETMODE_NOWAIT - with self.assertRaisesFullCode("DPY-4005"): - pool.acquire() - - def test_2423(self): - "2423 - callable 
session callback is executed for new connections" - - class Counter: - num_calls = 0 - - @classmethod - def session_callback(cls, conn, requested_tag): - cls.num_calls += 1 - - pool = test_env.get_pool( - min=1, - max=2, - increment=1, - session_callback=Counter.session_callback, - ) - with pool.acquire(), pool.acquire(): - pass - with pool.acquire(), pool.acquire(): - pass - self.assertEqual(Counter.num_calls, 2) - - @test_env.skip_if_drcp() - def test_2424(self): - "2424 - drop the pooled connection on receiving dead connection error" - admin_conn = test_env.get_admin_connection() - pool = test_env.get_pool(min=2, max=2, increment=2) - - # acquire connections from the pool and kill all the sessions - with admin_conn.cursor() as admin_cursor: - for conn in [pool.acquire() for i in range(2)]: - sid, serial = self.get_sid_serial(conn) - sql = f"alter system kill session '{sid},{serial}'" - admin_cursor.execute(sql) - conn.close() - self.assertEqual(pool.opened, 2) - - # when try to re-use the killed sessions error will be raised; - # release all such connections - for conn in [pool.acquire() for i in range(2)]: - with conn.cursor() as cursor: - with self.assertRaisesFullCode("DPY-4011"): - cursor.execute("select user from dual") - conn.close() + with test_env.assert_raises_full_code("DPY-4005"): + pool.acquire() - # if a free connection is available, it can be used; otherwise a new - # connection will be created - for conn in [pool.acquire() for i in range(2)]: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) - conn.close() - self.assertEqual(pool.opened, 2) - def test_2425(self): - "2425 - acquire a connection from an empty pool (min=0)" - pool = test_env.get_pool(min=0, max=2, increment=2) - with pool.acquire() as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (result,) = cursor.fetchone() - self.assertEqual(result, test_env.get_main_user().upper()) +def test_2439(skip_unless_call_timeout_supported, test_env): + "2439 - ensure call timeout is reset on connections returned by pool" + pool = test_env.get_pool(ping_timeout=1000, ping_interval=0) + with pool.acquire() as conn: + assert conn.call_timeout == 0 + with pool.acquire() as conn: + assert conn.call_timeout == 0 - def test_2426(self): - "2426 - get different object types from different connections" - pool = test_env.get_pool(min=1, max=2, increment=1) - with pool.acquire() as conn: - typ = conn.gettype("UDT_SUBOBJECT") - self.assertEqual(typ.name, "UDT_SUBOBJECT") - with pool.acquire() as conn: - typ = conn.gettype("UDT_OBJECTARRAY") - self.assertEqual(typ.name, "UDT_OBJECTARRAY") - - def test_2427(self): - "2427 - test creating a pool using a proxy user" - user_str = f"{test_env.get_main_user()}[{test_env.get_proxy_user()}]" - pool = test_env.get_pool(user=user_str) - self.__verify_connection( - pool.acquire(), test_env.get_proxy_user(), test_env.get_main_user() - ) - @test_env.skip_if_drcp() - def test_2428(self): - "2428 - test acquiring conn from pool in LIFO order" - pool = test_env.get_pool( - min=5, max=10, increment=1, getmode=oracledb.POOL_GETMODE_WAIT - ) - sql = "select sys_context('userenv', 'sid') from dual" - conns = [pool.acquire() for i in range(3)] - sids = [conn.cursor().execute(sql).fetchone()[0] for conn in conns] - - conns[1].close() - conns[2].close() - conns[0].close() - - conn = pool.acquire() - sid = conn.cursor().execute(sql).fetchone()[0] - self.assertEqual(sid, 
sids[0], "not LIFO") - - def test_2429(self): - "2429 - verify that dynamic pool cannot have an increment of zero" - pool = test_env.get_pool(min=1, max=3, increment=0) - self.assertEqual(pool.increment, 1) - with pool.acquire(), pool.acquire(): - pass +def test_2440(test_env): + "2440 - test connection with an invalid pool" + with pytest.raises(TypeError): + oracledb.connect(pool="not a pool object") - def test_2430(self): - "2430 - verify that static pool can have an increment of zero" - pool = test_env.get_pool(min=1, max=1, increment=0) - self.assertEqual(pool.increment, 0) - with pool.acquire(): - pass - def test_2431(self): - "2431 - verify that connection with different cclass is reused" - cclass = "cclass2431" - pool = test_env.get_pool(min=1, max=1) - # ignore the first acquire which, depending on the speed with which the - # minimum connections are created, may create a connection that is - # discarded; instead, use the second acquire which should remain in the - # pool - with pool.acquire(cclass=cclass) as conn: - pass - with pool.acquire(cclass=cclass) as conn: - sid_serial = self.get_sid_serial(conn) - with pool.acquire(cclass=cclass) as conn: - next_sid_serial = self.get_sid_serial(conn) - self.assertEqual(next_sid_serial, sid_serial) - self.assertEqual(pool.opened, 1) - - def test_2432(self): - "2432 - test creating a pool invalid params" - with self.assertRaisesFullCode("DPY-2027"): - oracledb.create_pool(params="bad params") - - def test_2433(self): - "2433 - test releasing and dropping an invalid connection" - pool = test_env.get_pool() - self.assertRaises(TypeError, pool.release, ["invalid connection"]) - self.assertRaises(TypeError, pool.drop, ["invalid connection"]) +def test_2441(test_env): + "2441 - test oracledb.POOL_GETMODE_FORCEGET" + pool = test_env.get_pool( + min=1, max=3, increment=1, getmode=oracledb.POOL_GETMODE_FORCEGET + ) + num_conns = 10 + active_sessions = set() + conns = [pool.acquire() for _ in range(num_conns)] + for conn in conns: + active_sessions.add(test_env.get_sid_serial(conn)) + assert pool.opened == num_conns + assert pool.busy == num_conns + assert len(active_sessions) == num_conns + + +def test_2442(skip_unless_thin_mode, test_env): + "2442 - test passing program when creating a pool" + sql = ( + "select program from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_create_arg(test_env, "program", "newprogram", sql) - def test_2434(self): - "2434 - test creating a pool with invalid pool_class" - with self.assertRaisesFullCode("DPY-2026"): - oracledb.create_pool(pool_class=int) - def test_2435(self): - "2435 - test creating a pool with a subclassed connection type" +def test_2443(skip_unless_thin_mode, test_env): + "2443 - test passing machine when creating a pool" + sql = ( + "select machine from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_create_arg(test_env, "machine", "newmachine", sql) - class MyConnection(oracledb.Connection): - pass - pool = test_env.get_pool(connectiontype=MyConnection) - with pool.acquire() as conn: - self.assertIsInstance(conn, MyConnection) +def test_2444(skip_unless_thin_mode, test_env): + "2444 - test passing terminal when creating a pool" + sql = ( + "select terminal from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_create_arg(test_env, "terminal", "newterminal", sql) + - def test_2436(self): - "2436 - test creating a pool with a subclassed pool type" +def test_2445(skip_unless_thin_mode, test_env): + "2445 - test passing osuser when 
creating a pool" + sql = ( + "select osuser from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_create_arg(test_env, "osuser", "newosuser", sql) - class MyPool(oracledb.ConnectionPool): + +def test_2446(test_env): + "2446 - test passing driver_name when creating a pool" + sql = ( + "select distinct client_driver from v$session_connect_info " + "where sid = sys_context('userenv', 'sid')" + ) + _verify_create_arg(test_env, "driver_name", "newdriver", sql) + + +def test_2447(skip_unless_thin_mode, test_env): + "2447 - test register_parameter with pooled connection" + sdu = 4096 + params = test_env.get_pool_params() + protocol = "proto-test" + orig_connect_string = test_env.connect_string + connect_string = f"{protocol}://{orig_connect_string}" + + def hook(passed_protocol, passed_protocol_arg, passed_params): + assert passed_protocol == protocol + assert passed_protocol_arg == orig_connect_string + passed_params.parse_connect_string(passed_protocol_arg) + passed_params.set(sdu=sdu) + + try: + oracledb.register_protocol(protocol, hook) + pool = oracledb.create_pool(dsn=connect_string, params=params) + assert params.sdu == sdu + with pool.acquire(): pass + pool.close() + finally: + oracledb.register_protocol(protocol, None) - pool = test_env.get_pool(pool_class=MyPool) - self.assertIsInstance(pool, MyPool) - - def test_2437(self): - "2437 - test connectiontype with an invalid connection class" - with self.assertRaisesFullCode("DPY-2023"): - test_env.get_pool(connectiontype=oracledb.AsyncConnection) - with self.assertRaisesFullCode("DPY-2023"): - test_env.get_pool(connectiontype=int) - - @test_env.skip_unless_pool_timed_wait_supported() - def test_2438(self): - "2438 - ensure that timed wait times out with appropriate exception" - pool = test_env.get_pool( - getmode=oracledb.POOL_GETMODE_TIMEDWAIT, min=0, wait_timeout=1 - ) - with self.assertRaisesFullCode("DPY-4005"): - pool.acquire() - @test_env.skip_unless_call_timeout_supported() - def test_2439(self): - "2439 - ensure call timeout is reset on connections returned by pool" - pool = test_env.get_pool(ping_timeout=1000, ping_interval=0) - with pool.acquire() as conn: - self.assertEqual(conn.call_timeout, 0) - with pool.acquire() as conn: - self.assertEqual(conn.call_timeout, 0) +def test_2448(test_env): + "2448 - test create_pool() with edition" + edition = test_env.edition_name + pool = test_env.get_pool(edition=edition) + conn = pool.acquire() + assert conn.edition == edition - def test_2440(self): - "2440 - test connection with an invalid pool" - with self.assertRaises(TypeError): - oracledb.connect(pool="not a pool object") - def test_2441(self): - "2441 - test oracledb.POOL_GETMODE_FORCEGET" - pool = test_env.get_pool( - min=1, max=3, increment=1, getmode=oracledb.POOL_GETMODE_FORCEGET - ) - num_conns = 10 - active_sessions = set() - conns = [pool.acquire() for _ in range(num_conns)] - for conn in conns: - active_sessions.add(self.get_sid_serial(conn)) - self.assertEqual(pool.opened, num_conns) - self.assertEqual(pool.busy, num_conns) - self.assertEqual(len(active_sessions), num_conns) - - @test_env.skip_unless_thin_mode() - def test_2442(self): - "2442 - test passing program when creating a pool" - sql = ( - "select program from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_create_arg("program", "newprogram", sql) - - @test_env.skip_unless_thin_mode() - def test_2443(self): - "2443 - test passing machine when creating a pool" - sql = ( - "select machine from v$session " - "where 
sid = sys_context('userenv', 'sid')" - ) - self.__verify_create_arg("machine", "newmachine", sql) - - @test_env.skip_unless_thin_mode() - def test_2444(self): - "2444 - test passing terminal when creating a pool" - sql = ( - "select terminal from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_create_arg("terminal", "newterminal", sql) - - @test_env.skip_unless_thin_mode() - def test_2445(self): - "2445 - test passing osuser when creating a pool" - sql = ( - "select osuser from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_create_arg("osuser", "newosuser", sql) +def test_2449(test_env): + "2449 - test create_pool() and get_pool() with alias" + alias = "pool_alias_2449" + pool = test_env.get_pool(pool_alias=alias) + assert pool is oracledb.get_pool(alias) + pool.close() - def test_2446(self): - "2446 - test passing driver_name when creating a pool" - sql = ( - "select distinct client_driver from v$session_connect_info " - "where sid = sys_context('userenv', 'sid')" - ) - self.__verify_create_arg("driver_name", "newdriver", sql) - - @test_env.skip_unless_thin_mode() - def test_2447(self): - "2447 - test register_parameter with pooled connection" - sdu = 4096 - params = test_env.get_pool_params() - protocol = "proto-test" - orig_connect_string = test_env.get_connect_string() - connect_string = f"{protocol}://{orig_connect_string}" - - def hook(passed_protocol, passed_protocol_arg, passed_params): - self.assertEqual(passed_protocol, protocol) - self.assertEqual(passed_protocol_arg, orig_connect_string) - passed_params.parse_connect_string(passed_protocol_arg) - passed_params.set(sdu=sdu) - - try: - oracledb.register_protocol(protocol, hook) - pool = oracledb.create_pool(dsn=connect_string, params=params) - self.assertEqual(params.sdu, sdu) - with pool.acquire(): - pass - pool.close() - finally: - oracledb.register_protocol(protocol, None) - - def test_2448(self): - "2448 - test create_pool() with edition" - edition = test_env.get_edition_name() - pool = test_env.get_pool(edition=edition) - conn = pool.acquire() - self.assertEqual(conn.edition, edition) - - def test_2449(self): - "2449 - test create_pool() and get_pool() with alias" - alias = "pool_alias_2449" - pool = test_env.get_pool(pool_alias=alias) - self.assertIs(pool, oracledb.get_pool(alias)) - pool.close() - def test_2450(self): - "2450 - test create_pool() twice with the same alias" - alias = "pool_alias_2450" - pool = test_env.get_pool(pool_alias=alias) - with self.assertRaisesFullCode("DPY-2055"): - test_env.get_pool(pool_alias=alias) - pool.close() - self.assertIsNone(oracledb.get_pool(alias)) - - def test_2451(self): - "2451 - test connect() with pool alias" - alias = "pool_alias_2451" - pool = test_env.get_pool(pool_alias=alias) - with self.assertRaisesFullCode("DPY-2014"): - test_env.get_connection(pool=pool, pool_alias=alias) - with oracledb.connect(pool_alias=alias) as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (value,) = cursor.fetchone() - self.assertEqual(value, test_env.get_main_user().upper()) - pool.close() - with self.assertRaisesFullCode("DPY-2054"): - oracledb.connect(pool_alias=alias) - - def test_2452(self): - "2452 - test acquire() with pool alias and stmtcachesize" - alias = "pool_2452" - stmtcachesize = 35 - test_env.get_pool(pool_alias=alias, stmtcachesize=stmtcachesize) - with oracledb.connect(pool_alias=alias) as conn: - self.assertEqual(conn.stmtcachesize, stmtcachesize) - oracledb.get_pool(alias).close() - - 
def test_2453(self): - "2453 - test pool alias is case sensitive" - alias = "pool_2458" +def test_2450(test_env): + "2450 - test create_pool() twice with the same alias" + alias = "pool_alias_2450" + pool = test_env.get_pool(pool_alias=alias) + with test_env.assert_raises_full_code("DPY-2055"): test_env.get_pool(pool_alias=alias) - self.assertIsNone(oracledb.get_pool(alias.upper())) - with self.assertRaisesFullCode("DPY-2054"): - test_env.get_connection(pool_alias=alias.upper()) - oracledb.get_pool(alias).close() - - def test_2454(self): - "2454 - test pool alias with invalid types" - aliases = [5, set(), dict(), bytearray(1)] - for alias in aliases: - with self.subTest(alias=alias): - with self.assertRaises(TypeError): - test_env.get_pool(pool_alias=alias) - - def test_2455(self): - "2455 - test create_pool() with parameters hook" - pool = test_env.get_pool() - with pool.acquire() as conn: - orig_stmtcachesize = conn.stmtcachesize - stmtcachesize = orig_stmtcachesize + 10 - pool.close() + pool.close() + assert oracledb.get_pool(alias) is None + + +def test_2451(test_env): + "2451 - test connect() with pool alias" + alias = "pool_alias_2451" + pool = test_env.get_pool(pool_alias=alias) + with test_env.assert_raises_full_code("DPY-2014"): + test_env.get_connection(pool=pool, pool_alias=alias) + with oracledb.connect(pool_alias=alias) as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (value,) = cursor.fetchone() + assert value == test_env.main_user.upper() + pool.close() + with test_env.assert_raises_full_code("DPY-2054"): + oracledb.connect(pool_alias=alias) + + +def test_2452(test_env): + "2452 - test acquire() with pool alias and stmtcachesize" + alias = "pool_2452" + stmtcachesize = 35 + test_env.get_pool(pool_alias=alias, stmtcachesize=stmtcachesize) + with oracledb.connect(pool_alias=alias) as conn: + assert conn.stmtcachesize == stmtcachesize + oracledb.get_pool(alias).close() + + +def test_2453(test_env): + "2453 - test pool alias is case sensitive" + alias = "pool_2458" + test_env.get_pool(pool_alias=alias) + assert oracledb.get_pool(alias.upper()) is None + with test_env.assert_raises_full_code("DPY-2054"): + test_env.get_connection(pool_alias=alias.upper()) + oracledb.get_pool(alias).close() + + +def test_2454(test_env): + "2454 - test pool alias with invalid types" + aliases = [5, set(), dict(), bytearray(1)] + for alias in aliases: + with pytest.raises(TypeError): + test_env.get_pool(pool_alias=alias) - def hook(params): - params.set(stmtcachesize=stmtcachesize) - try: - oracledb.register_params_hook(hook) - pool = test_env.get_pool() - with pool.acquire() as conn: - self.assertEqual(conn.stmtcachesize, stmtcachesize) - pool.close() - finally: - oracledb.unregister_params_hook(hook) +def test_2455(test_env): + "2455 - test create_pool() with parameters hook" + pool = test_env.get_pool() + with pool.acquire() as conn: + orig_stmtcachesize = conn.stmtcachesize + stmtcachesize = orig_stmtcachesize + 10 + pool.close() + + def hook(params): + params.set(stmtcachesize=stmtcachesize) + try: + oracledb.register_params_hook(hook) pool = test_env.get_pool() with pool.acquire() as conn: - self.assertEqual(conn.stmtcachesize, orig_stmtcachesize) + assert conn.stmtcachesize == stmtcachesize pool.close() + finally: + oracledb.unregister_params_hook(hook) + + pool = test_env.get_pool() + with pool.acquire() as conn: + assert conn.stmtcachesize == orig_stmtcachesize + pool.close() - def test_2456(self): - "2456 - test creation of pool with min > max" - with 
self.assertRaisesFullCode("DPY-2064"): - test_env.get_pool(min=3, max=2) - - @test_env.skip_if_drcp() - def test_2457(self): - "2457 - ping pooled connection on receiving dead connection error" - admin_conn = test_env.get_admin_connection() - pool = test_env.get_pool(min=1, max=1, ping_interval=0) - - # kill connection in pool - with admin_conn.cursor() as admin_cursor: - with pool.acquire() as conn: - sid, serial = self.get_sid_serial(conn) - sql = f"alter system kill session '{sid},{serial}'" - admin_cursor.execute(sql) - - # acquire connection which should succeed without failure - with pool.acquire() as conn: - with conn.cursor() as cursor: - cursor.execute("select user from dual") - (user,) = cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) +def test_2456(test_env): + "2456 - test creation of pool with min > max" + with test_env.assert_raises_full_code("DPY-2064"): + test_env.get_pool(min=3, max=2) -if __name__ == "__main__": - test_env.run_test_cases() + +def test_2457(skip_if_drcp, test_env): + "2457 - ping pooled connection on receiving dead connection error" + admin_conn = test_env.get_admin_connection() + pool = test_env.get_pool(min=1, max=1, ping_interval=0) + + # kill connection in pool + with admin_conn.cursor() as admin_cursor: + with pool.acquire() as conn: + sid, serial = test_env.get_sid_serial(conn) + sql = f"alter system kill session '{sid},{serial}'" + admin_cursor.execute(sql) + + # acquire connection which should succeed without failure + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute("select user from dual") + (user,) = cursor.fetchone() + assert user == test_env.main_user.upper() diff --git a/tests/test_2500_string_var.py b/tests/test_2500_string_var.py index 3dff96c6..fb450e25 100644 --- a/tests/test_2500_string_var.py +++ b/tests/test_2500_string_var.py @@ -28,538 +28,568 @@ import random import string -import unittest import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - for i in range(1, 11): - string_col = f"String {i}" - fixed_char_col = f"Fixed Char {i}".ljust(40) - raw_col = f"Raw {i}".encode("ascii") - if i % 2: - nullable_col = f"Nullable {i}" - else: - nullable_col = None - data_tuple = (i, string_col, raw_col, fixed_char_col, nullable_col) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def __return_strings_as_bytes(self, cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_VARCHAR: - return cursor.var( - str, arraysize=cursor.arraysize, bypass_decode=True - ) - - def test_2500(self): - "2500 - test creating array var and then increasing the internal size" - val = ["12345678901234567890"] * 3 - var = self.cursor.arrayvar(str, len(val), 4) - var.setvalue(0, val) - self.assertEqual(var.getvalue(), val) - - def test_2501(self): - "2501 - test binding in a string" - self.cursor.execute( - "select * from TestStrings where StringCol = :value", - value="String 5", - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_2502(self): - "2502 - test binding a different variable on second execution" - retval_1 = self.cursor.var(oracledb.STRING, 30) - retval_2 = self.cursor.var(oracledb.STRING, 30) - self.cursor.execute("begin :retval := 'Called'; end;", retval=retval_1) - self.assertEqual(retval_1.getvalue(), "Called") - self.cursor.execute("begin :retval := 'Called'; end;", retval=retval_2) - self.assertEqual(retval_2.getvalue(), "Called") 
- - def test_2503(self): - "2503 - test exceeding the number of elements returns IndexError" - var = self.cursor.var(str) - self.assertRaises(IndexError, var.getvalue, 1) - - def test_2504(self): - "2504 - test binding in a string after setting input sizes to a number" - self.cursor.setinputsizes(value=oracledb.NUMBER) - self.cursor.execute( - "select * from TestStrings where StringCol = :value", - value="String 6", - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[6]]) - - def test_2505(self): - "2505 - test binding in a string array" - return_value = self.cursor.var(oracledb.NUMBER) - array = [r[1] for r in self.raw_data] - statement = """ - begin - :return_value := pkg_TestStringArrays.TestInArrays( - :integer_value, :array); - end;""" - self.cursor.execute( - statement, return_value=return_value, integer_value=5, array=array - ) - self.assertEqual(return_value.getvalue(), 86) - array = [f"String - {i}" for i in range(15)] - self.cursor.execute(statement, integer_value=8, array=array) - self.assertEqual(return_value.getvalue(), 163) - - def test_2506(self): - "2506 - test binding in a string array (with setinputsizes)" - return_value = self.cursor.var(oracledb.NUMBER) - self.cursor.setinputsizes(array=[oracledb.STRING, 10]) - array = [r[1] for r in self.raw_data] - self.cursor.execute( - """ - begin - :return_value := pkg_TestStringArrays.TestInArrays( - :integer_value, :array); - end; - """, - return_value=return_value, - integer_value=6, - array=array, - ) - self.assertEqual(return_value.getvalue(), 87) - - def test_2507(self): - "2507 - test binding in a string array (with arrayvar)" - return_value = self.cursor.var(oracledb.NUMBER) - array = self.cursor.arrayvar(oracledb.STRING, 10, 20) - array.setvalue(0, [r[1] for r in self.raw_data]) - self.cursor.execute( - """ +import pytest + + +@pytest.fixture(scope="module") +def module_data(): + data = [] + for i in range(1, 11): + string_col = f"String {i}" + fixed_char_col = f"Fixed Char {i}".ljust(40) + raw_col = f"Raw {i}".encode("ascii") + if i % 2: + nullable_col = f"Nullable {i}" + else: + nullable_col = None + data_tuple = (i, string_col, raw_col, fixed_char_col, nullable_col) + data.append(data_tuple) + return data + + +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key + + +def return_strings_as_bytes(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_VARCHAR: + return cursor.var(str, arraysize=cursor.arraysize, bypass_decode=True) + + +def test_2500(cursor): + "2500 - test creating array var and then increasing the internal size" + val = ["12345678901234567890"] * 3 + var = cursor.arrayvar(str, len(val), 4) + var.setvalue(0, val) + assert var.getvalue() == val + + +def test_2501(cursor, module_data_by_key): + "2501 - test binding in a string" + cursor.execute( + "select * from TestStrings where StringCol = :value", + value="String 5", + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_2502(cursor): + "2502 - test binding a different variable on second execution" + retval_1 = cursor.var(oracledb.STRING, 30) + retval_2 = cursor.var(oracledb.STRING, 30) + cursor.execute("begin :retval := 'Called'; end;", retval=retval_1) + assert retval_1.getvalue() == "Called" + cursor.execute("begin :retval := 'Called'; end;", retval=retval_2) + assert retval_2.getvalue() == "Called" + + +def test_2503(cursor): + "2503 - test exceeding the number of elements returns IndexError" + var = 
cursor.var(str) + pytest.raises(IndexError, var.getvalue, 1) + + +def test_2504(cursor, module_data_by_key): + "2504 - test binding in a string after setting input sizes to a number" + cursor.setinputsizes(value=oracledb.NUMBER) + cursor.execute( + "select * from TestStrings where StringCol = :value", + value="String 6", + ) + assert cursor.fetchall() == [module_data_by_key[6]] + + +def test_2505(cursor, module_data): + "2505 - test binding in a string array" + return_value = cursor.var(oracledb.NUMBER) + array = [r[1] for r in module_data] + statement = """ begin :return_value := pkg_TestStringArrays.TestInArrays( :integer_value, :array); - end; - """, - return_value=return_value, - integer_value=7, - array=array, - ) - self.assertEqual(return_value.getvalue(), 88) - - def test_2508(self): - "2508 - test binding in/out a string array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.STRING, 10, 100) - original_data = [r[1] for r in self.raw_data] - expected_data = [ - "Converted element # %d originally had length %d" - % (i, len(original_data[i - 1])) - for i in range(1, 6) - ] + original_data[5:] - array.setvalue(0, original_data) - self.cursor.execute( - """ - begin - pkg_TestStringArrays.TestInOutArrays(:num_elems, :array); - end; - """, - num_elems=5, - array=array, - ) - self.assertEqual(array.getvalue(), expected_data) - - def test_2509(self): - "2509 - test binding out a string array (with arrayvar)" - array = self.cursor.arrayvar(oracledb.STRING, 6, 100) - expected_data = [f"Test out element # {i}" for i in range(1, 7)] - self.cursor.execute( - """ - begin - pkg_TestStringArrays.TestOutArrays(:num_elems, :array); - end; - """, - num_elems=6, - array=array, - ) - self.assertEqual(array.getvalue(), expected_data) - - def test_2510(self): - "2510 - test binding in a raw" - self.cursor.setinputsizes(value=oracledb.BINARY) - self.cursor.execute( - "select * from TestStrings where RawCol = :value", - value="Raw 4".encode(), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[4]]) - - def test_2511(self): - "2511 - test binding (and fetching) a rowid" - self.cursor.execute("select rowid from TestStrings where IntCol = 3") - (rowid,) = self.cursor.fetchone() - self.cursor.execute( - "select * from TestStrings where rowid = :value", - value=rowid, - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[3]]) + end;""" + cursor.execute( + statement, return_value=return_value, integer_value=5, array=array + ) + assert return_value.getvalue() == 86 + array = [f"String - {i}" for i in range(15)] + cursor.execute(statement, integer_value=8, array=array) + assert return_value.getvalue() == 163 + + +def test_2506(cursor, module_data): + "2506 - test binding in a string array (with setinputsizes)" + return_value = cursor.var(oracledb.NUMBER) + cursor.setinputsizes(array=[oracledb.STRING, 10]) + array = [r[1] for r in module_data] + cursor.execute( + """ + begin + :return_value := pkg_TestStringArrays.TestInArrays( + :integer_value, :array); + end; + """, + return_value=return_value, + integer_value=6, + array=array, + ) + assert return_value.getvalue() == 87 + + +def test_2507(cursor, module_data): + "2507 - test binding in a string array (with arrayvar)" + return_value = cursor.var(oracledb.NUMBER) + array = cursor.arrayvar(oracledb.STRING, 10, 20) + array.setvalue(0, [r[1] for r in module_data]) + cursor.execute( + """ + begin + :return_value := pkg_TestStringArrays.TestInArrays( + :integer_value, :array); + end; + """, + return_value=return_value, + integer_value=7, 
+ array=array, + ) + assert return_value.getvalue() == 88 + + +def test_2508(cursor, module_data): + "2508 - test binding in/out a string array (with arrayvar)" + array = cursor.arrayvar(oracledb.STRING, 10, 100) + original_data = [r[1] for r in module_data] + expected_data = [ + "Converted element # %d originally had length %d" + % (i, len(original_data[i - 1])) + for i in range(1, 6) + ] + original_data[5:] + array.setvalue(0, original_data) + cursor.execute( + """ + begin + pkg_TestStringArrays.TestInOutArrays(:num_elems, :array); + end; + """, + num_elems=5, + array=array, + ) + assert array.getvalue() == expected_data + + +def test_2509(cursor): + "2509 - test binding out a string array (with arrayvar)" + array = cursor.arrayvar(oracledb.STRING, 6, 100) + expected_data = [f"Test out element # {i}" for i in range(1, 7)] + cursor.execute( + """ + begin + pkg_TestStringArrays.TestOutArrays(:num_elems, :array); + end; + """, + num_elems=6, + array=array, + ) + assert array.getvalue() == expected_data - def test_2513(self): - "2513 - test binding in a null" - self.cursor.execute( - "select * from TestStrings where StringCol = :value", - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - def test_2514(self): - "2514 - test binding out with set input sizes defined (by type)" - bind_vars = self.cursor.setinputsizes(value=oracledb.STRING) - self.cursor.execute( - """ - begin - :value := 'TSI'; - end; - """ - ) - self.assertEqual(bind_vars["value"].getvalue(), "TSI") +def test_2510(cursor, module_data_by_key): + "2510 - test binding in a raw" + cursor.setinputsizes(value=oracledb.BINARY) + cursor.execute( + "select * from TestStrings where RawCol = :value", + value="Raw 4".encode(), + ) + assert cursor.fetchall() == [module_data_by_key[4]] - def test_2515(self): - "2515 - test binding out with set input sizes defined (by integer)" - bind_vars = self.cursor.setinputsizes(value=30) - self.cursor.execute( - """ - begin - :value := 'TSI (I)'; - end; - """ - ) - self.assertEqual(bind_vars["value"].getvalue(), "TSI (I)") - def test_2516(self): - "2516 - test binding in/out with set input sizes defined (by type)" - bind_vars = self.cursor.setinputsizes(value=oracledb.STRING) - self.cursor.execute( - """ - begin - :value := :value || ' TSI'; - end; - """, - value="InVal", - ) - self.assertEqual(bind_vars["value"].getvalue(), "InVal TSI") +def test_2511(cursor, module_data_by_key): + "2511 - test binding (and fetching) a rowid" + cursor.execute("select rowid from TestStrings where IntCol = 3") + (rowid,) = cursor.fetchone() + cursor.execute( + "select * from TestStrings where rowid = :value", + value=rowid, + ) + assert cursor.fetchall() == [module_data_by_key[3]] - def test_2517(self): - "2517 - test binding in/out with set input sizes defined (by integer)" - bind_vars = self.cursor.setinputsizes(value=30) - self.cursor.execute( - """ - begin - :value := :value || ' TSI (I)'; - end; - """, - value="InVal", - ) - self.assertEqual(bind_vars["value"].getvalue(), "InVal TSI (I)") - def test_2518(self): - "2518 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.STRING) - self.cursor.execute( - """ - begin - :value := 'TSI (VAR)'; - end; - """, - value=var, - ) - self.assertEqual(var.getvalue(), "TSI (VAR)") - - def test_2519(self): - "2519 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.STRING) - var.setvalue(0, "InVal") - self.cursor.execute( - """ - begin - :value := :value || ' TSI (VAR)'; - end; - """, - value=var, - ) - 
self.assertEqual(var.getvalue(), "InVal TSI (VAR)") - - def test_2520(self): - "2520 - test that binding a long string succeeds" - self.cursor.setinputsizes(big_string=oracledb.DB_TYPE_LONG) - self.cursor.execute( - """ - declare - t_Temp varchar2(20000); - begin - t_Temp := :big_string; - end; - """, - big_string="X" * 10000, - ) +def test_2513(cursor): + "2513 - test binding in a null" + cursor.execute( + "select * from TestStrings where StringCol = :value", + value=None, + ) + assert cursor.fetchall() == [] + + +def test_2514(cursor): + "2514 - test binding out with set input sizes defined (by type)" + bind_vars = cursor.setinputsizes(value=oracledb.STRING) + cursor.execute( + """ + begin + :value := 'TSI'; + end; + """ + ) + assert bind_vars["value"].getvalue() == "TSI" + + +def test_2515(cursor): + "2515 - test binding out with set input sizes defined (by integer)" + bind_vars = cursor.setinputsizes(value=30) + cursor.execute( + """ + begin + :value := 'TSI (I)'; + end; + """ + ) + assert bind_vars["value"].getvalue() == "TSI (I)" + + +def test_2516(cursor): + "2516 - test binding in/out with set input sizes defined (by type)" + bind_vars = cursor.setinputsizes(value=oracledb.STRING) + cursor.execute( + """ + begin + :value := :value || ' TSI'; + end; + """, + value="InVal", + ) + assert bind_vars["value"].getvalue() == "InVal TSI" + + +def test_2517(cursor): + "2517 - test binding in/out with set input sizes defined (by integer)" + bind_vars = cursor.setinputsizes(value=30) + cursor.execute( + """ + begin + :value := :value || ' TSI (I)'; + end; + """, + value="InVal", + ) + assert bind_vars["value"].getvalue() == "InVal TSI (I)" + + +def test_2518(cursor): + "2518 - test binding out with cursor.var() method" + var = cursor.var(oracledb.STRING) + cursor.execute( + """ + begin + :value := 'TSI (VAR)'; + end; + """, + value=var, + ) + assert var.getvalue() == "TSI (VAR)" + + +def test_2519(cursor): + "2519 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.STRING) + var.setvalue(0, "InVal") + cursor.execute( + """ + begin + :value := :value || ' TSI (VAR)'; + end; + """, + value=var, + ) + assert var.getvalue() == "InVal TSI (VAR)" + + +def test_2520(cursor): + "2520 - test that binding a long string succeeds" + cursor.setinputsizes(big_string=oracledb.DB_TYPE_LONG) + cursor.execute( + """ + declare + t_Temp varchar2(20000); + begin + t_Temp := :big_string; + end; + """, + big_string="X" * 10000, + ) - def test_2521(self): - "2521 - test that setinputsizes() returns a long variable" - var = self.cursor.setinputsizes(test=90000)["test"] - in_string = "1234567890" * 9000 - var.setvalue(0, in_string) - out_string = var.getvalue() - msg = ( - f"output does not match: in was {len(in_string)}, " - f"out was {len(out_string)}" - ) - self.assertEqual(in_string, out_string, msg) - - def test_2522(self): - "2522 - test cursor description is accurate" - self.cursor.execute("select * from TestStrings") - varchar_ratio, nvarchar_ratio = test_env.get_charset_ratios() - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "STRINGCOL", - oracledb.DB_TYPE_VARCHAR, - 20, - 20 * varchar_ratio, - None, - None, - False, - ), - ("RAWCOL", oracledb.DB_TYPE_RAW, 30, 30, None, None, False), - ( - "FIXEDCHARCOL", - oracledb.DB_TYPE_CHAR, - 40, - 40 * varchar_ratio, - None, - None, - False, - ), - ( - "NULLABLECOL", - oracledb.DB_TYPE_VARCHAR, - 50, - 50 * varchar_ratio, - None, - None, - True, - ), - ] - self.assertEqual(self.cursor.description, 
expected_value) - - def test_2523(self): - "2523 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestStrings order by IntCol") - self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_2524(self): - "2524 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestStrings order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_2525(self): - "2525 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestStrings - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertIsNone(self.cursor.fetchone()) - - def test_2526(self): - "2526 - test binding and fetching supplemental charcters" - if test_env.get_charset() != "AL32UTF8": - self.skipTest("Database character set must be AL32UTF8") - supplemental_chars = ( - "𠜎 𠜱 𠝹 𠱓 𠱸 𠲖 𠳏 𠳕 𠴕 𠵼 𠵿 𠸎 𠸏 " - "𠹷 𠺝 𠺢 𠻗 𠻹 𠻺 𠼭 𠼮 𠽌 𠾴 𠾼 𠿪 𡁜 " - "𡁯 𡁵 𡁶 𡁻 𡃁 𡃉 𡇙 𢃇 𢞵 𢫕 𢭃 𢯊 𢱑 " - "𢱕 𢳂 𢴈 𢵌 𢵧 𢺳 𣲷 𤓓 𤶸 𤷪 𥄫 𦉘 𦟌 " - "𦧲 𦧺 𧨾 𨅝 𨈇 𨋢 𨳊 𨳍 𨳒 𩶘" - ) - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, supplemental_chars), - ) - self.conn.commit() - self.cursor.execute("select StringCol1 from TestTempTable") - (value,) = self.cursor.fetchone() - self.assertEqual(value, supplemental_chars) - - def test_2527(self): - "2527 - test binding twice with a larger string the second time" - self.cursor.execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - short_string = "short string" - long_string = "long string " * 30 - self.cursor.execute(sql, (1, short_string)) - self.cursor.execute(sql, (2, long_string)) - self.conn.commit() - self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - self.assertEqual( - self.cursor.fetchall(), [(1, short_string), (2, long_string)] - ) - @unittest.skipUnless( - test_env.has_server_version(12, 2), "not supported on this server" +def test_2521(cursor): + "2521 - test that setinputsizes() returns a long variable" + var = cursor.setinputsizes(test=90000)["test"] + in_string = "1234567890" * 9000 + var.setvalue(0, in_string) + out_string = var.getvalue() + msg = ( + f"output does not match: in was {len(in_string)}, " + f"out was {len(out_string)}" ) - def test_2528(self): - "2528 - test issue 50 - avoid error ORA-24816" - cursor = self.conn.cursor() - try: - cursor.execute("drop table issue_50 purge") - except oracledb.DatabaseError: - pass - cursor.execute( - """ - create table issue_50 ( - Id number(11) primary key, - Str1 nvarchar2(256), - Str2 nvarchar2(256), - Str3 nvarchar2(256), - NClob1 nclob, - NClob2 nclob - ) - """ - ) - id_var = cursor.var(oracledb.NUMBER) - cursor.execute( - """ - insert into issue_50 (Id, Str2, Str3, NClob1, NClob2, Str1) - values (:arg0, :arg1, :arg2, :arg3, :arg4, :arg5) - returning id into :arg6 - """, - [1, "555a4c78", "f319ef0e", "23009914", "", "", id_var], - ) - cursor = 
self.conn.cursor() - cursor.execute( - """ - insert into issue_50 (Id, Str2, Str3, NClob1, NClob2, Str1) - values (:arg0, :arg1, :arg2, :arg3, :arg4, :arg5) - returning id into :arg6 - """, - [2, "d5ff845a", "94275767", "bf161ff6", "", "", id_var], - ) + assert in_string == out_string, msg + + +def test_2522(cursor, test_env): + "2522 - test cursor description is accurate" + cursor.execute("select * from TestStrings") + varchar_ratio, nvarchar_ratio = test_env.charset_ratios + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "STRINGCOL", + oracledb.DB_TYPE_VARCHAR, + 20, + 20 * varchar_ratio, + None, + None, + False, + ), + ("RAWCOL", oracledb.DB_TYPE_RAW, 30, 30, None, None, False), + ( + "FIXEDCHARCOL", + oracledb.DB_TYPE_CHAR, + 40, + 40 * varchar_ratio, + None, + None, + False, + ), + ( + "NULLABLECOL", + oracledb.DB_TYPE_VARCHAR, + 50, + 50 * varchar_ratio, + None, + None, + True, + ), + ] + assert cursor.description == expected_value + + +def test_2523(cursor, module_data): + "2523 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestStrings order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_2524(cursor, module_data): + "2524 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestStrings order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + + +def test_2525(cursor, module_data_by_key): + "2525 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestStrings + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None + + +def test_2526(conn, cursor, test_env): + "2526 - test binding and fetching supplemental charcters" + if test_env.charset != "AL32UTF8": + pytest.skip("Database character set must be AL32UTF8") + supplemental_chars = ( + "𠜎 𠜱 𠝹 𠱓 𠱸 𠲖 𠳏 𠳕 𠴕 𠵼 𠵿 𠸎 𠸏 " + "𠹷 𠺝 𠺢 𠻗 𠻹 𠻺 𠼭 𠼮 𠽌 𠾴 𠾼 𠿪 𡁜 " + "𡁯 𡁵 𡁶 𡁻 𡃁 𡃉 𡇙 𢃇 𢞵 𢫕 𢭃 𢯊 𢱑 " + "𢱕 𢳂 𢴈 𢵌 𢵧 𢺳 𣲷 𤓓 𤶸 𤷪 𥄫 𦉘 𦟌 " + "𦧲 𦧺 𧨾 𨅝 𨈇 𨋢 𨳊 𨳍 𨳒 𩶘" + ) + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, supplemental_chars), + ) + conn.commit() + cursor.execute("select StringCol1 from TestTempTable") + (value,) = cursor.fetchone() + assert value == supplemental_chars + + +def test_2527(conn, cursor): + "2527 - test binding twice with a larger string the second time" + cursor.execute("truncate table TestTempTable") + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + short_string = "short string" + long_string = "long string " * 30 + cursor.execute(sql, (1, short_string)) + cursor.execute(sql, (2, long_string)) + conn.commit() + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + assert cursor.fetchall() == [(1, short_string), (2, long_string)] + + +def test_2528(conn, test_env): + "2528 - test issue 50 - avoid error ORA-24816" + if not test_env.has_server_version(12, 2): + pytest.skip("not supported on this server") + + cursor = conn.cursor() + try: cursor.execute("drop table issue_50 purge") + except oracledb.DatabaseError: + 
pass + cursor.execute( + """ + create table issue_50 ( + Id number(11) primary key, + Str1 nvarchar2(256), + Str2 nvarchar2(256), + Str3 nvarchar2(256), + NClob1 nclob, + NClob2 nclob + ) + """ + ) + id_var = cursor.var(oracledb.NUMBER) + cursor.execute( + """ + insert into issue_50 (Id, Str2, Str3, NClob1, NClob2, Str1) + values (:arg0, :arg1, :arg2, :arg3, :arg4, :arg5) + returning id into :arg6 + """, + [1, "555a4c78", "f319ef0e", "23009914", "", "", id_var], + ) + cursor = conn.cursor() + cursor.execute( + """ + insert into issue_50 (Id, Str2, Str3, NClob1, NClob2, Str1) + values (:arg0, :arg1, :arg2, :arg3, :arg4, :arg5) + returning id into :arg6 + """, + [2, "d5ff845a", "94275767", "bf161ff6", "", "", id_var], + ) + cursor.execute("drop table issue_50 purge") - def test_2529(self): - "2529 - test assigning a string to rowid" - var = self.cursor.var(oracledb.ROWID) - with self.assertRaisesFullCode("DPY-3004"): - var.setvalue(0, "ABDHRYTHFJGKDKKDH") - - def test_2530(self): - "2530 - test fetching XMLType (< 1K) as a string" - self.cursor.execute( - """ - select XMLElement("string", stringCol) as xml - from TestStrings - where intCol = 1 - """ - ) - (actual_value,) = self.cursor.fetchone() - self.assertEqual(actual_value, "String 1") - self.assertEqual( - self.cursor.description, - [("XML", oracledb.DB_TYPE_XMLTYPE, None, None, None, None, True)], - ) - def test_2531(self): - "2531 - test inserting and fetching XMLType (1K) as a string" - self.cursor.execute("truncate table TestTempXML") - chars = string.ascii_uppercase + string.ascii_lowercase - random_string = "".join(random.choice(chars) for _ in range(1024)) - int_val = 2531 - xml_string = f"{random_string}" - self.cursor.execute( - "insert into TestTempXML (IntCol, XMLCol) values (:1, :2)", - (int_val, xml_string), - ) - self.cursor.execute( - "select XMLCol from TestTempXML where intCol = :1", - [int_val], - ) - (actual_value,) = self.cursor.fetchone() - self.assertEqual(actual_value.strip(), xml_string) - - def test_2532(self): - "2532 - fetching null and not null values can use optimised path" - sql = """ - select * from TestStrings - where IntCol between :start_value and :end_value""" - self.cursor.execute(sql, start_value=2, end_value=5) - self.assertEqual(self.cursor.fetchall(), self.raw_data[1:5]) - self.cursor.execute(sql, start_value=5, end_value=8) - self.assertEqual(self.cursor.fetchall(), self.raw_data[4:8]) - self.cursor.execute(sql, start_value=8, end_value=10) - self.assertEqual(self.cursor.fetchall(), self.raw_data[7:10]) - - def test_2533(self): - "2533 - test bypass string decode" - self.cursor.execute("truncate table TestTempTable") - string_val = "I bought a cafetière on the Champs-Élysées" - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - with self.conn.cursor() as cursor: - cursor.execute(sql, (1, string_val)) - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchone(), (1, string_val)) - with self.conn.cursor() as cursor: - cursor.outputtypehandler = self.__return_strings_as_bytes - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchone(), (1, string_val.encode())) - with self.conn.cursor() as cursor: - cursor.outputtypehandler = None - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchone(), (1, string_val)) - - @test_env.skip_unless_thin_mode() - def test_2534(self): - "2534 - test inserting and fetching XMLType (32K) as a string" - 
self.cursor.execute("truncate table TestTempXML") - chars = string.ascii_uppercase + string.ascii_lowercase - random_string = "".join(random.choice(chars) for _ in range(32768)) - int_val = 2534 - xml_string = f"{random_string}" - lob = self.conn.createlob(oracledb.DB_TYPE_CLOB) - lob.write(xml_string) - self.cursor.execute( - """ - insert into TestTempXML (IntCol, XMLCol) - values (:1, sys.xmltype(:2)) - """, - (int_val, lob), - ) - self.cursor.execute( - "select XMLCol from TestTempXML where intCol = :1", - [int_val], - ) - (actual_value,) = self.cursor.fetchone() - self.assertEqual(actual_value.strip(), xml_string) +def test_2529(cursor, test_env): + "2529 - test assigning a string to rowid" + var = cursor.var(oracledb.ROWID) + with test_env.assert_raises_full_code("DPY-3004"): + var.setvalue(0, "ABDHRYTHFJGKDKKDH") -if __name__ == "__main__": - test_env.run_test_cases() +def test_2530(cursor): + "2530 - test fetching XMLType (< 1K) as a string" + cursor.execute( + """ + select XMLElement("string", stringCol) as xml + from TestStrings + where intCol = 1 + """ + ) + (actual_value,) = cursor.fetchone() + assert actual_value == "String 1" + assert cursor.description == [ + ("XML", oracledb.DB_TYPE_XMLTYPE, None, None, None, None, True) + ] + + +def test_2531(cursor): + "2531 - test inserting and fetching XMLType (1K) as a string" + cursor.execute("truncate table TestTempXML") + chars = string.ascii_uppercase + string.ascii_lowercase + random_string = "".join(random.choice(chars) for _ in range(1024)) + int_val = 2531 + xml_string = f"{random_string}" + cursor.execute( + "insert into TestTempXML (IntCol, XMLCol) values (:1, :2)", + (int_val, xml_string), + ) + cursor.execute( + "select XMLCol from TestTempXML where intCol = :1", + [int_val], + ) + (actual_value,) = cursor.fetchone() + assert actual_value.strip() == xml_string + + +def test_2532(cursor, module_data): + "2532 - fetching null and not null values can use optimised path" + sql = """ + select * from TestStrings + where IntCol between :start_value and :end_value""" + cursor.execute(sql, start_value=2, end_value=5) + assert cursor.fetchall() == module_data[1:5] + cursor.execute(sql, start_value=5, end_value=8) + assert cursor.fetchall() == module_data[4:8] + cursor.execute(sql, start_value=8, end_value=10) + assert cursor.fetchall() == module_data[7:10] + + +def test_2533(conn, cursor): + "2533 - test bypass string decode" + cursor.execute("truncate table TestTempTable") + string_val = "I bought a cafetière on the Champs-Élysées" + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + with conn.cursor() as cursor: + cursor.execute(sql, (1, string_val)) + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchone() == (1, string_val) + with conn.cursor() as cursor: + cursor.outputtypehandler = return_strings_as_bytes + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchone() == (1, string_val.encode()) + with conn.cursor() as cursor: + cursor.outputtypehandler = None + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchone() == (1, string_val) + + +def test_2534(skip_unless_thin_mode, conn, cursor): + "2534 - test inserting and fetching XMLType (32K) as a string" + cursor.execute("truncate table TestTempXML") + chars = string.ascii_uppercase + string.ascii_lowercase + random_string = "".join(random.choice(chars) for _ in range(32768)) + int_val = 2534 + xml_string = f"{random_string}" + lob = 
conn.createlob(oracledb.DB_TYPE_CLOB) + lob.write(xml_string) + cursor.execute( + """ + insert into TestTempXML (IntCol, XMLCol) + values (:1, sys.xmltype(:2)) + """, + (int_val, lob), + ) + cursor.execute( + "select XMLCol from TestTempXML where intCol = :1", + [int_val], + ) + (actual_value,) = cursor.fetchone() + assert actual_value.strip() == xml_string diff --git a/tests/test_2600_timestamp_var.py b/tests/test_2600_timestamp_var.py index 99b3857f..cb8b899d 100644 --- a/tests/test_2600_timestamp_var.py +++ b/tests/test_2600_timestamp_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,212 +29,218 @@ import datetime import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - for i in range(1, 11): - base_date = datetime.datetime(2002, 12, 9) - date_interval = datetime.timedelta(days=i) +import pytest + + +@pytest.fixture(scope="module") +def module_data(): + data = [] + for i in range(1, 11): + base_date = datetime.datetime(2002, 12, 9) + date_interval = datetime.timedelta(days=i) + date_value = base_date + date_interval + str_value = str(i * 50) + fsecond = int(str_value + "0" * (6 - len(str_value))) + date_col = datetime.datetime( + date_value.year, + date_value.month, + date_value.day, + date_value.hour, + date_value.minute, + i * 2, + fsecond, + ) + if i % 2: + date_interval = datetime.timedelta(days=i + 1) date_value = base_date + date_interval - str_value = str(i * 50) + str_value = str(i * 125) fsecond = int(str_value + "0" * (6 - len(str_value))) - date_col = datetime.datetime( + nullable_col = datetime.datetime( date_value.year, date_value.month, date_value.day, date_value.hour, date_value.minute, - i * 2, + i * 3, fsecond, ) - if i % 2: - date_interval = datetime.timedelta(days=i + 1) - date_value = base_date + date_interval - str_value = str(i * 125) - fsecond = int(str_value + "0" * (6 - len(str_value))) - nullable_col = datetime.datetime( - date_value.year, - date_value.month, - date_value.day, - date_value.hour, - date_value.minute, - i * 3, - fsecond, - ) - else: - nullable_col = None - precision_col = datetime.datetime(2009, 12, 14) - data_tuple = (i, date_col, nullable_col, precision_col) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def test_2600(self): - "2600 - test binding in a timestamp" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) - self.cursor.execute( - "select * from TestTimestamps where TimestampCol = :value", - value=datetime.datetime(2002, 12, 14, 0, 0, 10, 250000), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_2601(self): - "2601 - test binding in a null" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) - self.cursor.execute( - "select * from TestTimestamps where TimestampCol = :value", - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_2602(self): - "2602 - test binding out with set input sizes defined" - bind_vars = self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) - self.cursor.execute( - """ - begin - :value := to_timestamp('20021209', 'YYYYMMDD'); - end; - """ - ) - self.assertEqual( - 
bind_vars["value"].getvalue(), datetime.datetime(2002, 12, 9) - ) - - def test_2603(self): - "2603 - test binding in/out with set input sizes defined" - bind_vars = self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) - self.cursor.execute( - """ - begin - :value := :value + 5.25; - end; - """, - value=datetime.datetime(2002, 12, 12, 10, 0, 0), - ) - self.assertEqual( - bind_vars["value"].getvalue(), - datetime.datetime(2002, 12, 17, 16, 0, 0), - ) - - def test_2604(self): - "2604 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_TIMESTAMP) - self.cursor.execute( - """ - begin - :value := to_date('20021231 12:31:00', - 'YYYYMMDD HH24:MI:SS'); - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2002, 12, 31, 12, 31, 0) - ) - - def test_2605(self): - "2605 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_TIMESTAMP) - var.setvalue(0, datetime.datetime(2002, 12, 9, 6, 0, 0)) - self.cursor.execute( - """ - begin - :value := :value + 5.25; - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2002, 12, 14, 12, 0, 0) - ) - - def test_2606(self): - "2606 - test cursor description is accurate" - self.cursor.execute("select * from TestTimestamps") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "TIMESTAMPCOL", - oracledb.DB_TYPE_TIMESTAMP, - 23, - None, - 0, - 6, - False, - ), - ("NULLABLECOL", oracledb.DB_TYPE_TIMESTAMP, 23, None, 0, 6, True), - ( - "TIMESTAMPPRECISIONCOL", - oracledb.DB_TYPE_TIMESTAMP, - 23, - None, - 0, - 4, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_2607(self): - "2607 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestTimestamps order by IntCol") - self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_2608(self): - "2608 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestTimestamps order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_2609(self): - "2609 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestTimestamps - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertIsNone(self.cursor.fetchone()) - - def test_2610(self): - "2610 - test binding a timestamp with zero fractional seconds" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) - self.cursor.execute( - """ - select * - from TestTimestamps - where trunc(TimestampCol) = :value - """, - value=datetime.datetime(2002, 12, 14), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_2611(self): - "2611 - test binding a timestamp with datetime.date as input" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) - self.cursor.execute( - """ - select * - from TestTimestamps - where trunc(TimestampCol) = :value - """, - value=datetime.date(2002, 12, 14), - ) - self.assertEqual(self.cursor.fetchall(), 
[self.data_by_key[5]]) - - -if __name__ == "__main__": - test_env.run_test_cases() + else: + nullable_col = None + precision_col = datetime.datetime(2009, 12, 14) + data_tuple = (i, date_col, nullable_col, precision_col) + data.append(data_tuple) + return data + + +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key + + +def test_2600(cursor, module_data_by_key): + "2600 - test binding in a timestamp" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + cursor.execute( + "select * from TestTimestamps where TimestampCol = :value", + value=datetime.datetime(2002, 12, 14, 0, 0, 10, 250000), + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_2601(cursor): + "2601 - test binding in a null" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + cursor.execute( + "select * from TestTimestamps where TimestampCol = :value", + value=None, + ) + assert cursor.fetchall() == [] + + +def test_2602(cursor): + "2602 - test binding out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + cursor.execute( + """ + begin + :value := to_timestamp('20021209', 'YYYYMMDD'); + end; + """ + ) + assert bind_vars["value"].getvalue() == datetime.datetime(2002, 12, 9) + + +def test_2603(cursor): + "2603 - test binding in/out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + cursor.execute( + """ + begin + :value := :value + 5.25; + end; + """, + value=datetime.datetime(2002, 12, 12, 10, 0, 0), + ) + value = bind_vars["value"].getvalue() + assert value == datetime.datetime(2002, 12, 17, 16, 0, 0) + + +def test_2604(cursor): + "2604 - test binding out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_TIMESTAMP) + cursor.execute( + """ + begin + :value := to_date('20021231 12:31:00', + 'YYYYMMDD HH24:MI:SS'); + end; + """, + value=var, + ) + assert var.getvalue() == datetime.datetime(2002, 12, 31, 12, 31, 0) + + +def test_2605(cursor): + "2605 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_TIMESTAMP) + var.setvalue(0, datetime.datetime(2002, 12, 9, 6, 0, 0)) + cursor.execute( + """ + begin + :value := :value + 5.25; + end; + """, + value=var, + ) + assert var.getvalue() == datetime.datetime(2002, 12, 14, 12, 0, 0) + + +def test_2606(cursor): + "2606 - test cursor description is accurate" + cursor.execute("select * from TestTimestamps") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "TIMESTAMPCOL", + oracledb.DB_TYPE_TIMESTAMP, + 23, + None, + 0, + 6, + False, + ), + ("NULLABLECOL", oracledb.DB_TYPE_TIMESTAMP, 23, None, 0, 6, True), + ( + "TIMESTAMPPRECISIONCOL", + oracledb.DB_TYPE_TIMESTAMP, + 23, + None, + 0, + 4, + True, + ), + ] + assert cursor.description == expected_value + + +def test_2607(cursor, module_data): + "2607 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestTimestamps order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_2608(cursor, module_data): + "2608 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestTimestamps order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + 
assert cursor.fetchmany(3) == [] + + +def test_2609(cursor, module_data_by_key): + "2609 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestTimestamps + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None + + +def test_2610(cursor, module_data_by_key): + "2610 - test binding a timestamp with zero fractional seconds" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + cursor.execute( + """ + select * + from TestTimestamps + where trunc(TimestampCol) = :value + """, + value=datetime.datetime(2002, 12, 14), + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_2611(cursor, module_data_by_key): + "2611 - test binding a timestamp with datetime.date as input" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP) + cursor.execute( + """ + select * + from TestTimestamps + where trunc(TimestampCol) = :value + """, + value=datetime.date(2002, 12, 14), + ) + assert cursor.fetchall() == [module_data_by_key[5]] diff --git a/tests/test_2700_aq_dbobject.py b/tests/test_2700_aq_dbobject.py index e35a69b4..8a1f36b1 100644 --- a/tests/test_2700_aq_dbobject.py +++ b/tests/test_2700_aq_dbobject.py @@ -30,638 +30,585 @@ import threading import oracledb -import test_env +import pytest -class TestCase(test_env.BaseTestCase): - book_type_name = "UDT_BOOK" - book_queue_name = "TEST_BOOK_QUEUE" - book_data = [ +@pytest.fixture(scope="module") +def book_data(): + return [ ("Wings of Fire", "A.P.J. Abdul Kalam", decimal.Decimal("15.75")), ("The Story of My Life", "Hellen Keller", decimal.Decimal("10.50")), ("The Chronicles of Narnia", "C.S. Lewis", decimal.Decimal("25.25")), ] - def __deq_in_thread(self, results): - with test_env.get_connection() as conn: - books_type = conn.gettype(self.book_type_name) - queue = conn.queue(self.book_queue_name, books_type) - queue.deqoptions.wait = 10 - props = queue.deqone() - if props is not None: - book = props.payload - results.append((book.TITLE, book.AUTHORS, book.PRICE)) - conn.commit() - - def __verify_attr(self, obj, attrName, value): - setattr(obj, attrName, value) - self.assertEqual(getattr(obj, attrName), value) - - def test_2700(self): - "2700 - test dequeuing an empty queue" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertIsNone(props) - - def test_2701(self): - "2701 - test enqueuing and dequeuing multiple messages" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - props = self.conn.msgproperties() - for title, authors, price in self.book_data: - props.payload = book = queue.payload_type.newobject() - book.TITLE = title - book.AUTHORS = authors - book.PRICE = price - queue.enqone(props) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - results = [] - while True: - props = queue.deqone() - if props is None: - break + +@pytest.fixture +def queue(conn, test_env): + """ + Creates the queue used by the tests in this file. 
+ """ + return test_env.get_and_clear_queue(conn, "TEST_BOOK_QUEUE", "UDT_BOOK") + + +def _deq_in_thread(test_env, queue, results): + with test_env.get_connection() as conn: + books_type = conn.gettype(queue.payload_type.name) + thread_queue = conn.queue(queue.name, books_type) + thread_queue.deqoptions.wait = 10 + props = thread_queue.deqone() + if props is not None: book = props.payload - row = (book.TITLE, book.AUTHORS, book.PRICE) - results.append(row) - self.conn.commit() - self.assertEqual(results, self.book_data) - - def test_2702(self): - "2702 - test dequeuing with DEQ_REMOVE_NODATA option" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[1] - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA - props = queue.deqone() - self.assertIsNotNone(props) - self.assertIsNone(props.payload.TITLE) - - def test_2703(self): - "2703 - test getting/setting dequeue options attributes" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - options = queue.deqoptions - self.__verify_attr(options, "condition", "TEST_CONDITION") - self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") - self.__verify_attr(options, "correlation", "TEST_CORRELATION") - self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) - self.__verify_attr( - options, "navigation", oracledb.DEQ_NEXT_TRANSACTION - ) - self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - self.__verify_attr(options, "wait", 1287) - self.__verify_attr(options, "msgid", b"mID") - - def test_2704(self): - "2704 - test waiting for dequeue" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - results = [] - thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) - thread.start() - book = queue.payload_type.newobject() - title, authors, price = self.book_data[0] + results.append((book.TITLE, book.AUTHORS, book.PRICE)) + conn.commit() + + +def _verify_attr(obj, attrName, value): + setattr(obj, attrName, value) + assert getattr(obj, attrName) == value + + +def test_2700(queue): + "2700 - test dequeuing an empty queue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None + + +def test_2701(conn, queue, book_data): + "2701 - test enqueuing and dequeuing multiple messages" + props = conn.msgproperties() + for title, authors, price in book_data: + props.payload = book = queue.payload_type.newobject() book.TITLE = title book.AUTHORS = authors book.PRICE = price - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - self.conn.commit() - thread.join() - self.assertEqual(results, [(title, authors, price)]) - - def test_2705(self): - "2705 - test getting/setting enqueue options attributes" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - options = queue.enqoptions - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - - def test_2706(self): - "2706 - test errors for invalid values for enqueue" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - self.assertRaises(TypeError, queue.enqone, book) - - def 
test_2707(self): - "2707 - test getting/setting message properties attributes" - props = self.conn.msgproperties() - self.__verify_attr(props, "correlation", "TEST_CORRELATION") - self.__verify_attr(props, "delay", 60) - self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") - self.__verify_attr(props, "expiration", 30) - self.assertEqual(props.attempts, 0) - self.__verify_attr(props, "priority", 1) - self.assertEqual(props.state, oracledb.MSG_READY) - self.assertEqual(props.deliverymode, 0) - - def test_2708(self): - "2708 - test enqueue visibility option - ENQ_ON_COMMIT" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertIsNone(props) - self.conn.commit() - props = queue.deqone() - other_conn.commit() - self.assertIsNotNone(props) - - def test_2709(self): - "2709 - test enqueue visibility option - ENQ_IMMEDIATE" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) queue.enqone(props) - - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: props = queue.deqone() + if props is None: + break book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - def test_2710(self): - "2710 - test enqueue/dequeue delivery modes identical - buffered" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) + row = (book.TITLE, book.AUTHORS, book.PRICE) + results.append(row) + conn.commit() + assert results == book_data + + +def test_2702(conn, queue, book_data): + "2702 - test dequeuing with DEQ_REMOVE_NODATA option" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[1] + props = conn.msgproperties(payload=book) + queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = queue.deqone() + assert props is not None + assert props.payload.TITLE is None + + +def test_2703(queue): + "2703 - test getting/setting dequeue options attributes" + options = queue.deqoptions + _verify_attr(options, "condition", "TEST_CONDITION") 
+ _verify_attr(options, "consumername", "TEST_CONSUMERNAME") + _verify_attr(options, "correlation", "TEST_CORRELATION") + _verify_attr(options, "mode", oracledb.DEQ_LOCKED) + _verify_attr(options, "navigation", oracledb.DEQ_NEXT_TRANSACTION) + _verify_attr(options, "transformation", "TEST_TRANSFORMATION") + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + _verify_attr(options, "wait", 1287) + _verify_attr(options, "msgid", b"mID") + + +def test_2704(conn, queue, book_data, test_env): + "2704 - test waiting for dequeue" + results = [] + thread = threading.Thread( + target=_deq_in_thread, + args=( + test_env, + queue, + results, + ), + ) + thread.start() + book = queue.payload_type.newobject() + title, authors, price = book_data[0] + book.TITLE = title + book.AUTHORS = authors + book.PRICE = price + props = conn.msgproperties(payload=book) + queue.enqone(props) + conn.commit() + thread.join() + assert results == [(title, authors, price)] + + +def test_2705(queue): + "2705 - test getting/setting enqueue options attributes" + options = queue.enqoptions + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + +def test_2706(queue): + "2706 - test errors for invalid values for enqueue" + book = queue.payload_type.newobject() + pytest.raises(TypeError, queue.enqone, book) + + +def test_2707(conn): + "2707 - test getting/setting message properties attributes" + props = conn.msgproperties() + _verify_attr(props, "correlation", "TEST_CORRELATION") + _verify_attr(props, "delay", 60) + _verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + _verify_attr(props, "expiration", 30) + assert props.attempts == 0 + _verify_attr(props, "priority", 1) + assert props.state == oracledb.MSG_READY + assert props.deliverymode == 0 + + +def test_2708(conn, queue, book_data, test_env): + "2708 - test enqueue visibility option - ENQ_ON_COMMIT" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = conn.msgproperties(payload=book) + queue.enqone(props) + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None + conn.commit() + props = queue.deqone() + other_conn.commit() + assert props is not None + + +def test_2709(conn, queue, book_data, test_env): + "2709 - test enqueue visibility option - ENQ_IMMEDIATE" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=book) + queue.enqone(props) + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + other_conn.commit() + assert results == book_data[0] + + +def test_2710(conn, queue, book_data, test_env): + "2710 - test enqueue/dequeue delivery modes identical - buffered" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + 
queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=book) + queue.enqone(props) + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props.deliverymode == oracledb.MSG_BUFFERED + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + other_conn.commit() + assert results == book_data[0] + + +def test_2711(conn, queue, book_data, test_env): + "2711 - test enqueue/dequeue delivery modes identical - persistent" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=book) + queue.enqone(props) + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props.deliverymode == oracledb.MSG_PERSISTENT + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + other_conn.commit() + assert results == book_data[0] + + +def test_2712(conn, queue, book_data, test_env): + "2712 - test enqueue/dequeue delivery modes the same" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=book) + queue.enqone(props) + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props.deliverymode == oracledb.MSG_PERSISTENT + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + other_conn.commit() + assert results == book_data[0] + + +def test_2713(conn, queue, book_data, test_env): + "2713 - test enqueue/dequeue delivery modes different" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=book) + queue.enqone(props) + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None + + +def test_2714(skip_unless_thick_mode, conn, queue, book_data, test_env): + "2714 - test dequeue 
transformation" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + expected_price = book.PRICE + 10 + props = conn.msgproperties(payload=book) + queue.enqone(props) + conn.commit() + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + transformation_str = f"{conn.username}.transform2" + queue.deqoptions.transformation = transformation_str + assert queue.deqoptions.transformation == transformation_str + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props.payload.PRICE == expected_price + + +def test_2715(skip_unless_thick_mode, conn, queue, book_data, test_env): + "2715 - test enqueue transformation" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + expected_price = book.PRICE + 5 + queue.enqoptions.transformation = transformation_str = ( + f"{conn.username}.transform1" + ) + queue.enqoptions.transformation = transformation_str + if test_env.has_client_version(23): + assert queue.enqoptions.transformation == transformation_str + props = conn.msgproperties(payload=book) + queue.enqone(props) + conn.commit() + + other_conn = test_env.get_connection() + books_type = other_conn.gettype(queue.payload_type.name) + queue = other_conn.queue(queue.name, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props.payload.PRICE == expected_price + + +def test_2716(conn, queue, test_env): + "2716 - test to verify payloadType is deprecated" + books_type = conn.gettype(queue.payload_type.name) + assert queue.payload_type == books_type + assert queue.payloadType == books_type + with test_env.assert_raises_full_code("DPY-2014"): + conn.queue(queue.name, books_type, payloadType=books_type) + + +def test_2717(conn, queue, test_env): + "2717 - test error for message with no payload" + props = conn.msgproperties() + with test_env.assert_raises_full_code("DPY-2000"): queue.enqone(props) - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - def test_2711(self): - "2711 - test enqueue/dequeue delivery modes identical - persistent" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = 
oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - def test_2712(self): - "2712 - test enqueue/dequeue delivery modes the same" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) +def test_2718(conn, cursor, queue, book_data): + "2718 - verify that the msgid property is returned correctly" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + props = conn.msgproperties(payload=book) + assert props.msgid is None + queue.enqone(props) + cursor.execute("select msgid from book_queue_tab") + (actual_msgid,) = cursor.fetchone() + assert props.msgid == actual_msgid + props = queue.deqone() + assert props.msgid == actual_msgid + + +def test_2719(conn, queue, book_data): + "2719 - verify use of recipients property" + books_type = conn.gettype(queue.payload_type.name) + book = books_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue = conn.queue("BOOK_QUEUE_MULTI", books_type) + props = conn.msgproperties(payload=book, recipients=["sub2", "sub3"]) + assert props.recipients == ["sub2", "sub3"] + queue.enqone(props) + conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.consumername = "sub3" + props1 = queue.deqone() + book = props1.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + assert results == book_data[0] + queue.deqoptions.consumername = "sub1" + props1 = queue.deqone() + assert props1 is None + + +def test_2720(skip_unless_thick_mode, conn, queue, book_data, test_env): + "2720 - verify attributes of AQ message which spawned notification" + if test_env.is_on_oracle_cloud: + pytest.skip("AQ notification not supported on the cloud") + condition = threading.Condition() + other_conn = test_env.get_connection(events=True) + + def notification_callback(message=None, *args, **kwargs): + cursor = conn.cursor() + cursor.execute("select msgid from book_queue_tab") + (actual_msgid,) = cursor.fetchone() + assert message.msgid == actual_msgid + assert message.consumer_name is None + main_user = test_env.main_user.upper() + assert message.queue_name == f'"{main_user}"."{queue.name}"' + assert message.type == oracledb.EVENT_AQ + with condition: + condition.notify() + + sub = other_conn.subscribe( + namespace=oracledb.SUBSCR_NAMESPACE_AQ, + name=queue.name, + timeout=300, + callback=notification_callback, + ) + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + props = conn.msgproperties(payload=book) + queue.enqone(props) + conn.commit() + with condition: + assert condition.wait(5) + other_conn.unsubscribe(sub) + + +def test_2721(conn, cursor, queue): + "2721 - test message props enqtime" + book = queue.payload_type.newobject() + cursor.execute("select current_timestamp from dual") + (start_date,) = cursor.fetchone() + start_date = 
start_date.replace(microsecond=0) + props = conn.msgproperties(payload=book) + queue.enqone(props) + props = queue.deqone() + cursor.execute("select current_timestamp from dual") + (end_date,) = cursor.fetchone() + end_date = end_date.replace(microsecond=0) + assert start_date <= props.enqtime <= end_date + + +def test_2722(conn, queue): + "2722 - test message props declared attributes" + book = queue.payload_type.newobject() + values = dict( + payload=book, + correlation="TEST_CORRELATION", + delay=7, + exceptionq="TEST_EXCEPTIONQ", + expiration=10, + priority=1, + ) + props = conn.msgproperties(**values) + for attr_name in values: + assert getattr(props, attr_name) == values[attr_name] + + +def test_2723(conn): + "2723 - test error for invalid type for payload_type" + pytest.raises(TypeError, conn.queue, "THE QUEUE", payload_type=4) + + +def test_2724(conn): + "2724 - test setting bytes to payload" + props = conn.msgproperties() + bytes_val = b"Hello there" + props.payload = bytes_val + assert props.payload == bytes_val + + +def test_2725(conn, queue): + "2725 - test getting queue attributes" + other_queue = conn.queue(queue.name, queue.payload_type) + assert other_queue.name == queue.name + assert queue.connection is conn + + +def test_2726(queue): + "2726 - test getting write-only attributes" + with pytest.raises(AttributeError): + queue.enqoptions.deliverymode + with pytest.raises(AttributeError): + queue.deqoptions.deliverymode + + +def test_2727(conn, queue, book_data, test_env): + "2727 - test correlation deqoption" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + correlations = ["Math", "Programming"] + num_messages = 3 + messages = [ + conn.msgproperties(payload=book, correlation=c) + for c in correlations + for i in range(num_messages) + ] + queue.enqmany(messages) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = correlations[0] + correlated_messages = queue.deqmany(num_messages + 1) + assert len(correlated_messages) == num_messages + + queue.deqoptions.correlation = correlations[1] + with test_env.assert_raises_full_code("ORA-25241"): + queue.deqone() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + correlated_messages = queue.deqmany(num_messages + 1) + assert len(correlated_messages) == num_messages + + +def test_2728(conn, queue, book_data): + "2728 - test correlation deqoption with pattern-matching characters" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + for correlation in ("PreCalculus-math1", "Calculus-Math2"): + props = conn.msgproperties(payload=book, correlation=correlation) queue.enqone(props) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = "%Calculus-%ath_" + messages = queue.deqmany(5) + assert len(messages) == 2 - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - def test_2713(self): - "2713 - test enqueue/dequeue delivery modes different" - 
queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertIsNone(props) - - @test_env.skip_unless_thick_mode() - def test_2714(self): - "2714 - test dequeue transformation" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - expected_price = book.PRICE + 10 - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - self.conn.commit() - - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - transformation_str = f"{self.conn.username}.transform2" - queue.deqoptions.transformation = transformation_str - self.assertEqual(queue.deqoptions.transformation, transformation_str) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertEqual(props.payload.PRICE, expected_price) - - @test_env.skip_unless_thick_mode() - def test_2715(self): - "2715 - test enqueue transformation" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - expected_price = book.PRICE + 5 - queue.enqoptions.transformation = transformation_str = ( - f"{self.conn.username}.transform1" - ) - queue.enqoptions.transformation = transformation_str - if test_env.has_client_version(23): - self.assertEqual( - queue.enqoptions.transformation, transformation_str - ) - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - self.conn.commit() - - other_conn = test_env.get_connection() - books_type = other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertEqual(props.payload.PRICE, expected_price) - - def test_2716(self): - "2716 - test to verify payloadType is deprecated" - books_type = self.conn.gettype(self.book_type_name) - queue = self.conn.queue(self.book_queue_name, payloadType=books_type) - self.assertEqual(queue.payload_type, books_type) - self.assertEqual(queue.payloadType, books_type) - with self.assertRaisesFullCode("DPY-2014"): - self.conn.queue( - self.book_queue_name, books_type, payloadType=books_type - ) - - def test_2717(self): - "2717 - test error for message with no payload" - books_type = self.conn.gettype(self.book_type_name) - queue = self.conn.queue(self.book_queue_name, books_type) - props = self.conn.msgproperties() - with 
self.assertRaisesFullCode("DPY-2000"): - queue.enqone(props) - - def test_2718(self): - "2718 - verify that the msgid property is returned correctly" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - props = self.conn.msgproperties(payload=book) - self.assertIsNone(props.msgid) - queue.enqone(props) - self.cursor.execute("select msgid from book_queue_tab") - (actual_msgid,) = self.cursor.fetchone() - self.assertEqual(props.msgid, actual_msgid) - props = queue.deqone() - self.assertEqual(props.msgid, actual_msgid) - - def test_2719(self): - "2719 - verify use of recipients property" - books_type = self.conn.gettype(self.book_type_name) - book = books_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue = self.conn.queue("BOOK_QUEUE_MULTI", books_type) - props = self.conn.msgproperties( - payload=book, recipients=["sub2", "sub3"] - ) - self.assertEqual(props.recipients, ["sub2", "sub3"]) - queue.enqone(props) - self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.consumername = "sub3" - props1 = queue.deqone() - book = props1.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - self.assertEqual(results, self.book_data[0]) - queue.deqoptions.consumername = "sub1" - props1 = queue.deqone() - self.assertIsNone(props1) - - @test_env.skip_unless_thick_mode() - def test_2720(self): - "2720 - verify attributes of AQ message which spawned notification" - if self.is_on_oracle_cloud(self.conn): - self.skipTest("AQ notification not supported on the cloud") - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - condition = threading.Condition() - conn = test_env.get_connection(events=True) - - def notification_callback(message): - self.cursor.execute("select msgid from book_queue_tab") - (actual_msgid,) = self.cursor.fetchone() - self.assertEqual(message.msgid, actual_msgid) - self.assertIsNone(message.consumer_name) - main_user = test_env.get_main_user().upper() - self.assertEqual( - message.queue_name, f'"{main_user}"."{queue.name}"' - ) - self.assertEqual(message.type, oracledb.EVENT_AQ) - with condition: - condition.notify() - - sub = conn.subscribe( - namespace=oracledb.SUBSCR_NAMESPACE_AQ, - name=self.book_queue_name, - timeout=300, - callback=notification_callback, - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - self.conn.commit() - with condition: - self.assertTrue(condition.wait(5)) - conn.unsubscribe(sub) - - def test_2721(self): - "2721 - test message props enqtime" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) +def test_2729(conn, queue, book_data): + "2729 - test condition deqoption with priority" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + + priorities = [5, 10] + indexes = [0, 1] + for priority, ix in zip(priorities, indexes): book = queue.payload_type.newobject() - self.cursor.execute("select current_timestamp from dual") - (start_date,) = self.cursor.fetchone() - start_date = start_date.replace(microsecond=0) - props = self.conn.msgproperties(payload=book) + book.TITLE, book.AUTHORS, book.PRICE = book_data[ix] + props = conn.msgproperties(payload=book, priority=priority) queue.enqone(props) - props = queue.deqone() - 
self.cursor.execute("select current_timestamp from dual") - (end_date,) = self.cursor.fetchone() - end_date = end_date.replace(microsecond=0) - self.assertTrue(start_date <= props.enqtime <= end_date) - - def test_2722(self): - "2722 - test message props declared attributes" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - values = dict( - payload=book, - correlation="TEST_CORRELATION", - delay=7, - exceptionq="TEST_EXCEPTIONQ", - expiration=10, - priority=1, - ) - props = self.conn.msgproperties(**values) - for attr_name in values: - self.assertEqual(getattr(props, attr_name), values[attr_name]) - - def test_2723(self): - "2723 - test error for invalid type for payload_type" - self.assertRaises( - TypeError, self.conn.queue, "THE QUEUE", payload_type=4 - ) - - def test_2724(self): - "2724 - test setting bytes to payload" - props = self.conn.msgproperties() - bytes_val = b"Hello there" - props.payload = bytes_val - self.assertEqual(props.payload, bytes_val) - - def test_2725(self): - "2725 - test getting queue attributes" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - self.assertEqual(queue.name, self.book_queue_name) - self.assertEqual(queue.connection, self.conn) - - def test_2726(self): - "2726 - test getting write-only attributes" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - with self.assertRaises(AttributeError): - queue.enqoptions.deliverymode - with self.assertRaises(AttributeError): - queue.deqoptions.deliverymode - - def test_2727(self): - "2727 - test correlation deqoption" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - correlations = ["Math", "Programming"] - num_messages = 3 - messages = [ - self.conn.msgproperties(payload=book, correlation=c) - for c in correlations - for i in range(num_messages) - ] - queue.enqmany(messages) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.correlation = correlations[0] - correlated_messages = queue.deqmany(num_messages + 1) - self.assertEqual(len(correlated_messages), num_messages) - - queue.deqoptions.correlation = correlations[1] - with self.assertRaisesFullCode("ORA-25241"): - queue.deqone() - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - correlated_messages = queue.deqmany(num_messages + 1) - self.assertEqual(len(correlated_messages), num_messages) - - def test_2728(self): - "2728 - test correlation deqoption with pattern-matching characters" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - for correlation in ("PreCalculus-math1", "Calculus-Math2"): - props = self.conn.msgproperties( - payload=book, correlation=correlation - ) - queue.enqone(props) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.correlation = "%Calculus-%ath_" - messages = queue.deqmany(5) - self.assertEqual(len(messages), 2) - - def test_2729(self): - "2729 - test condition deqoption with priority" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - - priorities = [5, 10] - indexes = [0, 1] - for priority, ix in zip(priorities, indexes): - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = 
self.book_data[ix] - props = self.conn.msgproperties(payload=book, priority=priority) - queue.enqone(props) - - queue.deqoptions.condition = "priority = 9" + + queue.deqoptions.condition = "priority = 9" + messages = queue.deqmany(3) + assert len(messages) == 0 + + for priority, ix in zip(priorities, indexes): + queue.deqoptions.condition = f"priority = {priority}" messages = queue.deqmany(3) - self.assertEqual(len(messages), 0) - - for priority, ix in zip(priorities, indexes): - queue.deqoptions.condition = f"priority = {priority}" - messages = queue.deqmany(3) - self.assertEqual(len(messages), 1) - book = messages[0].payload - data = book.TITLE, book.AUTHORS, book.PRICE - self.assertEqual(data, self.book_data[ix]) - - def test_2730(self): - "2730 - test mode deqoption with DEQ_REMOVE_NODATA" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + assert len(messages) == 1 + book = messages[0].payload + data = book.TITLE, book.AUTHORS, book.PRICE + assert data == book_data[ix] - book = queue.payload_type.newobject() - for data in self.book_data: - book.TITLE, book.AUTHORS, book.PRICE = data - props = self.conn.msgproperties(payload=book) - queue.enqone(props) - - messages = queue.deqmany(5) - self.assertEqual(len(messages), 3) - for message in messages: - self.assertIsNone(message.payload.TITLE) - self.assertIsNone(message.payload.AUTHORS) - self.assertIsNone(message.payload.PRICE) - - def test_2731(self): - "2731 - test payload_type returns the correct value" - books_type = self.conn.gettype(self.book_type_name) - queue = self.conn.queue(self.book_queue_name, books_type) - self.assertEqual(queue.payload_type, books_type) - - def test_2732(self): - "2732 - test deprecated attributes (enqOptions, deqOptions)" - books_type = self.conn.gettype(self.book_type_name) - queue = self.conn.queue(self.book_queue_name, books_type) - self.assertEqual(queue.enqOptions, queue.enqoptions) - self.assertEqual(queue.deqOptions, queue.deqoptions) - - def test_2733(self): - "2733 - test deprecated AQ methods (enqOne, deqOne)" - books_type = self.conn.gettype(self.book_type_name) - queue = self.conn.queue(self.book_queue_name, books_type) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqOne(self.conn.msgproperties(book)) - props = queue.deqOne() - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - self.assertEqual(results, self.book_data[0]) - def test_2734(self): - "2734 - test enqueuing to an object queue with the wrong payload" - queue = self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - props = self.conn.msgproperties(payload="A string") - with self.assertRaisesFullCode("DPY-2062"): - queue.enqone(props) +def test_2730(conn, queue, book_data): + "2730 - test mode deqoption with DEQ_REMOVE_NODATA" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + + book = queue.payload_type.newobject() + for data in book_data: + book.TITLE, book.AUTHORS, book.PRICE = data + props = conn.msgproperties(payload=book) + queue.enqone(props) + messages = queue.deqmany(5) + assert len(messages) == 3 + for message in messages: + assert message.payload.TITLE is None + assert message.payload.AUTHORS is None + assert message.payload.PRICE is None -if __name__ == "__main__": - test_env.run_test_cases() + +def test_2731(conn, queue): + "2731 - test 
payload_type returns the correct value" + books_type = conn.gettype(queue.payload_type.name) + other_queue = conn.queue(queue.name, books_type) + assert other_queue.payload_type == books_type + + +def test_2732(queue): + "2732 - test deprecated attributes (enqOptions, deqOptions)" + assert queue.enqOptions == queue.enqoptions + assert queue.deqOptions == queue.deqoptions + + +def test_2733(conn, queue, book_data): + "2733 - test deprecated AQ methods (enqOne, deqOne)" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = book_data[0] + queue.enqOne(conn.msgproperties(book)) + props = queue.deqOne() + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + assert results == book_data[0] + + +def test_2734(conn, queue, test_env): + "2734 - test enqueuing to an object queue with the wrong payload" + props = conn.msgproperties(payload="A string") + with test_env.assert_raises_full_code("DPY-2062"): + queue.enqone(props) diff --git a/tests/test_2800_aq_bulk.py b/tests/test_2800_aq_bulk.py index 643f2778..c78faa28 100644 --- a/tests/test_2800_aq_bulk.py +++ b/tests/test_2800_aq_bulk.py @@ -30,7 +30,7 @@ import threading import oracledb -import test_env +import pytest RAW_QUEUE_NAME = "TEST_RAW_QUEUE" JSON_QUEUE_NAME = "TEST_JSON_QUEUE" @@ -62,164 +62,170 @@ ] -class TestCase(test_env.BaseTestCase): - def __deq_in_thread(self, results): - with test_env.get_connection() as conn: - queue = conn.queue(RAW_QUEUE_NAME) - queue.deqoptions.wait = 10 - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - while len(results) < len(RAW_PAYLOAD_DATA): - messages = queue.deqmany(5) - if not messages: - break - for message in messages: - results.append(message.payload.decode()) - conn.commit() - - def test_2800(self): - "2800 - test bulk enqueue and dequeue" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) +@pytest.fixture +def queue(conn, test_env): + """ + Creates the queue used by the tests in this file. + """ + return test_env.get_and_clear_queue(conn, RAW_QUEUE_NAME) + + +@pytest.fixture +def json_queue(conn, test_env): + """ + Creates the JSON queue used by the tests in this file. 
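
# --- Illustrative sketch (not part of the patch) ---------------------------
# The queue fixtures above rely on test_env.get_and_clear_queue(), whose body
# is not shown in this hunk. Assuming it mirrors the behaviour of the old
# BaseTestCase helper used elsewhere in these tests, it builds the queue
# object and drains any leftover messages so each test starts from an empty
# queue. The helper below is only a sketch under that assumption.

import oracledb


def get_and_clear_queue(conn, name, payload_type=None):
    # "JSON" selects a JSON payload queue, an object type name selects an
    # object queue, and None selects a RAW queue (see AsyncConnection.queue()
    # / Connection.queue() semantics documented in this patch series).
    if payload_type == "JSON":
        queue = conn.queue(name, "JSON")
    elif payload_type is not None:
        queue = conn.queue(name, conn.gettype(payload_type))
    else:
        queue = conn.queue(name)
    # drain whatever earlier test runs left behind
    queue.deqoptions.wait = oracledb.DEQ_NO_WAIT
    while queue.deqone():
        pass
    conn.commit()
    return queue
# ---------------------------------------------------------------------------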
+ """ + return test_env.get_and_clear_queue(conn, JSON_QUEUE_NAME, "JSON") + + +def _deq_in_thread(test_env, results): + with test_env.get_connection() as conn: + queue = conn.queue(RAW_QUEUE_NAME) + queue.deqoptions.wait = 10 + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + while len(results) < len(RAW_PAYLOAD_DATA): + messages = queue.deqmany(5) + if not messages: + break + for message in messages: + results.append(message.payload.decode()) + conn.commit() + + +def test_2800(conn, queue): + "2800 - test bulk enqueue and dequeue" + messages = [conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA] + queue.enqmany(messages) + messages = queue.deqmany(len(RAW_PAYLOAD_DATA)) + data = [message.payload.decode() for message in messages] + conn.commit() + assert data == RAW_PAYLOAD_DATA + + +def test_2801(conn, queue): + "2801 - test empty bulk dequeue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + messages = queue.deqmany(5) + conn.commit() + assert messages == [] + + +def test_2802(skip_unless_thick_mode, conn, queue, test_env): + "2802 - test bulk dequeue with wait" + results = [] + thread = threading.Thread( + target=_deq_in_thread, + args=( + test_env, + results, + ), + ) + thread.start() + messages = [conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + queue.enqmany(messages) + thread.join() + assert results == RAW_PAYLOAD_DATA + + +def test_2803(conn, queue): + "2803 - test enqueue and dequeue multiple times" + data_to_enqueue = RAW_PAYLOAD_DATA + for num in (2, 6, 4): messages = [ - self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA + conn.msgproperties(payload=data) for data in data_to_enqueue[:num] ] + data_to_enqueue = data_to_enqueue[num:] queue.enqmany(messages) - messages = queue.deqmany(len(RAW_PAYLOAD_DATA)) - data = [message.payload.decode() for message in messages] - self.conn.commit() - self.assertEqual(data, RAW_PAYLOAD_DATA) - - def test_2801(self): - "2801 - test empty bulk dequeue" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - messages = queue.deqmany(5) - self.conn.commit() - self.assertEqual(messages, []) - - @test_env.skip_unless_thick_mode() - def test_2802(self): - "2802 - test bulk dequeue with wait" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) - results = [] - thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) - thread.start() - messages = [ - self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA - ] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - queue.enqmany(messages) - thread.join() - self.assertEqual(results, RAW_PAYLOAD_DATA) - - def test_2803(self): - "2803 - test enqueue and dequeue multiple times" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) - data_to_enqueue = RAW_PAYLOAD_DATA - for num in (2, 6, 4): - messages = [ - self.conn.msgproperties(payload=data) - for data in data_to_enqueue[:num] - ] - data_to_enqueue = data_to_enqueue[num:] - queue.enqmany(messages) - self.conn.commit() - all_data = [] - for num in (3, 5, 10): - messages = queue.deqmany(num) - all_data.extend(message.payload.decode() for message in messages) - self.conn.commit() - self.assertEqual(all_data, RAW_PAYLOAD_DATA) - - @test_env.skip_unless_thick_mode() - def test_2804(self): - "2804 - test visibility option for enqueue and dequeue" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) - - # first test with ENQ_ON_COMMIT (commit required) - queue.enqoptions.visibility = 
oracledb.ENQ_ON_COMMIT - props1 = self.conn.msgproperties(payload="A first message") - props2 = self.conn.msgproperties(payload="A second message") - queue.enqmany([props1, props2]) - other_connection = test_env.get_connection() - other_queue = other_connection.queue(RAW_QUEUE_NAME) - other_queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - other_queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - messages = other_queue.deqmany(5) - self.assertEqual(len(messages), 0) - self.conn.commit() - messages = other_queue.deqmany(5) - self.assertEqual(len(messages), 2) - other_connection.rollback() - - # second test with ENQ_IMMEDIATE (no commit required) - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - other_queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.enqmany([props1, props2]) - messages = other_queue.deqmany(5) - self.assertEqual(len(messages), 4) - other_connection.rollback() - messages = other_queue.deqmany(5) - self.assertEqual(len(messages), 0) - - def test_2805(self): - "2805 - test error for messages with no payload" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) - messages = [self.conn.msgproperties() for _ in RAW_PAYLOAD_DATA] - with self.assertRaisesFullCode("DPY-2000"): - queue.enqmany(messages) - - def test_2806(self): - "2806 - verify that the msgid property is returned correctly" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) - messages = [ - self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA - ] + conn.commit() + all_data = [] + for num in (3, 5, 10): + messages = queue.deqmany(num) + all_data.extend(message.payload.decode() for message in messages) + conn.commit() + assert all_data == RAW_PAYLOAD_DATA + + +def test_2804(skip_unless_thick_mode, conn, queue, test_env): + "2804 - test visibility option for enqueue and dequeue" + + # first test with ENQ_ON_COMMIT (commit required) + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props1 = conn.msgproperties(payload="A first message") + props2 = conn.msgproperties(payload="A second message") + queue.enqmany([props1, props2]) + other_connection = test_env.get_connection() + other_queue = other_connection.queue(RAW_QUEUE_NAME) + other_queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + other_queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + messages = other_queue.deqmany(5) + assert len(messages) == 0 + conn.commit() + messages = other_queue.deqmany(5) + assert len(messages) == 2 + other_connection.rollback() + + # second test with ENQ_IMMEDIATE (no commit required) + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + other_queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.enqmany([props1, props2]) + messages = other_queue.deqmany(5) + assert len(messages) == 4 + other_connection.rollback() + messages = other_queue.deqmany(5) + assert len(messages) == 0 + + +def test_2805(conn, queue, test_env): + "2805 - test error for messages with no payload" + messages = [conn.msgproperties() for _ in RAW_PAYLOAD_DATA] + with test_env.assert_raises_full_code("DPY-2000"): queue.enqmany(messages) - self.cursor.execute("select msgid from raw_queue_tab") - actual_msgids = set(m for m, in self.cursor) - msgids = set(message.msgid for message in messages) - self.assertEqual(msgids, actual_msgids) - messages = queue.deqmany(len(RAW_PAYLOAD_DATA)) - msgids = set(message.msgid for message in messages) - self.assertEqual(msgids, actual_msgids) - - def test_2807(self): - "4800 - test enqueuing and dequeuing JSON message" - queue = self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") - props = [ - 
self.conn.msgproperties(payload=data) for data in JSON_DATA_PAYLOAD - ] - queue.enqmany(props) - self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - messages = queue.deqmany(5) - actual_data = [message.payload for message in messages] - self.assertEqual(actual_data, JSON_DATA_PAYLOAD) - - def test_2808(self): - "2808 - test enqueuing to a JSON queue without a JSON payload" - queue = self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") - props = self.conn.msgproperties(payload="string message") - with self.assertRaisesFullCode("DPY-2062"): - queue.enqmany([props, props]) - - def test_2809(self): - "2809 - test errors for invalid values for enqmany and deqmany" - queue = self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") - props = self.conn.msgproperties(payload="string message") - self.assertRaises(TypeError, queue.enqmany, props) - self.assertRaises(TypeError, queue.enqmany, ["Not", "msgproperties"]) - self.assertRaises(TypeError, queue.deqmany, "5") - - def test_2810(self): - "2810 - test deprecated AQ methods (enqMany, deqMany)" - queue = self.get_and_clear_queue(RAW_QUEUE_NAME) - data = [b"labrador", b"schnauzer", b"shih tzu"] - queue.enqMany([self.conn.msgproperties(d) for d in data]) - props = queue.deqMany(len(data) + 1) - dequeued_data = [p.payload for p in props] - self.assertEqual(dequeued_data, data) - - -if __name__ == "__main__": - test_env.run_test_cases() + + +def test_2806(conn, cursor, queue): + "2806 - verify that the msgid property is returned correctly" + messages = [conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA] + queue.enqmany(messages) + cursor.execute("select msgid from raw_queue_tab") + actual_msgids = set(m for m, in cursor) + msgids = set(message.msgid for message in messages) + assert msgids == actual_msgids + messages = queue.deqmany(len(RAW_PAYLOAD_DATA)) + msgids = set(message.msgid for message in messages) + assert msgids == actual_msgids + + +def test_2807(conn, json_queue): + "4800 - test enqueuing and dequeuing JSON message" + props = [conn.msgproperties(payload=data) for data in JSON_DATA_PAYLOAD] + json_queue.enqmany(props) + conn.commit() + json_queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + messages = json_queue.deqmany(5) + actual_data = [message.payload for message in messages] + assert actual_data == JSON_DATA_PAYLOAD + + +def test_2808(conn, json_queue, test_env): + "2808 - test enqueuing to a JSON queue without a JSON payload" + props = conn.msgproperties(payload="string message") + with test_env.assert_raises_full_code("DPY-2062"): + json_queue.enqmany([props, props]) + + +def test_2809(conn, json_queue): + "2809 - test errors for invalid values for enqmany and deqmany" + props = conn.msgproperties(payload="string message") + pytest.raises(TypeError, json_queue.enqmany, props) + pytest.raises(TypeError, json_queue.enqmany, ["Not", "msgproperties"]) + pytest.raises(TypeError, json_queue.deqmany, "5") + + +def test_2810(conn, queue): + "2810 - test deprecated AQ methods (enqMany, deqMany)" + data = [b"labrador", b"schnauzer", b"shih tzu"] + queue.enqMany([conn.msgproperties(d) for d in data]) + props = queue.deqMany(len(data) + 1) + dequeued_data = [p.payload for p in props] + assert dequeued_data == data diff --git a/tests/test_2900_rowid.py b/tests/test_2900_rowid.py index 676251ef..cbad123c 100644 --- a/tests/test_2900_rowid.py +++ b/tests/test_2900_rowid.py @@ -29,199 +29,198 @@ import datetime import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def __populate_test_universal_rowids(self): - 
self.cursor.execute("truncate table TestUniversalRowids") - data = [ - (1, "ABC" * 75, datetime.datetime(2017, 4, 11)), - (2, "DEF" * 80, datetime.datetime(2017, 4, 12)), - ] - self.cursor.executemany( - "insert into TestUniversalRowids values (:1, :2, :3)", - data, - ) - self.conn.commit() - - def __test_select_rowids(self, table_name): - self.cursor.execute(f"select rowid, IntCol from {table_name}") - sql = f"select IntCol from {table_name} where rowid = :val" - for rowid, int_val in self.cursor.fetchall(): - self.cursor.execute(sql, val=rowid) - self.assertEqual(self.cursor.fetchall(), [(int_val,)]) - - def test_2900(self): - "2900 - test selecting all rowids from a regular table" - self.__test_select_rowids("TestNumbers") - - def test_2901(self): - "2901 - test selecting all rowids from an index organised table" - self.__populate_test_universal_rowids() - self.__test_select_rowids("TestUniversalRowids") - - def test_2902(self): - "2902 - test inserting an invalid rowid" - sql = "insert into TestRowids (IntCol, RowidCol) values (1, :rid)" - with self.assertRaisesFullCode("ORA-00932"): - self.cursor.execute(sql, rid=12345) - with self.assertRaisesFullCode("ORA-01410"): - self.cursor.execute(sql, rid="523lkhlf") - - def test_2903(self): - "2903 - test selecting regular rowids stored in a urowid column" - self.cursor.execute("truncate table TestRowids") - self.cursor.execute( - """ - insert into TestRowids (IntCol, UrowidCol) - select IntCol, rowid - from TestNumbers - """ + + +def _populate_test_universal_rowids(cursor): + cursor.execute("truncate table TestUniversalRowids") + data = [ + (1, "ABC" * 75, datetime.datetime(2017, 4, 11)), + (2, "DEF" * 80, datetime.datetime(2017, 4, 12)), + ] + cursor.executemany( + "insert into TestUniversalRowids values (:1, :2, :3)", + data, + ) + cursor.connection.commit() + + +def _test_select_rowids(cursor, table_name): + cursor.execute(f"select rowid, IntCol from {table_name}") + sql = f"select IntCol from {table_name} where rowid = :val" + for rowid, int_val in cursor.fetchall(): + cursor.execute(sql, val=rowid) + assert cursor.fetchall() == [(int_val,)] + + +def test_2900(cursor): + "2900 - test selecting all rowids from a regular table" + _test_select_rowids(cursor, "TestNumbers") + + +def test_2901(cursor): + "2901 - test selecting all rowids from an index organised table" + _populate_test_universal_rowids(cursor) + _test_select_rowids(cursor, "TestUniversalRowids") + + +def test_2902(cursor, test_env): + "2902 - test inserting an invalid rowid" + sql = "insert into TestRowids (IntCol, RowidCol) values (1, :rid)" + with test_env.assert_raises_full_code("ORA-00932"): + cursor.execute(sql, rid=12345) + with test_env.assert_raises_full_code("ORA-01410"): + cursor.execute(sql, rid="523lkhlf") + + +def test_2903(conn, cursor): + "2903 - test selecting regular rowids stored in a urowid column" + cursor.execute("truncate table TestRowids") + cursor.execute( + """ + insert into TestRowids (IntCol, UrowidCol) + select IntCol, rowid + from TestNumbers + """ + ) + conn.commit() + cursor.execute("select IntCol, UrowidCol from TestRowids") + for int_val, rowid in cursor.fetchall(): + cursor.execute( + "select IntCol from TestNumbers where rowid = :val", + val=rowid, ) - self.conn.commit() - self.cursor.execute("select IntCol, UrowidCol from TestRowids") - for int_val, rowid in self.cursor.fetchall(): - self.cursor.execute( - "select IntCol from TestNumbers where rowid = :val", - val=rowid, - ) - self.assertEqual(self.cursor.fetchall(), [(int_val,)]) - - def 
test_2904(self): - "2904 - test selecting regular rowids stored in a rowid column" - self.cursor.execute("truncate table TestRowids") - self.cursor.execute( + assert cursor.fetchall() == [(int_val,)] + + +def test_2904(conn, cursor): + "2904 - test selecting regular rowids stored in a rowid column" + cursor.execute("truncate table TestRowids") + cursor.execute( + """ + insert into TestRowids (IntCol, RowidCol) + select IntCol, rowid + from TestNumbers + """ + ) + conn.commit() + cursor.execute("select IntCol, RowidCol from TestRowids") + for int_val, rowid in cursor.fetchall(): + cursor.execute( """ - insert into TestRowids (IntCol, RowidCol) - select IntCol, rowid + select IntCol from TestNumbers - """ - ) - self.conn.commit() - self.cursor.execute("select IntCol, RowidCol from TestRowids") - for int_val, rowid in self.cursor.fetchall(): - self.cursor.execute( - """ - select IntCol - from TestNumbers - where rowid = :val - """, - val=rowid, - ) - self.assertEqual(self.cursor.fetchall(), [(int_val,)]) - - def test_2905(self): - "2905 - binding and inserting a rowid" - self.cursor.execute("truncate table TestRowids") - insert_data = [ - (1, "String #1"), - (2, "String #2"), - (3, "String #3"), - (4, "String #4"), - ] - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - insert_data, - ) - self.conn.commit() - ridvar = self.cursor.var(oracledb.ROWID) - self.cursor.execute( - """ - begin - select rowid into :rid - from TestTempTable - where IntCol = 3; - end; - """, - rid=ridvar, - ) - self.cursor.setinputsizes(r1=oracledb.ROWID) - self.cursor.execute( - """ - insert into TestRowids (IntCol, RowidCol) - values(1, :r1) - """, - r1=ridvar, - ) - self.conn.commit() - self.cursor.execute("select IntCol, RowidCol from TestRowids") - int_val, rowid = self.cursor.fetchone() - self.cursor.execute( - """ - select IntCol, StringCol1 from TestTempTable where rowid = :val """, val=rowid, ) - self.assertEqual(self.cursor.fetchone(), (3, "String #3")) - - @test_env.skip_unless_thin_mode() - def test_2906(self): - "2906 - binding and inserting a rowid as urowid" - self.cursor.execute("truncate table TestRowids") - insert_data = [ - (1, "String #1", datetime.datetime(2017, 4, 4)), - (2, "String #2", datetime.datetime(2017, 4, 5)), - (3, "String #3", datetime.datetime(2017, 4, 6)), - (4, "A" * 250, datetime.datetime(2017, 4, 7)), - ] - self.cursor.execute("truncate table TestUniversalRowids") - self.cursor.executemany( - """ - insert into TestUniversalRowids - values (:1, :2, :3) - """, - insert_data, - ) - self.conn.commit() - ridvar = self.cursor.var(oracledb.DB_TYPE_UROWID) - self.cursor.execute( - """ - begin - select rowid into :rid - from TestUniversalRowids - where IntCol = 3; - end; - """, - rid=ridvar, - ) - self.cursor.setinputsizes(r1=oracledb.DB_TYPE_UROWID) - self.cursor.execute( - """ - insert into TestRowids (IntCol, UrowidCol) - values(1, :r1) - """, - r1=ridvar, - ) - self.conn.commit() - self.cursor.execute("select IntCol, UrowidCol from TestRowids") - int_val, rowid = self.cursor.fetchone() - self.cursor.execute( - """ - select IntCol, StringCol, DateCol + assert cursor.fetchall() == [(int_val,)] + + +def test_2905(conn, cursor): + "2905 - binding and inserting a rowid" + cursor.execute("truncate table TestRowids") + insert_data = [ + (1, "String #1"), + (2, "String #2"), + (3, "String #3"), + (4, "String #4"), + ] + cursor.execute("truncate table TestTempTable") + 
cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + insert_data, + ) + conn.commit() + ridvar = cursor.var(oracledb.ROWID) + cursor.execute( + """ + begin + select rowid into :rid + from TestTempTable + where IntCol = 3; + end; + """, + rid=ridvar, + ) + cursor.setinputsizes(r1=oracledb.ROWID) + cursor.execute( + """ + insert into TestRowids (IntCol, RowidCol) + values(1, :r1) + """, + r1=ridvar, + ) + conn.commit() + cursor.execute("select IntCol, RowidCol from TestRowids") + int_val, rowid = cursor.fetchone() + cursor.execute( + """ + select IntCol, StringCol1 from TestTempTable + where rowid = :val + """, + val=rowid, + ) + assert cursor.fetchone() == (3, "String #3") + + +def test_2906(skip_unless_thin_mode, conn, cursor): + "2906 - binding and inserting a rowid as urowid" + cursor.execute("truncate table TestRowids") + insert_data = [ + (1, "String #1", datetime.datetime(2017, 4, 4)), + (2, "String #2", datetime.datetime(2017, 4, 5)), + (3, "String #3", datetime.datetime(2017, 4, 6)), + (4, "A" * 250, datetime.datetime(2017, 4, 7)), + ] + cursor.execute("truncate table TestUniversalRowids") + cursor.executemany( + """ + insert into TestUniversalRowids + values (:1, :2, :3) + """, + insert_data, + ) + conn.commit() + ridvar = cursor.var(oracledb.DB_TYPE_UROWID) + cursor.execute( + """ + begin + select rowid into :rid from TestUniversalRowids - where rowid = :val - """, - val=rowid, - ) - self.assertEqual( - self.cursor.fetchone(), - (3, "String #3", datetime.datetime(2017, 4, 6)), - ) - - def test_2907(self): - "2907 - fetching a null rowid" - self.cursor.execute("truncate table TestRowids") - self.cursor.execute("insert into TestRowids (IntCol) values (1)") - self.conn.commit() - self.cursor.execute("select * from TestRowids") - self.assertEqual(self.cursor.fetchone(), (1, None, None)) - - -if __name__ == "__main__": - test_env.run_test_cases() + where IntCol = 3; + end; + """, + rid=ridvar, + ) + cursor.setinputsizes(r1=oracledb.DB_TYPE_UROWID) + cursor.execute( + """ + insert into TestRowids (IntCol, UrowidCol) + values(1, :r1) + """, + r1=ridvar, + ) + conn.commit() + cursor.execute("select IntCol, UrowidCol from TestRowids") + int_val, rowid = cursor.fetchone() + cursor.execute( + """ + select IntCol, StringCol, DateCol + from TestUniversalRowids + where rowid = :val + """, + val=rowid, + ) + assert cursor.fetchone() == (3, "String #3", datetime.datetime(2017, 4, 6)) + + +def test_2907(conn, cursor): + "2907 - fetching a null rowid" + cursor.execute("truncate table TestRowids") + cursor.execute("insert into TestRowids (IntCol) values (1)") + conn.commit() + cursor.execute("select * from TestRowids") + assert cursor.fetchone() == (1, None, None) diff --git a/tests/test_3000_subscription.py b/tests/test_3000_subscription.py index d34b89c6..d431486e 100644 --- a/tests/test_3000_subscription.py +++ b/tests/test_3000_subscription.py @@ -27,10 +27,9 @@ """ import threading -import unittest import oracledb -import test_env +import pytest class SubscriptionData: @@ -78,368 +77,341 @@ def _process_message(self, message): self.rowids.append(row.rowid) -@test_env.skip_unless_thick_mode() -class TestCase(test_env.BaseTestCase): - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" +@pytest.fixture(scope="module") +def skip_unless_has_client_23(test_env): + """ + Skips tests unless running with Oracle Client 23ai. 
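
# --- Illustrative sketch (not part of the patch) ---------------------------
# A minimal sketch of the change-notification pattern the subscription tests
# below exercise: open an events-enabled connection (thick mode is assumed,
# per the fixture above), register a query on TestTempTable, and deregister
# when done. The handle_notification and watch_temp_table names are only
# illustrative; only API calls shown in these tests are used.

import oracledb


def handle_notification(message):
    # invoked by the client notification thread when rows of the registered
    # query change
    print("notification type:", message.type)


def watch_temp_table(test_env):
    conn = test_env.get_connection(events=True)
    sub = conn.subscribe(
        callback=handle_notification,
        qos=oracledb.SUBSCR_QOS_ROWIDS,
        timeout=10,
    )
    sub.registerquery("select * from TestTempTable")
    # ... perform DML on TestTempTable in another session, then clean up ...
    conn.unsubscribe(sub)
# ---------------------------------------------------------------------------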
+ """ + if not test_env.use_thick_mode: + pytest.skip("requires thick mode") + if not test_env.has_client_version(23): + pytest.skip("crashes in older clients") + + +def test_3000(skip_unless_has_client_23, cursor, test_env): + "3000 - test subscription for insert, update, delete and truncate" + + # skip if running on the Oracle Cloud, which does not support + # subscriptions currently + if test_env.is_on_oracle_cloud: + message = "Oracle Cloud does not support subscriptions currently" + pytest.skip(message) + + # truncate table in order to run test in known state + cursor.execute("truncate table TestTempTable") + + # expected values + table_operations = [ + oracledb.OPCODE_INSERT, + oracledb.OPCODE_UPDATE, + oracledb.OPCODE_INSERT, + oracledb.OPCODE_DELETE, + oracledb.OPCODE_ALTER | oracledb.OPCODE_ALLROWS, + ] + row_operations = [ + oracledb.OPCODE_INSERT, + oracledb.OPCODE_UPDATE, + oracledb.OPCODE_INSERT, + oracledb.OPCODE_DELETE, + ] + rowids = [] + + # set up subscription + data = DMLSubscriptionData(5) + conn = test_env.get_connection(events=True) + sub = conn.subscribe( + callback=data.callback_handler, + timeout=10, + qos=oracledb.SUBSCR_QOS_ROWIDS, ) - def test_3000(self): - "3000 - test subscription for insert, update, delete and truncate" - - # skip if running on the Oracle Cloud, which does not support - # subscriptions currently - if self.is_on_oracle_cloud(): - message = "Oracle Cloud does not support subscriptions currently" - self.skipTest(message) - - # truncate table in order to run test in known state - self.cursor.execute("truncate table TestTempTable") - - # expected values - table_operations = [ - oracledb.OPCODE_INSERT, - oracledb.OPCODE_UPDATE, - oracledb.OPCODE_INSERT, - oracledb.OPCODE_DELETE, - oracledb.OPCODE_ALTER | oracledb.OPCODE_ALLROWS, - ] - row_operations = [ - oracledb.OPCODE_INSERT, - oracledb.OPCODE_UPDATE, - oracledb.OPCODE_INSERT, - oracledb.OPCODE_DELETE, - ] - rowids = [] - - # set up subscription - data = DMLSubscriptionData(5) - conn = test_env.get_connection(events=True) - sub = conn.subscribe( - callback=data.callback_handler, - timeout=10, - qos=oracledb.SUBSCR_QOS_ROWIDS, - ) - sub.registerquery("select * from TestTempTable") - conn.autocommit = True - cursor = conn.cursor() - - # insert statement - cursor.execute( - "insert into TestTempTable (IntCol, StringCol1) values (1, 'test')" - ) - cursor.execute("select rowid from TestTempTable where IntCol = 1") - rowids.extend(r for r, in cursor) - - # update statement - cursor.execute( - "update TestTempTable set StringCol1 = 'update' where IntCol = 1" - ) - cursor.execute("select rowid from TestTempTable where IntCol = 1") - rowids.extend(r for r, in cursor) - - # second insert statement - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (2, 'test2') - """ - ) - cursor.execute("select rowid from TestTempTable where IntCol = 2") - rowids.extend(r for r, in cursor) + sub.registerquery("select * from TestTempTable") + conn.autocommit = True + cursor = conn.cursor() - # delete statement - cursor.execute("delete TestTempTable where IntCol = 2") - rowids.append(rowids[-1]) - - # truncate table - cursor.execute("truncate table TestTempTable") - - # wait for all messages to be sent - data.wait_for_messages() - - # verify the correct messages were sent - self.assertEqual(data.table_operations, table_operations) - self.assertEqual(data.row_operations, row_operations) - self.assertEqual(data.rowids, rowids) - - # test string format of subscription object is as expected - fmt 
= ">" - expected = fmt % ( - test_env.get_main_user(), - test_env.get_connect_string(), - ) - self.assertEqual(str(sub), expected) - - def test_3001(self): - "3001 - test to verify deprecations" - with self.assertRaisesFullCode("DPY-2014"): - self.conn.subscribe( - ip_address="www.oracle.in", ipAddress="www.oracle.in" - ) - with self.assertRaisesFullCode("DPY-2014"): - self.conn.subscribe(grouping_class=1, groupingClass=1) - with self.assertRaisesFullCode("DPY-2014"): - self.conn.subscribe(grouping_value=3, groupingValue=3) - with self.assertRaisesFullCode("DPY-2014"): - self.conn.subscribe(grouping_type=2, groupingType=2) - with self.assertRaisesFullCode("DPY-2014"): - self.conn.subscribe(client_initiated=True, clientInitiated=True) - - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" + # insert statement + cursor.execute( + "insert into TestTempTable (IntCol, StringCol1) values (1, 'test')" ) - def test_3002(self): - "3002 - test subscription for AQ" - - # create queue and clear it of all messages - queue = self.conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - while queue.deqone(): - pass - self.conn.commit() - - # set up subscription - data = AQSubscriptionData(1) - conn = test_env.get_connection(events=True) - conn.subscribe( - namespace=oracledb.SUBSCR_NAMESPACE_AQ, - name=queue.name, - timeout=10, - callback=data.callback_handler, - ) - - # enqueue a message - queue.enqone(self.conn.msgproperties(payload="Some data")) - self.conn.commit() + cursor.execute("select rowid from TestTempTable where IntCol = 1") + rowids.extend(r for r, in cursor) - # wait for all messages to be sent - data.wait_for_messages() - - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" + # update statement + cursor.execute( + "update TestTempTable set StringCol1 = 'update' where IntCol = 1" ) - def test_3003(self): - "3003 - test verifying what registerquery returns" - self.skipTest("fails intermittently") - data = DMLSubscriptionData(5) - qos_constants = [ - oracledb.SUBSCR_QOS_QUERY, - oracledb.SUBSCR_QOS_RELIABLE, - oracledb.SUBSCR_QOS_DEREG_NFY, - oracledb.SUBSCR_QOS_ROWIDS, - oracledb.SUBSCR_QOS_BEST_EFFORT, - ] - for qos_constant in qos_constants: - conn = test_env.get_connection(events=True) - sub = conn.subscribe( - qos=qos_constant, callback=data.callback_handler - ) - query_id = sub.registerquery("select * from TestTempTable") - if qos_constant == oracledb.SUBSCR_QOS_QUERY: - self.assertIsInstance(query_id, int) - self.assertIsInstance(sub.id, int) - else: - self.assertIsNone(query_id) - conn.unsubscribe(sub) - conn.close() - - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" + cursor.execute("select rowid from TestTempTable where IntCol = 1") + rowids.extend(r for r, in cursor) + + # second insert statement + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (2, 'test2') + """ ) - def test_3004(self): - "3004 - test Subscription repr()" - data = DMLSubscriptionData(5) - with test_env.get_connection(events=True) as conn: - sub = conn.subscribe(callback=data.callback_handler) - self.assertEqual(repr(sub), f"") - conn.unsubscribe(sub) - - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" + cursor.execute("select rowid from TestTempTable where IntCol = 2") + rowids.extend(r for r, in cursor) + + # delete statement + cursor.execute("delete TestTempTable where IntCol = 2") + rowids.append(rowids[-1]) + + # 
truncate table + cursor.execute("truncate table TestTempTable") + + # wait for all messages to be sent + data.wait_for_messages() + + # verify the correct messages were sent + assert data.table_operations == table_operations + assert data.row_operations == row_operations + assert data.rowids == rowids + + # test string format of subscription object is as expected + fmt = ">" + expected = fmt % (test_env.main_user, test_env.connect_string) + assert str(sub) == expected + + +def test_3001(conn, test_env): + "3001 - test to verify deprecations" + with test_env.assert_raises_full_code("DPY-2014"): + conn.subscribe(ip_address="www.oracle.in", ipAddress="www.oracle.in") + with test_env.assert_raises_full_code("DPY-2014"): + conn.subscribe(grouping_class=1, groupingClass=1) + with test_env.assert_raises_full_code("DPY-2014"): + conn.subscribe(grouping_value=3, groupingValue=3) + with test_env.assert_raises_full_code("DPY-2014"): + conn.subscribe(grouping_type=2, groupingType=2) + with test_env.assert_raises_full_code("DPY-2014"): + conn.subscribe(client_initiated=True, clientInitiated=True) + + +def test_3002(skip_unless_has_client_23, conn, test_env): + "3002 - test subscription for AQ" + + # create queue and clear it of all messages + queue = conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + while queue.deqone(): + pass + conn.commit() + + # set up subscription + data = AQSubscriptionData(1) + conn = test_env.get_connection(events=True) + conn.subscribe( + namespace=oracledb.SUBSCR_NAMESPACE_AQ, + name=queue.name, + timeout=10, + callback=data.callback_handler, ) - def test_3005(self): - "3005 - test registerquery with invalid parameters" - data = DMLSubscriptionData(5) - conn = test_env.get_connection(events=True) - sub = conn.subscribe(callback=data.callback_handler) - self.assertRaises( - TypeError, - sub.registerquery, - "select * from TestTempTable", - "invalid args", - ) - with self.assertRaisesFullCode("ORA-00942"): - sub.registerquery("select * from Nonexistent") - with self.assertRaisesFullCode("DPI-1013"): - sub.registerquery("insert into TestTempTable (IntCol) values (1)") - conn.unsubscribe(sub) - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" - ) - def test_3006(self): - "3006 - test getting subscription attributes" - data = DMLSubscriptionData(1) + # enqueue a message + queue.enqone(conn.msgproperties(payload="Some data")) + conn.commit() + + # wait for all messages to be sent + data.wait_for_messages() + + +@pytest.mark.skip("fails intermittently") +def test_3003(skip_unless_has_client_23, test_env): + "3003 - test verifying what registerquery returns" + data = DMLSubscriptionData(5) + qos_constants = [ + oracledb.SUBSCR_QOS_QUERY, + oracledb.SUBSCR_QOS_RELIABLE, + oracledb.SUBSCR_QOS_DEREG_NFY, + oracledb.SUBSCR_QOS_ROWIDS, + oracledb.SUBSCR_QOS_BEST_EFFORT, + ] + for qos_constant in qos_constants: conn = test_env.get_connection(events=True) - cursor = conn.cursor() - args = dict( - callback=data.callback_handler, - ip_address=None, - port=0, - name="Sub1", - namespace=oracledb.SUBSCR_NAMESPACE_DBCHANGE, - timeout=10, - protocol=oracledb.SUBSCR_PROTO_OCI, - qos=oracledb.SUBSCR_QOS_QUERY, - operations=oracledb.OPCODE_INSERT, - ) - sub = conn.subscribe(**args) - for attr_name in args: - self.assertEqual(getattr(sub, attr_name), args[attr_name]) - self.assertEqual(sub.connection, conn) - cursor.execute("select REGID from USER_CHANGE_NOTIFICATION_REGS") - self.assertEqual(sub.id, cursor.fetchone()[0]) - 
self.assertEqual(sub.ipAddress, sub.ip_address) + sub = conn.subscribe(qos=qos_constant, callback=data.callback_handler) + query_id = sub.registerquery("select * from TestTempTable") + if qos_constant == oracledb.SUBSCR_QOS_QUERY: + assert isinstance(query_id, int) + assert isinstance(sub.id, int) + else: + assert query_id is None conn.unsubscribe(sub) conn.close() - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" - ) - def test_3007(self): - "3007 - test getting Message, MessageQuery, MessageTable attributes" - condition = threading.Condition() - conn = test_env.get_connection(events=True) - def callback_handler(message): - self.assertEqual( - message.dbname.upper(), conn.instance_name.upper() - ) - self.assertTrue(message.registered) - self.assertEqual(message.subscription, sub) - self.assertEqual(message.tables, []) - self.assertIsInstance(message.txid, bytes) - self.assertEqual(message.type, oracledb.EVENT_QUERYCHANGE) - self.assertIsInstance(message.queries, list) - (queries,) = message.queries - self.assertEqual(queries.id, sub_id) - self.assertEqual(queries.operation, oracledb.EVENT_QUERYCHANGE) - self.assertIsInstance(queries.tables, list) - (tables,) = queries.tables - table_name = f"{test_env.get_main_user().upper()}.TESTTEMPTABLE" - self.assertEqual(tables.name, table_name) - self.assertIsInstance(tables.operation, int) - self.assertIsInstance(tables.rows, list) - with condition: - condition.notify() - - sub = conn.subscribe( - callback=callback_handler, qos=oracledb.SUBSCR_QOS_QUERY - ) - cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - sub_id = sub.registerquery("select * from TestTempTable") - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'test') - """ - ) - conn.commit() +def test_3004(skip_unless_has_client_23, test_env): + "3004 - test Subscription repr()" + data = DMLSubscriptionData(5) + with test_env.get_connection(events=True) as conn: + sub = conn.subscribe(callback=data.callback_handler) + assert repr(sub) == f"" + conn.unsubscribe(sub) + +def test_3005(skip_unless_has_client_23, test_env): + "3005 - test registerquery with invalid parameters" + data = DMLSubscriptionData(5) + conn = test_env.get_connection(events=True) + sub = conn.subscribe(callback=data.callback_handler) + pytest.raises( + TypeError, + sub.registerquery, + "select * from TestTempTable", + "invalid args", + ) + with test_env.assert_raises_full_code("ORA-00942"): + sub.registerquery("select * from Nonexistent") + with test_env.assert_raises_full_code("DPI-1013"): + sub.registerquery("insert into TestTempTable (IntCol) values (1)") + conn.unsubscribe(sub) + + +def test_3006(skip_unless_has_client_23, test_env): + "3006 - test getting subscription attributes" + data = DMLSubscriptionData(1) + conn = test_env.get_connection(events=True) + cursor = conn.cursor() + args = dict( + callback=data.callback_handler, + ip_address=None, + port=0, + name="Sub1", + namespace=oracledb.SUBSCR_NAMESPACE_DBCHANGE, + timeout=10, + protocol=oracledb.SUBSCR_PROTO_OCI, + qos=oracledb.SUBSCR_QOS_QUERY, + operations=oracledb.OPCODE_INSERT, + ) + sub = conn.subscribe(**args) + for attr_name in args: + assert getattr(sub, attr_name) == args[attr_name] + assert sub.connection == conn + cursor.execute("select REGID from USER_CHANGE_NOTIFICATION_REGS") + assert sub.id == cursor.fetchone()[0] + assert sub.ipAddress == sub.ip_address + conn.unsubscribe(sub) + conn.close() + + +@pytest.mark.skip("fails intermittently") +def 
test_3007(skip_unless_has_client_23, test_env): + "3007 - test getting Message, MessageQuery, MessageTable attributes" + condition = threading.Condition() + conn = test_env.get_connection(events=True) + + def callback_handler(message): + assert message.dbname.upper() == conn.instance_name.upper() + assert message.registered + assert message.subscription == sub + assert message.tables == [] + assert isinstance(message.txid, bytes) + assert message.type == oracledb.EVENT_QUERYCHANGE + assert isinstance(message.queries, list) + (queries,) = message.queries + assert queries.id == sub_id + assert queries.operation == oracledb.EVENT_QUERYCHANGE + assert isinstance(queries.tables, list) + (tables,) = queries.tables + table_name = f"{test_env.main_user.upper()}.TESTTEMPTABLE" + assert tables.name == table_name + assert isinstance(tables.operation, int) + assert isinstance(tables.rows, list) with condition: - self.assertTrue(condition.wait(5)) - conn.unsubscribe(sub) + condition.notify() - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" + sub = conn.subscribe( + callback=callback_handler, qos=oracledb.SUBSCR_QOS_QUERY ) - def test_3008(self): - "3008 - test unsubscribe with invalid parameter" - conn = test_env.get_connection(events=True) - self.assertRaises(TypeError, conn.unsubscribe, "not a sub object") - sub = conn.subscribe(callback=lambda x: f"Message: {x}") + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + sub_id = sub.registerquery("select * from TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'test') + """ + ) + conn.commit() + + with condition: + assert condition.wait(5) + conn.unsubscribe(sub) + + +def test_3008(skip_unless_has_client_23, test_env): + "3008 - test unsubscribe with invalid parameter" + conn = test_env.get_connection(events=True) + pytest.raises(TypeError, conn.unsubscribe, "not a sub object") + sub = conn.subscribe(callback=lambda x: f"Message: {x}") + conn.unsubscribe(sub) + with test_env.assert_raises_full_code("DPI-1002"): conn.unsubscribe(sub) - with self.assertRaisesFullCode("DPI-1002"): - conn.unsubscribe(sub) - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" - ) - def test_3010(self): - "3010 - test registerquery in the middle of an active transaction" - connection = test_env.get_connection(events=True) - cursor = connection.cursor() - cursor.execute("truncate table TestTempTable") - cursor.execute( - "insert into TestTempTable (IntCol, StringCol1) values (1, 'test')" - ) - sub = connection.subscribe(callback=lambda x: f"Msg: {x}") - with self.assertRaisesFullCode("ORA-29975"): - sub.registerquery("select * from TestTempTable") - connection.unsubscribe(sub) - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" +def test_3010(skip_unless_has_client_23, test_env): + "3010 - test registerquery in the middle of an active transaction" + connection = test_env.get_connection(events=True) + cursor = connection.cursor() + cursor.execute("truncate table TestTempTable") + cursor.execute( + "insert into TestTempTable (IntCol, StringCol1) values (1, 'test')" ) - def test_3011(self): - "3011 - test registerquery with aq subscription" - connection = test_env.get_connection(events=True) - sub = connection.subscribe( - callback=lambda x: None, - namespace=oracledb.SUBSCR_NAMESPACE_AQ, - name="TEST_RAW_QUEUE", - ) - with self.assertRaisesFullCode("ORA-24315"): - sub.registerquery("select * from 
TestTempTable") - connection.unsubscribe(sub) + sub = connection.subscribe(callback=lambda x: f"Msg: {x}") + with test_env.assert_raises_full_code("ORA-29975"): + sub.registerquery("select * from TestTempTable") + connection.unsubscribe(sub) - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" + +def test_3011(skip_unless_has_client_23, test_env): + "3011 - test registerquery with aq subscription" + connection = test_env.get_connection(events=True) + sub = connection.subscribe( + callback=lambda x: None, + namespace=oracledb.SUBSCR_NAMESPACE_AQ, + name="TEST_RAW_QUEUE", ) - def test_3013(self): - "3013 - test subscription with SUBSCR_QOS_DEREG_NFY deregisters" - if self.is_on_oracle_cloud(): - self.skipTest("AQ notification not supported on the cloud") - - def callback(message): - self.assertFalse(message.registered) - with condition: - condition.notify() - - condition = threading.Condition() - self.cursor.execute("truncate table TestTempTable") - conn = test_env.get_connection(events=True) - cursor = conn.cursor() - sub = conn.subscribe( - callback=callback, qos=oracledb.SUBSCR_QOS_DEREG_NFY, timeout=2 - ) + with test_env.assert_raises_full_code("ORA-24315"): sub.registerquery("select * from TestTempTable") - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'test') - """ - ) - conn.commit() - with condition: - self.assertTrue(condition.wait(5)) - conn.unsubscribe(sub) + connection.unsubscribe(sub) + + +def test_3013(skip_unless_has_client_23, cursor, test_env): + "3013 - test subscription with SUBSCR_QOS_DEREG_NFY deregisters" + if test_env.is_on_oracle_cloud: + pytest.skip("AQ notification not supported on the cloud") - @unittest.skipUnless( - test_env.has_client_version(23), "crashes in older clients" + def callback(message): + assert not message.registered + with condition: + condition.notify() + + condition = threading.Condition() + cursor.execute("truncate table TestTempTable") + conn = test_env.get_connection(events=True) + cursor = conn.cursor() + sub = conn.subscribe( + callback=callback, qos=oracledb.SUBSCR_QOS_DEREG_NFY, timeout=2 ) - def test_3014(self): - "3014 - test adding a consumer to a single consumer queue (negative)" - conn = test_env.get_connection(events=True) - single_consumer_queue = "TEST_RAW_QUEUE" - with self.assertRaisesFullCode("ORA-25256"): - conn.subscribe( - callback=lambda x: None, - namespace=oracledb.SUBSCR_NAMESPACE_AQ, - name=f"{single_consumer_queue}:SUBSCRIBER", - ) + sub.registerquery("select * from TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'test') + """ + ) + conn.commit() + with condition: + assert condition.wait(5) + conn.unsubscribe(sub) -if __name__ == "__main__": - test_env.run_test_cases() +def test_3014(skip_unless_has_client_23, test_env): + "3014 - test adding a consumer to a single consumer queue (negative)" + conn = test_env.get_connection(events=True) + single_consumer_queue = "TEST_RAW_QUEUE" + with test_env.assert_raises_full_code("ORA-25256"): + conn.subscribe( + callback=lambda x: None, + namespace=oracledb.SUBSCR_NAMESPACE_AQ, + name=f"{single_consumer_queue}:SUBSCRIBER", + ) diff --git a/tests/test_3100_boolean_var.py b/tests/test_3100_boolean_var.py index ed60514e..15060887 100644 --- a/tests/test_3100_boolean_var.py +++ b/tests/test_3100_boolean_var.py @@ -27,120 +27,109 @@ """ import oracledb -import test_env - - -@test_env.skip_unless_plsql_boolean_supported() -class TestCase(test_env.BaseTestCase): - 
def __test_bind_value_as_boolean(self, value): - expected_result = str(bool(value)).upper() - var = self.cursor.var(bool) - var.setvalue(0, value) - result = self.cursor.callfunc( - "pkg_TestBooleans.GetStringRep", str, [var] - ) - self.assertEqual(result, expected_result) - - def test_3100(self): - "3100 - test binding in a False value" - result = self.cursor.callfunc( - "pkg_TestBooleans.GetStringRep", str, [False] - ) - self.assertEqual(result, "FALSE") - - def test_3101(self): - "3101 - test binding in a float as a boolean" - self.__test_bind_value_as_boolean(0.0) - self.__test_bind_value_as_boolean(1.0) - - def test_3102(self): - "3102 - test binding in an integer as a boolean" - self.__test_bind_value_as_boolean(0) - self.__test_bind_value_as_boolean(1) - - def test_3103(self): - "3103 - test binding in a null value" - self.cursor.setinputsizes(None, bool) - result = self.cursor.callfunc( - "pkg_TestBooleans.GetStringRep", str, [None] - ) - self.assertEqual(result, "NULL") - - def test_3104(self): - "3104 - test binding out a boolean value (False)" - result = self.cursor.callfunc( - "pkg_TestBooleans.IsLessThan10", oracledb.DB_TYPE_BOOLEAN, [15] - ) - self.assertFalse(result) - - def test_3105(self): - "3105 - test binding out a boolean value (True)" - result = self.cursor.callfunc( - "pkg_TestBooleans.IsLessThan10", bool, [5] - ) - self.assertTrue(result) - - def test_3106(self): - "3106 - test binding in a string as a boolean" - self.__test_bind_value_as_boolean("") - self.__test_bind_value_as_boolean("0") - - def test_3107(self): - "3107 - test binding in a True value" - result = self.cursor.callfunc( - "pkg_TestBooleans.GetStringRep", str, [True] - ) - self.assertEqual(result, "TRUE") - - def test_3108(self): - "3108 - test binding out a boolean value (None)" - result = self.cursor.callfunc( - "pkg_TestBooleans.TestOutValueNull", bool - ) - self.assertIsNone(result) - - @test_env.skip_unless_native_boolean_supported() - def test_3109(self): - "3109 - test binding and fetching boolean with 23ai" - for value in (True, False): - with self.subTest(value=value): - self.cursor.execute("select not :1 from dual", [value]) - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, bool) - self.assertEqual(fetched_value, not value) - - @test_env.skip_unless_native_boolean_supported() - def test_3110(self): - "3110 - test binding and fetching string literals that represent True" - self.cursor.execute("truncate table TestBooleans") - true_values = ["true", "yes", "on", "1", "t", "y"] - self.cursor.executemany( - """insert into TestBooleans (IntCol, BooleanCol1, BooleanCol2) - values (:1, :2, :3)""", - [(i, v, v) for i, v in enumerate(true_values)], - ) - self.cursor.execute( - "select BooleanCol1, BooleanCol2 from TestBooleans order by IntCol" - ) - expected_values = [(True, True) for _ in true_values] - self.assertEqual(self.cursor.fetchall(), expected_values) - - @test_env.skip_unless_native_boolean_supported() - def test_3111(self): - "3111 - test binding and fetching string literals that represent False" - self.cursor.execute("truncate table TestBooleans") - false_values = ["false", "no", "off", "0", "f", "n"] - self.cursor.executemany( - """insert into TestBooleans (IntCol, BooleanCol1, BooleanCol2) - values (:1, :2, :3)""", - [(i, v, v) for i, v in enumerate(false_values)], - ) - self.cursor.execute( - "select BooleanCol1, BooleanCol2 from TestBooleans order by IntCol" - ) - expected_value = [(False, False) for _ in range(len(false_values))] - 
self.assertEqual(self.cursor.fetchall(), expected_value) - - -if __name__ == "__main__": - test_env.run_test_cases() + + +def _test_bind_value_as_boolean(cursor, value): + expected_result = str(bool(value)).upper() + var = cursor.var(bool) + var.setvalue(0, value) + result = cursor.callfunc("pkg_TestBooleans.GetStringRep", str, [var]) + assert result == expected_result + + +def test_3100(skip_unless_plsql_boolean_supported, cursor): + "3100 - test binding in a False value" + result = cursor.callfunc("pkg_TestBooleans.GetStringRep", str, [False]) + assert result == "FALSE" + + +def test_3101(skip_unless_plsql_boolean_supported, cursor): + "3101 - test binding in a float as a boolean" + _test_bind_value_as_boolean(cursor, 0.0) + _test_bind_value_as_boolean(cursor, 1.0) + + +def test_3102(skip_unless_plsql_boolean_supported, cursor): + "3102 - test binding in an integer as a boolean" + _test_bind_value_as_boolean(cursor, 0) + _test_bind_value_as_boolean(cursor, 1) + + +def test_3103(skip_unless_plsql_boolean_supported, cursor): + "3103 - test binding in a null value" + cursor.setinputsizes(None, bool) + result = cursor.callfunc("pkg_TestBooleans.GetStringRep", str, [None]) + assert result == "NULL" + + +def test_3104(skip_unless_plsql_boolean_supported, cursor): + "3104 - test binding out a boolean value (False)" + result = cursor.callfunc( + "pkg_TestBooleans.IsLessThan10", oracledb.DB_TYPE_BOOLEAN, [15] + ) + assert not result + + +def test_3105(skip_unless_plsql_boolean_supported, cursor): + "3105 - test binding out a boolean value (True)" + result = cursor.callfunc("pkg_TestBooleans.IsLessThan10", bool, [5]) + assert result + + +def test_3106(skip_unless_plsql_boolean_supported, cursor): + "3106 - test binding in a string as a boolean" + _test_bind_value_as_boolean(cursor, "") + _test_bind_value_as_boolean(cursor, "0") + + +def test_3107(skip_unless_plsql_boolean_supported, cursor): + "3107 - test binding in a True value" + result = cursor.callfunc("pkg_TestBooleans.GetStringRep", str, [True]) + assert result == "TRUE" + + +def test_3108(skip_unless_plsql_boolean_supported, cursor): + "3108 - test binding out a boolean value (None)" + result = cursor.callfunc("pkg_TestBooleans.TestOutValueNull", bool) + assert result is None + + +def test_3109(skip_unless_native_boolean_supported, cursor): + "3109 - test binding and fetching boolean with 23ai" + for value in (True, False): + cursor.execute("select not :1 from dual", [value]) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, bool) + assert fetched_value == (not value) + + +def test_3110(skip_unless_native_boolean_supported, cursor): + "3110 - test binding and fetching string literals that represent True" + cursor.execute("truncate table TestBooleans") + true_values = ["true", "yes", "on", "1", "t", "y"] + cursor.executemany( + """insert into TestBooleans (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3)""", + [(i, v, v) for i, v in enumerate(true_values)], + ) + cursor.execute( + "select BooleanCol1, BooleanCol2 from TestBooleans order by IntCol" + ) + expected_values = [(True, True) for _ in true_values] + assert cursor.fetchall() == expected_values + + +def test_3111(skip_unless_native_boolean_supported, cursor): + "3111 - test binding and fetching string literals that represent False" + cursor.execute("truncate table TestBooleans") + false_values = ["false", "no", "off", "0", "f", "n"] + cursor.executemany( + """insert into TestBooleans (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3)""", + [(i, v, 
v) for i, v in enumerate(false_values)], + ) + cursor.execute( + "select BooleanCol1, BooleanCol2 from TestBooleans order by IntCol" + ) + expected_value = [(False, False) for _ in range(len(false_values))] + assert cursor.fetchall() == expected_value diff --git a/tests/test_3200_features_12_1.py b/tests/test_3200_features_12_1.py index db8da2fa..b13ce34b 100644 --- a/tests/test_3200_features_12_1.py +++ b/tests/test_3200_features_12_1.py @@ -27,676 +27,696 @@ """ import datetime -import unittest import oracledb -import test_env - - -@unittest.skipUnless(test_env.has_client_version(12, 1), "unsupported client") -class TestCase(test_env.BaseTestCase): - def test_3200(self): - "3200 - test executing with arraydmlrowcounts mode disabled" - self.cursor.execute("truncate table TestArrayDML") - rows = [(1, "First"), (2, "Second")] - sql = "insert into TestArrayDML (IntCol, StringCol) values (:1, :2)" - self.cursor.executemany(sql, rows, arraydmlrowcounts=False) - with self.assertRaisesFullCode("DPY-4006"): - self.cursor.getarraydmlrowcounts() - rows = [(3, "Third"), (4, "Fourth")] - self.cursor.executemany(sql, rows) - with self.assertRaisesFullCode("DPY-4006"): - self.cursor.getarraydmlrowcounts() - - def test_3201(self): - "3201 - test executing with arraydmlrowcounts mode enabled" - self.cursor.execute("truncate table TestArrayDML") - rows = [ - (1, "First", 100), - (2, "Second", 200), - (3, "Third", 300), - (4, "Fourth", 300), - (5, "Fifth", 300), - ] - self.cursor.executemany( - """ - insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3) - """, - rows, - arraydmlrowcounts=True, - ) - self.conn.commit() - self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1, 1, 1, 1]) - self.cursor.execute("select count(*) from TestArrayDML") - (count,) = self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - def test_3202(self): - "3202 - test binding a boolean collection (in)" - type_obj = self.conn.gettype("PKG_TESTBOOLEANS.UDT_BOOLEANLIST") - obj = type_obj.newobject() - obj.setelement(1, True) - obj.extend([True, False, True, True, False, True]) - result = self.cursor.callfunc( - "pkg_TestBooleans.TestInArrays", int, [obj] - ) - self.assertEqual(result, 5) - - def test_3203(self): - "3203 - test binding a boolean collection (out)" - type_obj = self.conn.gettype("PKG_TESTBOOLEANS.UDT_BOOLEANLIST") - obj = type_obj.newobject() - self.cursor.callproc("pkg_TestBooleans.TestOutArrays", (6, obj)) - self.assertEqual(obj.aslist(), [True, False, True, False, True, False]) - - def test_3204(self): - "3204 - test binding a PL/SQL date collection (in)" - type_obj = self.conn.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST") - obj = type_obj.newobject() - obj.setelement(1, datetime.datetime(2016, 2, 5)) - obj.append(datetime.datetime(2016, 2, 8, 12, 15, 30)) - obj.append(datetime.datetime(2016, 2, 12, 5, 44, 30)) - result = self.cursor.callfunc( - "pkg_TestDateArrays.TestInArrays", - oracledb.NUMBER, - (2, datetime.datetime(2016, 2, 1), obj), - ) - self.assertEqual(result, 24.75) - - def test_3205(self): - "3205 - test binding a PL/SQL date collection (in/out)" - type_obj = self.conn.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST") - obj = type_obj.newobject() - obj.setelement(1, datetime.datetime(2016, 1, 1)) - obj.append(datetime.datetime(2016, 1, 7)) - obj.append(datetime.datetime(2016, 1, 13)) - obj.append(datetime.datetime(2016, 1, 19)) - self.cursor.callproc("pkg_TestDateArrays.TestInOutArrays", (4, obj)) - expected_values = [ - datetime.datetime(2016, 1, 8), - datetime.datetime(2016, 
1, 14), - datetime.datetime(2016, 1, 20), - datetime.datetime(2016, 1, 26), - ] - self.assertEqual(obj.aslist(), expected_values) - - def test_3206(self): - "3206 - test binding a PL/SQL date collection (out)" - type_obj = self.conn.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST") - obj = type_obj.newobject() - self.cursor.callproc("pkg_TestDateArrays.TestOutArrays", (3, obj)) - expected_values = [ - datetime.datetime(2002, 12, 13, 4, 48), - datetime.datetime(2002, 12, 14, 9, 36), - datetime.datetime(2002, 12, 15, 14, 24), - ] - self.assertEqual(obj.aslist(), expected_values) - - def test_3207(self): - "3207 - test binding a PL/SQL number collection (in)" - type_name = "PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST" - type_obj = self.conn.gettype(type_name) - obj = type_obj.newobject() - obj.setelement(1, 10) - obj.extend([20, 30, 40, 50]) - result = self.cursor.callfunc( - "pkg_TestNumberArrays.TestInArrays", int, (5, obj) - ) - self.assertEqual(result, 155) - - def test_3208(self): - "3208 - test binding a PL/SQL number collection (in/out)" - type_name = "PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST" - type_obj = self.conn.gettype(type_name) - obj = type_obj.newobject() - obj.setelement(1, 5) - obj.extend([8, 3, 2]) - self.cursor.callproc("pkg_TestNumberArrays.TestInOutArrays", (4, obj)) - self.assertEqual(obj.aslist(), [50, 80, 30, 20]) - - def test_3209(self): - "3209 - test binding a PL/SQL number collection (out)" - type_name = "PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST" - type_obj = self.conn.gettype(type_name) - obj = type_obj.newobject() - self.cursor.callproc("pkg_TestNumberArrays.TestOutArrays", (3, obj)) - self.assertEqual(obj.aslist(), [100, 200, 300]) - - def test_3210(self): - "3210 - test binding an array of PL/SQL records (in)" - rec_type = self.conn.gettype("PKG_TESTRECORDS.UDT_RECORD") - array_type = self.conn.gettype("PKG_TESTRECORDS.UDT_RECORDARRAY") - array_obj = array_type.newobject() - for i in range(3): - obj = rec_type.newobject() - obj.NUMBERVALUE = i + 1 - obj.STRINGVALUE = f"String in record #{i + 1}" - obj.DATEVALUE = datetime.datetime(2017, i + 1, 1) - obj.TIMESTAMPVALUE = datetime.datetime(2017, 1, i + 1) - obj.BOOLEANVALUE = (i % 2) == 1 - obj.PLSINTEGERVALUE = i * 5 - obj.BINARYINTEGERVALUE = i * 2 - array_obj.append(obj) - result = self.cursor.callfunc( - "pkg_TestRecords.TestInArrays", str, [array_obj] - ) - expected_value = ( - "udt_Record(1, 'String in record #1', " - "to_date('2017-01-01', 'YYYY-MM-DD'), " - "to_timestamp('2017-01-01 00:00:00', " - "'YYYY-MM-DD HH24:MI:SS'), false, 0, 0); " - "udt_Record(2, 'String in record #2', " - "to_date('2017-02-01', 'YYYY-MM-DD'), " - "to_timestamp('2017-01-02 00:00:00', " - "'YYYY-MM-DD HH24:MI:SS'), true, 5, 2); " - "udt_Record(3, 'String in record #3', " - "to_date('2017-03-01', 'YYYY-MM-DD'), " - "to_timestamp('2017-01-03 00:00:00', " - "'YYYY-MM-DD HH24:MI:SS'), false, 10, 4)" - ) - self.assertEqual(result, expected_value) - - def test_3211(self): - "3211 - test binding a PL/SQL record (in)" - type_obj = self.conn.gettype("PKG_TESTRECORDS.UDT_RECORD") - obj = type_obj.newobject() - obj.NUMBERVALUE = 18 - obj.STRINGVALUE = "A string in a record" - obj.DATEVALUE = datetime.datetime(2016, 2, 15) - obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 12, 14, 25, 36) - obj.BOOLEANVALUE = False - obj.PLSINTEGERVALUE = 21 - obj.BINARYINTEGERVALUE = 5 - result = self.cursor.callfunc( - "pkg_TestRecords.GetStringRep", str, [obj] - ) - expected_value = ( - "udt_Record(18, 'A string in a record', " - "to_date('2016-02-15', 'YYYY-MM-DD'), " - 
"to_timestamp('2016-02-12 14:25:36', " - "'YYYY-MM-DD HH24:MI:SS'), false, 21, 5)" - ) - self.assertEqual(result, expected_value) - - def test_3212(self): - "3212 - test binding a PL/SQL record (out)" - type_obj = self.conn.gettype("PKG_TESTRECORDS.UDT_RECORD") - obj = type_obj.newobject() - obj.NUMBERVALUE = 5 - obj.STRINGVALUE = "Test value" - obj.DATEVALUE = datetime.datetime.today() - obj.TIMESTAMPVALUE = datetime.datetime.today() - obj.BOOLEANVALUE = False - obj.PLSINTEGERVALUE = 23 - obj.BINARYINTEGERVALUE = 9 - self.cursor.callproc("pkg_TestRecords.TestOut", [obj]) - self.assertEqual(obj.NUMBERVALUE, 25) - self.assertEqual(obj.STRINGVALUE, "String in record") - self.assertEqual(obj.DATEVALUE, datetime.datetime(2016, 2, 16)) - self.assertEqual( - obj.TIMESTAMPVALUE, datetime.datetime(2016, 2, 16, 18, 23, 55) - ) - self.assertEqual(obj.BOOLEANVALUE, True) - self.assertEqual(obj.PLSINTEGERVALUE, 45) - self.assertEqual(obj.BINARYINTEGERVALUE, 10) - - def test_3213(self): - "3213 - test binding a PL/SQL string collection (in)" - type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" - type_obj = self.conn.gettype(type_name) - obj = type_obj.newobject() - obj.setelement(1, "First element") - obj.setelement(2, "Second element") - obj.setelement(3, "Third element") - result = self.cursor.callfunc( - "pkg_TestStringArrays.TestInArrays", int, (5, obj) - ) - self.assertEqual(result, 45) - - def test_3214(self): - "3214 - test binding a PL/SQL string collection (in/out)" - type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" - type_obj = self.conn.gettype(type_name) - obj = type_obj.newobject() - obj.setelement(1, "The first element") - obj.append("The second element") - obj.append("The third and final element") - self.cursor.callproc("pkg_TestStringArrays.TestInOutArrays", (3, obj)) - expected_values = [ - "Converted element # 1 originally had length 17", - "Converted element # 2 originally had length 18", - "Converted element # 3 originally had length 27", - ] - self.assertEqual(obj.aslist(), expected_values) - - def test_3215(self): - "3215 - test binding a PL/SQL string collection (out)" - type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" - type_obj = self.conn.gettype(type_name) - obj = type_obj.newobject() - self.cursor.callproc("pkg_TestStringArrays.TestOutArrays", (4, obj)) - expected_values = [f"Test out element # {i + 1}" for i in range(4)] - self.assertEqual(obj.aslist(), expected_values) - - def test_3216(self): - "3216 - test binding a PL/SQL string collection (out with holes)" - type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" - type_obj = self.conn.gettype(type_name) - obj = type_obj.newobject() - self.cursor.callproc("pkg_TestStringArrays.TestIndexBy", [obj]) - self.assertEqual(obj.first(), -1048576) - self.assertEqual(obj.last(), 8388608) - self.assertEqual(obj.next(-576), 284) - self.assertEqual(obj.prev(284), -576) - self.assertEqual(obj.size(), 4) - self.assertTrue(obj.exists(-576)) - self.assertFalse(obj.exists(-577)) - self.assertEqual(obj.getelement(284), "Third element") - expected_list = [ - "First element", - "Second element", - "Third element", - "Fourth element", - ] - self.assertEqual(obj.aslist(), expected_list) - expected_dict = { - -1048576: "First element", - -576: "Second element", - 284: "Third element", - 8388608: "Fourth element", - } - self.assertEqual(obj.asdict(), expected_dict) - obj.delete(-576) - obj.delete(284) - expected_list.pop(2) - expected_list.pop(1) - self.assertEqual(obj.aslist(), expected_list) - expected_dict.pop(-576) - expected_dict.pop(284) - 
self.assertEqual(obj.asdict(), expected_dict) - - def test_3217(self): - "3217 - test executing with arraydmlrowcounts with exception" - self.cursor.execute("truncate table TestArrayDML") - rows = [(1, "First"), (2, "Second"), (2, "Third"), (4, "Fourth")] - sql = "insert into TestArrayDML (IntCol,StringCol) values (:1,:2)" - with self.assertRaisesFullCode("ORA-00001"): - self.cursor.executemany(sql, rows, arraydmlrowcounts=True) - self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1]) - - def test_3218(self): - "3218 - test executing delete statement with arraydmlrowcount mode" - self.cursor.execute("truncate table TestArrayDML") - rows = [ - (1, "First", 100), - (2, "Second", 200), - (3, "Third", 300), - (4, "Fourth", 300), - (5, "Fifth", 300), - (6, "Sixth", 400), - (7, "Seventh", 400), - (8, "Eighth", 500), - ] - self.cursor.executemany( - """ - insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3) - """, - rows, - ) - rows = [(200,), (300,), (400,)] - self.cursor.executemany( - "delete from TestArrayDML where IntCol2 = :1", - rows, - arraydmlrowcounts=True, - ) - self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 3, 2]) - self.assertEqual(self.cursor.rowcount, 6) - - def test_3219(self): - "3219 - test executing update statement with arraydmlrowcount mode" - self.cursor.execute("truncate table TestArrayDML") - rows = [ - (1, "First", 100), - (2, "Second", 200), - (3, "Third", 300), - (4, "Fourth", 300), - (5, "Fifth", 300), - (6, "Sixth", 400), - (7, "Seventh", 400), - (8, "Eighth", 500), - ] - self.cursor.executemany( - """ - insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3) - """, - rows, - ) - rows = [("One", 100), ("Two", 200), ("Three", 300), ("Four", 400)] - self.cursor.executemany( - "update TestArrayDML set StringCol = :1 where IntCol2 = :2", - rows, - arraydmlrowcounts=True, - ) - self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1, 3, 2]) - self.assertEqual(self.cursor.rowcount, 7) - - def test_3220(self): - "3220 - test getimplicitresults() returns the correct data" - self.cursor.execute( - """ - declare - c1 sys_refcursor; - c2 sys_refcursor; - begin - open c1 for - select NullableCol - from TestNumbers - where IntCol between 3 and 5; - - dbms_sql.return_result(c1); - - open c2 for - select NullableCol - from TestNumbers - where IntCol between 7 and 10; - - dbms_sql.return_result(c2); - end; - """ - ) - results = self.cursor.getimplicitresults() - self.assertEqual(len(results), 2) - self.assertEqual( - [n for n, in results[0]], [2924207, None, 59797108943] - ) - self.assertEqual( - [n for n, in results[1]], - [1222791080775407, None, 25004854810776297743, None], - ) - - def test_3221(self): - "3221 - test getimplicitresults() without executing a statement" - cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-1004"): - cursor.getimplicitresults() - - def test_3222(self): - "3222 - test executing insert with multiple distinct batch errors" - self.cursor.execute("truncate table TestArrayDML") - rows = [ - (1, "First", 100), - (2, "Second", 200), - (2, "Third", 300), - (4, "Fourth", 400), - (5, "Fourth", 1000), - ] - self.cursor.executemany( - """ - insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3) - """, - rows, - batcherrors=True, - arraydmlrowcounts=True, - ) - actual_errors = [ - (error.offset, error.full_code) - for error in self.cursor.getbatcherrors() - ] - self.assertEqual(actual_errors, [(4, "ORA-01438"), (2, "ORA-00001")]) - 
self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 1, 0, 1, 0]) - - def test_3223(self): - "3223 - test batcherrors mode set to False" - self.cursor.execute("truncate table TestArrayDML") - rows = [(1, "First", 100), (2, "Second", 200), (2, "Third", 300)] - sql = """insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3)""" - with self.assertRaisesFullCode("ORA-00001"): - self.cursor.executemany(sql, rows, batcherrors=False) - - def test_3224(self): - "3224 - test executing in succession with batch error" - self.cursor.execute("truncate table TestArrayDML") - rows = [ - (1, "First", 100), - (2, "Second", 200), - (3, "Third", 300), - (4, "Second", 300), - (5, "Fifth", 300), - (6, "Sixth", 400), - (6, "Seventh", 400), - (8, "Eighth", 100), - ] - self.cursor.executemany( - """ - insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3) - """, - rows, - batcherrors=True, - ) - actual_errors = [ - (error.offset, error.full_code) - for error in self.cursor.getbatcherrors() - ] - self.assertEqual(actual_errors, [(6, "ORA-00001")]) - rows = [ - (101, "First"), - (201, "Second"), - (3000, "Third"), - (900, "Ninth"), - (301, "Third"), - ] - self.cursor.executemany( - "update TestArrayDML set IntCol2 = :1 where StringCol = :2", - rows, - arraydmlrowcounts=True, - batcherrors=True, - ) - actual_errors = [ - (error.offset, error.full_code) - for error in self.cursor.getbatcherrors() - ] - self.assertEqual(actual_errors, [(2, "ORA-01438")]) - self.assertEqual(self.cursor.getarraydmlrowcounts(), [1, 2, 0, 0, 1]) - self.assertEqual(self.cursor.rowcount, 4) - - def test_3225(self): - "3225 - test using implicit cursors to execute new statements" - cursor = self.conn.cursor() - cursor.execute( - """ - declare - c1 sys_refcursor; - begin - open c1 for - select NumberCol - from TestNumbers - where IntCol between 3 and 5; - - dbms_sql.return_result(c1); - end; - """ - ) - results = cursor.getimplicitresults() - self.assertEqual(len(results), 1) - self.assertEqual([n for n, in results[0]], [3.75, 5, 6.25]) - results[0].execute("select :1 from dual", [7]) - (row,) = results[0].fetchone() - self.assertEqual(row, 7) - - def test_3226(self): - "3226 - test batcherrors mode without any errors produced" - self.cursor.execute("truncate table TestArrayDML") - rows = [(1, "First", 100), (2, "Second", 200), (3, "Third", 300)] - self.cursor.executemany( - """ +import pytest + + +@pytest.fixture(autouse=True) +def skip_unless_has_client_12_1(test_env): + """ + All tests in the file are skipped unless the client version is at least + 12.1. 
+ """ + if not test_env.has_client_version(12, 1): + pytest.skip("unsupported client") + + +def test_3200(cursor, test_env): + "3200 - test executing with arraydmlrowcounts mode disabled" + cursor.execute("truncate table TestArrayDML") + rows = [(1, "First"), (2, "Second")] + sql = "insert into TestArrayDML (IntCol, StringCol) values (:1, :2)" + cursor.executemany(sql, rows, arraydmlrowcounts=False) + with test_env.assert_raises_full_code("DPY-4006"): + cursor.getarraydmlrowcounts() + rows = [(3, "Third"), (4, "Fourth")] + cursor.executemany(sql, rows) + with test_env.assert_raises_full_code("DPY-4006"): + cursor.getarraydmlrowcounts() + + +def test_3201(conn, cursor): + "3201 - test executing with arraydmlrowcounts mode enabled" + cursor.execute("truncate table TestArrayDML") + rows = [ + (1, "First", 100), + (2, "Second", 200), + (3, "Third", 300), + (4, "Fourth", 300), + (5, "Fifth", 300), + ] + cursor.executemany( + """ + insert into TestArrayDML (IntCol, StringCol, IntCol2) + values (:1, :2, :3) + """, + rows, + arraydmlrowcounts=True, + ) + conn.commit() + assert cursor.getarraydmlrowcounts() == [1, 1, 1, 1, 1] + cursor.execute("select count(*) from TestArrayDML") + (count,) = cursor.fetchone() + assert count == len(rows) + + +def test_3202(conn, cursor): + "3202 - test binding a boolean collection (in)" + type_obj = conn.gettype("PKG_TESTBOOLEANS.UDT_BOOLEANLIST") + obj = type_obj.newobject() + obj.setelement(1, True) + obj.extend([True, False, True, True, False, True]) + result = cursor.callfunc("pkg_TestBooleans.TestInArrays", int, [obj]) + assert result == 5 + + +def test_3203(conn, cursor): + "3203 - test binding a boolean collection (out)" + type_obj = conn.gettype("PKG_TESTBOOLEANS.UDT_BOOLEANLIST") + obj = type_obj.newobject() + cursor.callproc("pkg_TestBooleans.TestOutArrays", (6, obj)) + assert obj.aslist() == [True, False, True, False, True, False] + + +def test_3204(conn, cursor): + "3204 - test binding a PL/SQL date collection (in)" + type_obj = conn.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST") + obj = type_obj.newobject() + obj.setelement(1, datetime.datetime(2016, 2, 5)) + obj.append(datetime.datetime(2016, 2, 8, 12, 15, 30)) + obj.append(datetime.datetime(2016, 2, 12, 5, 44, 30)) + result = cursor.callfunc( + "pkg_TestDateArrays.TestInArrays", + oracledb.NUMBER, + (2, datetime.datetime(2016, 2, 1), obj), + ) + assert result == 24.75 + + +def test_3205(conn, cursor): + "3205 - test binding a PL/SQL date collection (in/out)" + type_obj = conn.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST") + obj = type_obj.newobject() + obj.setelement(1, datetime.datetime(2016, 1, 1)) + obj.append(datetime.datetime(2016, 1, 7)) + obj.append(datetime.datetime(2016, 1, 13)) + obj.append(datetime.datetime(2016, 1, 19)) + cursor.callproc("pkg_TestDateArrays.TestInOutArrays", (4, obj)) + expected_values = [ + datetime.datetime(2016, 1, 8), + datetime.datetime(2016, 1, 14), + datetime.datetime(2016, 1, 20), + datetime.datetime(2016, 1, 26), + ] + assert obj.aslist() == expected_values + + +def test_3206(conn, cursor): + "3206 - test binding a PL/SQL date collection (out)" + type_obj = conn.gettype("PKG_TESTDATEARRAYS.UDT_DATELIST") + obj = type_obj.newobject() + cursor.callproc("pkg_TestDateArrays.TestOutArrays", (3, obj)) + expected_values = [ + datetime.datetime(2002, 12, 13, 4, 48), + datetime.datetime(2002, 12, 14, 9, 36), + datetime.datetime(2002, 12, 15, 14, 24), + ] + assert obj.aslist() == expected_values + + +def test_3207(conn, cursor): + "3207 - test binding a PL/SQL number collection 
(in)" + type_name = "PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST" + type_obj = conn.gettype(type_name) + obj = type_obj.newobject() + obj.setelement(1, 10) + obj.extend([20, 30, 40, 50]) + result = cursor.callfunc( + "pkg_TestNumberArrays.TestInArrays", int, (5, obj) + ) + assert result == 155 + + +def test_3208(conn, cursor): + "3208 - test binding a PL/SQL number collection (in/out)" + type_name = "PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST" + type_obj = conn.gettype(type_name) + obj = type_obj.newobject() + obj.setelement(1, 5) + obj.extend([8, 3, 2]) + cursor.callproc("pkg_TestNumberArrays.TestInOutArrays", (4, obj)) + assert obj.aslist() == [50, 80, 30, 20] + + +def test_3209(conn, cursor): + "3209 - test binding a PL/SQL number collection (out)" + type_name = "PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST" + type_obj = conn.gettype(type_name) + obj = type_obj.newobject() + cursor.callproc("pkg_TestNumberArrays.TestOutArrays", (3, obj)) + assert obj.aslist() == [100, 200, 300] + + +def test_3210(conn, cursor): + "3210 - test binding an array of PL/SQL records (in)" + rec_type = conn.gettype("PKG_TESTRECORDS.UDT_RECORD") + array_type = conn.gettype("PKG_TESTRECORDS.UDT_RECORDARRAY") + array_obj = array_type.newobject() + for i in range(3): + obj = rec_type.newobject() + obj.NUMBERVALUE = i + 1 + obj.STRINGVALUE = f"String in record #{i + 1}" + obj.DATEVALUE = datetime.datetime(2017, i + 1, 1) + obj.TIMESTAMPVALUE = datetime.datetime(2017, 1, i + 1) + obj.BOOLEANVALUE = (i % 2) == 1 + obj.PLSINTEGERVALUE = i * 5 + obj.BINARYINTEGERVALUE = i * 2 + array_obj.append(obj) + result = cursor.callfunc("pkg_TestRecords.TestInArrays", str, [array_obj]) + expected_value = ( + "udt_Record(1, 'String in record #1', " + "to_date('2017-01-01', 'YYYY-MM-DD'), " + "to_timestamp('2017-01-01 00:00:00', " + "'YYYY-MM-DD HH24:MI:SS'), false, 0, 0); " + "udt_Record(2, 'String in record #2', " + "to_date('2017-02-01', 'YYYY-MM-DD'), " + "to_timestamp('2017-01-02 00:00:00', " + "'YYYY-MM-DD HH24:MI:SS'), true, 5, 2); " + "udt_Record(3, 'String in record #3', " + "to_date('2017-03-01', 'YYYY-MM-DD'), " + "to_timestamp('2017-01-03 00:00:00', " + "'YYYY-MM-DD HH24:MI:SS'), false, 10, 4)" + ) + assert result == expected_value + + +def test_3211(conn, cursor): + "3211 - test binding a PL/SQL record (in)" + type_obj = conn.gettype("PKG_TESTRECORDS.UDT_RECORD") + obj = type_obj.newobject() + obj.NUMBERVALUE = 18 + obj.STRINGVALUE = "A string in a record" + obj.DATEVALUE = datetime.datetime(2016, 2, 15) + obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 12, 14, 25, 36) + obj.BOOLEANVALUE = False + obj.PLSINTEGERVALUE = 21 + obj.BINARYINTEGERVALUE = 5 + result = cursor.callfunc("pkg_TestRecords.GetStringRep", str, [obj]) + expected_value = ( + "udt_Record(18, 'A string in a record', " + "to_date('2016-02-15', 'YYYY-MM-DD'), " + "to_timestamp('2016-02-12 14:25:36', " + "'YYYY-MM-DD HH24:MI:SS'), false, 21, 5)" + ) + assert result == expected_value + + +def test_3212(conn, cursor): + "3212 - test binding a PL/SQL record (out)" + type_obj = conn.gettype("PKG_TESTRECORDS.UDT_RECORD") + obj = type_obj.newobject() + obj.NUMBERVALUE = 5 + obj.STRINGVALUE = "Test value" + obj.DATEVALUE = datetime.datetime.today() + obj.TIMESTAMPVALUE = datetime.datetime.today() + obj.BOOLEANVALUE = False + obj.PLSINTEGERVALUE = 23 + obj.BINARYINTEGERVALUE = 9 + cursor.callproc("pkg_TestRecords.TestOut", [obj]) + assert obj.NUMBERVALUE == 25 + assert obj.STRINGVALUE == "String in record" + assert obj.DATEVALUE == datetime.datetime(2016, 2, 16) + assert 
obj.TIMESTAMPVALUE == datetime.datetime(2016, 2, 16, 18, 23, 55) + assert obj.BOOLEANVALUE + assert obj.PLSINTEGERVALUE == 45 + assert obj.BINARYINTEGERVALUE == 10 + + +def test_3213(conn, cursor): + "3213 - test binding a PL/SQL string collection (in)" + type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" + type_obj = conn.gettype(type_name) + obj = type_obj.newobject() + obj.setelement(1, "First element") + obj.setelement(2, "Second element") + obj.setelement(3, "Third element") + result = cursor.callfunc( + "pkg_TestStringArrays.TestInArrays", int, (5, obj) + ) + assert result == 45 + + +def test_3214(conn, cursor): + "3214 - test binding a PL/SQL string collection (in/out)" + type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" + type_obj = conn.gettype(type_name) + obj = type_obj.newobject() + obj.setelement(1, "The first element") + obj.append("The second element") + obj.append("The third and final element") + cursor.callproc("pkg_TestStringArrays.TestInOutArrays", (3, obj)) + expected_values = [ + "Converted element # 1 originally had length 17", + "Converted element # 2 originally had length 18", + "Converted element # 3 originally had length 27", + ] + assert obj.aslist() == expected_values + + +def test_3215(conn, cursor): + "3215 - test binding a PL/SQL string collection (out)" + type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" + type_obj = conn.gettype(type_name) + obj = type_obj.newobject() + cursor.callproc("pkg_TestStringArrays.TestOutArrays", (4, obj)) + expected_values = [f"Test out element # {i + 1}" for i in range(4)] + assert obj.aslist() == expected_values + + +def test_3216(conn, cursor): + "3216 - test binding a PL/SQL string collection (out with holes)" + type_name = "PKG_TESTSTRINGARRAYS.UDT_STRINGLIST" + type_obj = conn.gettype(type_name) + obj = type_obj.newobject() + cursor.callproc("pkg_TestStringArrays.TestIndexBy", [obj]) + assert obj.first() == -1048576 + assert obj.last() == 8388608 + assert obj.next(-576) == 284 + assert obj.prev(284) == -576 + assert obj.size() == 4 + assert obj.exists(-576) + assert not obj.exists(-577) + assert obj.getelement(284) == "Third element" + expected_list = [ + "First element", + "Second element", + "Third element", + "Fourth element", + ] + assert obj.aslist() == expected_list + expected_dict = { + -1048576: "First element", + -576: "Second element", + 284: "Third element", + 8388608: "Fourth element", + } + assert obj.asdict() == expected_dict + obj.delete(-576) + obj.delete(284) + expected_list.pop(2) + expected_list.pop(1) + assert obj.aslist() == expected_list + expected_dict.pop(-576) + expected_dict.pop(284) + assert obj.asdict() == expected_dict + + +def test_3217(cursor, test_env): + "3217 - test executing with arraydmlrowcounts with exception" + cursor.execute("truncate table TestArrayDML") + rows = [(1, "First"), (2, "Second"), (2, "Third"), (4, "Fourth")] + sql = "insert into TestArrayDML (IntCol,StringCol) values (:1,:2)" + with test_env.assert_raises_full_code("ORA-00001"): + cursor.executemany(sql, rows, arraydmlrowcounts=True) + assert cursor.getarraydmlrowcounts() == [1, 1] + + +def test_3218(cursor): + "3218 - test executing delete statement with arraydmlrowcount mode" + cursor.execute("truncate table TestArrayDML") + rows = [ + (1, "First", 100), + (2, "Second", 200), + (3, "Third", 300), + (4, "Fourth", 300), + (5, "Fifth", 300), + (6, "Sixth", 400), + (7, "Seventh", 400), + (8, "Eighth", 500), + ] + cursor.executemany( + """ + insert into TestArrayDML (IntCol, StringCol, IntCol2) + values (:1, :2, :3) + """, + 
rows, + ) + rows = [(200,), (300,), (400,)] + cursor.executemany( + "delete from TestArrayDML where IntCol2 = :1", + rows, + arraydmlrowcounts=True, + ) + assert cursor.getarraydmlrowcounts() == [1, 3, 2] + assert cursor.rowcount == 6 + + +def test_3219(cursor): + "3219 - test executing update statement with arraydmlrowcount mode" + cursor.execute("truncate table TestArrayDML") + rows = [ + (1, "First", 100), + (2, "Second", 200), + (3, "Third", 300), + (4, "Fourth", 300), + (5, "Fifth", 300), + (6, "Sixth", 400), + (7, "Seventh", 400), + (8, "Eighth", 500), + ] + cursor.executemany( + """ + insert into TestArrayDML (IntCol, StringCol, IntCol2) + values (:1, :2, :3) + """, + rows, + ) + rows = [("One", 100), ("Two", 200), ("Three", 300), ("Four", 400)] + cursor.executemany( + "update TestArrayDML set StringCol = :1 where IntCol2 = :2", + rows, + arraydmlrowcounts=True, + ) + assert cursor.getarraydmlrowcounts() == [1, 1, 3, 2] + assert cursor.rowcount == 7 + + +def test_3220(cursor): + "3220 - test getimplicitresults() returns the correct data" + cursor.execute( + """ + declare + c1 sys_refcursor; + c2 sys_refcursor; + begin + open c1 for + select NullableCol + from TestNumbers + where IntCol between 3 and 5; + + dbms_sql.return_result(c1); + + open c2 for + select NullableCol + from TestNumbers + where IntCol between 7 and 10; + + dbms_sql.return_result(c2); + end; + """ + ) + results = cursor.getimplicitresults() + assert len(results) == 2 + assert [n for n, in results[0]] == [2924207, None, 59797108943] + assert [n for n, in results[1]] == [ + 1222791080775407, + None, + 25004854810776297743, + None, + ] + + +def test_3221(cursor, test_env): + "3221 - test getimplicitresults() without executing a statement" + with test_env.assert_raises_full_code("DPY-1004"): + cursor.getimplicitresults() + + +def test_3222(cursor): + "3222 - test executing insert with multiple distinct batch errors" + cursor.execute("truncate table TestArrayDML") + rows = [ + (1, "First", 100), + (2, "Second", 200), + (2, "Third", 300), + (4, "Fourth", 400), + (5, "Fourth", 1000), + ] + cursor.executemany( + """ + insert into TestArrayDML (IntCol, StringCol, IntCol2) + values (:1, :2, :3) + """, + rows, + batcherrors=True, + arraydmlrowcounts=True, + ) + actual_errors = [ + (error.offset, error.full_code) for error in cursor.getbatcherrors() + ] + assert actual_errors == [(4, "ORA-01438"), (2, "ORA-00001")] + assert cursor.getarraydmlrowcounts() == [1, 1, 0, 1, 0] + + +def test_3223(cursor, test_env): + "3223 - test batcherrors mode set to False" + cursor.execute("truncate table TestArrayDML") + rows = [(1, "First", 100), (2, "Second", 200), (2, "Third", 300)] + sql = """insert into TestArrayDML (IntCol, StringCol, IntCol2) + values (:1, :2, :3)""" + with test_env.assert_raises_full_code("ORA-00001"): + cursor.executemany(sql, rows, batcherrors=False) + + +def test_3224(cursor): + "3224 - test executing in succession with batch error" + cursor.execute("truncate table TestArrayDML") + rows = [ + (1, "First", 100), + (2, "Second", 200), + (3, "Third", 300), + (4, "Second", 300), + (5, "Fifth", 300), + (6, "Sixth", 400), + (6, "Seventh", 400), + (8, "Eighth", 100), + ] + cursor.executemany( + """ + insert into TestArrayDML (IntCol, StringCol, IntCol2) + values (:1, :2, :3) + """, + rows, + batcherrors=True, + ) + actual_errors = [ + (error.offset, error.full_code) for error in cursor.getbatcherrors() + ] + assert actual_errors == [(6, "ORA-00001")] + rows = [ + (101, "First"), + (201, "Second"), + (3000, "Third"), + (900, 
"Ninth"), + (301, "Third"), + ] + cursor.executemany( + "update TestArrayDML set IntCol2 = :1 where StringCol = :2", + rows, + arraydmlrowcounts=True, + batcherrors=True, + ) + actual_errors = [ + (error.offset, error.full_code) for error in cursor.getbatcherrors() + ] + assert actual_errors == [(2, "ORA-01438")] + assert cursor.getarraydmlrowcounts() == [1, 2, 0, 0, 1] + assert cursor.rowcount == 4 + + +def test_3225(cursor): + "3225 - test using implicit cursors to execute new statements" + cursor.execute( + """ + declare + c1 sys_refcursor; + begin + open c1 for + select NumberCol + from TestNumbers + where IntCol between 3 and 5; + + dbms_sql.return_result(c1); + end; + """ + ) + results = cursor.getimplicitresults() + assert len(results) == 1 + assert [n for n, in results[0]] == [3.75, 5, 6.25] + results[0].execute("select :1 from dual", [7]) + (row,) = results[0].fetchone() + assert row == 7 + + +def test_3226(cursor): + "3226 - test batcherrors mode without any errors produced" + cursor.execute("truncate table TestArrayDML") + rows = [(1, "First", 100), (2, "Second", 200), (3, "Third", 300)] + cursor.executemany( + """ + insert into TestArrayDML (IntCol, StringCol, IntCol2) + values (:1, :2, :3) + """, + rows, + batcherrors=True, + ) + assert cursor.getbatcherrors() == [] + + +def test_3227(cursor): + "3227 - test batcherrors mode with multiple executes" + cursor.execute("truncate table TestArrayDML") + rows_1 = [ + (1, "Value 1", 100), + (2, "Value 2", 200), + (2, "Value 2", 200), + ] + rows_2 = [ + (3, "Value 3", 300), + (3, "Value 3", 300), + (4, "Value 4", 400), + ] + sql = """ insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3) - """, - rows, - batcherrors=True, - ) - self.assertEqual(self.cursor.getbatcherrors(), []) - - def test_3227(self): - "3227 - test batcherrors mode with multiple executes" - self.cursor.execute("truncate table TestArrayDML") - rows_1 = [ - (1, "Value 1", 100), - (2, "Value 2", 200), - (2, "Value 2", 200), + values (:1, :2, :3)""" + cursor.executemany(sql, rows_1, batcherrors=True) + actual_errors = [ + (error.offset, error.full_code) for error in cursor.getbatcherrors() + ] + assert actual_errors == [(2, "ORA-00001")] + cursor.executemany(sql, rows_2, batcherrors=True) + actual_errors = [ + (error.offset, error.full_code) for error in cursor.getbatcherrors() + ] + assert actual_errors == [(1, "ORA-00001")] + + +def test_3228(conn): + "3228 - test %ROWTYPE record type" + type_obj = conn.gettype("TESTTEMPTABLE%ROWTYPE") + assert type_obj.attributes[3].name == "NUMBERCOL" + + +def test_3229(conn): + "3229 - test collection of %ROWTYPE record type" + type_name = "PKG_TESTBINDOBJECT.UDT_COLLECTIONROWTYPE" + type_obj = conn.gettype(type_name) + assert type_obj.element_type.attributes[3].name == "NUMBERCOL" + + +def test_3230(cursor, test_env): + "3230 - enabling batcherrors parameter with PL/SQL" + with test_env.assert_raises_full_code("DPY-2040"): + cursor.executemany("begin null; end;", 30, batcherrors=True) + + +def test_3231(cursor, test_env): + "3231 - enabling arraydmlrowcountsbatcherrors parameter with PL/SQL" + with test_env.assert_raises_full_code("DPY-2040"): + cursor.executemany("begin null; end;", 31, arraydmlrowcounts=True) + + +def test_3232(test_env): + "3232 - fetch implicit cursors after closing connection" + conn = test_env.get_connection() + cursor = conn.cursor() + cursor.execute( + """ + declare + c1 sys_refcursor; + c2 sys_refcursor; + begin + open c1 for + select NullableCol + from TestNumbers; + + 
dbms_sql.return_result(c1); + + open c2 for + select NullableCol + from TestNumbers; + + dbms_sql.return_result(c2); + end; + """ + ) + cursor1, cursor2 = cursor.getimplicitresults() + conn.close() + with test_env.assert_raises_full_code("DPY-1001"): + cursor1.fetchall() + with test_env.assert_raises_full_code("DPY-1001"): + cursor2.fetchall() + + +def test_3233(conn, test_env): + "3233 - fetch implicit cursors after closing parent cursor" + cursor = conn.cursor() + cursor.execute( + """ + declare + c1 sys_refcursor; + c2 sys_refcursor; + begin + open c1 for + select NullableCol + from TestNumbers + where IntCol between 3 and 5; + + dbms_sql.return_result(c1); + + open c2 for + select NullableCol + from TestNumbers + where IntCol between 7 and 10; + + dbms_sql.return_result(c2); + end; + """ + ) + cursor1, cursor2 = cursor.getimplicitresults() + cursor.close() + if conn.thin: + assert [n for n, in cursor1] == [2924207, None, 59797108943] + assert [n for n, in cursor2] == [ + 1222791080775407, + None, + 25004854810776297743, + None, ] - rows_2 = [ - (3, "Value 3", 300), - (3, "Value 3", 300), - (4, "Value 4", 400), - ] - sql = """ - insert into TestArrayDML (IntCol, StringCol, IntCol2) - values (:1, :2, :3)""" - self.cursor.executemany(sql, rows_1, batcherrors=True) - actual_errors = [ - (error.offset, error.full_code) - for error in self.cursor.getbatcherrors() - ] - self.assertEqual(actual_errors, [(2, "ORA-00001")]) - self.cursor.executemany(sql, rows_2, batcherrors=True) - actual_errors = [ - (error.offset, error.full_code) - for error in self.cursor.getbatcherrors() - ] - self.assertEqual(actual_errors, [(1, "ORA-00001")]) - - def test_3228(self): - "3228 - test %ROWTYPE record type" - type_obj = self.conn.gettype("TESTTEMPTABLE%ROWTYPE") - self.assertEqual(type_obj.attributes[3].name, "NUMBERCOL") - - def test_3229(self): - "3229 - test collection of %ROWTYPE record type" - type_name = "PKG_TESTBINDOBJECT.UDT_COLLECTIONROWTYPE" - type_obj = self.conn.gettype(type_name) - self.assertEqual(type_obj.element_type.attributes[3].name, "NUMBERCOL") - - def test_3230(self): - "3230 - enabling batcherrors parameter with PL/SQL" - with self.assertRaisesFullCode("DPY-2040"): - self.cursor.executemany("begin null; end;", 30, batcherrors=True) - - def test_3231(self): - "3231 - enabling arraydmlrowcountsbatcherrors parameter with PL/SQL" - with self.assertRaisesFullCode("DPY-2040"): - self.cursor.executemany( - "begin null; end;", 31, arraydmlrowcounts=True - ) - - def test_3232(self): - "3232 - fetch implicit cursors after closing connection" - conn = test_env.get_connection() - cursor = conn.cursor() - cursor.execute( - """ - declare - c1 sys_refcursor; - c2 sys_refcursor; - begin - open c1 for - select NullableCol - from TestNumbers; - - dbms_sql.return_result(c1); - - open c2 for - select NullableCol - from TestNumbers; - - dbms_sql.return_result(c2); - end; - """ - ) - cursor1, cursor2 = cursor.getimplicitresults() - conn.close() - with self.assertRaisesFullCode("DPY-1001"): + else: + with test_env.assert_raises_full_code("DPI-1039"): + cursor1.fetchall() + with test_env.assert_raises_full_code("DPI-1039"): cursor1.fetchall() - with self.assertRaisesFullCode("DPY-1001"): - cursor2.fetchall() - - def test_3233(self): - "3233 - fetch implicit cursors after closing parent cursor" - cursor = self.conn.cursor() - cursor.execute( - """ - declare - c1 sys_refcursor; - c2 sys_refcursor; - begin - open c1 for - select NullableCol - from TestNumbers - where IntCol between 3 and 5; - - 
dbms_sql.return_result(c1); - - open c2 for - select NullableCol - from TestNumbers - where IntCol between 7 and 10; - - dbms_sql.return_result(c2); - end; - """ - ) - cursor1, cursor2 = cursor.getimplicitresults() - cursor.close() - if self.conn.thin: - self.assertEqual( - [n for n, in cursor1], [2924207, None, 59797108943] - ) - self.assertEqual( - [n for n, in cursor2], - [1222791080775407, None, 25004854810776297743, None], - ) - else: - with self.assertRaisesFullCode("DPI-1039"): - cursor1.fetchall() - with self.assertRaisesFullCode("DPI-1039"): - cursor1.fetchall() - - def test_3234(self): - "3234 - test PL/SQL record metadata" - rec_type = self.conn.gettype("PKG_TESTRECORDS.UDT_RECORD") - expected_metadata = [ - ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), - ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 30), - ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), - ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), - ("BOOLEANVALUE", oracledb.DB_TYPE_BOOLEAN, None, None, None), - ( - "PLSINTEGERVALUE", - oracledb.DB_TYPE_BINARY_INTEGER, - None, - None, - None, - ), - ( - "BINARYINTEGERVALUE", - oracledb.DB_TYPE_BINARY_INTEGER, - None, - None, - None, - ), - ] - actual_metadata = [ - (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) - for attr in rec_type.attributes - ] - self.assertEqual(actual_metadata, expected_metadata) -if __name__ == "__main__": - test_env.run_test_cases() +def test_3234(conn): + "3234 - test PL/SQL record metadata" + rec_type = conn.gettype("PKG_TESTRECORDS.UDT_RECORD") + expected_metadata = [ + ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), + ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 30), + ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), + ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), + ("BOOLEANVALUE", oracledb.DB_TYPE_BOOLEAN, None, None, None), + ( + "PLSINTEGERVALUE", + oracledb.DB_TYPE_BINARY_INTEGER, + None, + None, + None, + ), + ( + "BINARYINTEGERVALUE", + oracledb.DB_TYPE_BINARY_INTEGER, + None, + None, + None, + ), + ] + actual_metadata = [ + (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) + for attr in rec_type.attributes + ] + assert actual_metadata == expected_metadata diff --git a/tests/test_3300_soda_database.py b/tests/test_3300_soda_database.py index f01423f8..c3143b40 100644 --- a/tests/test_3300_soda_database.py +++ b/tests/test_3300_soda_database.py @@ -29,240 +29,205 @@ import datetime import decimal import json -import unittest -import test_env - - -@unittest.skipIf( - test_env.skip_soda_tests(), "unsupported client/server combination" -) -class TestCase(test_env.BaseTestCase): - def __verify_doc( - self, - doc, - bytes_content, - str_content=None, - content=None, - key=None, - media_type="application/json", - ): - self.assertEqual(doc.getContentAsBytes(), bytes_content) - if str_content is not None: - self.assertEqual(doc.getContentAsString(), str_content) - if content is not None: - self.assertEqual(doc.getContent(), content) - self.assertEqual(doc.key, key) - self.assertEqual(doc.mediaType, media_type) - self.assertIsNone(doc.version) - self.assertIsNone(doc.lastModified) - self.assertIsNone(doc.createdOn) - - def test_3300(self): - "3300 - test creating documents with JSON data" - soda_db = self.get_soda_database() - val = {"testKey1": "testValue1", "testKey2": "testValue2"} - if test_env.has_client_version(23, 4): - str_val = str(val) - else: - str_val = json.dumps(val) - bytes_val = str_val.encode() - key = 
"MyKey" - media_type = "text/plain" - doc = soda_db.createDocument(val) - self.__verify_doc(doc, bytes_val, str_val, val) - str_val = json.dumps(val) - bytes_val = str_val.encode() - doc = soda_db.createDocument(str_val, key) - self.__verify_doc(doc, bytes_val, str_val, val, key) - doc = soda_db.createDocument(bytes_val, key, media_type) - self.__verify_doc(doc, bytes_val, str_val, bytes_val, key, media_type) - - def test_3301(self): - "3301 - test creating documents with raw data" - soda_db = self.get_soda_database() - val = b"" - key = "MyRawKey" - media_type = "text/html" - doc = soda_db.createDocument(val) - self.__verify_doc(doc, val) - doc = soda_db.createDocument(val, key) - self.__verify_doc(doc, val, key=key) - doc = soda_db.createDocument(val, key, media_type) - self.__verify_doc(doc, val, key=key, media_type=media_type) - - def test_3302(self): - "3302 - test getting collection names from the database" - soda_db = self.get_soda_database() - self.assertEqual(soda_db.getCollectionNames(), []) - names = ["zCol", "dCol", "sCol", "aCol", "gCol"] - sorted_names = list(sorted(names)) - for name in names: - soda_db.createCollection(name) - self.assertEqual(soda_db.getCollectionNames(), sorted_names) - self.assertEqual(soda_db.getCollectionNames(limit=2), sorted_names[:2]) - self.assertEqual(soda_db.getCollectionNames("a"), sorted_names) - self.assertEqual(soda_db.getCollectionNames("C"), sorted_names) - self.assertEqual( - soda_db.getCollectionNames("b", limit=3), sorted_names[1:4] - ) - self.assertEqual(soda_db.getCollectionNames("z"), sorted_names[-1:]) - - def test_3303(self): - "3303 - test opening a collection" - soda_db = self.get_soda_database() - coll = soda_db.openCollection("CollectionThatDoesNotExist") - self.assertIsNone(coll) - created_coll = soda_db.createCollection("TestOpenCollection") - coll = soda_db.openCollection(created_coll.name) - self.assertEqual(coll.name, created_coll.name) - - def test_3304(self): - "3304 - test SodaDatabase repr() and str()" - soda_db = self.get_soda_database() - self.assertEqual( - repr(soda_db), f"" - ) - self.assertEqual( - str(soda_db), f"" - ) - - def test_3305(self): - "3305 - test negative cases for SODA database methods" - soda_db = self.get_soda_database() - self.assertRaises(TypeError, soda_db.createCollection) - self.assertRaises(TypeError, soda_db.createCollection, 1) - with self.assertRaisesFullCode("ORA-40658"): - soda_db.createCollection(None) - with self.assertRaisesFullCode("ORA-40675"): - soda_db.createCollection("CollMetadata", 7) - self.assertRaises(TypeError, soda_db.getCollectionNames, 1) - - @unittest.skipUnless( - test_env.has_client_version(23, 4), "unsupported data types" - ) - def test_3306(self): - "3306 - test creating documents with JSON data using extended types" - soda_db = self.get_soda_database() - val = { - "testKey1": "testValue1", - "testKey2": decimal.Decimal("12.78"), - "testKey3": datetime.datetime(2023, 7, 3, 11, 10, 24), - } - doc = soda_db.createDocument(val) +import pytest + + +def _verify_doc( + doc, + bytes_content, + str_content=None, + content=None, + key=None, + media_type="application/json", +): + assert doc.getContentAsBytes() == bytes_content + if str_content is not None: + assert doc.getContentAsString() == str_content + if content is not None: + assert doc.getContent() == content + assert doc.key == key + assert doc.mediaType == media_type + assert doc.version is None + assert doc.lastModified is None + assert doc.createdOn is None + + +def test_3300(soda_db, test_env): + "3300 - test 
creating documents with JSON data" + val = {"testKey1": "testValue1", "testKey2": "testValue2"} + if test_env.has_client_version(23, 4): str_val = str(val) - bytes_val = str_val.encode() - self.__verify_doc(doc, bytes_val, str_val, val) - - def test_3307(self): - "3307 - test creating documents with int scalar value" - soda_db = self.conn.getSodaDatabase() - val = 144 - str_val = "144" - bytes_val = b"144" - key = "MyKey" - media_type = "application/json" - doc = soda_db.createDocument(val) - self.__verify_doc(doc, bytes_val, str_val, val) - doc = soda_db.createDocument(val, key) - self.__verify_doc(doc, bytes_val, str_val, val, key) - doc = soda_db.createDocument(val, key, media_type) - self.__verify_doc(doc, bytes_val, str_val, val, key, media_type) - - @unittest.skipUnless( - test_env.has_client_version(23, 4) - and test_env.has_server_version(23, 4), - "data types serialized differently", - ) - def test_3308(self): - "3308 - test creating documents with float scalar value" - soda_db = self.conn.getSodaDatabase() - val = 12.2 - str_val = "12.2" - bytes_val = b"12.2" - decimal_val = decimal.Decimal(str_val) - key = "MyKey" - media_type = "application/json" - doc = soda_db.createDocument(val) - self.__verify_doc(doc, bytes_val, str_val, decimal_val) - doc = soda_db.createDocument(val, key) - self.__verify_doc(doc, bytes_val, str_val, decimal_val, key) - doc = soda_db.createDocument(val, key, media_type) - self.__verify_doc( - doc, bytes_val, str_val, decimal_val, key, media_type - ) - - @unittest.skipUnless( - test_env.has_client_version(23, 4) - and test_env.has_server_version(23, 4), - "unsupported data types", + else: + str_val = json.dumps(val) + bytes_val = str_val.encode() + key = "MyKey" + media_type = "text/plain" + doc = soda_db.createDocument(val) + _verify_doc(doc, bytes_val, str_val, val) + str_val = json.dumps(val) + bytes_val = str_val.encode() + doc = soda_db.createDocument(str_val, key) + _verify_doc(doc, bytes_val, str_val, val, key) + doc = soda_db.createDocument(bytes_val, key, media_type) + _verify_doc(doc, bytes_val, str_val, bytes_val, key, media_type) + + +def test_3301(soda_db): + "3301 - test creating documents with raw data" + val = b"" + key = "MyRawKey" + media_type = "text/html" + doc = soda_db.createDocument(val) + _verify_doc(doc, val) + doc = soda_db.createDocument(val, key) + _verify_doc(doc, val, key=key) + doc = soda_db.createDocument(val, key, media_type) + _verify_doc(doc, val, key=key, media_type=media_type) + + +def test_3302(soda_db): + "3302 - test getting collection names from the database" + assert soda_db.getCollectionNames() == [] + names = ["zCol", "dCol", "sCol", "aCol", "gCol"] + sorted_names = list(sorted(names)) + for name in names: + soda_db.createCollection(name) + assert soda_db.getCollectionNames() == sorted_names + assert soda_db.getCollectionNames(limit=2) == sorted_names[:2] + assert soda_db.getCollectionNames("a") == sorted_names + assert soda_db.getCollectionNames("C") == sorted_names + assert soda_db.getCollectionNames("b", limit=3) == sorted_names[1:4] + assert soda_db.getCollectionNames("z") == sorted_names[-1:] + + +def test_3303(soda_db): + "3303 - test opening a collection" + coll = soda_db.openCollection("CollectionThatDoesNotExist") + assert coll is None + created_coll = soda_db.createCollection("TestOpenCollection") + coll = soda_db.openCollection(created_coll.name) + assert coll.name == created_coll.name + + +def test_3304(soda_db, conn): + "3304 - test SodaDatabase repr() and str()" + assert repr(soda_db) == f"" + 
assert str(soda_db) == f"" + + +def test_3305(soda_db, test_env): + "3305 - test negative cases for SODA database methods" + pytest.raises(TypeError, soda_db.createCollection) + pytest.raises(TypeError, soda_db.createCollection, 1) + with test_env.assert_raises_full_code("ORA-40658"): + soda_db.createCollection(None) + with test_env.assert_raises_full_code("ORA-40675"): + soda_db.createCollection("CollMetadata", 7) + pytest.raises(TypeError, soda_db.getCollectionNames, 1) + + +def test_3306(soda_db, test_env): + "3306 - test creating documents with JSON data using extended types" + if not test_env.has_client_version(23, 4): + pytest.skip("unsupported data types") + val = { + "testKey1": "testValue1", + "testKey2": decimal.Decimal("12.78"), + "testKey3": datetime.datetime(2023, 7, 3, 11, 10, 24), + } + doc = soda_db.createDocument(val) + str_val = str(val) + bytes_val = str_val.encode() + _verify_doc(doc, bytes_val, str_val, val) + + +def test_3307(soda_db): + "3307 - test creating documents with int scalar value" + val = 144 + str_val = "144" + bytes_val = b"144" + key = "MyKey" + media_type = "application/json" + doc = soda_db.createDocument(val) + _verify_doc(doc, bytes_val, str_val, val) + doc = soda_db.createDocument(val, key) + _verify_doc(doc, bytes_val, str_val, val, key) + doc = soda_db.createDocument(val, key, media_type) + _verify_doc(doc, bytes_val, str_val, val, key, media_type) + + +def test_3308(soda_db, test_env): + "3308 - test creating documents with float scalar value" + if not test_env.has_client_and_server_version(23, 4): + pytest.skip("data types serialized differently") + val = 12.2 + str_val = "12.2" + bytes_val = b"12.2" + decimal_val = decimal.Decimal(str_val) + key = "MyKey" + media_type = "application/json" + doc = soda_db.createDocument(val) + _verify_doc(doc, bytes_val, str_val, decimal_val) + doc = soda_db.createDocument(val, key) + _verify_doc(doc, bytes_val, str_val, decimal_val, key) + doc = soda_db.createDocument(val, key, media_type) + _verify_doc(doc, bytes_val, str_val, decimal_val, key, media_type) + + +def test_3309(soda_db, test_env): + "3309 - test creating documents with a list" + if not test_env.has_client_and_server_version(23, 4): + pytest.skip("unsupported data types") + val = [12, "str", b"bytes", [1], {"dict": "3"}] + decimal_val = [ + decimal.Decimal("12"), + "str", + b"bytes", + [decimal.Decimal("1")], + {"dict": "3"}, + ] + str_val = "[Decimal('12'), 'str', b'bytes', [Decimal('1')], {'dict': '3'}]" + bytes_val = ( + b"[Decimal('12'), 'str', b'bytes', [Decimal('1')], {'dict': '3'}]" ) - def test_3309(self): - "3309 - test creating documents with a list" - soda_db = self.conn.getSodaDatabase() - val = [12, "str", b"bytes", [1], {"dict": "3"}] - decimal_val = [ - decimal.Decimal("12"), - "str", - b"bytes", - [decimal.Decimal("1")], - {"dict": "3"}, - ] - str_val = ( - "[Decimal('12'), 'str', b'bytes', [Decimal('1')], {'dict': '3'}]" - ) - bytes_val = ( - b"[Decimal('12'), 'str', b'bytes', [Decimal('1')], {'dict': '3'}]" - ) - key = "MyKey" - media_type = "application/json" + key = "MyKey" + media_type = "application/json" + doc = soda_db.createDocument(val) + _verify_doc(doc, bytes_val, str_val, decimal_val) + doc = soda_db.createDocument(val, key) + _verify_doc(doc, bytes_val, str_val, decimal_val, key) + doc = soda_db.createDocument(val, key, media_type) + _verify_doc(doc, bytes_val, str_val, decimal_val, key, media_type) + + +def test_3310(soda_db, test_env): + "3310 - test creating documents with a boolean scalar value" + if not 
test_env.has_client_and_server_version(23, 4): + pytest.skip("data types serialized differently") + test_values = [(True, "True", b"True"), (False, "False", b"False")] + key = "MyKey" + media_type = "application/json" + for val, str_val, bytes_val in test_values: doc = soda_db.createDocument(val) - self.__verify_doc(doc, bytes_val, str_val, decimal_val) + _verify_doc(doc, bytes_val, str_val, val) doc = soda_db.createDocument(val, key) - self.__verify_doc(doc, bytes_val, str_val, decimal_val, key) + _verify_doc(doc, bytes_val, str_val, val, key) doc = soda_db.createDocument(val, key, media_type) - self.__verify_doc( - doc, bytes_val, str_val, decimal_val, key, media_type - ) - - @unittest.skipUnless( - test_env.has_client_version(23, 4) - and test_env.has_server_version(23, 4), - "data types serialized differently", - ) - def test_3310(self): - "3310 - test creating documents with a boolean scalar value" - soda_db = self.conn.getSodaDatabase() - test_values = [(True, "True", b"True"), (False, "False", b"False")] - key = "MyKey" - media_type = "application/json" - for val, str_val, bytes_val in test_values: - doc = soda_db.createDocument(val) - self.__verify_doc(doc, bytes_val, str_val, val) - doc = soda_db.createDocument(val, key) - self.__verify_doc(doc, bytes_val, str_val, val, key) - doc = soda_db.createDocument(val, key, media_type) - self.__verify_doc(doc, bytes_val, str_val, val, key, media_type) - - @unittest.skipUnless( - test_env.has_client_version(23, 4) - and test_env.has_server_version(23, 4), - "data types serialized differently", - ) - def test_3311(self): - "3311 - test creating documents with unsupported types" - soda_db = self.conn.getSodaDatabase() - values = [ - tuple([144, 2]), - set("144"), - bytearray("omg", "utf-8"), - complex(2j), - range(4), - ] - for value in values: - with self.assertRaisesFullCode("DPY-3003"): - soda_db.createDocument(value) - - -if __name__ == "__main__": - test_env.run_test_cases() + _verify_doc(doc, bytes_val, str_val, val, key, media_type) + + +def test_3311(soda_db, test_env): + "3311 - test creating documents with unsupported types" + if not test_env.has_client_and_server_version(23, 4): + pytest.skip("data types serialized differently") + values = [ + tuple([144, 2]), + set("144"), + bytearray("omg", "utf-8"), + complex(2j), + range(4), + ] + for value in values: + with test_env.assert_raises_full_code("DPY-3003"): + soda_db.createDocument(value) diff --git a/tests/test_3400_soda_collection.py b/tests/test_3400_soda_collection.py index 6e7dcc2e..c35560e3 100644 --- a/tests/test_3400_soda_collection.py +++ b/tests/test_3400_soda_collection.py @@ -29,1024 +29,980 @@ import datetime import decimal import json -import unittest +import re import oracledb -import test_env - - -@unittest.skipIf( - test_env.skip_soda_tests(), "unsupported client/server combination" -) -class TestCase(test_env.BaseTestCase): - def __normalize_docs(self, docs): - """ - Remove the embedded OID added in Oracle Database 23ai, if found, in - order to ease comparison. 
- """ - for doc in docs: - if doc is not None and "_id" in doc: - del doc["_id"] - - def __test_skip(self, coll, num_to_skip, expected_content): - filter_spec = {"$orderby": [{"path": "name", "order": "desc"}]} - doc = coll.find().filter(filter_spec).skip(num_to_skip).getOne() - content = doc.getContent() if doc is not None else None - self.__normalize_docs([content]) - self.assertEqual(content, expected_content) - - def test_3400(self): - "3400 - test inserting invalid JSON value into SODA collection" - invalid_json = "{testKey:testValue}" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("InvalidJSON") - doc = soda_db.createDocument(invalid_json) - with self.assertRaisesFullCode("ORA-40780", "ORA-02290"): - coll.insertOne(doc) - - def test_3401(self): - "3401 - test inserting documents into a SODA collection" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestInsertDocs") - values_to_insert = [ - {"name": "George", "age": 47}, - {"name": "Susan", "age": 39}, - {"name": "John", "age": 50}, - {"name": "Jill", "age": 54}, - ] - inserted_keys = [] - for value in values_to_insert: - doc = coll.insertOneAndGet(value) - inserted_keys.append(doc.key) - self.conn.commit() - self.assertEqual(coll.find().count(), len(values_to_insert)) - for key, value in zip(inserted_keys, values_to_insert): - doc = coll.find().key(key).getOne().getContent() - self.__normalize_docs([doc]) - self.assertEqual(doc, value) - - def test_3402(self): - "3402 - test skipping documents in a SODA collection" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestSkipDocs") - values_to_insert = [ - {"name": "Anna", "age": 62}, - {"name": "Mark", "age": 37}, - {"name": "Martha", "age": 43}, - {"name": "Matthew", "age": 28}, - ] - for value in values_to_insert: - coll.insertOne(value) - self.conn.commit() - self.__test_skip(coll, 0, values_to_insert[3]) - self.__test_skip(coll, 1, values_to_insert[2]) - self.__test_skip(coll, 3, values_to_insert[0]) - self.__test_skip(coll, 4, None) - self.__test_skip(coll, 125, None) - - def test_3403(self): - "3403 - test replace documents in SODA collection" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestReplaceDoc") - content = {"name": "John", "address": {"city": "Sydney"}} - doc = coll.insertOneAndGet(content) - new_content = {"name": "John", "address": {"city": "Melbourne"}} - replaced = coll.find().key(doc.key).replaceOne(new_content) - self.assertTrue(replaced) - self.conn.commit() - doc = coll.find().key(doc.key).getOne().getContent() - self.__normalize_docs([doc]) - self.assertEqual(doc, new_content) - - def test_3404(self): - "3404 - test search documents with different QBEs" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestSearchDocContent") - data = [ - { - "name": "John", - "age": 22, - "birthday": "2000-12-15", - "locations": [{"city": "Bangalore"}, {"city": "Other"}], - }, +import pytest + + +@pytest.fixture +def skip_if_map_mode_not_supported(test_env): + """ + Map mode is not supported in Oracle Client 23ai. + """ + if test_env.has_client_version(23): + pytest.skip("map mode not supported with native collections") + + +@pytest.fixture +def skip_if_save_not_supported(test_env): + """ + save() is not supported in 23ai and requires a minimum client version of + 19.9 as well. 
+ """ + if test_env.has_client_version(23): + pytest.skip("save() is not implemented in Oracle Database 23ai") + if not test_env.has_client_version(19, 9): + pytest.skip("unsupported client") + + +def _normalize_docs(docs): + """ + Remove the embedded OID added in Oracle Database 23ai, if found, in + order to ease comparison. + """ + for doc in docs: + if doc is not None and "_id" in doc: + del doc["_id"] + + +def _test_skip(coll, num_to_skip, expected_content): + filter_spec = {"$orderby": [{"path": "name", "order": "desc"}]} + doc = coll.find().filter(filter_spec).skip(num_to_skip).getOne() + content = doc.getContent() if doc is not None else None + _normalize_docs([content]) + assert content == expected_content + + +def test_3400(soda_db, test_env): + "3400 - test inserting invalid JSON value into SODA collection" + invalid_json = "{testKey:testValue}" + coll = soda_db.createCollection("InvalidJSON") + doc = soda_db.createDocument(invalid_json) + with test_env.assert_raises_full_code("ORA-40780", "ORA-02290"): + coll.insertOne(doc) + + +def test_3401(soda_db, conn): + "3401 - test inserting documents into a SODA collection" + coll = soda_db.createCollection("TestInsertDocs") + values_to_insert = [ + {"name": "George", "age": 47}, + {"name": "Susan", "age": 39}, + {"name": "John", "age": 50}, + {"name": "Jill", "age": 54}, + ] + inserted_keys = [] + for value in values_to_insert: + doc = coll.insertOneAndGet(value) + inserted_keys.append(doc.key) + conn.commit() + assert coll.find().count() == len(values_to_insert) + for key, value in zip(inserted_keys, values_to_insert): + doc = coll.find().key(key).getOne().getContent() + _normalize_docs([doc]) + assert doc == value + + +def test_3402(soda_db, conn): + "3402 - test skipping documents in a SODA collection" + coll = soda_db.createCollection("TestSkipDocs") + values_to_insert = [ + {"name": "Anna", "age": 62}, + {"name": "Mark", "age": 37}, + {"name": "Martha", "age": 43}, + {"name": "Matthew", "age": 28}, + ] + for value in values_to_insert: + coll.insertOne(value) + conn.commit() + _test_skip(coll, 0, values_to_insert[3]) + _test_skip(coll, 1, values_to_insert[2]) + _test_skip(coll, 3, values_to_insert[0]) + _test_skip(coll, 4, None) + _test_skip(coll, 125, None) + + +def test_3403(soda_db, conn): + "3403 - test replace documents in SODA collection" + coll = soda_db.createCollection("TestReplaceDoc") + content = {"name": "John", "address": {"city": "Sydney"}} + doc = coll.insertOneAndGet(content) + new_content = {"name": "John", "address": {"city": "Melbourne"}} + replaced = coll.find().key(doc.key).replaceOne(new_content) + assert replaced + conn.commit() + doc = coll.find().key(doc.key).getOne().getContent() + _normalize_docs([doc]) + assert doc == new_content + + +def test_3404(soda_db, conn): + "3404 - test search documents with different QBEs" + coll = soda_db.createCollection("TestSearchDocContent") + data = [ + { + "name": "John", + "age": 22, + "birthday": "2000-12-15", + "locations": [{"city": "Bangalore"}, {"city": "Other"}], + }, + { + "name": "Johnson", + "age": 45, + "birthday": "1978-02-03", + "locations": [{"city": "Banaras"}, {"city": "Manhattan"}], + }, + { + "name": "William", + "age": 32, + "birthday": "1991-05-17", + "locations": {"city": "New Banaras"}, + }, + ] + coll.insertMany(data) + conn.commit() + + # create index so $contains works + index = {"name": "js_ix_3404"} + coll.createIndex(index) + + filter_specs = [ + ({"name": {"$contains": "John"}}, 1), + ({"age": {"$contains": "45"}}, 1), + ({"name": {"$like": 
"J%n"}}, 2), + ({"name": {"$regex": ".*[ho]n"}}, 2), + ("""{"locations.city": {"$regex": "^Ban.*"}}""", 2), + ({"birthday": {"$date": {"$gt": "2000-01-01"}}}, 1), + ({"birthday": {"$date": "2000-12-15"}}, 1), + ({"age": {"$gt": 18}}, 3), + ({"age": {"$lt": 25}}, 1), + ( { - "name": "Johnson", - "age": 45, - "birthday": "1978-02-03", - "locations": [{"city": "Banaras"}, {"city": "Manhattan"}], + "$or": [ + {"age": {"$gt": 50}}, + {"locations[*].city": {"$like": "%Ban%"}}, + ] }, + 3, + ), + ( { - "name": "William", - "age": 32, - "birthday": "1991-05-17", - "locations": {"city": "New Banaras"}, + "$and": [ + {"age": {"$gt": 40}}, + {"locations[0 to 1].city": {"$like": "%aras"}}, + ] }, - ] - coll.insertMany(data) - self.conn.commit() - - # create index so $contains works - index = {"name": "js_ix_3404"} - coll.createIndex(index) - - filter_specs = [ - ({"name": {"$contains": "John"}}, 1), - ({"age": {"$contains": "45"}}, 1), - ({"name": {"$like": "J%n"}}, 2), - ({"name": {"$regex": ".*[ho]n"}}, 2), - ("""{"locations.city": {"$regex": "^Ban.*"}}""", 2), - ({"birthday": {"$date": {"$gt": "2000-01-01"}}}, 1), - ({"birthday": {"$date": "2000-12-15"}}, 1), - ({"age": {"$gt": 18}}, 3), - ({"age": {"$lt": 25}}, 1), - ( - { - "$or": [ - {"age": {"$gt": 50}}, - {"locations[*].city": {"$like": "%Ban%"}}, - ] - }, - 3, - ), - ( - { - "$and": [ - {"age": {"$gt": 40}}, - {"locations[0 to 1].city": {"$like": "%aras"}}, - ] - }, - 1, - ), - ({"name": {"$hasSubstring": "John"}}, 2), - ({"name": {"$instr": "John"}}, 2), - ({"name": {"$startsWith": "John"}}, 2), - ({"name": {"$upper": {"$startsWith": "JO"}}}, 2), - ({"age": {"$not": {"$eq": 22}}}, 2), - ({"age": {"$not": {"$lt": 30, "$gt": 10}}}, 2), - ({"locations": {"$type": "array"}}, 2), - ] - for filter_spec, expected_count in filter_specs: - self.assertEqual( - coll.find().filter(filter_spec).count(), - expected_count, - filter_spec, - ) - - def test_3405(self): - "3405 - test removing documents" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestRemoveDocs") - data = [ - {"name": "John", "address": {"city": "Bangalore"}}, - {"name": "Johnson", "address": {"city": "Banaras"}}, - {"name": "Joseph", "address": {"city": "Mangalore"}}, - {"name": "Jibin", "address": {"city": "Secunderabad"}}, - {"name": "Andrew", "address": {"city": "Hyderabad"}}, - {"name": "Matthew", "address": {"city": "Mumbai"}}, - ] - docs = [coll.insertOneAndGet(v) for v in data] - self.assertEqual(coll.find().key(docs[3].key).remove(), 1) - self.assertEqual(coll.find().count(), len(data) - 1) - search_results = coll.find().filter({"name": {"$like": "Jibin"}}) - self.assertEqual(search_results.count(), 0) - self.assertEqual( - coll.find().filter({"name": {"$like": "John%"}}).remove(), 2 - ) - self.assertEqual(coll.find().count(), len(data) - 3) - self.assertEqual( - coll.find().filter({"name": {"$regex": "J.*"}}).remove(), 1 - ) - self.assertEqual(coll.find().count(), len(data) - 4) - self.conn.commit() - - def test_3406(self): - "3406 - test create and drop Index" - index_name = "TestIndexes_ix_1" - index_spec = { - "name": index_name, - "fields": [ - {"path": "address.city", "datatype": "string", "order": "asc"} - ], - } - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestIndexes") - self.conn.commit() - coll.dropIndex(index_name) + 1, + ), + ({"name": {"$hasSubstring": "John"}}, 2), + ({"name": {"$instr": "John"}}, 2), + ({"name": {"$startsWith": "John"}}, 2), + ({"name": {"$upper": {"$startsWith": "JO"}}}, 2), + ({"age": {"$not": 
{"$eq": 22}}}, 2), + ({"age": {"$not": {"$lt": 30, "$gt": 10}}}, 2), + ({"locations": {"$type": "array"}}, 2), + ] + for filter_spec, expected_count in filter_specs: + assert ( + coll.find().filter(filter_spec).count() == expected_count + ), filter_spec + + +def test_3405(soda_db, conn): + "3405 - test removing documents" + coll = soda_db.createCollection("TestRemoveDocs") + data = [ + {"name": "John", "address": {"city": "Bangalore"}}, + {"name": "Johnson", "address": {"city": "Banaras"}}, + {"name": "Joseph", "address": {"city": "Mangalore"}}, + {"name": "Jibin", "address": {"city": "Secunderabad"}}, + {"name": "Andrew", "address": {"city": "Hyderabad"}}, + {"name": "Matthew", "address": {"city": "Mumbai"}}, + ] + docs = [coll.insertOneAndGet(v) for v in data] + assert coll.find().key(docs[3].key).remove() == 1 + assert coll.find().count() == len(data) - 1 + search_results = coll.find().filter({"name": {"$like": "Jibin"}}) + assert search_results.count() == 0 + assert coll.find().filter({"name": {"$like": "John%"}}).remove() == 2 + assert coll.find().count() == len(data) - 3 + assert coll.find().filter({"name": {"$regex": "J.*"}}).remove() == 1 + assert coll.find().count() == len(data) - 4 + conn.commit() + + +def test_3406(soda_db, conn, test_env): + "3406 - test create and drop Index" + index_name = "TestIndexes_ix_1" + index_spec = { + "name": index_name, + "fields": [ + {"path": "address.city", "datatype": "string", "order": "asc"} + ], + } + coll = soda_db.createCollection("TestIndexes") + conn.commit() + coll.dropIndex(index_name) + coll.createIndex(index_spec) + pytest.raises(TypeError, coll.createIndex, 3) + with test_env.assert_raises_full_code("ORA-40733"): coll.createIndex(index_spec) - self.assertRaises(TypeError, coll.createIndex, 3) - with self.assertRaisesFullCode("ORA-40733"): - coll.createIndex(index_spec) - self.assertTrue(coll.dropIndex(index_name)) - self.assertFalse(coll.dropIndex(index_name)) - - def test_3407(self): - "3407 - test getting documents from Collection" - self.conn.autocommit = True - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestGetDocs") - data = [ - {"name": "John", "address": {"city": "Bangalore"}}, - {"name": "Johnson", "address": {"city": "Banaras"}}, - {"name": "Joseph", "address": {"city": "Mangalore"}}, - {"name": "Jibin", "address": {"city": "Secunderabad"}}, - {"name": "Andrew", "address": {"city": "Hyderabad"}}, - ] - inserted_keys = list(sorted(coll.insertOneAndGet(v).key for v in data)) - fetched_keys = list( - sorted(doc.key for doc in coll.find().getDocuments()) - ) - self.assertEqual(fetched_keys, inserted_keys) - - def test_3408(self): - "3408 - test fetching documents from a cursor" - self.conn.autocommit = True - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestFindViaCursor") - data = [ - {"name": "John", "address": {"city": "Bangalore"}}, - {"name": "Johnson", "address": {"city": "Banaras"}}, - {"name": "Joseph", "address": {"city": "Mangalore"}}, - ] - inserted_keys = list(sorted(coll.insertOneAndGet(v).key for v in data)) - fetched_keys = list(sorted(doc.key for doc in coll.find().getCursor())) - self.assertEqual(fetched_keys, inserted_keys) - - def test_3409(self): - "3409 - test removing multiple documents using multiple keys" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestRemoveMultipleDocs") - data = [ - {"name": "John", "address": {"city": "Bangalore"}}, - {"name": "Johnson", "address": {"city": "Banaras"}}, - {"name": "Joseph", "address": 
{"city": "Mangalore"}}, - {"name": "Jibin", "address": {"city": "Secunderabad"}}, - {"name": "Andrew", "address": {"city": "Hyderabad"}}, - {"name": "Matthew", "address": {"city": "Mumbai"}}, - ] - docs = [coll.insertOneAndGet(v) for v in data] - keys = [docs[i].key for i in (1, 3, 5)] - num_removed = coll.find().keys(keys).remove() - self.assertEqual(num_removed, len(keys)) - self.assertEqual(coll.find().count(), len(data) - len(keys)) - self.conn.commit() - - def test_3410(self): - "3410 - test using version to get documents and remove them" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestDocumentVersion") - content = {"name": "John", "address": {"city": "Bangalore"}} - inserted_doc = coll.insertOneAndGet(content) - key = inserted_doc.key - version = inserted_doc.version - doc = coll.find().key(key).version(version).getOne().getContent() - self.__normalize_docs([doc]) - self.assertEqual(doc, content) - new_content = {"name": "James", "address": {"city": "Delhi"}} - replaced_doc = coll.find().key(key).replaceOneAndGet(new_content) - new_version = replaced_doc.version - doc = coll.find().key(key).version(version).getOne() - self.assertIsNone(doc) - doc = coll.find().key(key).version(new_version).getOne().getContent() - self.__normalize_docs([doc]) - self.assertEqual(doc, new_content) - self.assertEqual(coll.find().key(key).version(version).remove(), 0) - self.assertEqual(coll.find().key(key).version(new_version).remove(), 1) - self.assertEqual(coll.find().count(), 0) - self.conn.commit() - - def test_3411(self): - "3411 - test keys with GetCursor" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestKeysWithGetCursor") - values_to_insert = [ - {"name": "John", "address": {"city": "Bangalore"}}, - {"name": "Johnson", "address": {"city": "Banaras"}}, - {"name": "Joseph", "address": {"city": "Mangalore"}}, - {"name": "Jibin", "address": {"city": "Secunderabad"}}, - {"name": "Andrew", "address": {"city": "Hyderabad"}}, - {"name": "Matthew", "address": {"city": "Mumbai"}}, - ] - docs = [coll.insertOneAndGet(value) for value in values_to_insert] - keys = [docs[i].key for i in (2, 4, 5)] - fetched_keys = [doc.key for doc in coll.find().keys(keys).getCursor()] - self.assertEqual(list(sorted(fetched_keys)), list(sorted(keys))) - self.conn.commit() - - def test_3412(self): - "3412 - test createdOn attribute of Document" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("CreatedOn") - data = {"name": "John", "address": {"city": "Bangalore"}} - doc = coll.insertOneAndGet(data) - self.assertEqual(doc.createdOn, doc.lastModified) - - @unittest.skipUnless(test_env.has_client_version(20), "unsupported client") - def test_3413(self): - "3413 - test Soda truncate" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestTruncateDocs") - values_to_insert = [ - {"name": "George", "age": 47}, - {"name": "Susan", "age": 39}, - {"name": "John", "age": 50}, - {"name": "Jill", "age": 54}, - ] - for value in values_to_insert: - coll.insertOne(value) - self.conn.commit() - self.assertEqual(coll.find().count(), len(values_to_insert)) - coll.truncate() - self.assertEqual(coll.find().count(), 0) - - @unittest.skipUnless( - test_env.has_client_version(19, 11), - "client version not supported.. 
min required 19.11", - ) - def test_3414(self): - "3414 - verify hints are reflected in the executed SQL statement" - soda_db = self.get_soda_database() - cursor = self.conn.cursor() - statement = """ - SELECT - ( SELECT t2.sql_fulltext - FROM v$sql t2 - WHERE t2.sql_id = t1.prev_sql_id - AND t2.child_number = t1.prev_child_number - ) - FROM v$session t1 - WHERE t1.audsid = sys_context('userenv', 'sessionid')""" - coll = soda_db.createCollection("TestSodaHint") - values_to_insert = [ - {"name": "George", "age": 47}, - {"name": "Susan", "age": 39}, - ] - coll.insertOneAndGet(values_to_insert[0], hint="MONITOR") - cursor.execute(statement) - (result,) = cursor.fetchone() - self.assertIn("MONITOR", result.read()) - - coll.find().hint("MONITOR").getOne().getContent() - cursor.execute(statement) - (result,) = cursor.fetchone() - self.assertIn("MONITOR", result.read()) - - coll.insertOneAndGet(values_to_insert[1], hint="NO_MONITOR") - cursor.execute(statement) - (result,) = cursor.fetchone() - self.assertIn("NO_MONITOR", result.read()) - - def test_3415(self): - "3415 - test error for invalid type for soda hint" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("InvalidSodaHint") - self.assertRaises( - TypeError, coll.insertOneAndGet, dict(name="Fred", age=16), hint=5 - ) - self.assertRaises( - TypeError, - coll.insertManyAndGet, - dict(name="George", age=25), - hint=10, - ) - self.assertRaises( - TypeError, coll.saveAndGet, dict(name="Sally", age=36), hint=5 - ) - self.assertRaises(TypeError, coll.find().hint, 2) - - def test_3416(self): - "3416 - test name and metadata attribute" - soda_db = self.get_soda_database() - collection_name = "TestCollectionMetadata" - coll = soda_db.createCollection(collection_name) - self.assertEqual(coll.name, collection_name) - self.assertEqual(coll.metadata["tableName"], collection_name) - - def test_3417(self): - "3417 - test insertMany" - soda_db = self.get_soda_database(minclient=(18, 5)) - coll = soda_db.createCollection("TestInsertMany") - values_to_insert = [ - dict(name="George", age=25), - soda_db.createDocument(dict(name="Lucas", age=47)), - ] - coll.insertMany(values_to_insert) - self.conn.commit() - fetched_values = [doc.getContent() for doc in coll.find().getCursor()] - fetched_values.sort(key=lambda x: x["name"]) - for fetched_val, expected_val in zip(fetched_values, values_to_insert): - if not isinstance(expected_val, dict): - expected_val = expected_val.getContent() - self.assertEqual(fetched_val, fetched_val) - with self.assertRaisesFullCode("DPI-1031"): - coll.insertMany([]) - - @unittest.skipIf( - test_env.has_client_version(23), - "save() is not implemented in Oracle Database 23ai", + assert coll.dropIndex(index_name) + assert not coll.dropIndex(index_name) + + +def test_3407(soda_db, conn): + "3407 - test getting documents from Collection" + conn.autocommit = True + coll = soda_db.createCollection("TestGetDocs") + data = [ + {"name": "John", "address": {"city": "Bangalore"}}, + {"name": "Johnson", "address": {"city": "Banaras"}}, + {"name": "Joseph", "address": {"city": "Mangalore"}}, + {"name": "Jibin", "address": {"city": "Secunderabad"}}, + {"name": "Andrew", "address": {"city": "Hyderabad"}}, + ] + inserted_keys = list(sorted(coll.insertOneAndGet(v).key for v in data)) + fetched_keys = list(sorted(doc.key for doc in coll.find().getDocuments())) + assert fetched_keys == inserted_keys + + +def test_3408(soda_db, conn): + "3408 - test fetching documents from a cursor" + conn.autocommit = True + coll = 
soda_db.createCollection("TestFindViaCursor") + data = [ + {"name": "John", "address": {"city": "Bangalore"}}, + {"name": "Johnson", "address": {"city": "Banaras"}}, + {"name": "Joseph", "address": {"city": "Mangalore"}}, + ] + inserted_keys = list(sorted(coll.insertOneAndGet(v).key for v in data)) + fetched_keys = list(sorted(doc.key for doc in coll.find().getCursor())) + assert fetched_keys == inserted_keys + + +def test_3409(soda_db, conn): + "3409 - test removing multiple documents using multiple keys" + coll = soda_db.createCollection("TestRemoveMultipleDocs") + data = [ + {"name": "John", "address": {"city": "Bangalore"}}, + {"name": "Johnson", "address": {"city": "Banaras"}}, + {"name": "Joseph", "address": {"city": "Mangalore"}}, + {"name": "Jibin", "address": {"city": "Secunderabad"}}, + {"name": "Andrew", "address": {"city": "Hyderabad"}}, + {"name": "Matthew", "address": {"city": "Mumbai"}}, + ] + docs = [coll.insertOneAndGet(v) for v in data] + keys = [docs[i].key for i in (1, 3, 5)] + num_removed = coll.find().keys(keys).remove() + assert num_removed == len(keys) + assert coll.find().count() == len(data) - len(keys) + conn.commit() + + +def test_3410(soda_db, conn): + "3410 - test using version to get documents and remove them" + coll = soda_db.createCollection("TestDocumentVersion") + content = {"name": "John", "address": {"city": "Bangalore"}} + inserted_doc = coll.insertOneAndGet(content) + key = inserted_doc.key + version = inserted_doc.version + doc = coll.find().key(key).version(version).getOne().getContent() + _normalize_docs([doc]) + assert doc == content + new_content = {"name": "James", "address": {"city": "Delhi"}} + replaced_doc = coll.find().key(key).replaceOneAndGet(new_content) + new_version = replaced_doc.version + doc = coll.find().key(key).version(version).getOne() + assert doc is None + doc = coll.find().key(key).version(new_version).getOne().getContent() + _normalize_docs([doc]) + assert doc == new_content + assert coll.find().key(key).version(version).remove() == 0 + assert coll.find().key(key).version(new_version).remove() == 1 + assert coll.find().count() == 0 + conn.commit() + + +def test_3411(soda_db, conn): + "3411 - test keys with GetCursor" + coll = soda_db.createCollection("TestKeysWithGetCursor") + values_to_insert = [ + {"name": "John", "address": {"city": "Bangalore"}}, + {"name": "Johnson", "address": {"city": "Banaras"}}, + {"name": "Joseph", "address": {"city": "Mangalore"}}, + {"name": "Jibin", "address": {"city": "Secunderabad"}}, + {"name": "Andrew", "address": {"city": "Hyderabad"}}, + {"name": "Matthew", "address": {"city": "Mumbai"}}, + ] + docs = [coll.insertOneAndGet(value) for value in values_to_insert] + keys = [docs[i].key for i in (2, 4, 5)] + fetched_keys = [doc.key for doc in coll.find().keys(keys).getCursor()] + assert list(sorted(fetched_keys)) == list(sorted(keys)) + conn.commit() + + +def test_3412(soda_db): + "3412 - test createdOn attribute of Document" + coll = soda_db.createCollection("CreatedOn") + data = {"name": "John", "address": {"city": "Bangalore"}} + doc = coll.insertOneAndGet(data) + assert doc.createdOn == doc.lastModified + + +def test_3413(soda_db, conn, test_env): + "3413 - test Soda truncate" + if not test_env.has_client_version(20): + pytest.skip("unsupported client") + coll = soda_db.createCollection("TestTruncateDocs") + values_to_insert = [ + {"name": "George", "age": 47}, + {"name": "Susan", "age": 39}, + {"name": "John", "age": 50}, + {"name": "Jill", "age": 54}, + ] + for value in values_to_insert: + 
coll.insertOne(value) + conn.commit() + assert coll.find().count() == len(values_to_insert) + coll.truncate() + assert coll.find().count() == 0 + + +def test_3414(soda_db, cursor, test_env): + "3414 - verify hints are reflected in the executed SQL statement" + if not test_env.has_client_version(19, 11): + pytest.skip("client version not supported") + statement = """ + SELECT + ( SELECT t2.sql_fulltext + FROM v$sql t2 + WHERE t2.sql_id = t1.prev_sql_id + AND t2.child_number = t1.prev_child_number + ) + FROM v$session t1 + WHERE t1.audsid = sys_context('userenv', 'sessionid')""" + coll = soda_db.createCollection("TestSodaHint") + values_to_insert = [ + {"name": "George", "age": 47}, + {"name": "Susan", "age": 39}, + ] + coll.insertOneAndGet(values_to_insert[0], hint="MONITOR") + cursor.execute(statement) + (result,) = cursor.fetchone() + assert "MONITOR" in result.read() + + coll.find().hint("MONITOR").getOne().getContent() + cursor.execute(statement) + (result,) = cursor.fetchone() + assert "MONITOR" in result.read() + + coll.insertOneAndGet(values_to_insert[1], hint="NO_MONITOR") + cursor.execute(statement) + (result,) = cursor.fetchone() + assert "NO_MONITOR" in result.read() + + +def test_3415(soda_db): + "3415 - test error for invalid type for soda hint" + coll = soda_db.createCollection("InvalidSodaHint") + pytest.raises( + TypeError, coll.insertOneAndGet, dict(name="Fred", age=16), hint=5 ) - def test_3418(self): - "3418 - test save" - soda_db = self.get_soda_database(minclient=(19, 9)) - coll = soda_db.createCollection("TestSodaSave") - values_to_save = [ - dict(name="Jill", age=37), - soda_db.createDocument(dict(name="John", age=7)), - soda_db.createDocument(dict(name="Charles", age=24)), - ] - for value in values_to_save: - coll.save(value) - self.conn.commit() - fetched_docs = coll.find().getDocuments() - for fetched_doc, expected_doc in zip(fetched_docs, values_to_save): - if isinstance(expected_doc, dict): - expected_doc = soda_db.createDocument(expected_doc) - self.assertEqual( - fetched_doc.getContent(), expected_doc.getContent() - ) - - @unittest.skipIf( - test_env.has_client_version(23), - "save() is not implemented in Oracle Database 23ai", + pytest.raises( + TypeError, + coll.insertManyAndGet, + dict(name="George", age=25), + hint=10, ) - def test_3419(self): - "3419 - test saveAndGet with hint" - soda_db = self.get_soda_database(minclient=(19, 11)) - cursor = self.conn.cursor() - statement = """ - SELECT - ( SELECT t2.sql_fulltext - FROM v$sql t2 - WHERE t2.sql_id = t1.prev_sql_id - AND t2.child_number = t1.prev_child_number - ) - FROM v$session t1 - WHERE t1.audsid = sys_context('userenv', 'sessionid')""" - coll = soda_db.createCollection("TestSodaSaveWithHint") - - values_to_save = [ - dict(name="Jordan", age=59), - dict(name="Curry", age=34), - ] - hints = ["MONITOR", "NO_MONITOR"] - for value, hint in zip(values_to_save, hints): - coll.saveAndGet(value, hint=hint) - coll.find().hint(hint).getOne().getContent() - cursor.execute(statement) - (result,) = cursor.fetchone() - self.assertIn(hint, result.read()) - - @unittest.skipIf( - test_env.has_client_version(23), - "save() is not implemented in Oracle Database 23ai", + pytest.raises( + TypeError, coll.saveAndGet, dict(name="Sally", age=36), hint=5 ) - def test_3420(self): - "3420 - test saveAndGet" - soda_db = self.get_soda_database(minclient=(19, 9)) - coll = soda_db.createCollection("TestSodaSaveAndGet") - values_to_save = [ - dict(name="John", age=50), - soda_db.createDocument(dict(name="Mark", age=45)), - 
soda_db.createDocument(dict(name="Jill", age=32)), - ] - inserted_keys = [] - for value in values_to_save: - doc = coll.saveAndGet(value) - inserted_keys.append(doc.key) - fetched_docs = coll.find().getDocuments() - self.conn.commit() - self.assertEqual(coll.find().count(), len(values_to_save)) - for key, fetched_doc in zip(inserted_keys, fetched_docs): - doc = coll.find().key(key).getOne() - self.assertEqual(doc.getContent(), fetched_doc.getContent()) - - def test_3421(self): - "3421 - test insert many and get" - soda_db = self.get_soda_database(minclient=(18, 5)) - for name in soda_db.getCollectionNames(): - soda_db.openCollection(name).drop() - coll = soda_db.createCollection("TestInsertManyAndGet") - values_to_insert = [ - dict(name="George", age=25), - soda_db.createDocument(dict(name="Lucas", age=47)), - ] - docs = coll.insertManyAndGet(values_to_insert) - inserted_keys = [doc.key for doc in docs] - self.conn.commit() - self.assertEqual(coll.find().count(), len(values_to_insert)) - for key, expected_doc in zip(inserted_keys, values_to_insert): - if isinstance(expected_doc, dict): - expected_doc = soda_db.createDocument(expected_doc) - doc = coll.find().key(key).getOne().getContent() - self.__normalize_docs([doc]) - self.assertEqual(doc, expected_doc.getContent()) - - def test_3422(self): - "3422 - close document cursor and confirm exception is raised" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestCloseSodaDocCursor") - cursor = coll.find().getCursor() + pytest.raises(TypeError, coll.find().hint, 2) + + +def test_3416(soda_db): + "3416 - test name and metadata attribute" + collection_name = "TestCollectionMetadata" + coll = soda_db.createCollection(collection_name) + assert coll.name == collection_name + assert coll.metadata["tableName"] == collection_name + + +def test_3417(soda_db, conn, test_env): + "3417 - test insertMany" + if not test_env.has_client_version(18, 5): + pytest.skip("unsupported client") + coll = soda_db.createCollection("TestInsertMany") + values_to_insert = [ + dict(name="George", age=25), + soda_db.createDocument(dict(name="Lucas", age=47)), + ] + coll.insertMany(values_to_insert) + conn.commit() + fetched_values = [doc.getContent() for doc in coll.find().getCursor()] + fetched_values.sort(key=lambda x: x["name"]) + for fetched_val, expected_val in zip(fetched_values, values_to_insert): + if not isinstance(expected_val, dict): + expected_val = expected_val.getContent() + assert fetched_val == fetched_val + with test_env.assert_raises_full_code("DPI-1031"): + coll.insertMany([]) + + +def test_3418(skip_if_save_not_supported, soda_db, conn, test_env): + "3418 - test save" + coll = soda_db.createCollection("TestSodaSave") + values_to_save = [ + dict(name="Jill", age=37), + soda_db.createDocument(dict(name="John", age=7)), + soda_db.createDocument(dict(name="Charles", age=24)), + ] + for value in values_to_save: + coll.save(value) + conn.commit() + fetched_docs = coll.find().getDocuments() + for fetched_doc, expected_doc in zip(fetched_docs, values_to_save): + if isinstance(expected_doc, dict): + expected_doc = soda_db.createDocument(expected_doc) + assert fetched_doc.getContent() == expected_doc.getContent() + + +def test_3419(skip_if_save_not_supported, soda_db, cursor, test_env): + "3419 - test saveAndGet with hint" + statement = """ + SELECT + ( SELECT t2.sql_fulltext + FROM v$sql t2 + WHERE t2.sql_id = t1.prev_sql_id + AND t2.child_number = t1.prev_child_number + ) + FROM v$session t1 + WHERE t1.audsid = sys_context('userenv', 
'sessionid')""" + coll = soda_db.createCollection("TestSodaSaveWithHint") + + values_to_save = [ + dict(name="Jordan", age=59), + dict(name="Curry", age=34), + ] + hints = ["MONITOR", "NO_MONITOR"] + for value, hint in zip(values_to_save, hints): + coll.saveAndGet(value, hint=hint) + coll.find().hint(hint).getOne().getContent() + cursor.execute(statement) + (result,) = cursor.fetchone() + assert hint in result.read() + + +def test_3420(skip_if_save_not_supported, soda_db, conn): + "3420 - test saveAndGet" + coll = soda_db.createCollection("TestSodaSaveAndGet") + values_to_save = [ + dict(name="John", age=50), + soda_db.createDocument(dict(name="Mark", age=45)), + soda_db.createDocument(dict(name="Jill", age=32)), + ] + inserted_keys = [] + for value in values_to_save: + doc = coll.saveAndGet(value) + inserted_keys.append(doc.key) + fetched_docs = coll.find().getDocuments() + conn.commit() + assert coll.find().count() == len(values_to_save) + for key, fetched_doc in zip(inserted_keys, fetched_docs): + doc = coll.find().key(key).getOne() + assert doc.getContent() == fetched_doc.getContent() + + +def test_3421(soda_db, conn, test_env): + "3421 - test insert many and get" + if not test_env.has_client_version(18, 5): + pytest.skip("unsupported client") + for name in soda_db.getCollectionNames(): + soda_db.openCollection(name).drop() + coll = soda_db.createCollection("TestInsertManyAndGet") + values_to_insert = [ + dict(name="George", age=25), + soda_db.createDocument(dict(name="Lucas", age=47)), + ] + docs = coll.insertManyAndGet(values_to_insert) + inserted_keys = [doc.key for doc in docs] + conn.commit() + assert coll.find().count() == len(values_to_insert) + for key, expected_doc in zip(inserted_keys, values_to_insert): + if isinstance(expected_doc, dict): + expected_doc = soda_db.createDocument(expected_doc) + doc = coll.find().key(key).getOne().getContent() + _normalize_docs([doc]) + assert doc == expected_doc.getContent() + + +def test_3422(soda_db, test_env): + "3422 - close document cursor and confirm exception is raised" + coll = soda_db.createCollection("TestCloseSodaDocCursor") + cursor = coll.find().getCursor() + cursor.close() + with test_env.assert_raises_full_code("DPY-1006"): cursor.close() - with self.assertRaisesFullCode("DPY-1006"): - cursor.close() - with self.assertRaisesFullCode("DPY-1006"): - next(cursor) - - def test_3423(self): - "3423 - test limit to get specific amount of documents" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestSodaLimit") - values_to_insert = [{"group": "Camila"} for i in range(20)] - coll.insertMany(values_to_insert) - self.conn.commit() - docs = coll.find().getDocuments() - self.assertEqual(len(docs), len(values_to_insert)) - docs = coll.find().limit(3).getDocuments() - self.assertEqual(len(docs), 3) - - def test_3424(self): - "3424 - get count exceptions when using limit and skip" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestSodaCountExceptions") - data = [{"song": "WYMCA"} for i in range(20)] - coll.insertMany(data) - self.conn.commit() - with self.assertRaisesFullCode("ORA-40748"): - coll.find().limit(5).count() - with self.assertRaisesFullCode("ORA-40748"): - coll.find().skip(10).count() - - @unittest.skipIf( - test_env.has_client_version(23), - "map mode not supported with native collections in Oracle Database 23", - ) - def test_3425(self): - "3425 - test mapMode parameter" - soda_db = self.get_soda_database() - data = [{"price": 4900}, {"price": 8}] - expected_data = data * 2 - - 
original_coll = soda_db.createCollection("TestCollMapMode") - original_coll.insertMany(data) - mapped_coll = soda_db.createCollection("TestCollMapMode", mapMode=True) - mapped_coll.insertMany(data) - - for coll in [original_coll, mapped_coll]: - fetched_data = list( - doc.getContent() for doc in coll.find().getDocuments() - ) - self.__normalize_docs(fetched_data) - self.assertEqual(fetched_data, expected_data) - with self.assertRaisesFullCode("ORA-40626"): - coll.drop() - self.conn.commit() - self.assertTrue(original_coll.drop()) - self.assertFalse(mapped_coll.drop()) - - @unittest.skipIf( - test_env.has_client_version(23), - "map mode not supported with native collections in Oracle Database 23", - ) - def test_3426(self): - "3426 - test mapping a new collection from an non-existent table" - soda_db = self.get_soda_database() - with self.assertRaisesFullCode("ORA-40623"): - soda_db.createCollection("TestSodaMapNonExistent", mapMode=True) - - def test_3427(self): - "3427 - test negative cases for SodaOperation methods" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestSodaOperationNegative") - self.assertRaises(TypeError, coll.find().filter, 5) - self.assertRaises(TypeError, coll.find().key, 2) - self.assertRaises(TypeError, coll.find().keys, [1, 2, 3]) - self.assertRaises(TypeError, coll.find().skip, "word") - self.assertRaises(TypeError, coll.find().skip, -5) - self.assertRaises(TypeError, coll.find().version, 1971) - self.assertRaises(TypeError, coll.find().limit, "a word") - - def test_3428(self): - "3428 - test fetchArraySize" - soda_db = self.get_soda_database(minclient=(19, 5)) - coll = soda_db.createCollection("TestSodaFetchArraySize") - for i in range(90): - coll.insertOne({"name": "Emmanuel", "age": i + 1}) - self.conn.commit() - - self.setup_round_trip_checker() - # setting array size to 0 will use the default value of 100 - # requires a single round-trip - coll.find().fetchArraySize(0).getDocuments() - self.assertRoundTrips(1) - - # setting array size to 1 requires a round-trip for each SodaDoc - coll.find().fetchArraySize(1).getDocuments() - self.assertRoundTrips(91) - - # setting array size to 20 requires 5 round-trips - coll.find().fetchArraySize(20).getDocuments() - self.assertRoundTrips(5) - - # getting a SodaDocCursor requires a round-trip - coll.find().fetchArraySize(0).getCursor() - self.assertRoundTrips(1) - - # setting array size to 1 and iterating the SodaDocCursor requires a - # round-trip for each SodaDoc - soda_doc_cursor = coll.find().fetchArraySize(1).getCursor() - for soda_doc in soda_doc_cursor: - continue - self.assertRoundTrips(91) - - # setting array size to 50 and iterating the SodaDocCursor requires - # two round-trips - soda_doc_cursor = coll.find().fetchArraySize(50).getCursor() - for soda_doc in soda_doc_cursor: - continue - self.assertRoundTrips(2) - - # check a few negative scenarios - self.assertRaises(TypeError, coll.find().fetchArraySize, "Mijares") - self.assertRaises(TypeError, coll.find().fetchArraySize, -1) - - def test_3429(self): - "3429 - test getting indexes on a collection" - soda_db = self.get_soda_database(minclient=(19, 13)) - coll = soda_db.createCollection("TestSodaListIndexes") - index_1 = { - "name": "ix_3428-1", - "fields": [ - {"path": "address.city", "datatype": "string", "order": "asc"} - ], - } - index_2 = { - "name": "ix_3428-2", - "fields": [ - { - "path": "address.postal_code", - "datatype": "string", - "order": "asc", - } - ], - } - self.assertEqual(coll.listIndexes(), []) - 
coll.createIndex(index_1) - coll.createIndex(index_2) - indexes = coll.listIndexes() - indexes.sort(key=lambda x: x["name"]) - self.assertEqual(indexes[0]["fields"][0]["path"], "address.city") - self.assertEqual( - indexes[1]["fields"][0]["path"], "address.postal_code" - ) - - def test_3430(self): - "3430 - test locking documents on fetch" - soda_db = self.get_soda_database(minclient=(19, 11)) - coll = soda_db.createCollection("TestSodaLockDocs") - values_to_insert = [ - {"name": "Bob", "age": 46}, - {"name": "Barb", "age": 45}, - {"name": "Sandy", "age": 47}, - ] - coll.insertMany(values_to_insert) - coll.find().lock().getDocuments() - - def test_3431(self): - "3431 - test that drop returns the correct boolean" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestDropCollection") - self.assertTrue(coll.drop()) - - # the collection has already been dropped - self.assertFalse(coll.drop()) - - @unittest.skipIf( - test_env.has_client_version(23), - "map mode not supported with native collections in Oracle Database 23", - ) - def test_3432(self): - "3432 - test drop with an empty mapped collection" - soda_db = self.get_soda_database() - original_coll = soda_db.createCollection("TestDropMapMode") - mapped_coll = soda_db.createCollection("TestDropMapMode", mapMode=True) - self.assertTrue(mapped_coll.drop()) - self.assertFalse(original_coll.drop()) - - def test_3433(self): - "3433 - test that replaceOne() returns a correct boolean" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestReplaceDocReturns") - doc = coll.insertOneAndGet({"address": {"city": "Sydney"}}) - - new_content = {"address": {"city": "Melbourne"}} - self.assertTrue(coll.find().key(doc.key).replaceOne(new_content)) - - unregistered_key = "DB4A2628F1E0985C891F3F4836" - self.assertFalse( - coll.find().key(unregistered_key).replaceOne(new_content) + with test_env.assert_raises_full_code("DPY-1006"): + next(cursor) + + +def test_3423(soda_db, conn): + "3423 - test limit to get specific amount of documents" + coll = soda_db.createCollection("TestSodaLimit") + values_to_insert = [{"group": "Camila"} for i in range(20)] + coll.insertMany(values_to_insert) + conn.commit() + docs = coll.find().getDocuments() + assert len(docs) == len(values_to_insert) + docs = coll.find().limit(3).getDocuments() + assert len(docs) == 3 + + +def test_3424(soda_db, conn, test_env): + "3424 - get count exceptions when using limit and skip" + coll = soda_db.createCollection("TestSodaCountExceptions") + data = [{"song": "WYMCA"} for i in range(20)] + coll.insertMany(data) + conn.commit() + with test_env.assert_raises_full_code("ORA-40748"): + coll.find().limit(5).count() + with test_env.assert_raises_full_code("ORA-40748"): + coll.find().skip(10).count() + + +def test_3425(skip_if_map_mode_not_supported, soda_db, conn, test_env): + "3425 - test mapMode parameter" + data = [{"price": 4900}, {"price": 8}] + expected_data = data * 2 + + original_coll = soda_db.createCollection("TestCollMapMode") + original_coll.insertMany(data) + mapped_coll = soda_db.createCollection("TestCollMapMode", mapMode=True) + mapped_coll.insertMany(data) + + for coll in [original_coll, mapped_coll]: + fetched_data = list( + doc.getContent() for doc in coll.find().getDocuments() ) - self.conn.commit() - - def test_3434(self): - "3434 - replaceOne() and replaceOneAndGet() with invalid scenarios" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestReplaceOneNegative") - coll.insertMany([{"Wisdom": 1.7} for d in 
range(2)]) - keys = [d.key for d in coll.find().getDocuments()] - with self.assertRaisesFullCode("ORA-40734"): - coll.find().keys(keys).replaceOne({"data": "new"}) - with self.assertRaisesFullCode("ORA-40734"): - coll.find().keys(keys).replaceOneAndGet({"data": "new"}) - - @unittest.skipUnless( - test_env.has_client_version(19, 9), - "client version not supported.. min required 19.9", - ) - def test_3435(self): - "3435 - test writting a read-only collection" - soda_db = self.get_soda_database() - metadata = { - "readOnly": True, - } - coll = soda_db.createCollection("TestCollReadOnly", metadata) - - methods = [ - coll.insertOne, - coll.insertOneAndGet, - coll.insertMany, - coll.insertManyAndGet, - coll.save, - coll.saveAndGet, - ] - for method in methods: - with self.subTest(method=method): - with self.assertRaisesFullCode("ORA-40663"): - method({"Song 1": "No end"}) - - def test_3436(self): - "3436 - createCollection() with the same name and metadata" - soda_db = self.get_soda_database() - coll_name = "TestCollSameMetadata" - coll1 = soda_db.createCollection(coll_name, {"readOnly": True}) - coll2 = soda_db.createCollection(coll_name, {"readOnly": True}) - self.assertTrue(coll1.drop()) - self.assertFalse(coll2.drop()) - - def test_3437(self): - "3437 - createCollection() with the same name but different metadata" - soda_db = self.get_soda_database() - coll_name = "TestCollDifferentMetadata" - coll = soda_db.createCollection(coll_name) - with self.assertRaisesFullCode("ORA-40669"): - soda_db.createCollection(coll_name, {"readOnly": False}) - coll.drop() - - coll = soda_db.createCollection(coll_name, {"readOnly": True}) - with self.assertRaisesFullCode("ORA-40669"): - soda_db.createCollection(coll_name, {"readOnly": False}) - - def test_3438(self): - "3438 - test getDataGuide() with an index with data-guide support" - self.conn = test_env.get_connection() - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestSodaDataGuideEnabled") - data = [ + _normalize_docs(fetched_data) + assert fetched_data == expected_data + with test_env.assert_raises_full_code("ORA-40626"): + coll.drop() + conn.commit() + assert original_coll.drop() + assert not mapped_coll.drop() + + +def test_3426(skip_if_map_mode_not_supported, soda_db, test_env): + "3426 - test mapping a new collection from an non-existent table" + with test_env.assert_raises_full_code("ORA-40623"): + soda_db.createCollection("TestSodaMapNonExistent", mapMode=True) + + +def test_3427(soda_db): + "3427 - test negative cases for SodaOperation methods" + coll = soda_db.createCollection("TestSodaOperationNegative") + pytest.raises(TypeError, coll.find().filter, 5) + pytest.raises(TypeError, coll.find().key, 2) + pytest.raises(TypeError, coll.find().keys, [1, 2, 3]) + pytest.raises(TypeError, coll.find().skip, "word") + pytest.raises(TypeError, coll.find().skip, -5) + pytest.raises(TypeError, coll.find().version, 1971) + pytest.raises(TypeError, coll.find().limit, "a word") + + +def test_3428(soda_db, conn, round_trip_checker, test_env): + "3428 - test fetchArraySize" + if not test_env.has_client_version(19, 5): + pytest.skip("unsupported client") + coll = soda_db.createCollection("TestSodaFetchArraySize") + for i in range(90): + coll.insertOne({"name": "Emmanuel", "age": i + 1}) + conn.commit() + + # setting array size to 0 will use the default value of 100 + # requires a single round-trip + round_trip_checker.get_value() + coll.find().fetchArraySize(0).getDocuments() + assert round_trip_checker.get_value() == 1 + + # setting 
array size to 1 requires a round-trip for each SodaDoc + coll.find().fetchArraySize(1).getDocuments() + assert round_trip_checker.get_value() == 91 + + # setting array size to 20 requires 5 round-trips + coll.find().fetchArraySize(20).getDocuments() + assert round_trip_checker.get_value() == 5 + + # getting a SodaDocCursor requires a round-trip + coll.find().fetchArraySize(0).getCursor() + assert round_trip_checker.get_value() == 1 + + # setting array size to 1 and iterating the SodaDocCursor requires a + # round-trip for each SodaDoc + soda_doc_cursor = coll.find().fetchArraySize(1).getCursor() + for soda_doc in soda_doc_cursor: + continue + assert round_trip_checker.get_value() == 91 + + # setting array size to 50 and iterating the SodaDocCursor requires + # two round-trips + soda_doc_cursor = coll.find().fetchArraySize(50).getCursor() + for soda_doc in soda_doc_cursor: + continue + assert round_trip_checker.get_value() == 2 + + # check a few negative scenarios + pytest.raises(TypeError, coll.find().fetchArraySize, "Mijares") + pytest.raises(TypeError, coll.find().fetchArraySize, -1) + + +def test_3429(soda_db): + "3429 - test getting indexes on a collection" + coll = soda_db.createCollection("TestSodaListIndexes") + index_1 = { + "name": "ix_3428-1", + "fields": [ + {"path": "address.city", "datatype": "string", "order": "asc"} + ], + } + index_2 = { + "name": "ix_3428-2", + "fields": [ { - "team": "backend", - "created_in": 2001, - "members": [{"developer": "Joseph"}, {"tester": "Mark"}], - }, - {"team": "frontend", "area": "user interface"}, - ] - coll.insertMany(data) - self.conn.commit() - index = { - "name": "ix_3438", - "dataguide": "on", - } - coll.createIndex(index) - - data_guide = coll.getDataGuide().getContent() - - if test_env.has_client_version(23, 4) and test_env.has_server_version( - 23, 4 - ): - self.assertEqual(data_guide["properties"]["_id"]["type"], "id") - - values = [ - ("team", "string"), - ("created_in", "number"), - ("area", "string"), - ] - for name, typ in values: - self.assertEqual(data_guide["properties"][name]["type"], typ) - self.assertRegex( - data_guide["properties"][name]["o:preferred_column_name"], - f"(JSON_DOCUMENT|DATA)\\${name}", - ) - self.assertEqual(data_guide["properties"]["members"]["type"], "array") - - members_values = [ - ("tester", "string", 4), - ("developer", "string", 8), - ] - for name, typ, length in members_values: - members_items = data_guide["properties"]["members"]["items"] - self.assertEqual(members_items["properties"][name]["type"], typ) - self.assertEqual( - members_items["properties"][name]["o:length"], length - ) - self.assertRegex( - members_items["properties"][name]["o:preferred_column_name"], - f"(JSON_DOCUMENT|DATA)\\${name}", - ) - - def test_3439(self): - "3439 - test getDataGuide() with an index without data-guide support" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestSodaDataGuideDisabled") - - coll.insertOne({"data": "test_3439"}) - self.conn.commit() - index = { - "name": "ix-3439", - "dataguide": "off", - } - coll.createIndex(index) - with self.assertRaisesFullCode("ORA-40582"): - coll.getDataGuide() - - def test_3440(self): - "3440 - test getDataGuide() with an empty collection" - soda_db = self.get_soda_database() - coll = soda_db.createCollection("TestDataGuideWithEmptyColl") - coll.createIndex({"name": "ix_3440", "dataguide": "on"}) - self.assertIsNone(coll.getDataGuide()) - - def test_3441(self): - "3441 - test getDataGuide() without a json search index" - soda_db = 
self.get_soda_database() - coll = soda_db.createCollection("TestSodaDataGuideWithoutIndex") - with self.assertRaisesFullCode("ORA-40582"): - coll.getDataGuide() - - def test_3442(self): - "3442 - test mapMode parameter with metadata" - soda_db = self.get_soda_database() - data = [{"price": 4900}, {"price": 8}] - expected_data = data * 2 - coll_name = "TestCollOriginal" - - metadata = { - "tableName": coll_name, - "keyColumn": {"name": "ID"}, - "contentColumn": {"name": "JSON_DOCUMENT", "sqlType": "BLOB"}, - "versionColumn": {"name": "VERSION", "method": "UUID"}, - "lastModifiedColumn": {"name": "LAST_MODIFIED"}, - "creationTimeColumn": {"name": "CREATED_ON"}, - } - - original_coll = soda_db.createCollection(coll_name, metadata=metadata) - original_coll.insertMany(data) - mapped_coll = soda_db.createCollection( - "TestCollMapMode", metadata=metadata, mapMode=True - ) - mapped_coll.insertMany(data) - - for coll in [original_coll, mapped_coll]: - fetched_data = list( - doc.getContent() for doc in coll.find().getDocuments() - ) - self.__normalize_docs(fetched_data) - self.assertEqual(fetched_data, expected_data) - - with self.assertRaisesFullCode("ORA-40626"): - original_coll.drop() - self.assertTrue(mapped_coll.drop()) - self.conn.commit() - self.assertTrue(original_coll.drop()) - self.assertFalse(mapped_coll.drop()) - - def test_3443(self): - "3443 - test mapping a new collection from an non-existent table" - soda_db = self.get_soda_database() - metadata = {"tableName": "TestNone"} - with self.assertRaisesFullCode("ORA-40623"): - soda_db.createCollection( - "TestSodaMapNonExistent", metadata=metadata, mapMode=True - ) - - def test_3444(self): - "3444 - test collections with mixture of media types" - soda_db = self.get_soda_database() - metadata = dict(mediaTypeColumn=dict(name="media_type")) - coll = soda_db.createCollection("TestMixedMedia", metadata=metadata) - test_data = [ - (dict(name="George", age=28), "application/json"), - ("Sample Text", "text/plain"), - (b"\x57\x25\xfe\x34\x56", "application/octet-stream"), - ] - for value, media_type in test_data: - coll.find().remove() - coll.insertOne(soda_db.createDocument(value, mediaType=media_type)) - fetched_doc = coll.find().getDocuments()[0] - self.assertEqual(fetched_doc.mediaType, media_type) - if media_type == "application/json": - self.assertEqual(fetched_doc.getContent(), value) - self.assertEqual( - json.loads(fetched_doc.getContentAsString()), value - ) - self.assertEqual( - json.loads(fetched_doc.getContentAsBytes().decode()), value - ) - elif media_type == "text/plain": - self.assertEqual(fetched_doc.getContent(), value.encode()) - self.assertEqual(fetched_doc.getContentAsString(), value) - self.assertEqual( - fetched_doc.getContentAsBytes(), value.encode() - ) - else: - self.assertEqual(fetched_doc.getContent(), value) - self.assertEqual(fetched_doc.getContentAsBytes(), value) - self.assertRaises( - UnicodeDecodeError, fetched_doc.getContentAsString - ) - - @unittest.skipUnless( - test_env.has_client_version(23, 4) - or test_env.has_server_version(23, 4), - "unsupported data types", - ) - def test_3445(self): - "3445 - test fetching documents with JSON data using extended types" - soda_db = self.get_soda_database() - val = { - "testKey1": "testValue1", - "testKey2": decimal.Decimal("12.78"), - "testKey3": datetime.datetime(2023, 7, 3, 11, 10, 24), - } - doc = soda_db.createDocument(val) - self.assertEqual(doc.getContent(), val) - coll = soda_db.createCollection("TestJSONExtendedTypes") - coll.insertOne(doc) - fetched_doc = 
coll.find().getDocuments()[0] - fetched_content = fetched_doc.getContent() - self.__normalize_docs([fetched_content]) - self.assertEqual(fetched_content, val) - self.assertEqual(doc.getContent(), val) - - @unittest.skipUnless( - test_env.has_client_version(23, 4) - or test_env.has_server_version(23, 4), - "unsupported data types", + "path": "address.postal_code", + "datatype": "string", + "order": "asc", + } + ], + } + assert coll.listIndexes() == [] + coll.createIndex(index_1) + coll.createIndex(index_2) + indexes = coll.listIndexes() + indexes.sort(key=lambda x: x["name"]) + assert indexes[0]["fields"][0]["path"] == "address.city" + assert indexes[1]["fields"][0]["path"] == "address.postal_code" + + +def test_3430(soda_db, test_env): + "3430 - test locking documents on fetch" + if not test_env.has_client_version(19, 11): + pytest.skip("unsupported client") + coll = soda_db.createCollection("TestSodaLockDocs") + values_to_insert = [ + {"name": "Bob", "age": 46}, + {"name": "Barb", "age": 45}, + {"name": "Sandy", "age": 47}, + ] + coll.insertMany(values_to_insert) + coll.find().lock().getDocuments() + + +def test_3431(soda_db): + "3431 - test that drop returns the correct boolean" + coll = soda_db.createCollection("TestDropCollection") + assert coll.drop() + + # the collection has already been dropped + assert not coll.drop() + + +def test_3432(skip_if_map_mode_not_supported, soda_db): + "3432 - test drop with an empty mapped collection" + original_coll = soda_db.createCollection("TestDropMapMode") + mapped_coll = soda_db.createCollection("TestDropMapMode", mapMode=True) + assert mapped_coll.drop() + assert not original_coll.drop() + + +def test_3433(soda_db, conn): + "3433 - test that replaceOne() returns a correct boolean" + coll = soda_db.createCollection("TestReplaceDocReturns") + doc = coll.insertOneAndGet({"address": {"city": "Sydney"}}) + + new_content = {"address": {"city": "Melbourne"}} + assert coll.find().key(doc.key).replaceOne(new_content) + + unregistered_key = "DB4A2628F1E0985C891F3F4836" + assert not coll.find().key(unregistered_key).replaceOne(new_content) + conn.commit() + + +def test_3434(soda_db, test_env): + "3434 - replaceOne() and replaceOneAndGet() with invalid scenarios" + coll = soda_db.createCollection("TestReplaceOneNegative") + coll.insertMany([{"Wisdom": 1.7} for d in range(2)]) + keys = [d.key for d in coll.find().getDocuments()] + with test_env.assert_raises_full_code("ORA-40734"): + coll.find().keys(keys).replaceOne({"data": "new"}) + with test_env.assert_raises_full_code("ORA-40734"): + coll.find().keys(keys).replaceOneAndGet({"data": "new"}) + + +def test_3435(soda_db, test_env): + "3435 - test writting a read-only collection" + if not test_env.has_client_version(19, 9): + pytest.skip("client version not supported") + + metadata = { + "readOnly": True, + } + coll = soda_db.createCollection("TestCollReadOnly", metadata) + + methods = [ + coll.insertOne, + coll.insertOneAndGet, + coll.insertMany, + coll.insertManyAndGet, + coll.save, + coll.saveAndGet, + ] + for method in methods: + with test_env.assert_raises_full_code("ORA-40663"): + method({"Song 1": "No end"}) + + +def test_3436(soda_db): + "3436 - createCollection() with the same name and metadata" + coll_name = "TestCollSameMetadata" + coll1 = soda_db.createCollection(coll_name, {"readOnly": True}) + coll2 = soda_db.createCollection(coll_name, {"readOnly": True}) + assert coll1.drop() + assert not coll2.drop() + + +def test_3437(soda_db, test_env): + "3437 - createCollection() with the same name but 
different metadata" + coll_name = "TestCollDifferentMetadata" + coll = soda_db.createCollection(coll_name) + with test_env.assert_raises_full_code("ORA-40669"): + soda_db.createCollection(coll_name, {"readOnly": False}) + coll.drop() + + coll = soda_db.createCollection(coll_name, {"readOnly": True}) + with test_env.assert_raises_full_code("ORA-40669"): + soda_db.createCollection(coll_name, {"readOnly": False}) + + +def test_3438(soda_db, conn, test_env): + "3438 - test getDataGuide() with an index with data-guide support" + coll = soda_db.createCollection("TestSodaDataGuideEnabled") + data = [ + { + "team": "backend", + "created_in": 2001, + "members": [{"developer": "Joseph"}, {"tester": "Mark"}], + }, + {"team": "frontend", "area": "user interface"}, + ] + coll.insertMany(data) + conn.commit() + index = { + "name": "ix_3438", + "dataguide": "on", + } + coll.createIndex(index) + + data_guide = coll.getDataGuide().getContent() + if test_env.has_client_and_server_version(23, 4): + assert data_guide["properties"]["_id"]["type"] == "id" + + values = [ + ("team", "string"), + ("created_in", "number"), + ("area", "string"), + ] + for name, typ in values: + assert data_guide["properties"][name]["type"] == typ + regex = f"(JSON_DOCUMENT|DATA)\\${name}" + val = data_guide["properties"][name]["o:preferred_column_name"] + assert re.fullmatch(regex, val) is not None + + members_values = [ + ("tester", "string", 4), + ("developer", "string", 8), + ] + for name, typ, length in members_values: + members_items = data_guide["properties"]["members"]["items"] + assert members_items["properties"][name]["type"] == typ + assert members_items["properties"][name]["o:length"] == length + regex = f"(JSON_DOCUMENT|DATA)\\${name}" + val = members_items["properties"][name]["o:preferred_column_name"] + assert re.fullmatch(regex, val) is not None + + +def test_3439(soda_db, conn, test_env): + "3439 - test getDataGuide() with an index without data-guide support" + coll = soda_db.createCollection("TestSodaDataGuideDisabled") + + coll.insertOne({"data": "test_3439"}) + conn.commit() + index = { + "name": "ix-3439", + "dataguide": "off", + } + coll.createIndex(index) + with test_env.assert_raises_full_code("ORA-40582"): + coll.getDataGuide() + + +def test_3440(soda_db): + "3440 - test getDataGuide() with an empty collection" + coll = soda_db.createCollection("TestDataGuideWithEmptyColl") + coll.createIndex({"name": "ix_3440", "dataguide": "on"}) + assert coll.getDataGuide() is None + + +def test_3441(soda_db, test_env): + "3441 - test getDataGuide() without a json search index" + coll = soda_db.createCollection("TestSodaDataGuideWithoutIndex") + with test_env.assert_raises_full_code("ORA-40582"): + coll.getDataGuide() + + +def test_3442(soda_db, conn, test_env): + "3442 - test mapMode parameter with metadata" + data = [{"price": 4900}, {"price": 8}] + expected_data = data * 2 + coll_name = "TestCollOriginal" + + metadata = { + "tableName": coll_name, + "keyColumn": {"name": "ID"}, + "contentColumn": {"name": "JSON_DOCUMENT", "sqlType": "BLOB"}, + "versionColumn": {"name": "VERSION", "method": "UUID"}, + "lastModifiedColumn": {"name": "LAST_MODIFIED"}, + "creationTimeColumn": {"name": "CREATED_ON"}, + } + + original_coll = soda_db.createCollection(coll_name, metadata=metadata) + original_coll.insertMany(data) + mapped_coll = soda_db.createCollection( + "TestCollMapMode", metadata=metadata, mapMode=True ) - def test_3446(self): - "3446 - test round-trip of JsonId" - soda_db = self.get_soda_database() - coll = 
soda_db.createCollection("TestJsonId") - val = { - "key1": 5, - "key2": "A string", - "key3": b"Raw data", - "key4": datetime.datetime(2024, 3, 2, 10, 1, 36), - } - doc = soda_db.createDocument(val) - coll.insertOne(doc) - self.conn.commit() - fetched_doc = coll.find().getDocuments()[0] - fetched_content = fetched_doc.getContent() - self.assertIs(type(fetched_content["_id"]), oracledb.JsonId) - updated_val = val.copy() - updated_val["key1"] = 25 - content = fetched_content.copy() - content["key1"] = updated_val["key1"] - updated_doc = soda_db.createDocument(content) - coll.find().key(fetched_doc.key).replaceOne(updated_doc) - fetched_doc = coll.find().getDocuments()[0] - self.assertEqual(fetched_doc.getContent(), content) - - def test_3447(self): - "3447 - test getting documents with client-assigned keys" - soda_db = self.conn.getSodaDatabase() - metadata = {"keyColumn": {"assignmentMethod": "client"}} - coll = soda_db.createCollection( - "TestSearchByClientAssignedKeys", metadata - ) - test_values = [ - ("doc1", {"name": "Help others", "files": []}), - ("doc2", {"name": "Family", "files": ["kids.txt"]}), - ("doc3", {"name": "Our pets", "files": ["dogs.pdf"]}), - ] - docs = [soda_db.createDocument(d, k) for k, d in test_values] - coll.insertMany(docs) - - for key, data in test_values: - (fetched_doc,) = coll.find().key(key).getDocuments() - self.assertEqual(fetched_doc.getContent(), data) + mapped_coll.insertMany(data) - keys = [key for key, _ in test_values] - fetched_docs = coll.find().keys(keys).getDocuments() - self.assertEqual(len(fetched_docs), 3) + for coll in [original_coll, mapped_coll]: + fetched_data = list( + doc.getContent() for doc in coll.find().getDocuments() + ) + _normalize_docs(fetched_data) + assert fetched_data == expected_data + + with test_env.assert_raises_full_code("ORA-40626"): + original_coll.drop() + assert mapped_coll.drop() + conn.commit() + assert original_coll.drop() + assert not mapped_coll.drop() + + +def test_3443(soda_db, test_env): + "3443 - test mapping a new collection from an non-existent table" + metadata = {"tableName": "TestNone"} + with test_env.assert_raises_full_code("ORA-40623"): + soda_db.createCollection( + "TestSodaMapNonExistent", metadata=metadata, mapMode=True + ) -if __name__ == "__main__": - test_env.run_test_cases() +def test_3444(soda_db): + "3444 - test collections with mixture of media types" + metadata = dict(mediaTypeColumn=dict(name="media_type")) + coll = soda_db.createCollection("TestMixedMedia", metadata=metadata) + test_data = [ + (dict(name="George", age=28), "application/json"), + ("Sample Text", "text/plain"), + (b"\x57\x25\xfe\x34\x56", "application/octet-stream"), + ] + for value, media_type in test_data: + coll.find().remove() + coll.insertOne(soda_db.createDocument(value, mediaType=media_type)) + fetched_doc = coll.find().getDocuments()[0] + assert fetched_doc.mediaType == media_type + if media_type == "application/json": + assert fetched_doc.getContent() == value + assert json.loads(fetched_doc.getContentAsString()) == value + as_bytes = fetched_doc.getContentAsBytes().decode() + assert json.loads(as_bytes) == value + elif media_type == "text/plain": + assert fetched_doc.getContent() == value.encode() + assert fetched_doc.getContentAsString() == value + assert fetched_doc.getContentAsBytes() == value.encode() + else: + assert fetched_doc.getContent() == value + assert fetched_doc.getContentAsBytes() == value + pytest.raises(UnicodeDecodeError, fetched_doc.getContentAsString) + + +def test_3445(soda_db, test_env): + 
"3445 - test fetching documents with JSON data using extended types" + if not test_env.has_client_and_server_version(23, 4): + pytest.skip("unsupported data types") + + val = { + "testKey1": "testValue1", + "testKey2": decimal.Decimal("12.78"), + "testKey3": datetime.datetime(2023, 7, 3, 11, 10, 24), + } + doc = soda_db.createDocument(val) + assert doc.getContent() == val + coll = soda_db.createCollection("TestJSONExtendedTypes") + coll.insertOne(doc) + fetched_doc = coll.find().getDocuments()[0] + fetched_content = fetched_doc.getContent() + _normalize_docs([fetched_content]) + assert fetched_content == val + assert doc.getContent() == val + + +def test_3446(soda_db, conn, test_env): + "3446 - test round-trip of JsonId" + if not test_env.has_client_and_server_version(23, 4): + pytest.skip("unsupported data types") + coll = soda_db.createCollection("TestJsonId") + val = { + "key1": 5, + "key2": "A string", + "key3": b"Raw data", + "key4": datetime.datetime(2024, 3, 2, 10, 1, 36), + } + doc = soda_db.createDocument(val) + coll.insertOne(doc) + conn.commit() + fetched_doc = coll.find().getDocuments()[0] + fetched_content = fetched_doc.getContent() + assert type(fetched_content["_id"]) is oracledb.JsonId + updated_val = val.copy() + updated_val["key1"] = 25 + content = fetched_content.copy() + content["key1"] = updated_val["key1"] + updated_doc = soda_db.createDocument(content) + coll.find().key(fetched_doc.key).replaceOne(updated_doc) + fetched_doc = coll.find().getDocuments()[0] + assert fetched_doc.getContent() == content + + +def test_3447(soda_db): + "3447 - test getting documents with client-assigned keys" + metadata = {"keyColumn": {"assignmentMethod": "client"}} + coll = soda_db.createCollection("TestSearchByClientAssignedKeys", metadata) + test_values = [ + ("doc1", {"name": "Help others", "files": []}), + ("doc2", {"name": "Family", "files": ["kids.txt"]}), + ("doc3", {"name": "Our pets", "files": ["dogs.pdf"]}), + ] + docs = [soda_db.createDocument(d, k) for k, d in test_values] + coll.insertMany(docs) + + for key, data in test_values: + (fetched_doc,) = coll.find().key(key).getDocuments() + assert fetched_doc.getContent() == data + + keys = [key for key, _ in test_values] + fetched_docs = coll.find().keys(keys).getDocuments() + assert len(fetched_docs) == 3 diff --git a/tests/test_3500_json.py b/tests/test_3500_json.py index c5a6a11a..c4268351 100644 --- a/tests/test_3500_json.py +++ b/tests/test_3500_json.py @@ -30,12 +30,22 @@ import decimal import oracledb -import test_env +import pytest -@test_env.skip_unless_native_json_supported() -class TestCase(test_env.BaseTestCase): - json_data = [ +@pytest.fixture(autouse=True) +def skip_tests(skip_unless_native_json_supported): + """ + Skip all tests in the file unless native JSON is supported. + """ + + +@pytest.fixture(scope="module") +def json_data(): + """ + Returns the data used for the tests in this module. 
+ """ + return [ True, False, "String", @@ -63,298 +73,303 @@ class TestCase(test_env.BaseTestCase): }, ] - def __bind_scalar_as_json(self, data): - self.cursor.execute("delete from TestJson") - out_var = self.cursor.var(oracledb.DB_TYPE_JSON, arraysize=len(data)) - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON, out_var) - bind_data = list(enumerate(data)) - self.cursor.executemany( - """ - insert into TestJson values (:1, :2) - returning JsonCol into :json_out - """, - bind_data, - ) - self.conn.commit() - self.assertEqual(out_var.values, [[value] for value in data]) - - def test_3500(self): - "3500 - insert and fetch single row with JSON" - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.execute( - "insert into TestJson values (:1, :2)", [1, self.json_data] - ) - self.cursor.execute("select JsonCol from TestJson") - (result,) = self.cursor.fetchone() - self.assertEqual(result, self.json_data) - - def test_3501(self): - "3501 - inserting single rows with JSON and DML returning" - json_val = self.json_data[11] - self.cursor.execute("delete from TestJson") - json_out = self.cursor.var(oracledb.DB_TYPE_JSON) - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON, json_out) - self.cursor.execute( - """ - insert into TestJson - values (:1, :2) - returning JsonCol into :json_out - """, - [1, json_val], - ) - self.assertEqual(json_out.getvalue(0), [json_val]) - - def test_3502(self): - "3502 - insert and fetch multiple rows with JSON" - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - data = list(enumerate(self.json_data)) - self.cursor.executemany("insert into TestJson values(:1, :2)", data) - self.cursor.execute("select * from TestJson") - self.assertEqual(self.cursor.fetchall(), data) - - def test_3503(self): - "3503 - inserting multiple rows with JSON and DML returning" - self.cursor.execute("delete from TestJson") - int_values = [i for i in range(len(self.json_data))] - out_int_var = self.cursor.var(int, arraysize=len(int_values)) - out_json_var = self.cursor.var( - oracledb.DB_TYPE_JSON, arraysize=len(int_values) - ) - self.cursor.setinputsizes( - None, oracledb.DB_TYPE_JSON, out_int_var, out_json_var - ) - data = list(zip(int_values, self.json_data)) - self.cursor.executemany( - """ - insert into TestJson - values(:int_val, :json_val) - returning IntCol, JsonCol into :int_var, :json_var - """, - data, - ) - self.assertEqual(out_int_var.values, [[v] for v in int_values]) - self.assertEqual(out_json_var.values, [[v] for v in self.json_data]) - - def test_3504(self): - "3504 - test binding boolean values as scalar JSON values" - data = [True, False, True, True, False, True] - self.__bind_scalar_as_json(data) - - def test_3505(self): - "3505 - test binding strings/bytes values as scalar JSON values" - data = [ - "String 1", - b"A raw value", - "A much longer string", - b"A much longer RAW value", - "Short string", - b"Y", - ] - self.__bind_scalar_as_json(data) - - def test_3506(self): - "3506 - test binding dates/intervals as scalar JSON values" - data = [ - datetime.datetime.today(), - datetime.datetime(2004, 2, 1, 3, 4, 5), - datetime.datetime(2020, 12, 2, 13, 29, 14), - datetime.timedelta(8.5), - datetime.datetime(2002, 12, 13, 9, 36, 0), - oracledb.Timestamp(2002, 12, 13, 9, 36, 0), - datetime.datetime(2002, 12, 13), - ] - self.__bind_scalar_as_json(data) - - def test_3507(self): - "3507 - test binding number in json values" - data = [ - 0, - 1, - 25.25, - 
6088343244, - -9999999999999999999, - decimal.Decimal("0.25"), - decimal.Decimal("10.25"), - decimal.Decimal("319438950232418390.273596"), - ] - self.__bind_scalar_as_json(data) - - def test_3508(self): - "3508 - test binding unsupported python type with JSON" - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - insert_sql = "insert into TestJson values (:1, :2)" - with self.assertRaisesFullCode("DPY-3003"): - self.cursor.execute(insert_sql, [1, list]) - - def test_3509(self): - "3509 - test fetching an unsupported python type with JSON" - self.cursor.prefetchrows = 0 - self.cursor.execute( - "select json(json_scalar(to_yminterval('8-04'))) from dual" - ) - with self.assertRaisesFullCode("DPY-3007"): - self.cursor.fetchone() - - def test_3510(self): - "3510 - fetch all supported types" - sql = """ - select json('{ - "binary_float": {"$numberFloat": 38.75}, - "binary_double": {"$numberDouble": 125.875}, - "date_no_time": {"$oracleDate": "2022-12-05"}, - "date_with_time": {"$oracleDate": "2022-12-05T15:06:05"}, - "empty_string": "", - "explicit_long": {"$numberLong": 9223372036854775807}, - "false": false, - "interval_ds": {"$intervalDaySecond" : "P133DT2H5M8.123S"}, - "long_integer": 12345678901234567890123456789012345, - "null": null, - "short_decimal": {"$numberDecimal": 18.25}, - "short_integer": {"$numberInt": 5 }, - "short_raw": {"$rawhex": "73686f72745f726177"}, - "short_string": "A short string", - "small_integer": 1234, - "small_float": 25.25, - "string_uint8": "A longer string but still < 256 bytes", - "true": true, - "ts_no_fs": {"$oracleTimestamp": "2022-12-06T18:12:35"}, - "ts_tz": {"$oracleTimestampTZ": "2022-12-07T22:59:15.1234Z"}, - "ts_with_fs": {"$oracleTimestamp": "2022-12-06T18:12:35.123"} - }' - extended) from dual""" - expected_data = dict( - binary_float=38.75, - binary_double=125.875, - date_no_time=datetime.datetime(2022, 12, 5), - date_with_time=datetime.datetime(2022, 12, 5, 15, 6, 5), - empty_string="", - explicit_long=9223372036854775807, - false=False, - interval_ds=datetime.timedelta( - days=133, seconds=7508, microseconds=123000 - ), - null=None, - long_integer=12345678901234567890123456789012345, - short_decimal=18.25, - short_integer=5, - short_raw=b"short_raw", - short_string="A short string", - small_integer=1234, - small_float=25.25, - string_uint8="A longer string but still < 256 bytes", - true=True, - ts_no_fs=datetime.datetime(2022, 12, 6, 18, 12, 35), - ts_tz=datetime.datetime(2022, 12, 7, 22, 59, 15, 123400), - ts_with_fs=datetime.datetime(2022, 12, 6, 18, 12, 35, 123000), - ) - self.cursor.execute(sql) - (actual_data,) = self.cursor.fetchone() - self.assertEqual(actual_data, expected_data) - - def test_3511(self): - "3511 - test inserting and updating JSON" - self.cursor.execute("delete from TestJSON") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.executemany( - "insert into TestJSON values (:1, :2)", - list(enumerate(self.json_data)), - ) - data = [({"a": i}, i) for i in range(len(self.json_data))] - self.cursor.setinputsizes(oracledb.DB_TYPE_JSON) - self.cursor.executemany( - "update TestJSON set JsonCol = :1 where IntCol = :2", - data, - ) - self.cursor.execute( - "select JsonCol, IntCol from TestJSON order by IntCol" - ) - self.assertEqual(self.cursor.fetchall(), data) - - def test_3512(self): - "3512 - test fetching json with json_query" - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.executemany( - 
"insert into TestJSON values (:1, :2)", - list(enumerate(self.json_data)), - ) - cases = [(1, "$.employees.employee1.name"), (2, "$.employees")] - for num_rows, json_query in cases: - self.cursor.execute( - f""" - select json_query(JsonCol, '{json_query}') - from TestJson - order by IntCol - """ - ) - result = [r for r, in self.cursor if r is not None] - self.assertEqual(len(result), num_rows) - - def test_3513(self): - "3513 - test fetching json with json_exists" - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.executemany( - "insert into TestJSON values (:1, :2)", - list(enumerate(self.json_data)), - ) - cases = [(1, "$.Permanent"), (2, "$.employees")] - for num_rows, json_query in cases: - self.cursor.execute( - f""" - select count(*) - from TestJson - where json_exists(JsonCol, '{json_query}') - """ - ) - (count,) = self.cursor.fetchone() - self.assertEqual(count, num_rows) - - def test_3514(self): - "3514 - test selecting json data" - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.executemany( - "insert into TestJSON values (:1, :2)", - list(enumerate(self.json_data)), - ) - self.cursor.execute( - """ - select t.JsonCol.employees - from TestJson t - where t.JsonCol.employees is not null - order by t.IntCol + +def _bind_scalar_as_json(cursor, data): + cursor.execute("delete from TestJson") + out_var = cursor.var(oracledb.DB_TYPE_JSON, arraysize=len(data)) + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON, out_var) + bind_data = list(enumerate(data)) + cursor.executemany( + """ + insert into TestJson values (:1, :2) + returning JsonCol into :json_out + """, + bind_data, + ) + cursor.connection.commit() + assert out_var.values == [[value] for value in data] + + +def test_3500(cursor, json_data): + "3500 - insert and fetch single row with JSON" + cursor.execute("delete from TestJson") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + cursor.execute("insert into TestJson values (:1, :2)", [1, json_data]) + cursor.execute("select JsonCol from TestJson") + (result,) = cursor.fetchone() + assert result == json_data + + +def test_3501(cursor, json_data): + "3501 - inserting single rows with JSON and DML returning" + json_val = json_data[11] + cursor.execute("delete from TestJson") + json_out = cursor.var(oracledb.DB_TYPE_JSON) + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON, json_out) + cursor.execute( + """ + insert into TestJson + values (:1, :2) + returning JsonCol into :json_out + """, + [1, json_val], + ) + assert json_out.getvalue(0) == [json_val] + + +def test_3502(cursor, json_data): + "3502 - insert and fetch multiple rows with JSON" + cursor.execute("delete from TestJson") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + data = list(enumerate(json_data)) + cursor.executemany("insert into TestJson values(:1, :2)", data) + cursor.execute("select * from TestJson") + assert cursor.fetchall() == data + + +def test_3503(cursor, json_data): + "3503 - inserting multiple rows with JSON and DML returning" + cursor.execute("delete from TestJson") + int_values = [i for i in range(len(json_data))] + out_int_var = cursor.var(int, arraysize=len(int_values)) + out_json_var = cursor.var(oracledb.DB_TYPE_JSON, arraysize=len(int_values)) + cursor.setinputsizes( + None, oracledb.DB_TYPE_JSON, out_int_var, out_json_var + ) + data = list(zip(int_values, json_data)) + cursor.executemany( + """ + insert into TestJson + values(:int_val, :json_val) + returning 
IntCol, JsonCol into :int_var, :json_var + """, + data, + ) + assert out_int_var.values == [[v] for v in int_values] + assert out_json_var.values == [[v] for v in json_data] + + +def test_3504(cursor): + "3504 - test binding boolean values as scalar JSON values" + data = [True, False, True, True, False, True] + _bind_scalar_as_json(cursor, data) + + +def test_3505(cursor): + "3505 - test binding strings/bytes values as scalar JSON values" + data = [ + "String 1", + b"A raw value", + "A much longer string", + b"A much longer RAW value", + "Short string", + b"Y", + ] + _bind_scalar_as_json(cursor, data) + + +def test_3506(cursor): + "3506 - test binding dates/intervals as scalar JSON values" + data = [ + datetime.datetime.today(), + datetime.datetime(2004, 2, 1, 3, 4, 5), + datetime.datetime(2020, 12, 2, 13, 29, 14), + datetime.timedelta(8.5), + datetime.datetime(2002, 12, 13, 9, 36, 0), + oracledb.Timestamp(2002, 12, 13, 9, 36, 0), + datetime.datetime(2002, 12, 13), + ] + _bind_scalar_as_json(cursor, data) + + +def test_3507(cursor): + "3507 - test binding number in json values" + data = [ + 0, + 1, + 25.25, + 6088343244, + -9999999999999999999, + decimal.Decimal("0.25"), + decimal.Decimal("10.25"), + decimal.Decimal("319438950232418390.273596"), + ] + _bind_scalar_as_json(cursor, data) + + +def test_3508(cursor, test_env): + "3508 - test binding unsupported python type with JSON" + cursor.execute("delete from TestJson") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + insert_sql = "insert into TestJson values (:1, :2)" + with test_env.assert_raises_full_code("DPY-3003"): + cursor.execute(insert_sql, [1, list]) + + +def test_3509(cursor, test_env): + "3509 - test fetching an unsupported python type with JSON" + cursor.prefetchrows = 0 + cursor.execute("select json(json_scalar(to_yminterval('8-04'))) from dual") + with test_env.assert_raises_full_code("DPY-3007"): + cursor.fetchone() + + +def test_3510(cursor): + "3510 - fetch all supported types" + sql = """ + select json('{ + "binary_float": {"$numberFloat": 38.75}, + "binary_double": {"$numberDouble": 125.875}, + "date_no_time": {"$oracleDate": "2022-12-05"}, + "date_with_time": {"$oracleDate": "2022-12-05T15:06:05"}, + "empty_string": "", + "explicit_long": {"$numberLong": 9223372036854775807}, + "false": false, + "interval_ds": {"$intervalDaySecond" : "P133DT2H5M8.123S"}, + "long_integer": 12345678901234567890123456789012345, + "null": null, + "short_decimal": {"$numberDecimal": 18.25}, + "short_integer": {"$numberInt": 5 }, + "short_raw": {"$rawhex": "73686f72745f726177"}, + "short_string": "A short string", + "small_integer": 1234, + "small_float": 25.25, + "string_uint8": "A longer string but still < 256 bytes", + "true": true, + "ts_no_fs": {"$oracleTimestamp": "2022-12-06T18:12:35"}, + "ts_tz": {"$oracleTimestampTZ": "2022-12-07T22:59:15.1234Z"}, + "ts_with_fs": {"$oracleTimestamp": "2022-12-06T18:12:35.123"} + }' + extended) from dual""" + expected_data = dict( + binary_float=38.75, + binary_double=125.875, + date_no_time=datetime.datetime(2022, 12, 5), + date_with_time=datetime.datetime(2022, 12, 5, 15, 6, 5), + empty_string="", + explicit_long=9223372036854775807, + false=False, + interval_ds=datetime.timedelta( + days=133, seconds=7508, microseconds=123000 + ), + null=None, + long_integer=12345678901234567890123456789012345, + short_decimal=18.25, + short_integer=5, + short_raw=b"short_raw", + short_string="A short string", + small_integer=1234, + small_float=25.25, + string_uint8="A longer string but still < 256 bytes", + 
true=True, + ts_no_fs=datetime.datetime(2022, 12, 6, 18, 12, 35), + ts_tz=datetime.datetime(2022, 12, 7, 22, 59, 15, 123400), + ts_with_fs=datetime.datetime(2022, 12, 6, 18, 12, 35, 123000), + ) + cursor.execute(sql) + (actual_data,) = cursor.fetchone() + assert actual_data == expected_data + + +def test_3511(cursor, json_data): + "3511 - test inserting and updating JSON" + cursor.execute("delete from TestJSON") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + cursor.executemany( + "insert into TestJSON values (:1, :2)", + list(enumerate(json_data)), + ) + data = [({"a": i}, i) for i in range(len(json_data))] + cursor.setinputsizes(oracledb.DB_TYPE_JSON) + cursor.executemany( + "update TestJSON set JsonCol = :1 where IntCol = :2", + data, + ) + cursor.execute("select JsonCol, IntCol from TestJSON order by IntCol") + assert cursor.fetchall() == data + + +def test_3512(cursor, json_data): + "3512 - test fetching json with json_query" + cursor.execute("delete from TestJson") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + cursor.executemany( + "insert into TestJSON values (:1, :2)", + list(enumerate(json_data)), + ) + cases = [(1, "$.employees.employee1.name"), (2, "$.employees")] + for num_rows, json_query in cases: + cursor.execute( + f""" + select json_query(JsonCol, '{json_query}') + from TestJson + order by IntCol """ ) - expected_data = [ - self.json_data[-2]["employees"], - self.json_data[-1]["employees"], - ] - data = [r for r, in self.cursor] - self.assertEqual(data, expected_data) - - def test_3515(self): - "3515 - test fetching json with json_serialize" - self.cursor.execute("delete from TestJson") - data = [{"a": 12.5}, {"b": True}, {"c": None}] - expected_data = ['{"a":12.5}', '{"b":true}', '{"c":null}'] - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.executemany( - "insert into TestJSON values (:1, :2)", list(enumerate(data)) - ) - self.cursor.execute( - """ - select json_serialize(JsonCol) + result = [r for r, in cursor if r is not None] + assert len(result) == num_rows + + +def test_3513(cursor, json_data): + "3513 - test fetching json with json_exists" + cursor.execute("delete from TestJson") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + cursor.executemany( + "insert into TestJSON values (:1, :2)", + list(enumerate(json_data)), + ) + cases = [(1, "$.Permanent"), (2, "$.employees")] + for num_rows, json_query in cases: + cursor.execute( + f""" + select count(*) from TestJson - order by IntCol + where json_exists(JsonCol, '{json_query}') """ ) - fetched_data = [r for r, in self.cursor] - self.assertEqual(fetched_data, expected_data) + (count,) = cursor.fetchone() + assert count == num_rows + + +def test_3514(cursor, json_data): + "3514 - test selecting json data" + cursor.execute("delete from TestJson") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + cursor.executemany( + "insert into TestJSON values (:1, :2)", + list(enumerate(json_data)), + ) + cursor.execute( + """ + select t.JsonCol.employees + from TestJson t + where t.JsonCol.employees is not null + order by t.IntCol + """ + ) + expected_data = [ + json_data[-2]["employees"], + json_data[-1]["employees"], + ] + data = [r for r, in cursor] + assert data == expected_data -if __name__ == "__main__": - test_env.run_test_cases() +def test_3515(cursor): + "3515 - test fetching json with json_serialize" + cursor.execute("delete from TestJson") + data = [{"a": 12.5}, {"b": True}, {"c": None}] + expected_data = ['{"a":12.5}', '{"b":true}', '{"c":null}'] + cursor.setinputsizes(None, 
oracledb.DB_TYPE_JSON) + cursor.executemany( + "insert into TestJSON values (:1, :2)", list(enumerate(data)) + ) + cursor.execute( + """ + select json_serialize(JsonCol) + from TestJson + order by IntCol + """ + ) + fetched_data = [r for r, in cursor] + assert fetched_data == expected_data diff --git a/tests/test_3600_outputtypehandler.py b/tests/test_3600_outputtypehandler.py index 127575d7..e917a4fa 100644 --- a/tests/test_3600_outputtypehandler.py +++ b/tests/test_3600_outputtypehandler.py @@ -30,703 +30,868 @@ import decimal import oracledb -import test_env +import pytest -class TestCase(test_env.BaseTestCase): - def __test_type_handler( - self, input_type, output_type, in_value, expected_out_value - ): - def type_handler(cursor, metadata): - return cursor.var(output_type, arraysize=cursor.arraysize) +def _test_type_handler( + cursor, input_type, output_type, in_value, expected_out_value +): + def type_handler(cursor, metadata): + return cursor.var(output_type, arraysize=cursor.arraysize) - self.cursor.outputtypehandler = type_handler - self.assertEqual(self.cursor.outputtypehandler, type_handler) - var = self.cursor.var(input_type) - var.setvalue(0, in_value) - self.cursor.execute("select :1 from dual", [var]) - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(type(fetched_value), type(expected_out_value)) - self.assertEqual(fetched_value, expected_out_value) - - def __test_type_handler_lob(self, lob_type, output_type): - db_type = getattr(oracledb, lob_type) - - def type_handler(cursor, metadata): - if metadata.type_code is db_type: - return cursor.var(output_type, arraysize=cursor.arraysize) - - self.cursor.outputtypehandler = type_handler - in_value = f"Some {lob_type} data" - if lob_type == "BLOB": - in_value = in_value.encode() - self.cursor.execute(f"delete from Test{lob_type}s") - self.cursor.execute( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col) - values(1, :val) - """, - val=in_value, - ) - self.conn.commit() - self.cursor.execute( - f"select {lob_type}Col, IntCol, {lob_type}Col from Test{lob_type}s" - ) - self.assertEqual(self.cursor.fetchone(), (in_value, 1, in_value)) - - def setUp(self): - super().setUp() - self.cursor.execute( - """ - ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' - NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF6' - NLS_TIMESTAMP_TZ_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF6' - time_zone='Europe/London' - """ - ) + cursor.outputtypehandler = type_handler + assert cursor.outputtypehandler == type_handler + var = cursor.var(input_type) + var.setvalue(0, in_value) + cursor.execute("select :1 from dual", [var]) + (fetched_value,) = cursor.fetchone() + assert type(fetched_value) == type(expected_out_value) + assert fetched_value == expected_out_value - def test_3600(self): - "3600 - output type handler: from VARCHAR to NUMBER" - self.__test_type_handler( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, "31.5", 31.5 - ) - def test_3601(self): - "3601 - output type handler: from CHAR to NUMBER" - self.__test_type_handler( - oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_NUMBER, "31.5", 31.5 - ) +def _test_type_handler_lob(cursor, lob_type, output_type): + db_type = getattr(oracledb, lob_type) - def test_3602(self): - "3602 - output type handler: from LONG to NUMBER" - self.__test_type_handler( - oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_NUMBER, "31.5", 31.5 - ) + def type_handler(cursor, metadata): + if metadata.type_code is db_type: + return cursor.var(output_type, arraysize=cursor.arraysize) - def test_3603(self): - "3603 - test 
output type handler: from INTEGER to NUMBER" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_INTEGER, oracledb.DB_TYPE_NUMBER, 31, 31 - ) + cursor.outputtypehandler = type_handler + in_value = f"Some {lob_type} data" + if lob_type == "BLOB": + in_value = in_value.encode() + cursor.execute(f"delete from Test{lob_type}s") + cursor.execute( + f""" + insert into Test{lob_type}s (IntCol, {lob_type}Col) + values(1, :val) + """, + val=in_value, + ) + cursor.connection.commit() + cursor.execute( + f"select {lob_type}Col, IntCol, {lob_type}Col from Test{lob_type}s" + ) + assert cursor.fetchone() == (in_value, 1, in_value) + + +@pytest.fixture(autouse=True) +def setup(cursor): + cursor.execute( + """ + ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS' + NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF6' + NLS_TIMESTAMP_TZ_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF6' + time_zone='Europe/London' + """ + ) + + +def test_3600(cursor): + "3600 - output type handler: from VARCHAR to NUMBER" + _test_type_handler( + cursor, oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, "31.5", 31.5 + ) + + +def test_3601(cursor): + "3601 - output type handler: from CHAR to NUMBER" + _test_type_handler( + cursor, oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_NUMBER, "31.5", 31.5 + ) + + +def test_3602(cursor): + "3602 - output type handler: from LONG to NUMBER" + _test_type_handler( + cursor, oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_NUMBER, "31.5", 31.5 + ) + + +def test_3603(cursor): + "3603 - test output type handler: from INTEGER to NUMBER" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_INTEGER, + oracledb.DB_TYPE_NUMBER, + 31, + 31, + ) + + +def test_3604(cursor): + "3604 - output type handler: from VARCHAR to INTEGER" + _test_type_handler( + cursor, + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_BINARY_INTEGER, + "31.5", + 31, + ) + + +def test_3605(cursor): + "3605 - output type handler: from CHAR to INTEGER" + _test_type_handler( + cursor, + oracledb.DB_TYPE_CHAR, + oracledb.DB_TYPE_BINARY_INTEGER, + "31.5", + 31, + ) + + +def test_3606(cursor): + "3606 - output type handler: from LONG to INTEGER" + _test_type_handler( + cursor, + oracledb.DB_TYPE_LONG, + oracledb.DB_TYPE_BINARY_INTEGER, + "31.5", + 31, + ) + + +def test_3607(cursor): + "3607 - output type handler: from NUMBER to INTEGER" + _test_type_handler( + cursor, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_BINARY_INTEGER, + 31.5, + 31, + ) + + +def test_3608(cursor): + "3608 - output type handler: from DOUBLE to INTEGER" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_DOUBLE, + oracledb.DB_TYPE_BINARY_INTEGER, + 31.5, + 31, + ) + + +def test_3609(cursor): + "3609 - output type handler: from FLOAT to INTEGER" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_FLOAT, + oracledb.DB_TYPE_BINARY_INTEGER, + 31.5, + 31, + ) + + +def test_3610(cursor): + "3610 - output type handler: from DATE to VARCHAR" + in_val = datetime.date(2021, 2, 1) + out_val = "2021-02-01 00:00:00" + _test_type_handler( + cursor, + oracledb.DB_TYPE_DATE, + oracledb.DB_TYPE_VARCHAR, + in_val, + out_val, + ) + + +def test_3611(cursor): + "3611 - output type handler: from DATE to CHAR" + in_val = datetime.date(2021, 2, 1) + out_val = "2021-02-01 00:00:00" + _test_type_handler( + cursor, oracledb.DB_TYPE_DATE, oracledb.DB_TYPE_CHAR, in_val, out_val + ) + + +def test_3612(cursor): + "3612 - output type handler: from DATE to LONG" + in_val = datetime.date(2021, 2, 1) + out_val = "2021-02-01 00:00:00" + _test_type_handler( + cursor, oracledb.DB_TYPE_DATE, 
oracledb.DB_TYPE_LONG, in_val, out_val + ) + + +def test_3613(cursor): + "3613 - output type handler: from NUMBER to VARCHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_VARCHAR, 31.5, "31.5" + ) + _test_type_handler( + cursor, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_VARCHAR, 0, "0" + ) + + +def test_3614(cursor): + "3614 - output type handler: from NUMBER to CHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_CHAR, 31.5, "31.5" + ) + _test_type_handler( + cursor, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_CHAR, 0, "0" + ) + + +def test_3615(cursor): + "3615 - output type handler: from NUMBER to LONG" + _test_type_handler( + cursor, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_LONG, 31.5, "31.5" + ) + + +def test_3616(conn, cursor): + "3616 - output type handler: from INTERVAL to VARCHAR" + in_val = datetime.timedelta(days=-1, seconds=86314, microseconds=431152) + if conn.thin: + out_val = str(in_val) + else: + out_val = "-000000001 23:58:34.431152000" + _test_type_handler( + cursor, + oracledb.DB_TYPE_INTERVAL_DS, + oracledb.DB_TYPE_VARCHAR, + in_val, + out_val, + ) + + +def test_3617(conn, cursor): + "3617 - output type handler: from INTERVAL to CHAR" + in_val = datetime.timedelta(days=-1, seconds=86314, microseconds=431152) + if conn.thin: + out_val = str(in_val) + else: + out_val = "-000000001 23:58:34.431152000" + _test_type_handler( + cursor, + oracledb.DB_TYPE_INTERVAL_DS, + oracledb.DB_TYPE_CHAR, + in_val, + out_val, + ) + + +def test_3618(conn, cursor): + "3618 - output type handler: from INTERVAL to LONG" + in_val = datetime.timedelta(days=-1, seconds=86314, microseconds=431152) + if conn.thin: + out_val = str(in_val) + else: + out_val = "-000000001 23:58:34.431152000" + _test_type_handler( + cursor, + oracledb.DB_TYPE_INTERVAL_DS, + oracledb.DB_TYPE_LONG, + in_val, + out_val, + ) + + +def test_3619(cursor): + "3619 - output type handler: from TIMESTAMP to VARCHAR" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP, + oracledb.DB_TYPE_VARCHAR, + in_val, + str(in_val), + ) + + +def test_3620(cursor): + "3620 - output type handler: from TIMESTAMP to CHAR" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP, + oracledb.DB_TYPE_CHAR, + in_val, + str(in_val), + ) + + +def test_3621(cursor): + "3621 - output type handler: from TIMESTAMP to LONG" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP, + oracledb.DB_TYPE_LONG, + in_val, + str(in_val), + ) + + +def test_3622(cursor): + "3622 - output type handler: from TIMESTAMP_TZ to VARCHAR" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP_TZ, + oracledb.DB_TYPE_VARCHAR, + in_val, + str(in_val), + ) + + +def test_3623(cursor): + "3623 - output type handler: from TIMESTAMP_TZ to CHAR" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP_TZ, + oracledb.DB_TYPE_CHAR, + in_val, + str(in_val), + ) + + +def test_3624(cursor): + "3624 - output type handler: from TIMESTAMP_TZ to LONG" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP_TZ, + oracledb.DB_TYPE_LONG, + in_val, + str(in_val), + ) + + +def test_3625(cursor): + "3625 - output type handler: from TIMESTAMP_LTZ to 
VARCHAR" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP_LTZ, + oracledb.DB_TYPE_VARCHAR, + in_val, + str(in_val), + ) + + +def test_3626(cursor): + "3626 - output type handler: from TIMESTAMP_LTZ to CHAR" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP_LTZ, + oracledb.DB_TYPE_CHAR, + in_val, + str(in_val), + ) + + +def test_3627(cursor): + "3627 - output type handler: from TIMESTAMP_LTZ to LONG" + in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP_LTZ, + oracledb.DB_TYPE_LONG, + in_val, + str(in_val), + ) + + +def test_3628(cursor): + "3628 - output type handler: from INTEGER to VARCHAR" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_INTEGER, + oracledb.DB_TYPE_VARCHAR, + 31, + "31", + ) + + +def test_3629(cursor): + "3629 - output type handler: from INTEGER to CHAR" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_INTEGER, + oracledb.DB_TYPE_CHAR, + 31, + "31", + ) + + +def test_3630(cursor): + "3630 - output type handler: from INTEGER to LONG" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_INTEGER, + oracledb.DB_TYPE_LONG, + 31, + "31", + ) + + +def test_3631(cursor): + "3631 - output type handler: from NUMBER to DOUBLE" + _test_type_handler( + cursor, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_BINARY_DOUBLE, + 31.5, + 31.5, + ) + + +def test_3632(cursor): + "3632 - output type handler: from FLOAT to DOUBLE" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_FLOAT, + oracledb.DB_TYPE_BINARY_DOUBLE, + 31.5, + 31.5, + ) + + +def test_3633(cursor): + "3633 - output type handler: from VARCHAR to DOUBLE" + _test_type_handler( + cursor, + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_BINARY_DOUBLE, + "31.5", + 31.5, + ) + + +def test_3634(cursor): + "3634 - output type handler: from CHAR to DOUBLE" + _test_type_handler( + cursor, + oracledb.DB_TYPE_CHAR, + oracledb.DB_TYPE_BINARY_DOUBLE, + "31.5", + 31.5, + ) + + +def test_3635(cursor): + "3635 - output type handler: from LONG to DOUBLE" + _test_type_handler( + cursor, + oracledb.DB_TYPE_LONG, + oracledb.DB_TYPE_BINARY_DOUBLE, + "31.5", + 31.5, + ) + + +def test_3636(cursor): + "3636 - output type handler: from NUMBER to FLOAT" + _test_type_handler( + cursor, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_BINARY_FLOAT, + 31.5, + 31.5, + ) + + +def test_3637(cursor): + "3637 - output type handler: from DOUBLE to FLOAT" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_DOUBLE, + oracledb.DB_TYPE_BINARY_FLOAT, + 31.5, + 31.5, + ) + + +def test_3638(cursor): + "3638 - output type handler: from VARCHAR to FLOAT" + _test_type_handler( + cursor, + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_BINARY_FLOAT, + "31.5", + 31.5, + ) + + +def test_3639(cursor): + "3639 - output type handler: from CHAR to FLOAT" + _test_type_handler( + cursor, + oracledb.DB_TYPE_CHAR, + oracledb.DB_TYPE_BINARY_FLOAT, + "31.5", + 31.5, + ) + + +def test_3640(cursor): + "3640 - output type handler: from LONG to FLOAT" + _test_type_handler( + cursor, + oracledb.DB_TYPE_LONG, + oracledb.DB_TYPE_BINARY_FLOAT, + "31.5", + 31.5, + ) + + +def test_3641(cursor): + "3641 - output type handler: from VARCHAR to CHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_CHAR, "31.5", "31.5" + ) + + +def test_3642(cursor): + "3642 - output type handler: from VARCHAR to LONG" + _test_type_handler( + cursor, 
oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_LONG, "31.5", "31.5" + ) + + +def test_3643(cursor): + "3643 - output type handler: from LONG to VARCHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_VARCHAR, "31.5", "31.5" + ) + + +def test_3644(cursor): + "3644 - output type handler: from LONG to CHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_CHAR, "31.5", "31.5" + ) + + +def test_3645(cursor): + "3645 - output type handler: from CHAR to VARCHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_VARCHAR, "31.5", "31.5" + ) + + +def test_3646(cursor): + "3646 - output type handler: from CHAR to LONG" + _test_type_handler( + cursor, oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_LONG, "31.5", "31.5" + ) + + +def test_3647(cursor): + "3647 - output type handler: from TIMESTAMP to TIMESTAMP_TZ" + val = datetime.datetime(2002, 12, 17, 0, 0, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP, + oracledb.DB_TYPE_TIMESTAMP_TZ, + val, + val, + ) - def test_3604(self): - "3604 - output type handler: from VARCHAR to INTEGER" - self.__test_type_handler( - oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_BINARY_INTEGER, - "31.5", - 31, - ) - def test_3605(self): - "3605 - output type handler: from CHAR to INTEGER" - self.__test_type_handler( - oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_BINARY_INTEGER, "31.5", 31 - ) +def test_3648(cursor): + "3648 - output type handler: from TIMESTAMP to TIMESTAMP_LTZ" + val = datetime.datetime(2002, 12, 17, 0, 0, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP, + oracledb.DB_TYPE_TIMESTAMP_LTZ, + val, + val, + ) - def test_3606(self): - "3606 - output type handler: from LONG to INTEGER" - self.__test_type_handler( - oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_BINARY_INTEGER, "31.5", 31 - ) - def test_3607(self): - "3607 - output type handler: from NUMBER to INTEGER" - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_BINARY_INTEGER, 31.5, 31 - ) +def test_3649(cursor): + "3649 - output type handler: from TIMESTAMP_TZ to TIMESTAMP" + val = datetime.datetime(2002, 12, 17, 0, 0, 16, 400000) + _test_type_handler( + cursor, + oracledb.DB_TYPE_TIMESTAMP_TZ, + oracledb.DB_TYPE_TIMESTAMP, + val, + val, + ) - def test_3608(self): - "3608 - output type handler: from DOUBLE to INTEGER" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_DOUBLE, - oracledb.DB_TYPE_BINARY_INTEGER, - 31.5, - 31, - ) - def test_3609(self): - "3609 - output type handler: from FLOAT to INTEGER" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_FLOAT, - oracledb.DB_TYPE_BINARY_INTEGER, - 31.5, - 31, +def test_3650(cursor, test_env): + "3650 - output type handler: from NUMBER to DATE is invalid" + with test_env.assert_raises_full_code("DPY-4007", "ORA-00932"): + _test_type_handler( + cursor, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_DATE, 3, 3 ) - def test_3610(self): - "3610 - output type handler: from DATE to VARCHAR" - in_val = datetime.date(2021, 2, 1) - out_val = "2021-02-01 00:00:00" - self.__test_type_handler( - oracledb.DB_TYPE_DATE, oracledb.DB_TYPE_VARCHAR, in_val, out_val - ) - def test_3611(self): - "3611 - output type handler: from DATE to CHAR" - in_val = datetime.date(2021, 2, 1) - out_val = "2021-02-01 00:00:00" - self.__test_type_handler( - oracledb.DB_TYPE_DATE, oracledb.DB_TYPE_CHAR, in_val, out_val - ) +def test_3651(cursor): + "3651 - output type handler: from CLOB to CHAR" + val = "Some Clob String" + _test_type_handler( + cursor, oracledb.DB_TYPE_CLOB, 
oracledb.DB_TYPE_CHAR, val, val + ) - def test_3612(self): - "3612 - output type handler: from DATE to LONG" - in_val = datetime.date(2021, 2, 1) - out_val = "2021-02-01 00:00:00" - self.__test_type_handler( - oracledb.DB_TYPE_DATE, oracledb.DB_TYPE_LONG, in_val, out_val - ) - def test_3613(self): - "3613 - output type handler: from NUMBER to VARCHAR" - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_VARCHAR, 31.5, "31.5" - ) - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_VARCHAR, 0, "0" - ) +def test_3652(cursor): + "3652 - output type handler: from CLOB to VARCHAR" + val = "Some Clob String" + _test_type_handler( + cursor, oracledb.DB_TYPE_CLOB, oracledb.DB_TYPE_VARCHAR, val, val + ) - def test_3614(self): - "3614 - output type handler: from NUMBER to CHAR" - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_CHAR, 31.5, "31.5" - ) - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_CHAR, 0, "0" - ) - def test_3615(self): - "3615 - output type handler: from NUMBER to LONG" - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_LONG, 31.5, "31.5" - ) +def test_3653(cursor): + "3653 - output type handler: from CLOB to LONG" + val = "Some Clob String" + _test_type_handler( + cursor, oracledb.DB_TYPE_CLOB, oracledb.DB_TYPE_LONG, val, val + ) - def test_3616(self): - "3616 - output type handler: from INTERVAL to VARCHAR" - in_val = datetime.timedelta( - days=-1, seconds=86314, microseconds=431152 - ) - if self.conn.thin: - out_val = str(in_val) - else: - out_val = "-000000001 23:58:34.431152000" - self.__test_type_handler( - oracledb.DB_TYPE_INTERVAL_DS, - oracledb.DB_TYPE_VARCHAR, - in_val, - out_val, - ) - def test_3617(self): - "3617 - output type handler: from INTERVAL to CHAR" - in_val = datetime.timedelta( - days=-1, seconds=86314, microseconds=431152 - ) - if self.conn.thin: - out_val = str(in_val) - else: - out_val = "-000000001 23:58:34.431152000" - self.__test_type_handler( - oracledb.DB_TYPE_INTERVAL_DS, - oracledb.DB_TYPE_CHAR, - in_val, - out_val, - ) +def test_3654(cursor): + "3654 - output type handler: from BLOB to RAW" + val = b"Some binary data" + _test_type_handler( + cursor, oracledb.DB_TYPE_BLOB, oracledb.DB_TYPE_RAW, val, val + ) - def test_3618(self): - "3618 - output type handler: from INTERVAL to LONG" - in_val = datetime.timedelta( - days=-1, seconds=86314, microseconds=431152 - ) - if self.conn.thin: - out_val = str(in_val) - else: - out_val = "-000000001 23:58:34.431152000" - self.__test_type_handler( - oracledb.DB_TYPE_INTERVAL_DS, - oracledb.DB_TYPE_LONG, - in_val, - out_val, - ) - def test_3619(self): - "3619 - output type handler: from TIMESTAMP to VARCHAR" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP, - oracledb.DB_TYPE_VARCHAR, - in_val, - str(in_val), - ) +def test_3655(cursor): + "3655 - output type handler: from BLOB to LONGRAW" + val = b"Some binary data" + _test_type_handler( + cursor, oracledb.DB_TYPE_BLOB, oracledb.DB_TYPE_LONG_RAW, val, val + ) - def test_3620(self): - "3620 - output type handler: from TIMESTAMP to CHAR" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP, - oracledb.DB_TYPE_CHAR, - in_val, - str(in_val), - ) - def test_3621(self): - "3621 - output type handler: from TIMESTAMP to LONG" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - 
oracledb.DB_TYPE_TIMESTAMP, - oracledb.DB_TYPE_LONG, - in_val, - str(in_val), - ) +def test_3656(cursor): + "3656 - output type handler: from permanent BLOBs to LONG_RAW" + _test_type_handler_lob(cursor, "BLOB", oracledb.DB_TYPE_LONG_RAW) - def test_3622(self): - "3622 - output type handler: from TIMESTAMP_TZ to VARCHAR" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP_TZ, - oracledb.DB_TYPE_VARCHAR, - in_val, - str(in_val), - ) - def test_3623(self): - "3623 - output type handler: from TIMESTAMP_TZ to CHAR" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP_TZ, - oracledb.DB_TYPE_CHAR, - in_val, - str(in_val), - ) +def test_3657(cursor): + "3657 - output type handler: from permanent BLOBs to RAW" + _test_type_handler_lob(cursor, "BLOB", oracledb.DB_TYPE_RAW) - def test_3624(self): - "3624 - output type handler: from TIMESTAMP_TZ to LONG" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP_TZ, - oracledb.DB_TYPE_LONG, - in_val, - str(in_val), - ) - def test_3625(self): - "3625 - output type handler: from TIMESTAMP_LTZ to VARCHAR" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP_LTZ, - oracledb.DB_TYPE_VARCHAR, - in_val, - str(in_val), - ) +def test_3658(cursor): + "3658 - output type handler: from permanent CLOBs to VARCHAR" + _test_type_handler_lob(cursor, "CLOB", oracledb.DB_TYPE_VARCHAR) - def test_3626(self): - "3626 - output type handler: from TIMESTAMP_LTZ to CHAR" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP_LTZ, - oracledb.DB_TYPE_CHAR, - in_val, - str(in_val), - ) - def test_3627(self): - "3627 - output type handler: from TIMESTAMP_LTZ to LONG" - in_val = datetime.datetime(2002, 12, 17, 1, 2, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP_LTZ, - oracledb.DB_TYPE_LONG, - in_val, - str(in_val), - ) +def test_3659(cursor): + "3659 - output type handler: from permanent CLOBs to CHAR" + _test_type_handler_lob(cursor, "CLOB", oracledb.DB_TYPE_CHAR) - def test_3628(self): - "3628 - output type handler: from INTEGER to VARCHAR" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_INTEGER, oracledb.DB_TYPE_VARCHAR, 31, "31" - ) - def test_3629(self): - "3629 - output type handler: from INTEGER to CHAR" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_INTEGER, oracledb.DB_TYPE_CHAR, 31, "31" - ) +def test_3660(cursor): + "3660 - output type handler: from permanent CLOBs to LONG" + _test_type_handler_lob(cursor, "CLOB", oracledb.DB_TYPE_LONG) - def test_3630(self): - "3630 - output type handler: from INTEGER to LONG" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_INTEGER, oracledb.DB_TYPE_LONG, 31, "31" - ) - def test_3631(self): - "3631 - output type handler: from NUMBER to DOUBLE" - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_BINARY_DOUBLE, 31.5, 31.5 - ) +def test_3661(cursor): + "3661 - output type handler: from NCLOB to CHAR" + val = "Some nclob data" + _test_type_handler( + cursor, oracledb.DB_TYPE_NCLOB, oracledb.DB_TYPE_CHAR, val, val + ) - def test_3632(self): - "3632 - output type handler: from FLOAT to DOUBLE" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_FLOAT, - oracledb.DB_TYPE_BINARY_DOUBLE, - 31.5, - 31.5, - ) - def test_3633(self): - "3633 - output type handler: from 
VARCHAR to DOUBLE" - self.__test_type_handler( - oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_BINARY_DOUBLE, - "31.5", - 31.5, - ) +def test_3662(cursor): + "3662 - output type handler: from NCLOB to VARCHAR" + val = "Some nclob data" + _test_type_handler( + cursor, oracledb.DB_TYPE_NCLOB, oracledb.DB_TYPE_VARCHAR, val, val + ) - def test_3634(self): - "3634 - output type handler: from CHAR to DOUBLE" - self.__test_type_handler( - oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_BINARY_DOUBLE, "31.5", 31.5 - ) - def test_3635(self): - "3635 - output type handler: from LONG to DOUBLE" - self.__test_type_handler( - oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_BINARY_DOUBLE, "31.5", 31.5 - ) +def test_3663(cursor): + "3663 - output type handler: from NCLOB to LONG" + val = "Some nclob data" + _test_type_handler( + cursor, oracledb.DB_TYPE_NCLOB, oracledb.DB_TYPE_LONG, val, val + ) - def test_3636(self): - "3636 - output type handler: from NUMBER to FLOAT" - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_BINARY_FLOAT, 31.5, 31.5 - ) - def test_3637(self): - "3637 - output type handler: from DOUBLE to FLOAT" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_DOUBLE, - oracledb.DB_TYPE_BINARY_FLOAT, - 31.5, - 31.5, - ) +def test_3664(cursor): + "3664 - output type handler: from permanent NCLOBs to VARCHAR" + _test_type_handler_lob(cursor, "NCLOB", oracledb.DB_TYPE_VARCHAR) - def test_3638(self): - "3638 - output type handler: from VARCHAR to FLOAT" - self.__test_type_handler( - oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_BINARY_FLOAT, - "31.5", - 31.5, - ) - def test_3639(self): - "3639 - output type handler: from CHAR to FLOAT" - self.__test_type_handler( - oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_BINARY_FLOAT, "31.5", 31.5 - ) +def test_3665(cursor): + "3665 - output type handler: from permanent NCLOBs to CHAR" + _test_type_handler_lob(cursor, "NCLOB", oracledb.DB_TYPE_CHAR) - def test_3640(self): - "3640 - output type handler: from LONG to FLOAT" - self.__test_type_handler( - oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_BINARY_FLOAT, "31.5", 31.5 - ) - - def test_3641(self): - "3641 - output type handler: from VARCHAR to CHAR" - self.__test_type_handler( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_CHAR, "31.5", "31.5" - ) - - def test_3642(self): - "3642 - output type handler: from VARCHAR to LONG" - self.__test_type_handler( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_LONG, "31.5", "31.5" - ) - - def test_3643(self): - "3643 - output type handler: from LONG to VARCHAR" - self.__test_type_handler( - oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_VARCHAR, "31.5", "31.5" - ) - - def test_3644(self): - "3644 - output type handler: from LONG to CHAR" - self.__test_type_handler( - oracledb.DB_TYPE_LONG, oracledb.DB_TYPE_CHAR, "31.5", "31.5" - ) - - def test_3645(self): - "3645 - output type handler: from CHAR to VARCHAR" - self.__test_type_handler( - oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_VARCHAR, "31.5", "31.5" - ) - - def test_3646(self): - "3646 - output type handler: from CHAR to LONG" - self.__test_type_handler( - oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_LONG, "31.5", "31.5" - ) - - def test_3647(self): - "3647 - output type handler: from TIMESTAMP to TIMESTAMP_TZ" - val = datetime.datetime(2002, 12, 17, 0, 0, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP, oracledb.DB_TYPE_TIMESTAMP_TZ, val, val - ) - - def test_3648(self): - "3648 - output type handler: from TIMESTAMP to TIMESTAMP_LTZ" - val = datetime.datetime(2002, 12, 17, 0, 0, 16, 400000) - 
self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP, - oracledb.DB_TYPE_TIMESTAMP_LTZ, - val, - val, - ) - - def test_3649(self): - "3649 - output type handler: from TIMESTAMP_TZ to TIMESTAMP" - val = datetime.datetime(2002, 12, 17, 0, 0, 16, 400000) - self.__test_type_handler( - oracledb.DB_TYPE_TIMESTAMP_TZ, oracledb.DB_TYPE_TIMESTAMP, val, val - ) - - def test_3650(self): - "3650 - output type handler: from NUMBER to DATE is invalid" - with self.assertRaisesFullCode("DPY-4007", "ORA-00932"): - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_DATE, 3, 3 - ) - - def test_3651(self): - "3651 - output type handler: from CLOB to CHAR" - val = "Some Clob String" - self.__test_type_handler( - oracledb.DB_TYPE_CLOB, oracledb.DB_TYPE_CHAR, val, val - ) - - def test_3652(self): - "3652 - output type handler: from CLOB to VARCHAR" - val = "Some Clob String" - self.__test_type_handler( - oracledb.DB_TYPE_CLOB, oracledb.DB_TYPE_VARCHAR, val, val - ) - - def test_3653(self): - "3653 - output type handler: from CLOB to LONG" - val = "Some Clob String" - self.__test_type_handler( - oracledb.DB_TYPE_CLOB, oracledb.DB_TYPE_LONG, val, val - ) - def test_3654(self): - "3654 - output type handler: from BLOB to RAW" - val = b"Some binary data" - self.__test_type_handler( - oracledb.DB_TYPE_BLOB, oracledb.DB_TYPE_RAW, val, val - ) +def test_3666(cursor): + "3666 - output type handler: from permanent NCLOBs to LONG" + _test_type_handler_lob(cursor, "NCLOB", oracledb.DB_TYPE_LONG) - def test_3655(self): - "3655 - output type handler: from BLOB to LONGRAW" - val = b"Some binary data" - self.__test_type_handler( - oracledb.DB_TYPE_BLOB, oracledb.DB_TYPE_LONG_RAW, val, val - ) - def test_3656(self): - "3656 - output type handler: from permanent BLOBs to LONG_RAW" - self.__test_type_handler_lob("BLOB", oracledb.DB_TYPE_LONG_RAW) +def test_3667(cursor): + "3667 - output type handler: from NVARCHAR to VARCHAR" + _test_type_handler( + cursor, + oracledb.DB_TYPE_NVARCHAR, + oracledb.DB_TYPE_VARCHAR, + "31.5", + "31.5", + ) - def test_3657(self): - "3657 - output type handler: from permanent BLOBs to RAW" - self.__test_type_handler_lob("BLOB", oracledb.DB_TYPE_RAW) - def test_3658(self): - "3658 - output type handler: from permanent CLOBs to VARCHAR" - self.__test_type_handler_lob("CLOB", oracledb.DB_TYPE_VARCHAR) +def test_3668(cursor): + "3668 - output type handler: from VARCHAR to NVARCHAR" + _test_type_handler( + cursor, + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_NVARCHAR, + "31.5", + "31.5", + ) - def test_3659(self): - "3659 - output type handler: from permanent CLOBs to CHAR" - self.__test_type_handler_lob("CLOB", oracledb.DB_TYPE_CHAR) - def test_3660(self): - "3660 - output type handler: from permanent CLOBs to LONG" - self.__test_type_handler_lob("CLOB", oracledb.DB_TYPE_LONG) +def test_3669(cursor): + "3669 - output type handler: from NCHAR to CHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_NCHAR, oracledb.DB_TYPE_CHAR, "31.5", "31.5" + ) - def test_3661(self): - "3661 - output type handler: from NCLOB to CHAR" - val = "Some nclob data" - self.__test_type_handler( - oracledb.DB_TYPE_NCLOB, oracledb.DB_TYPE_CHAR, val, val - ) - def test_3662(self): - "3662 - output type handler: from NCLOB to VARCHAR" - val = "Some nclob data" - self.__test_type_handler( - oracledb.DB_TYPE_NCLOB, oracledb.DB_TYPE_VARCHAR, val, val - ) +def test_3670(cursor): + "3670 - output type handler: from CHAR to NCHAR" + _test_type_handler( + cursor, oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_NCHAR, 
"31.5", "31.5" + ) - def test_3663(self): - "3663 - output type handler: from NCLOB to LONG" - val = "Some nclob data" - self.__test_type_handler( - oracledb.DB_TYPE_NCLOB, oracledb.DB_TYPE_LONG, val, val - ) - def test_3664(self): - "3664 - output type handler: from permanent NCLOBs to VARCHAR" - self.__test_type_handler_lob("NCLOB", oracledb.DB_TYPE_VARCHAR) +def test_3671(cursor, test_env): + "3671 - execute raises an error if an incorrect arraysize is used" - def test_3665(self): - "3665 - output type handler: from permanent NCLOBs to CHAR" - self.__test_type_handler_lob("NCLOB", oracledb.DB_TYPE_CHAR) + def type_handler(cursor, metadata): + return cursor.var(str) - def test_3666(self): - "3666 - output type handler: from permanent NCLOBs to LONG" - self.__test_type_handler_lob("NCLOB", oracledb.DB_TYPE_LONG) + cursor.arraysize = 100 + cursor.outputtypehandler = type_handler + with test_env.assert_raises_full_code("DPY-2016"): + cursor.execute("select :1 from dual", [5]) - def test_3667(self): - "3667 - output type handler: from NVARCHAR to VARCHAR" - self.__test_type_handler( - oracledb.DB_TYPE_NVARCHAR, oracledb.DB_TYPE_VARCHAR, "31.5", "31.5" - ) - def test_3668(self): - "3668 - output type handler: from VARCHAR to NVARCHAR" - self.__test_type_handler( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NVARCHAR, "31.5", "31.5" - ) +def test_3672(cursor, test_env): + "3672 - execute raises an error if a var is not returned" - def test_3669(self): - "3669 - output type handler: from NCHAR to CHAR" - self.__test_type_handler( - oracledb.DB_TYPE_NCHAR, oracledb.DB_TYPE_CHAR, "31.5", "31.5" - ) + def type_handler(cursor, metadata): + return "incorrect_return" - def test_3670(self): - "3670 - output type handler: from CHAR to NCHAR" - self.__test_type_handler( - oracledb.DB_TYPE_CHAR, oracledb.DB_TYPE_NCHAR, "31.5", "31.5" - ) + cursor.outputtypehandler = type_handler + with test_env.assert_raises_full_code("DPY-2015"): + cursor.execute("select :1 from dual", [5]) - def test_3671(self): - "3671 - execute raises an error if an incorrect arraysize is used" - def type_handler(cursor, metadata): - return cursor.var(str) +def test_3673(cursor): + "3673 - output type handler: from NUMBER to decimal.Decimal" + _test_type_handler( + cursor, + oracledb.DB_TYPE_NUMBER, + decimal.Decimal, + 31.5, + decimal.Decimal("31.5"), + ) + _test_type_handler( + cursor, + oracledb.DB_TYPE_NUMBER, + decimal.Decimal, + 0, + decimal.Decimal("0"), + ) - cursor = self.conn.cursor() - cursor.arraysize = 100 - cursor.outputtypehandler = type_handler - with self.assertRaisesFullCode("DPY-2016"): - cursor.execute("select :1 from dual", [5]) - def test_3672(self): - "3672 - execute raises an error if a var is not returned" +def test_3674(conn): + "3674 - use of output type handler does not affect description" - def type_handler(cursor, metadata): - return "incorrect_return" + def type_handler(cursor, metadata): + return cursor.var(str, arraysize=cursor.arraysize) - cursor = self.conn.cursor() + with conn.cursor() as cursor: + cursor.execute("select user from dual") + desc_before = cursor.description + with conn.cursor() as cursor: cursor.outputtypehandler = type_handler - with self.assertRaisesFullCode("DPY-2015"): - cursor.execute("select :1 from dual", [5]) - - def test_3673(self): - "3673 - output type handler: from NUMBER to decimal.Decimal" - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, - decimal.Decimal, - 31.5, - decimal.Decimal("31.5"), - ) - self.__test_type_handler( - oracledb.DB_TYPE_NUMBER, decimal.Decimal, 
0, decimal.Decimal("0") - ) - - def test_3674(self): - "3674 - use of output type handler does not affect description" - - def type_handler(cursor, metadata): - return cursor.var(str, arraysize=cursor.arraysize) - - with self.conn.cursor() as cursor: - cursor.execute("select user from dual") - desc_before = cursor.description - with self.conn.cursor() as cursor: - cursor.outputtypehandler = type_handler - cursor.execute("select user from dual") - self.assertEqual(cursor.description, desc_before) - - def test_3675(self): - "3675 - use the old signature for an output type handler" - - def type_handler(cursor, name, default_type, size, precision, scale): - return cursor.var(str, arraysize=cursor.arraysize) + cursor.execute("select user from dual") + assert cursor.description == desc_before - with self.conn.cursor() as cursor: - cursor.outputtypehandler = type_handler - cursor.execute("select 1 from dual") - self.assertEqual(cursor.fetchall(), [("1",)]) - def test_3676(self): - "3676 - re-execute query with second fetch returning no rows" - - self.cursor.execute("truncate table TestTempTable") - data = [(i + 1,) for i in range(5)] - self.cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", data - ) - self.conn.commit() - - def type_handler_1(cursor, metadata): - return cursor.var( - str, - arraysize=cursor.arraysize, - outconverter=lambda x: f"_{x}_", - ) - - def type_handler_2(cursor, metadata): - return cursor.var( - str, - arraysize=cursor.arraysize, - outconverter=lambda x: f"={x}=", - ) - - self.cursor.outputtypehandler = type_handler_1 - self.cursor.arraysize = 6 - self.cursor.prefetchrows = 6 - sql = "select IntCol from TestTempTable where rownum <= :1" - self.cursor.execute(sql, [6]) - expected_value = [(f"_{x}_",) for x, in data] - self.assertEqual(self.cursor.fetchall(), expected_value) - - self.cursor.outputtypehandler = type_handler_2 - self.cursor.prefetchrows = 2 - self.cursor.arraysize = 2 - self.cursor.execute(sql, [0]) - self.assertEqual(self.cursor.fetchall(), []) - - def test_3677(self): - "3677 - output type handler: from BINARY_DOUBLE to VARCHAR" - str_value = "36.75" if self.conn.thin else "3.675E+001" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_DOUBLE, - oracledb.DB_TYPE_VARCHAR, - 36.75, - str_value, - ) - - def test_3678(self): - "3678 - output type handler: from BINARY_FLOAT to VARCHAR" - str_value = "16.25" if self.conn.thin else "1.625E+001" - self.__test_type_handler( - oracledb.DB_TYPE_BINARY_FLOAT, - oracledb.DB_TYPE_VARCHAR, - 16.25, - str_value, - ) +def test_3675(conn): + "3675 - use the old signature for an output type handler" + def type_handler(cursor, name, default_type, size, precision, scale): + return cursor.var(str, arraysize=cursor.arraysize) -if __name__ == "__main__": - test_env.run_test_cases() + with conn.cursor() as cursor: + cursor.outputtypehandler = type_handler + cursor.execute("select 1 from dual") + assert cursor.fetchall() == [("1",)] + + +def test_3676(conn, cursor): + "3676 - re-execute query with second fetch returning no rows" + + cursor.execute("truncate table TestTempTable") + data = [(i + 1,) for i in range(5)] + cursor.executemany("insert into TestTempTable (IntCol) values (:1)", data) + conn.commit() + + def type_handler_1(cursor, metadata): + return cursor.var( + str, + arraysize=cursor.arraysize, + outconverter=lambda x: f"_{x}_", + ) + + def type_handler_2(cursor, metadata): + return cursor.var( + str, + arraysize=cursor.arraysize, + outconverter=lambda x: f"={x}=", + ) + + cursor.outputtypehandler 
= type_handler_1 + cursor.arraysize = 6 + cursor.prefetchrows = 6 + sql = "select IntCol from TestTempTable where rownum <= :1" + cursor.execute(sql, [6]) + expected_value = [(f"_{x}_",) for x, in data] + assert cursor.fetchall() == expected_value + + cursor.outputtypehandler = type_handler_2 + cursor.prefetchrows = 2 + cursor.arraysize = 2 + cursor.execute(sql, [0]) + assert cursor.fetchall() == [] + + +def test_3677(conn, cursor): + "3677 - output type handler: from BINARY_DOUBLE to VARCHAR" + str_value = "36.75" if conn.thin else "3.675E+001" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_DOUBLE, + oracledb.DB_TYPE_VARCHAR, + 36.75, + str_value, + ) + + +def test_3678(conn, cursor): + "3678 - output type handler: from BINARY_FLOAT to VARCHAR" + str_value = "16.25" if conn.thin else "1.625E+001" + _test_type_handler( + cursor, + oracledb.DB_TYPE_BINARY_FLOAT, + oracledb.DB_TYPE_VARCHAR, + 16.25, + str_value, + ) diff --git a/tests/test_3700_var.py b/tests/test_3700_var.py index 91737697..410db980 100644 --- a/tests/test_3700_var.py +++ b/tests/test_3700_var.py @@ -30,509 +30,620 @@ import decimal import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def _test_positive_set_and_get( - self, var_type, value_to_set, expected_value, type_name=None +import pytest + + +def _test_positive_set_and_get( + cursor, test_env, var_type, value_to_set, expected_value, type_name=None +): + var = cursor.var(var_type, typename=type_name) + var.setvalue(0, value_to_set) + result = var.getvalue() + if isinstance(result, oracledb.LOB): + result = result.read() + elif isinstance(result, oracledb.DbObject): + result = test_env.get_db_object_as_plain_object(result) + if isinstance(expected_value, datetime.date) and not isinstance( + expected_value, datetime.datetime ): - var = self.cursor.var(var_type, typename=type_name) - var.setvalue(0, value_to_set) - result = var.getvalue() - if isinstance(result, oracledb.LOB): - result = result.read() - elif isinstance(result, oracledb.DbObject): - result = self.get_db_object_as_plain_object(result) - if isinstance(expected_value, datetime.date) and not isinstance( - expected_value, datetime.datetime - ): - if isinstance(result, datetime.datetime): - result = result.date() - self.assertEqual(type(result), type(expected_value)) - self.assertEqual(result, expected_value) - - def _test_negative_set_and_get( - self, var_type, value_to_set, type_name=None - ): - var = self.cursor.var(var_type, typename=type_name) - self.assertRaises( - (TypeError, oracledb.DatabaseError), var.setvalue, 0, value_to_set - ) - - def test_3700(self): - "3700 - setting values on variables of type DB_TYPE_NUMBER" - self._test_positive_set_and_get(int, 5, 5) - self._test_positive_set_and_get(oracledb.DB_TYPE_NUMBER, 3.5, 3.5) - self._test_positive_set_and_get( - decimal.Decimal, decimal.Decimal("24.8"), decimal.Decimal("24.8") - ) - self._test_positive_set_and_get(int, True, 1) - self._test_positive_set_and_get(int, False, 0) - self._test_positive_set_and_get(oracledb.DB_TYPE_NUMBER, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_NUMBER, "abc") - - def test_3701(self): - "3701 - setting values on variables of type DB_TYPE_BINARY_INTEGER" - self._test_positive_set_and_get(oracledb.DB_TYPE_BINARY_INTEGER, 5, 5) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_INTEGER, 3.5, 3 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_INTEGER, decimal.Decimal("24.8"), 24 - ) - self._test_positive_set_and_get( - 
oracledb.DB_TYPE_BINARY_INTEGER, True, 1 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_INTEGER, False, 0 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_INTEGER, None, None - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_BINARY_INTEGER, "abc") - - def test_3702(self): - "3702 - setting values on variables of type DB_TYPE_VARCHAR" - value = "A VARCHAR string" - self._test_positive_set_and_get(oracledb.DB_TYPE_VARCHAR, value, value) - value = b"A raw string for VARCHAR" - self._test_positive_set_and_get( - oracledb.DB_TYPE_VARCHAR, value, value.decode() - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_VARCHAR, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_VARCHAR, 5) - - def test_3703(self): - "3703 - setting values on variables of type DB_TYPE_NVARCHAR" - value = "A NVARCHAR string" - self._test_positive_set_and_get( - oracledb.DB_TYPE_NVARCHAR, value, value - ) - value = b"A raw string for NVARCHAR" - self._test_positive_set_and_get( - oracledb.DB_TYPE_NVARCHAR, value, value.decode() - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_NVARCHAR, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_NVARCHAR, 5) - - def test_3704(self): - "3704 - setting values on variables of type DB_TYPE_CHAR" - value = "A CHAR string" - self._test_positive_set_and_get(oracledb.DB_TYPE_CHAR, value, value) - value = b"A raw string for CHAR" - self._test_positive_set_and_get( - oracledb.DB_TYPE_CHAR, value, value.decode() - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_CHAR, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_CHAR, 5) - - def test_3705(self): - "3705 - setting values on variables of type DB_TYPE_NCHAR" - value = "A NCHAR string" - self._test_positive_set_and_get(oracledb.DB_TYPE_NCHAR, value, value) - value = b"A raw string for NCHAR" - self._test_positive_set_and_get( - oracledb.DB_TYPE_CHAR, value, value.decode() - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_NCHAR, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_NCHAR, 5) - - def test_3706(self): - "3706 - setting values on variables of type DB_TYPE_LONG" - value = "Long Data" * 15000 - self._test_positive_set_and_get(oracledb.DB_TYPE_LONG, value, value) - value = b"Raw data for LONG" * 15000 - self._test_positive_set_and_get( - oracledb.DB_TYPE_LONG, value, value.decode() - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_LONG, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_LONG, 5) - - def test_3707(self): - "3707 - setting values on variables of type DB_TYPE_RAW" - value = b"Raw Data" - self._test_positive_set_and_get(oracledb.DB_TYPE_RAW, value, value) - value = "String data for RAW" - self._test_positive_set_and_get( - oracledb.DB_TYPE_RAW, value, value.encode() - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_RAW, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_RAW, 5) - - def test_3708(self): - "3708 - setting values on variables of type DB_TYPE_LONG_RAW" - value = b"Long Raw Data" * 15000 - self._test_positive_set_and_get( - oracledb.DB_TYPE_LONG_RAW, value, value - ) - value = "String data for LONG RAW" * 15000 - self._test_positive_set_and_get( - oracledb.DB_TYPE_LONG_RAW, value, value.encode() - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_LONG_RAW, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_LONG_RAW, 5) - - def test_3709(self): - "3709 - setting values on variables of type DB_TYPE_DATE" - value = datetime.date(2017, 5, 6) - 
self._test_positive_set_and_get(oracledb.DB_TYPE_DATE, value, value) - value = datetime.datetime(2017, 5, 6, 9, 36, 0) - self._test_positive_set_and_get(oracledb.DB_TYPE_DATE, value, value) - self._test_positive_set_and_get(oracledb.DB_TYPE_DATE, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_DATE, 5) - - def test_3710(self): - "3710 - setting values on variables of type DB_TYPE_TIMESTAMP" - value = datetime.date(2017, 5, 6) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP, value, value - ) - value = datetime.datetime(2017, 5, 6, 9, 36, 0, 300000) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP, value, value - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_TIMESTAMP, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_TIMESTAMP, 5) - - def test_3711(self): - "3711 - setting values on variables of type DB_TYPE_TIMESTAMP_TZ" - value = datetime.date(2017, 5, 6) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP_TZ, value, value - ) - value = datetime.datetime(2017, 5, 6, 9, 36, 0, 300000) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP_TZ, value, value - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP_TZ, None, None - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_TIMESTAMP_TZ, 5) - - def test_3712(self): - "3712 - setting values on variables of type DB_TYPE_TIMESTAMP_LTZ" - value = datetime.date(2017, 5, 6) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP_LTZ, value, value - ) - value = datetime.datetime(2017, 5, 6, 9, 36, 0, 300000) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP_LTZ, value, value - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_TIMESTAMP_LTZ, None, None - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_TIMESTAMP_LTZ, 5) - - def test_3713(self): - "3713 - setting values on variables of type DB_TYPE_BLOB" - value = b"Short temp BLOB value" - temp_blob = self.conn.createlob(oracledb.DB_TYPE_BLOB) - temp_blob.write(value) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BLOB, temp_blob, value - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_CLOB, temp_blob) - self._test_negative_set_and_get(oracledb.DB_TYPE_NCLOB, temp_blob) - value = b"Short BLOB value" - self._test_positive_set_and_get(oracledb.DB_TYPE_BLOB, value, value) - self._test_positive_set_and_get(oracledb.DB_TYPE_BLOB, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_BLOB, 5) - - def test_3714(self): - "3714 - setting values on variables of type DB_TYPE_CLOB" - value = "Short temp CLOB value" - temp_clob = self.conn.createlob(oracledb.DB_TYPE_CLOB) - temp_clob.write(value) - self._test_positive_set_and_get( - oracledb.DB_TYPE_CLOB, temp_clob, value - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_BLOB, temp_clob) - self._test_negative_set_and_get(oracledb.DB_TYPE_NCLOB, temp_clob) - value = "Short CLOB value" - self._test_positive_set_and_get(oracledb.DB_TYPE_CLOB, value, value) - self._test_positive_set_and_get(oracledb.DB_TYPE_CLOB, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_CLOB, 5) - - def test_3715(self): - "3715 - setting values on variables of type DB_TYPE_NCLOB" - value = "Short temp NCLOB value" - temp_nclob = self.conn.createlob(oracledb.DB_TYPE_NCLOB) - temp_nclob.write(value) - self._test_positive_set_and_get( - oracledb.DB_TYPE_NCLOB, temp_nclob, value - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_BLOB, temp_nclob) - self._test_negative_set_and_get(oracledb.DB_TYPE_CLOB, temp_nclob) - 
value = "Short NCLOB Value" - self._test_positive_set_and_get(oracledb.DB_TYPE_NCLOB, value, value) - self._test_positive_set_and_get(oracledb.DB_TYPE_NCLOB, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_NCLOB, 5) - - def test_3716(self): - "3716 - setting values on variables of type DB_TYPE_BINARY_FLOAT" - self._test_positive_set_and_get(oracledb.DB_TYPE_BINARY_FLOAT, 5, 5.0) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_FLOAT, 3.5, 3.5 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_FLOAT, decimal.Decimal("24.5"), 24.5 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_FLOAT, True, 1.0 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_FLOAT, False, 0.0 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_FLOAT, None, None - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_BINARY_FLOAT, "abc") - - def test_3717(self): - "3717 - setting values on variables of type DB_TYPE_BINARY_DOUBLE" - self._test_positive_set_and_get(oracledb.DB_TYPE_BINARY_DOUBLE, 5, 5.0) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_DOUBLE, 3.5, 3.5 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_DOUBLE, decimal.Decimal("192.125"), 192.125 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_DOUBLE, True, 1.0 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_DOUBLE, False, 0.0 - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BINARY_DOUBLE, None, None - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_BINARY_DOUBLE, "abc") - - def test_3718(self): - "3718 - setting values on variables of type DB_TYPE_BOOLEAN" - self._test_positive_set_and_get(oracledb.DB_TYPE_BOOLEAN, 5, True) - self._test_positive_set_and_get(oracledb.DB_TYPE_BOOLEAN, 2.0, True) - self._test_positive_set_and_get(oracledb.DB_TYPE_BOOLEAN, "abc", True) - self._test_positive_set_and_get( - oracledb.DB_TYPE_BOOLEAN, decimal.Decimal("24.8"), True - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_BOOLEAN, 0.0, False) - self._test_positive_set_and_get(oracledb.DB_TYPE_BOOLEAN, 0, False) - self._test_positive_set_and_get(oracledb.DB_TYPE_BOOLEAN, None, None) - - def test_3719(self): - "3719 - setting values on variables of type DB_TYPE_INTERVAL_DS" - value = datetime.timedelta(days=5, seconds=56000, microseconds=123780) - self._test_positive_set_and_get( - oracledb.DB_TYPE_INTERVAL_DS, value, value - ) - self._test_positive_set_and_get( - oracledb.DB_TYPE_INTERVAL_DS, None, None - ) - self._test_negative_set_and_get(oracledb.DB_TYPE_INTERVAL_DS, 5) - - def test_3720(self): - "3720 - setting values on variables of type DB_TYPE_ROWID" - self._test_negative_set_and_get(oracledb.DB_TYPE_ROWID, 12345) - self._test_negative_set_and_get(oracledb.DB_TYPE_ROWID, "523lkhlf") - - def test_3721(self): - "3721 - setting values on variables of type DB_TYPE_OBJECT" - obj_type = self.conn.gettype("UDT_OBJECT") - obj = obj_type.newobject() - plain_obj = self.get_db_object_as_plain_object(obj) - self._test_positive_set_and_get( - oracledb.DB_TYPE_OBJECT, obj, plain_obj, "UDT_OBJECT" - ) - self._test_positive_set_and_get(obj_type, obj, plain_obj) - self._test_positive_set_and_get( - oracledb.DB_TYPE_OBJECT, None, None, "UDT_OBJECT" - ) - self._test_positive_set_and_get(obj_type, None, None) - self._test_negative_set_and_get( - oracledb.DB_TYPE_OBJECT, "abc", "UDT_OBJECT" - ) - self._test_negative_set_and_get( - oracledb.DB_TYPE_OBJECT, obj, "UDT_OBJECTARRAY" - ) - wrong_obj_type = 
self.conn.gettype("UDT_OBJECTARRAY") - self._test_negative_set_and_get(wrong_obj_type, obj) - - @test_env.skip_unless_native_json_supported() - def test_3722(self): - "3722 - setting values on variables of type DB_TYPE_JSON" - json_data = [ - 5, - 25.25, - decimal.Decimal("10.25"), - True, - False, - datetime.datetime(2017, 5, 6), - datetime.datetime(2017, 5, 6, 9, 36, 0, 300000), - datetime.timedelta(days=5, seconds=56000, microseconds=123780), - {}, - "String", - b"Some bytes", - {"keyA": 1, "KeyB": "Melbourne"}, - [], - [1, "A"], - {"name": None}, - {"name": "John"}, - {"age": 30}, - {"Permanent": True}, - { - "employee": { - "name": "John", - "age": 30, - "city": "Delhi", - "Parmanent": True, - } - }, - {"employees": ["John", "Matthew", "James"]}, - { - "employees": [ - {"employee1": {"name": "John", "city": "Delhi"}}, - {"employee2": {"name": "Matthew", "city": "Mumbai"}}, - {"employee3": {"name": "James", "city": "Bangalore"}}, - ] - }, - ] - self._test_positive_set_and_get( - oracledb.DB_TYPE_JSON, json_data, json_data - ) - self._test_positive_set_and_get(oracledb.DB_TYPE_JSON, None, None) - - def test_3723(self): - "3723 - test setting values on variables of type DB_TYPE_CURSOR" - self._test_positive_set_and_get(oracledb.DB_TYPE_CURSOR, None, None) - self._test_negative_set_and_get(oracledb.DB_TYPE_CURSOR, 5) - - def test_3724(self): - "3724 - test fetching columns containing all null values" - self.cursor.execute( - """ - select null, to_char(null), to_number(null), to_date(null), - to_timestamp(null), to_clob(null), to_blob(null) - from dual - """ - ) - self.assertEqual( - self.cursor.fetchall(), - [(None, None, None, None, None, None, None)], - ) - - @test_env.skip_unless_thin_mode() - def test_3725(self): - "3725 - setting values on variables of type DB_TYPE_UROWID" - self._test_negative_set_and_get(oracledb.DB_TYPE_UROWID, 12345) - self._test_negative_set_and_get(oracledb.DB_TYPE_UROWID, "523lkhlf") - - def test_3726(self): - "3726 - getting value with an specific index" - var = self.cursor.var(oracledb.DB_TYPE_NUMBER, 1000, 2) - var.setvalue(0, 10) - self.assertEqual(var.getvalue(0), 10) - self.assertIsNone(var.getvalue(1)) - self.assertRaises(IndexError, var.getvalue, 4) - - def test_3727(self): - "3727 - getting buffer_size attribute" - test_values = [ - (oracledb.DB_TYPE_NUMBER, 200, 22), - (oracledb.DB_TYPE_VARCHAR, 3000, 12000), - (oracledb.DB_TYPE_RAW, 4000, 4000), - (oracledb.DB_TYPE_NCHAR, 1000, 4000), - (oracledb.DB_TYPE_CHAR, 2000, 8000), - ] - for typ, size, buffer_size in test_values: - var = self.cursor.var(typ, size) - self.assertEqual(var.buffer_size, buffer_size) - - def test_3728(self): - "3728 - getting actual elements" - array_size = 8 - var = self.cursor.var(oracledb.DB_TYPE_NUMBER, arraysize=array_size) - self.assertEqual(var.actual_elements, array_size) - self.assertEqual(var.actual_elements, var.num_elements) - - def test_3729(self): - "3729 - test deprecated attributes" - var = self.cursor.var(oracledb.DB_TYPE_NUMBER, arraysize=200) - self.assertEqual(var.bufferSize, 22) - self.assertEqual(var.actualElements, 200) - self.assertEqual(var.numElements, 200) - - def test_3730(self): - "3730 - test calling of outconverter with null values" - - def type_handler(cursor, metadata): - return cursor.var( - metadata.type_code, - outconverter=lambda v: f"|{v}|" if v else "", - convert_nulls=True, - arraysize=cursor.arraysize, - ) - - self.cursor.outputtypehandler = type_handler - self.cursor.execute( - """ - select 'First - A', 'First - B' - from dual - union all 
- select 'Second - A', null - from dual - union all - select null, 'Third - B' - from dual - """ - ) - rows = self.cursor.fetchall() - expected_rows = [ - ("|First - A|", "|First - B|"), - ("|Second - A|", ""), - ("", "|Third - B|"), - ] - self.assertEqual(rows, expected_rows) - - def test_3731(self): - "3731 - test getting convert_nulls" - for convert_nulls in [True, False]: - simple_var = self.cursor.var(str, convert_nulls=convert_nulls) - self.assertEqual(simple_var.convert_nulls, convert_nulls) - - def test_3732(self): - "3732 - test encoding_errors" - if test_env.get_charset() != "AL32UTF8": - self.skipTest("Database character set must be AL32UTF8") - str_value = "Я" - replacement_char = "�" - invalid_bytes = str_value.encode("windows-1251") - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, utl_raw.cast_to_varchar2(:1)) - """, - [invalid_bytes], - ) - self.conn.commit() - - for arg_name in ["encoding_errors", "encodingErrors"]: - with self.subTest(arg_name=arg_name): - - def type_handler(cursor, fetch_info): - args = dict(arraysize=cursor.arraysize) - args[arg_name] = "replace" - return cursor.var(fetch_info.type_code, **args) - - with self.conn.cursor() as cursor: - cursor.outputtypehandler = type_handler - cursor.execute("select StringCol1 from TestTempTable") - (fetched_value,) = cursor.fetchone() - self.assertEqual(fetched_value, replacement_char) - - -if __name__ == "__main__": - test_env.run_test_cases() + if isinstance(result, datetime.datetime): + result = result.date() + assert type(result) == type(expected_value) + assert result == expected_value + + +def _test_negative_set_and_get(cursor, var_type, value_to_set, type_name=None): + var = cursor.var(var_type, typename=type_name) + pytest.raises( + (TypeError, oracledb.DatabaseError), var.setvalue, 0, value_to_set + ) + + +def test_3700(cursor, test_env): + "3700 - setting values on variables of type DB_TYPE_NUMBER" + _test_positive_set_and_get(cursor, test_env, int, 5, 5) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NUMBER, 3.5, 3.5 + ) + _test_positive_set_and_get( + cursor, + test_env, + decimal.Decimal, + decimal.Decimal("24.8"), + decimal.Decimal("24.8"), + ) + _test_positive_set_and_get(cursor, test_env, int, True, 1) + _test_positive_set_and_get(cursor, test_env, int, False, 0) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NUMBER, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_NUMBER, "abc") + + +def test_3701(cursor, test_env): + "3701 - setting values on variables of type DB_TYPE_BINARY_INTEGER" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_INTEGER, 5, 5 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_INTEGER, 3.5, 3 + ) + _test_positive_set_and_get( + cursor, + test_env, + oracledb.DB_TYPE_BINARY_INTEGER, + decimal.Decimal("24.8"), + 24, + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_INTEGER, True, 1 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_INTEGER, False, 0 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_INTEGER, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_BINARY_INTEGER, "abc") + + +def test_3702(cursor, test_env): + "3702 - setting values on variables of type DB_TYPE_VARCHAR" + value = "A VARCHAR string" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_VARCHAR, 
value, value + ) + value = b"A raw string for VARCHAR" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_VARCHAR, value, value.decode() + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_VARCHAR, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_VARCHAR, 5) + + +def test_3703(cursor, test_env): + "3703 - setting values on variables of type DB_TYPE_NVARCHAR" + value = "A NVARCHAR string" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NVARCHAR, value, value + ) + value = b"A raw string for NVARCHAR" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NVARCHAR, value, value.decode() + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NVARCHAR, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_NVARCHAR, 5) + + +def test_3704(cursor, test_env): + "3704 - setting values on variables of type DB_TYPE_CHAR" + value = "A CHAR string" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CHAR, value, value + ) + value = b"A raw string for CHAR" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CHAR, value, value.decode() + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CHAR, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_CHAR, 5) + + +def test_3705(cursor, test_env): + "3705 - setting values on variables of type DB_TYPE_NCHAR" + value = "A NCHAR string" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NCHAR, value, value + ) + value = b"A raw string for NCHAR" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CHAR, value, value.decode() + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NCHAR, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_NCHAR, 5) + + +def test_3706(cursor, test_env): + "3706 - setting values on variables of type DB_TYPE_LONG" + value = "Long Data" * 15000 + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_LONG, value, value + ) + value = b"Raw data for LONG" * 15000 + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_LONG, value, value.decode() + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_LONG, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_LONG, 5) + + +def test_3707(cursor, test_env): + "3707 - setting values on variables of type DB_TYPE_RAW" + value = b"Raw Data" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_RAW, value, value + ) + value = "String data for RAW" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_RAW, value, value.encode() + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_RAW, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_RAW, 5) + + +def test_3708(cursor, test_env): + "3708 - setting values on variables of type DB_TYPE_LONG_RAW" + value = b"Long Raw Data" * 15000 + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_LONG_RAW, value, value + ) + value = "String data for LONG RAW" * 15000 + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_LONG_RAW, value, value.encode() + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_LONG_RAW, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_LONG_RAW, 5) + + +def test_3709(cursor, test_env): + "3709 - setting values on variables of type DB_TYPE_DATE" + value = datetime.date(2017, 5, 6) + _test_positive_set_and_get( + cursor, 
test_env, oracledb.DB_TYPE_DATE, value, value + ) + value = datetime.datetime(2017, 5, 6, 9, 36, 0) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_DATE, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_DATE, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_DATE, 5) + + +def test_3710(cursor, test_env): + "3710 - setting values on variables of type DB_TYPE_TIMESTAMP" + value = datetime.date(2017, 5, 6) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP, value, value + ) + value = datetime.datetime(2017, 5, 6, 9, 36, 0, 300000) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_TIMESTAMP, 5) + + +def test_3711(cursor, test_env): + "3711 - setting values on variables of type DB_TYPE_TIMESTAMP_TZ" + value = datetime.date(2017, 5, 6) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP_TZ, value, value + ) + value = datetime.datetime(2017, 5, 6, 9, 36, 0, 300000) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP_TZ, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP_TZ, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_TIMESTAMP_TZ, 5) + + +def test_3712(cursor, test_env): + "3712 - setting values on variables of type DB_TYPE_TIMESTAMP_LTZ" + value = datetime.date(2017, 5, 6) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP_LTZ, value, value + ) + value = datetime.datetime(2017, 5, 6, 9, 36, 0, 300000) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP_LTZ, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_TIMESTAMP_LTZ, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_TIMESTAMP_LTZ, 5) + + +def test_3713(conn, cursor, test_env): + "3713 - setting values on variables of type DB_TYPE_BLOB" + value = b"Short temp BLOB value" + temp_blob = conn.createlob(oracledb.DB_TYPE_BLOB) + temp_blob.write(value) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BLOB, temp_blob, value + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_CLOB, temp_blob) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_NCLOB, temp_blob) + value = b"Short BLOB value" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BLOB, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BLOB, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_BLOB, 5) + + +def test_3714(conn, cursor, test_env): + "3714 - setting values on variables of type DB_TYPE_CLOB" + value = "Short temp CLOB value" + temp_clob = conn.createlob(oracledb.DB_TYPE_CLOB) + temp_clob.write(value) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CLOB, temp_clob, value + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_BLOB, temp_clob) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_NCLOB, temp_clob) + value = "Short CLOB value" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CLOB, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CLOB, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_CLOB, 5) + + +def test_3715(conn, cursor, test_env): + "3715 - setting values on variables of 
type DB_TYPE_NCLOB" + value = "Short temp NCLOB value" + temp_nclob = conn.createlob(oracledb.DB_TYPE_NCLOB) + temp_nclob.write(value) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NCLOB, temp_nclob, value + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_BLOB, temp_nclob) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_CLOB, temp_nclob) + value = "Short NCLOB Value" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NCLOB, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_NCLOB, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_NCLOB, 5) + + +def test_3716(cursor, test_env): + "3716 - setting values on variables of type DB_TYPE_BINARY_FLOAT" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_FLOAT, 5, 5.0 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_FLOAT, 3.5, 3.5 + ) + _test_positive_set_and_get( + cursor, + test_env, + oracledb.DB_TYPE_BINARY_FLOAT, + decimal.Decimal("24.5"), + 24.5, + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_FLOAT, True, 1.0 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_FLOAT, False, 0.0 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_FLOAT, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_BINARY_FLOAT, "abc") + + +def test_3717(cursor, test_env): + "3717 - setting values on variables of type DB_TYPE_BINARY_DOUBLE" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_DOUBLE, 5, 5.0 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_DOUBLE, 3.5, 3.5 + ) + _test_positive_set_and_get( + cursor, + test_env, + oracledb.DB_TYPE_BINARY_DOUBLE, + decimal.Decimal("192.125"), + 192.125, + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_DOUBLE, True, 1.0 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_DOUBLE, False, 0.0 + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BINARY_DOUBLE, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_BINARY_DOUBLE, "abc") + + +def test_3718(cursor, test_env): + "3718 - setting values on variables of type DB_TYPE_BOOLEAN" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BOOLEAN, 5, True + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BOOLEAN, 2.0, True + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BOOLEAN, "abc", True + ) + _test_positive_set_and_get( + cursor, + test_env, + oracledb.DB_TYPE_BOOLEAN, + decimal.Decimal("24.8"), + True, + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BOOLEAN, 0.0, False + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BOOLEAN, 0, False + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_BOOLEAN, None, None + ) + + +def test_3719(cursor, test_env): + "3719 - setting values on variables of type DB_TYPE_INTERVAL_DS" + value = datetime.timedelta(days=5, seconds=56000, microseconds=123780) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_INTERVAL_DS, value, value + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_INTERVAL_DS, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_INTERVAL_DS, 5) + + +def test_3720(cursor, test_env): + "3720 - setting values on variables of type DB_TYPE_ROWID" + 
_test_negative_set_and_get(cursor, oracledb.DB_TYPE_ROWID, 12345) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_ROWID, "523lkhlf") + + +def test_3721(conn, cursor, test_env): + "3721 - setting values on variables of type DB_TYPE_OBJECT" + obj_type = conn.gettype("UDT_OBJECT") + obj = obj_type.newobject() + plain_obj = test_env.get_db_object_as_plain_object(obj) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_OBJECT, obj, plain_obj, "UDT_OBJECT" + ) + _test_positive_set_and_get(cursor, test_env, obj_type, obj, plain_obj) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_OBJECT, None, None, "UDT_OBJECT" + ) + _test_positive_set_and_get(cursor, test_env, obj_type, None, None) + _test_negative_set_and_get( + cursor, oracledb.DB_TYPE_OBJECT, "abc", "UDT_OBJECT" + ) + _test_negative_set_and_get( + cursor, oracledb.DB_TYPE_OBJECT, obj, "UDT_OBJECTARRAY" + ) + wrong_obj_type = conn.gettype("UDT_OBJECTARRAY") + _test_negative_set_and_get(cursor, wrong_obj_type, obj) + + +def test_3722(skip_unless_native_json_supported, cursor, test_env): + "3722 - setting values on variables of type DB_TYPE_JSON" + json_data = [ + 5, + 25.25, + decimal.Decimal("10.25"), + True, + False, + datetime.datetime(2017, 5, 6), + datetime.datetime(2017, 5, 6, 9, 36, 0, 300000), + datetime.timedelta(days=5, seconds=56000, microseconds=123780), + {}, + "String", + b"Some bytes", + {"keyA": 1, "KeyB": "Melbourne"}, + [], + [1, "A"], + {"name": None}, + {"name": "John"}, + {"age": 30}, + {"Permanent": True}, + { + "employee": { + "name": "John", + "age": 30, + "city": "Delhi", + "Parmanent": True, + } + }, + {"employees": ["John", "Matthew", "James"]}, + { + "employees": [ + {"employee1": {"name": "John", "city": "Delhi"}}, + {"employee2": {"name": "Matthew", "city": "Mumbai"}}, + {"employee3": {"name": "James", "city": "Bangalore"}}, + ] + }, + ] + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_JSON, json_data, json_data + ) + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_JSON, None, None + ) + + +def test_3723(cursor, test_env): + "3723 - test setting values on variables of type DB_TYPE_CURSOR" + _test_positive_set_and_get( + cursor, test_env, oracledb.DB_TYPE_CURSOR, None, None + ) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_CURSOR, 5) + + +def test_3724(cursor): + "3724 - test fetching columns containing all null values" + cursor.execute( + """ + select null, to_char(null), to_number(null), to_date(null), + to_timestamp(null), to_clob(null), to_blob(null) + from dual + """ + ) + assert cursor.fetchall() == [(None, None, None, None, None, None, None)] + + +def test_3725(skip_unless_thin_mode, cursor, test_env): + "3725 - setting values on variables of type DB_TYPE_UROWID" + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_UROWID, 12345) + _test_negative_set_and_get(cursor, oracledb.DB_TYPE_UROWID, "523lkhlf") + + +def test_3726(cursor): + "3726 - getting value with an specific index" + var = cursor.var(oracledb.DB_TYPE_NUMBER, 1000, 2) + var.setvalue(0, 10) + assert var.getvalue(0) == 10 + assert var.getvalue(1) is None + pytest.raises(IndexError, var.getvalue, 4) + + +def test_3727(cursor): + "3727 - getting buffer_size attribute" + test_values = [ + (oracledb.DB_TYPE_NUMBER, 200, 22), + (oracledb.DB_TYPE_VARCHAR, 3000, 12000), + (oracledb.DB_TYPE_RAW, 4000, 4000), + (oracledb.DB_TYPE_NCHAR, 1000, 4000), + (oracledb.DB_TYPE_CHAR, 2000, 8000), + ] + for typ, size, buffer_size in test_values: + var = cursor.var(typ, size) + assert 
var.buffer_size == buffer_size + + +def test_3728(cursor): + "3728 - getting actual elements" + array_size = 8 + var = cursor.var(oracledb.DB_TYPE_NUMBER, arraysize=array_size) + assert var.actual_elements == array_size + assert var.actual_elements == var.num_elements + + +def test_3729(cursor): + "3729 - test deprecated attributes" + var = cursor.var(oracledb.DB_TYPE_NUMBER, arraysize=200) + assert var.bufferSize == 22 + assert var.actualElements == 200 + assert var.numElements == 200 + + +def test_3730(cursor): + "3730 - test calling of outconverter with null values" + + def type_handler(cursor, metadata): + return cursor.var( + metadata.type_code, + outconverter=lambda v: f"|{v}|" if v else "", + convert_nulls=True, + arraysize=cursor.arraysize, + ) + + cursor.outputtypehandler = type_handler + cursor.execute( + """ + select 'First - A', 'First - B' + from dual + union all + select 'Second - A', null + from dual + union all + select null, 'Third - B' + from dual + """ + ) + rows = cursor.fetchall() + expected_rows = [ + ("|First - A|", "|First - B|"), + ("|Second - A|", ""), + ("", "|Third - B|"), + ] + assert rows == expected_rows + + +def test_3731(cursor): + "3731 - test getting convert_nulls" + for convert_nulls in [True, False]: + simple_var = cursor.var(str, convert_nulls=convert_nulls) + assert simple_var.convert_nulls == convert_nulls + + +def test_3732(conn, cursor, test_env): + "3732 - test encoding_errors" + if test_env.charset != "AL32UTF8": + pytest.skip("Database character set must be AL32UTF8") + str_value = "Я" + replacement_char = "�" + invalid_bytes = str_value.encode("windows-1251") + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, utl_raw.cast_to_varchar2(:1)) + """, + [invalid_bytes], + ) + conn.commit() + + for arg_name in ["encoding_errors", "encodingErrors"]: + + def type_handler(cursor, fetch_info): + args = dict(arraysize=cursor.arraysize) + args[arg_name] = "replace" + return cursor.var(fetch_info.type_code, **args) + + with conn.cursor() as cursor: + cursor.outputtypehandler = type_handler + cursor.execute("select StringCol1 from TestTempTable") + (fetched_value,) = cursor.fetchone() + assert fetched_value == replacement_char diff --git a/tests/test_3800_typehandler.py b/tests/test_3800_typehandler.py index d6faf521..f783f33f 100644 --- a/tests/test_3800_typehandler.py +++ b/tests/test_3800_typehandler.py @@ -30,7 +30,7 @@ import json import oracledb -import test_env +import pytest class Building: @@ -60,262 +60,259 @@ def from_json(cls, value): return cls(**result) -class TestCase(test_env.BaseTestCase): - def building_in_converter(self, value): - return value.to_json() +def building_in_converter(value): + return value.to_json() - def input_type_handler(self, cursor, value, num_elements): - if isinstance(value, Building): - return cursor.var( - oracledb.STRING, - arraysize=num_elements, - inconverter=self.building_in_converter, - ) - def output_type_handler(self, cursor, metadata): +def input_type_handler(cursor, value, num_elements): + if isinstance(value, Building): + return cursor.var( + oracledb.STRING, + arraysize=num_elements, + inconverter=building_in_converter, + ) + + +def output_type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_VARCHAR: + return cursor.var( + metadata.type_code, + arraysize=cursor.arraysize, + outconverter=Building.from_json, + ) + + +def test_3800(cursor, test_env): + "3800 - binding unsupported python object without 
input type handler" + cursor.execute("truncate table TestTempTable") + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + building = Building(1, "The First Building", 5) + with test_env.assert_raises_full_code("DPY-3002"): + cursor.execute(sql, [building.building_id, building]) + + +def test_3801(cursor): + "3801 - not callable input type handler" + cursor.execute("truncate table TestTempTable") + building = Building(1, "The First Building", 5) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + cursor.inputtypehandler = 5 + assert cursor.inputtypehandler == 5 + with pytest.raises(TypeError): + cursor.execute(sql, (building.building_id, building)) + + +def test_3802(conn, cursor): + "3802 - binding unsupported python object with input type handler" + cursor.execute("truncate table TestTempTable") + building = Building(1, "The First Building", 5) + cursor.inputtypehandler = input_type_handler + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [building.building_id, building], + ) + assert cursor.bindvars[1].inconverter == building_in_converter + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [(building.building_id, building.to_json())] + + +def test_3803(conn, cursor, test_env): + "3803 - input type handler and output type handler on cursor level" + cursor.execute("truncate table TestTempTable") + building_one = Building(1, "The First Building", 5) + building_two = Building(2, "The Second Building", 87) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + cursor_one = conn.cursor() + cursor_two = conn.cursor() + cursor_one.inputtypehandler = input_type_handler + cursor_one.execute(sql, [building_one.building_id, building_one]) + conn.commit() + + cursor_one.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor_one.fetchall() == [ + (building_one.building_id, building_one.to_json()) + ] + with test_env.assert_raises_full_code("DPY-3002"): + cursor_two.execute( + sql, + (building_two.building_id, building_two), + ) + + cursor_two.outputtypehandler = output_type_handler + cursor_two.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor_two.fetchall() == [(building_one.building_id, building_one)] + + +def test_3804(cursor, test_env): + "3804 - input type handler and output type handler on connection level" + cursor.execute("truncate table TestTempTable") + building_one = Building(1, "The First Building", 5) + building_two = Building(2, "The Second Building", 87) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + conn = test_env.get_connection() + conn.inputtypehandler = input_type_handler + assert conn.inputtypehandler == input_type_handler + + cursor_one = conn.cursor() + cursor_two = conn.cursor() + cursor_one.execute(sql, [building_one.building_id, building_one]) + cursor_two.execute(sql, [building_two.building_id, building_two]) + conn.commit() + + expected_data = [ + (building_one.building_id, building_one), + (building_two.building_id, building_two), + ] + conn.outputtypehandler = output_type_handler + assert conn.outputtypehandler == output_type_handler + cursor_one.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor_one.fetchvars[1].outconverter == Building.from_json + assert cursor_one.fetchall() == expected_data + + cursor_two.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor_two.fetchall() == expected_data + 
other_cursor = cursor.connection.cursor() + with test_env.assert_raises_full_code("DPY-3002"): + other_cursor.execute(sql, (building_one.building_id, building_one)) + + +def test_3805(conn, cursor): + "3805 - output type handler with outconvert and null values" + cursor.execute("truncate table TestTempTable") + data_to_insert = [(1, "String 1"), (2, None), (3, "String 2")] + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data_to_insert, + ) + conn.commit() + + def converter(value): + return "CONVERTED" + + def output_type_handler(cursor, metadata): if metadata.type_code is oracledb.DB_TYPE_VARCHAR: return cursor.var( - metadata.type_code, - arraysize=cursor.arraysize, - outconverter=Building.from_json, + str, outconverter=converter, arraysize=cursor.arraysize ) - def test_3800(self): - "3800 - binding unsupported python object without input type handler" - self.cursor.execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - building = Building(1, "The First Building", 5) - with self.assertRaisesFullCode("DPY-3002"): - self.cursor.execute(sql, [building.building_id, building]) - - def test_3801(self): - "3801 - not callable input type handler" - self.cursor.execute("truncate table TestTempTable") - building = Building(1, "The First Building", 5) - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - self.cursor.inputtypehandler = 5 - self.assertEqual(self.cursor.inputtypehandler, 5) - with self.assertRaises(TypeError): - self.cursor.execute(sql, (building.building_id, building)) - - def test_3802(self): - "3802 - binding unsupported python object with input type handler" - self.cursor.execute("truncate table TestTempTable") - building = Building(1, "The First Building", 5) - self.cursor.inputtypehandler = self.input_type_handler - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [building.building_id, building], - ) - self.assertEqual( - self.cursor.bindvars[1].inconverter, self.building_in_converter - ) - self.conn.commit() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual( - self.cursor.fetchall(), - [(building.building_id, building.to_json())], - ) + cursor.outputtypehandler = output_type_handler + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] + assert cursor.fetchall() == expected_data - def test_3803(self): - "3803 - input type handler and output type handler on cursor level" - self.cursor.execute("truncate table TestTempTable") - building_one = Building(1, "The First Building", 5) - building_two = Building(2, "The Second Building", 87) - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - cursor_one = self.conn.cursor() - cursor_two = self.conn.cursor() - cursor_one.inputtypehandler = self.input_type_handler - cursor_one.execute(sql, [building_one.building_id, building_one]) - self.conn.commit() - - cursor_one.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual( - cursor_one.fetchall(), - [(building_one.building_id, building_one.to_json())], - ) - with self.assertRaisesFullCode("DPY-3002"): - cursor_two.execute( - sql, - (building_two.building_id, building_two), + +def test_3806(cursor, test_env): + "3806 - output type handler for fetching 21c JSON" + + def output_type_handler(cursor, metadata): + # 
fetch 21c JSON datatype when using python-oracledb thin mode + if metadata.type_code is oracledb.DB_TYPE_JSON: + return cursor.var( + str, arraysize=cursor.arraysize, outconverter=json.loads + ) + # if using Oracle Client version < 21, then database returns BLOB + # data type instead of JSON data type + elif metadata.type_code is oracledb.DB_TYPE_BLOB: + return cursor.var( + metadata.type, + arraysize=cursor.arraysize, + outconverter=lambda v: json.loads(v.read()), ) - cursor_two.outputtypehandler = self.output_type_handler - cursor_two.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual( - cursor_two.fetchall(), [(building_one.building_id, building_one)] - ) + # require a 21c+ database + if not test_env.has_server_version(21): + pytest.skip("unsupported database") + + cursor.execute("delete from TestJson") + insert_sql = "insert into TestJson values (:1, :2)" + json_data = [ + dict(name="John", city="Delhi"), + dict(name="George", city="Bangalore"), + dict(name="Sam", city="Mumbai"), + ] + data_to_insert = list(enumerate(json_data)) + if test_env.has_client_version(21): + # take advantage of direct binding + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + cursor.executemany(insert_sql, data_to_insert) + else: + # insert data as JSON string + json_string_data = [(i, json.dumps(j)) for i, j in data_to_insert] + cursor.executemany(insert_sql, json_string_data) + + if not test_env.has_client_version(21): + cursor.outputtypehandler = output_type_handler + cursor.execute("select * from TestJson") + assert cursor.fetchall() == data_to_insert + + +def test_3807(cursor, test_env): + "3807 - output type handler for encoding errors" + + if test_env.charset != "AL32UTF8": + pytest.skip("Database character set must be AL32UTF8") + + def output_type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_VARCHAR: + return cursor.var( + metadata.type_code, + arraysize=cursor.arraysize, + encoding_errors="replace", + ) - def test_3804(self): - "3804 - input type handler and output type handler on connection level" - self.cursor.execute("truncate table TestTempTable") - building_one = Building(1, "The First Building", 5) - building_two = Building(2, "The Second Building", 87) - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - conn = test_env.get_connection() - conn.inputtypehandler = self.input_type_handler - self.assertEqual(conn.inputtypehandler, self.input_type_handler) - - cursor_one = conn.cursor() - cursor_two = conn.cursor() - cursor_one.execute(sql, [building_one.building_id, building_one]) - cursor_two.execute(sql, [building_two.building_id, building_two]) - conn.commit() - - expected_data = [ - (building_one.building_id, building_one), - (building_two.building_id, building_two), - ] - conn.outputtypehandler = self.output_type_handler - self.assertEqual(conn.outputtypehandler, self.output_type_handler) - cursor_one.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual( - cursor_one.fetchvars[1].outconverter, Building.from_json - ) - self.assertEqual(cursor_one.fetchall(), expected_data) - - cursor_two.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor_two.fetchall(), expected_data) - other_cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-3002"): - other_cursor.execute(sql, (building_one.building_id, building_one)) - - def test_3805(self): - "3805 - output type handler with outconvert and null values" - self.cursor.execute("truncate table TestTempTable") - 
data_to_insert = [(1, "String 1"), (2, None), (3, "String 2")] - self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data_to_insert, - ) - self.conn.commit() + cursor.outputtypehandler = output_type_handler + cursor.execute("select utl_raw.cast_to_varchar2('41ab42cd43ef') from dual") + (result,) = cursor.fetchone() + rc = chr(0xFFFD) + expected_result = f"A{rc}B{rc}C{rc}" + assert result == expected_result - def converter(value): - return "CONVERTED" - def output_type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_VARCHAR: - return cursor.var( - str, outconverter=converter, arraysize=cursor.arraysize - ) +def test_3808(conn): + "3808 - output type handler with object implementing __call__()" - self.cursor.outputtypehandler = output_type_handler - self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] - self.assertEqual(self.cursor.fetchall(), expected_data) + class TimestampOutputTypeHandler: - @test_env.skip_unless_native_json_supported() - def test_3806(self): - "3806 - output type handler for fetching 21c JSON" + def __init__(self, unit="s"): + if unit == "ms": + self.factor = 1000 + else: + self.factor = 1 - def output_type_handler(cursor, metadata): - # fetch 21c JSON datatype when using python-oracledb thin mode - if metadata.type_code is oracledb.DB_TYPE_JSON: - return cursor.var( - str, arraysize=cursor.arraysize, outconverter=json.loads - ) - # if using Oracle Client version < 21, then database returns BLOB - # data type instead of JSON data type - elif metadata.type_code is oracledb.DB_TYPE_BLOB: - return cursor.var( - metadata.type, - arraysize=cursor.arraysize, - outconverter=lambda v: json.loads(v.read()), - ) + def converter(self, d): + return int(d.timestamp() * self.factor) - self.cursor.execute("delete from TestJson") - insert_sql = "insert into TestJson values (:1, :2)" - json_data = [ - dict(name="John", city="Delhi"), - dict(name="George", city="Bangalore"), - dict(name="Sam", city="Mumbai"), - ] - data_to_insert = list(enumerate(json_data)) - if test_env.has_client_version(21): - # take advantage of direct binding - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.executemany(insert_sql, data_to_insert) - else: - # insert data as JSON string - json_string_data = [(i, json.dumps(j)) for i, j in data_to_insert] - self.cursor.executemany(insert_sql, json_string_data) - - if not test_env.has_client_version(21): - self.cursor.outputtypehandler = output_type_handler - self.cursor.execute("select * from TestJson") - self.assertEqual(self.cursor.fetchall(), data_to_insert) - - def test_3807(self): - "3807 - output type handler for encoding errors" - - if test_env.get_charset() != "AL32UTF8": - self.skipTest("Database character set must be AL32UTF8") - - def output_type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_VARCHAR: + def __call__(self, cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_TIMESTAMP: return cursor.var( metadata.type_code, arraysize=cursor.arraysize, - encoding_errors="replace", + outconverter=self.converter, ) - self.cursor.outputtypehandler = output_type_handler - self.cursor.execute( - "select utl_raw.cast_to_varchar2('41ab42cd43ef') from dual" - ) - (result,) = self.cursor.fetchone() - rc = chr(0xFFFD) - expected_result = f"A{rc}B{rc}C{rc}" - self.assertEqual(result, expected_result) - - def 
test_3808(self): - "3808 - output type handler with object implementing __call__()" - - class TimestampOutputTypeHandler: - - def __init__(self, unit="s"): - if unit == "ms": - self.factor = 1000 - else: - self.factor = 1 - - def converter(self, d): - return int(d.timestamp() * self.factor) - - def __call__(self, cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_TIMESTAMP: - return cursor.var( - metadata.type_code, - arraysize=cursor.arraysize, - outconverter=self.converter, - ) - - d = datetime.datetime.today() - with self.conn.cursor() as cursor: - cursor.outputtypehandler = TimestampOutputTypeHandler("ms") - cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) - cursor.execute("select :d from dual", [d]) - (result,) = cursor.fetchone() - self.assertEqual(result, int(d.timestamp() * 1000)) - with self.conn.cursor() as cursor: - cursor.outputtypehandler = TimestampOutputTypeHandler("s") - cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) - cursor.execute("select :d from dual", [d]) - (result,) = cursor.fetchone() - self.assertEqual(result, int(d.timestamp())) - - -if __name__ == "__main__": - test_env.run_test_cases() + d = datetime.datetime.today() + with conn.cursor() as cursor: + cursor.outputtypehandler = TimestampOutputTypeHandler("ms") + cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) + cursor.execute("select :d from dual", [d]) + (result,) = cursor.fetchone() + assert result == int(d.timestamp() * 1000) + with conn.cursor() as cursor: + cursor.outputtypehandler = TimestampOutputTypeHandler("s") + cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) + cursor.execute("select :d from dual", [d]) + (result,) = cursor.fetchone() + assert result == int(d.timestamp()) diff --git a/tests/test_3900_cursor_execute.py b/tests/test_3900_cursor_execute.py index 0d18c4c9..83ea83e7 100644 --- a/tests/test_3900_cursor_execute.py +++ b/tests/test_3900_cursor_execute.py @@ -29,546 +29,557 @@ import collections import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_3900(self): - "3900 - test executing a statement without any arguments" - result = self.cursor.execute("begin null; end;") - self.assertIsNone(result) - - def test_3901(self): - "3901 - test executing a None statement with bind variables" - cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-2001"): - cursor.execute(None, x=5) - - def test_3902(self): - "3902 - test executing a statement with args and empty keyword args" - simple_var = self.cursor.var(oracledb.NUMBER) - args = [simple_var] - kwargs = {} - result = self.cursor.execute("begin :1 := 25; end;", args, **kwargs) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 25) - - def test_3903(self): - "3903 - test executing a statement with keyword arguments" - simple_var = self.cursor.var(oracledb.NUMBER) - result = self.cursor.execute( - "begin :value := 5; end;", value=simple_var - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 5) - - def test_3904(self): - "3904 - test executing a statement with a dictionary argument" - simple_var = self.cursor.var(oracledb.NUMBER) - dict_arg = dict(value=simple_var) - result = self.cursor.execute("begin :value := 10; end;", dict_arg) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 10) - - def test_3905(self): - "3905 - test executing a statement with both a dict and keyword args" - simple_var = self.cursor.var(oracledb.NUMBER) - dict_arg = dict(value=simple_var) - with self.assertRaisesFullCode("DPY-2005"): - self.cursor.execute( - 
"begin :value := 15; end;", dict_arg, value=simple_var - ) - def test_3906(self): - "3906 - test executing a statement and then changing the array size" - self.cursor.execute("select IntCol from TestNumbers") - self.cursor.arraysize = 20 - self.assertEqual(len(self.cursor.fetchall()), 10) - - def test_3907(self): - "3907 - test that subsequent executes succeed after bad execute" - sql = "begin raise_application_error(-20000, 'this); end;" - with self.assertRaisesFullCode("DPY-2041"): - self.cursor.execute(sql) - self.cursor.execute("begin null; end;") - - def test_3908(self): - "3908 - test that subsequent fetches fail after bad execute" - with self.assertRaisesFullCode("ORA-00904"): - self.cursor.execute("select y from dual") - with self.assertRaisesFullCode("DPY-1003"): - self.cursor.fetchall() - - def test_3909(self): - "3909 - test executing a statement with an incorrect named bind" - sql = "select * from TestStrings where IntCol = :value" - with self.assertRaisesFullCode("DPY-4008", "ORA-01036"): - self.cursor.execute(sql, value2=3) - - def test_3910(self): - "3910 - test executing a statement with named binds" - result = self.cursor.execute( - """ - select * - from TestNumbers - where IntCol = :value1 and LongIntCol = :value2 - """, - value1=1, - value2=38, - ) - self.assertEqual(len(result.fetchall()), 1) - - def test_3911(self): - "3911 - test executing a statement with an incorrect positional bind" - sql = """ - select * - from TestNumbers - where IntCol = :value and LongIntCol = :value2""" - with self.assertRaisesFullCode("DPY-4009", "ORA-01008"): - self.cursor.execute(sql, [3]) - - def test_3912(self): - "3912 - test executing a statement with positional binds" - result = self.cursor.execute( - """ + +def test_3900(cursor): + "3900 - test executing a statement without any arguments" + result = cursor.execute("begin null; end;") + assert result is None + + +def test_3901(conn, test_env): + "3901 - test executing a None statement with bind variables" + cursor = conn.cursor() + with test_env.assert_raises_full_code("DPY-2001"): + cursor.execute(None, x=5) + + +def test_3902(cursor): + "3902 - test executing a statement with args and empty keyword args" + simple_var = cursor.var(oracledb.NUMBER) + args = [simple_var] + kwargs = {} + result = cursor.execute("begin :1 := 25; end;", args, **kwargs) + assert result is None + assert simple_var.getvalue() == 25 + + +def test_3903(cursor): + "3903 - test executing a statement with keyword arguments" + simple_var = cursor.var(oracledb.NUMBER) + result = cursor.execute("begin :value := 5; end;", value=simple_var) + assert result is None + assert simple_var.getvalue() == 5 + + +def test_3904(cursor): + "3904 - test executing a statement with a dictionary argument" + simple_var = cursor.var(oracledb.NUMBER) + dict_arg = dict(value=simple_var) + result = cursor.execute("begin :value := 10; end;", dict_arg) + assert result is None + assert simple_var.getvalue() == 10 + + +def test_3905(cursor, test_env): + "3905 - test executing a statement with both a dict and keyword args" + simple_var = cursor.var(oracledb.NUMBER) + dict_arg = dict(value=simple_var) + with test_env.assert_raises_full_code("DPY-2005"): + cursor.execute("begin :value := 15; end;", dict_arg, value=simple_var) + + +def test_3906(cursor): + "3906 - test executing a statement and then changing the array size" + cursor.execute("select IntCol from TestNumbers") + cursor.arraysize = 20 + assert len(cursor.fetchall()) == 10 + + +def test_3907(cursor, test_env): + "3907 - test that 
subsequent executes succeed after bad execute" + sql = "begin raise_application_error(-20000, 'this); end;" + with test_env.assert_raises_full_code("DPY-2041"): + cursor.execute(sql) + cursor.execute("begin null; end;") + + +def test_3908(cursor, test_env): + "3908 - test that subsequent fetches fail after bad execute" + with test_env.assert_raises_full_code("ORA-00904"): + cursor.execute("select y from dual") + with test_env.assert_raises_full_code("DPY-1003"): + cursor.fetchall() + + +def test_3909(cursor, test_env): + "3909 - test executing a statement with an incorrect named bind" + sql = "select * from TestStrings where IntCol = :value" + with test_env.assert_raises_full_code("DPY-4008", "ORA-01036"): + cursor.execute(sql, value2=3) + + +def test_3910(cursor): + "3910 - test executing a statement with named binds" + result = cursor.execute( + """ + select * + from TestNumbers + where IntCol = :value1 and LongIntCol = :value2 + """, + value1=1, + value2=38, + ) + assert len(result.fetchall()) == 1 + + +def test_3911(cursor, test_env): + "3911 - test executing a statement with an incorrect positional bind" + sql = """ select * from TestNumbers - where IntCol = :value and LongIntCol = :value2 - """, - [1, 38], - ) - self.assertEqual(len(result.fetchall()), 1) - - def test_3913(self): - "3913 - test executing a statement after rebinding a named bind" - statement = "begin :value := :value2 + 5; end;" - simple_var = self.cursor.var(oracledb.NUMBER) - simple_var2 = self.cursor.var(oracledb.NUMBER) - simple_var2.setvalue(0, 5) - result = self.cursor.execute( - statement, value=simple_var, value2=simple_var2 - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 10) - - simple_var = self.cursor.var(oracledb.NATIVE_FLOAT) - simple_var2 = self.cursor.var(oracledb.NATIVE_FLOAT) - simple_var2.setvalue(0, 10) - result = self.cursor.execute( - statement, value=simple_var, value2=simple_var2 - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 15) - - def test_3914(self): - "3914 - test executing a PL/SQL statement with duplicate binds" - simple_var = self.cursor.var(oracledb.NUMBER) - simple_var.setvalue(0, 5) - result = self.cursor.execute( + where IntCol = :value and LongIntCol = :value2""" + with test_env.assert_raises_full_code("DPY-4009", "ORA-01008"): + cursor.execute(sql, [3]) + + +def test_3912(cursor): + "3912 - test executing a statement with positional binds" + result = cursor.execute( + """ + select * + from TestNumbers + where IntCol = :value and LongIntCol = :value2 + """, + [1, 38], + ) + assert len(result.fetchall()) == 1 + + +def test_3913(cursor): + "3913 - test executing a statement after rebinding a named bind" + statement = "begin :value := :value2 + 5; end;" + simple_var = cursor.var(oracledb.NUMBER) + simple_var2 = cursor.var(oracledb.NUMBER) + simple_var2.setvalue(0, 5) + result = cursor.execute(statement, value=simple_var, value2=simple_var2) + assert result is None + assert simple_var.getvalue() == 10 + + simple_var = cursor.var(oracledb.NATIVE_FLOAT) + simple_var2 = cursor.var(oracledb.NATIVE_FLOAT) + simple_var2.setvalue(0, 10) + result = cursor.execute(statement, value=simple_var, value2=simple_var2) + assert result is None + assert simple_var.getvalue() == 15 + + +def test_3914(cursor): + "3914 - test executing a PL/SQL statement with duplicate binds" + simple_var = cursor.var(oracledb.NUMBER) + simple_var.setvalue(0, 5) + result = cursor.execute( + """ + begin + :value := :value + 5; + end; + """, + value=simple_var, + ) + assert 
result is None + assert simple_var.getvalue() == 10 + + +def test_3915(cursor): + "3915 - test executing a PL/SQL statement with duplicate binds" + simple_var = cursor.var(oracledb.NUMBER) + simple_var.setvalue(0, 5) + cursor.execute("begin :value := :value + 5; end;", [simple_var]) + assert simple_var.getvalue() == 10 + + +def test_3916(cursor, test_env): + "3916 - test executing a statement with an incorrect number of binds" + statement = "begin :value := :value2 + 5; end;" + var = cursor.var(oracledb.NUMBER) + var.setvalue(0, 5) + with test_env.assert_raises_full_code("DPY-4010", "ORA-01008"): + cursor.execute(statement) + with test_env.assert_raises_full_code("DPY-4010", "ORA-01008"): + cursor.execute(statement, value=var) + with test_env.assert_raises_full_code("DPY-4008", "ORA-01036"): + cursor.execute(statement, value=var, value2=var, value3=var) + + +def test_3917(conn, cursor): + "3917 - change in size on subsequent binds does not use optimised path" + cursor.execute("truncate table TestTempTable") + data = [(1, "Test String #1"), (2, "ABC" * 100)] + for row in data: + cursor.execute( """ - begin - :value := :value + 5; - end; + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) """, - value=simple_var, + row, ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 10) - - def test_3915(self): - "3915 - test executing a PL/SQL statement with duplicate binds" - simple_var = self.cursor.var(oracledb.NUMBER) - simple_var.setvalue(0, 5) - self.cursor.execute("begin :value := :value + 5; end;", [simple_var]) - self.assertEqual(simple_var.getvalue(), 10) - - def test_3916(self): - "3916 - test executing a statement with an incorrect number of binds" - statement = "begin :value := :value2 + 5; end;" - var = self.cursor.var(oracledb.NUMBER) - var.setvalue(0, 5) - with self.assertRaisesFullCode("DPY-4010", "ORA-01008"): - self.cursor.execute(statement) - with self.assertRaisesFullCode("DPY-4010", "ORA-01008"): - self.cursor.execute(statement, value=var) - with self.assertRaisesFullCode("DPY-4008", "ORA-01036"): - self.cursor.execute(statement, value=var, value2=var, value3=var) - - def test_3917(self): - "3917 - change in size on subsequent binds does not use optimised path" - self.cursor.execute("truncate table TestTempTable") - data = [(1, "Test String #1"), (2, "ABC" * 100)] - for row in data: - self.cursor.execute( + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == data + + +def test_3918(conn, cursor): + "3918 - test that dml can use optimised path" + data_to_insert = [(i + 1, f"Test String #{i + 1}") for i in range(3)] + cursor.execute("truncate table TestTempTable") + for row in data_to_insert: + with conn.cursor() as other_cursor: + other_cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, row, ) - self.conn.commit() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(self.cursor.fetchall(), data) - - def test_3918(self): - "3918 - test that dml can use optimised path" - data_to_insert = [(i + 1, f"Test String #{i + 1}") for i in range(3)] - self.cursor.execute("truncate table TestTempTable") - for row in data_to_insert: - with self.conn.cursor() as cursor: - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - row, - ) - self.conn.commit() - self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - self.assertEqual(self.cursor.fetchall(), 
data_to_insert) - - def test_3919(self): - "3919 - test calling execute() with invalid parameters" - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - with self.assertRaisesFullCode("DPY-2003"): - self.cursor.execute(sql, "These are not valid parameters") - - def test_3920(self): - "3920 - test calling execute() with mixed binds" - self.cursor.execute("truncate table TestTempTable") - self.cursor.setinputsizes(None, None, str) - data = dict(val1=1, val2="Test String 1") - with self.assertRaisesFullCode("DPY-2006"): - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - returning StringCol1 into :out_var - """, - data, - ) - - def test_3921(self): - "3921 - test binding by name with double quotes" - data = {'"_value1"': 1, '"VaLue_2"': 2, '"3VALUE"': 3} - self.cursor.execute( - 'select :"_value1" + :"VaLue_2" + :"3VALUE" from dual', - data, - ) - (result,) = self.cursor.fetchone() - self.assertEqual(result, 6) - - def test_3922(self): - "3922 - test executing a statement with different input buffer sizes" - sql = """ - insert into TestTempTable (IntCol, StringCol1, StringCol2) - values (:int_col, :str_val1, :str_val2) returning IntCol - into :ret_data""" - values1 = {"int_col": 1, "str_val1": '{"a", "b"}', "str_val2": None} - values2 = {"int_col": 2, "str_val1": None, "str_val2": '{"a", "b"}'} - values3 = {"int_col": 3, "str_val1": '{"a"}', "str_val2": None} - - self.cursor.execute("truncate table TestTempTable") - ret_bind = self.cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) - self.cursor.setinputsizes(ret_data=ret_bind) - self.cursor.execute(sql, values1) - self.assertEqual(ret_bind.values, [["1"]]) - - ret_bind = self.cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) - self.cursor.setinputsizes(ret_data=ret_bind) - self.cursor.execute(sql, values2) - self.assertEqual(ret_bind.values, [["2"]]) - - ret_bind = self.cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) - self.cursor.setinputsizes(ret_data=ret_bind) - self.cursor.execute(sql, values3) - self.assertEqual(ret_bind.values, [["3"]]) - - def test_3923(self): - "3923 - test using rowfactory" - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'Test 1') - """ - ) - self.conn.commit() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - column_names = [col[0] for col in self.cursor.description] - - def rowfactory(*row): - return dict(zip(column_names, row)) - - self.cursor.rowfactory = rowfactory - self.assertEqual(self.cursor.rowfactory, rowfactory) - self.assertEqual( - self.cursor.fetchall(), [{"INTCOL": 1, "STRINGCOL1": "Test 1"}] - ) - - def test_3924(self): - "3924 - test executing same query after setting rowfactory" - self.cursor.execute("truncate table TestTempTable") - data = [(1, "Test 1"), (2, "Test 2")] - self.cursor.executemany( + conn.commit() + cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert cursor.fetchall() == data_to_insert + + +def test_3919(cursor, test_env): + "3919 - test calling execute() with invalid parameters" + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + with test_env.assert_raises_full_code("DPY-2003"): + cursor.execute(sql, "These are not valid parameters") + + +def test_3920(cursor, test_env): + "3920 - test calling execute() with mixed binds" + cursor.execute("truncate table TestTempTable") + cursor.setinputsizes(None, None, str) + data = dict(val1=1, 
val2="Test String 1") + with test_env.assert_raises_full_code("DPY-2006"): + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) + returning StringCol1 into :out_var """, data, ) - self.conn.commit() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - column_names = [col[0] for col in self.cursor.description] - self.cursor.rowfactory = lambda *row: dict(zip(column_names, row)) - results1 = self.cursor.fetchall() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - results2 = self.cursor.fetchall() - self.assertEqual(results1, results2) - - def test_3925(self): - "3925 - test executing different query after setting rowfactory" - self.cursor.execute("truncate table TestTempTable") - data = [(1, "Test 1"), (2, "Test 2")] - self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data, - ) - self.conn.commit() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - column_names = [col[0] for col in self.cursor.description] - self.cursor.rowfactory = lambda *row: dict(zip(column_names, row)) - self.cursor.execute( - """ - select IntCol, StringCol - from TestSTrings - where IntCol between 1 and 3 order by IntCol - """ - ) - expected_data = [(1, "String 1"), (2, "String 2"), (3, "String 3")] - self.assertEqual(self.cursor.fetchall(), expected_data) - - def test_3926(self): - "3926 - test setting rowfactory on a REF cursor" - with self.conn.cursor() as cursor: - sql_function = "pkg_TestRefCursors.TestReturnCursor" - ref_cursor = cursor.callfunc( - sql_function, oracledb.DB_TYPE_CURSOR, [2] - ) - column_names = [col[0] for col in ref_cursor.description] - ref_cursor.rowfactory = lambda *row: dict(zip(column_names, row)) - expected_value = [ - {"INTCOL": 1, "STRINGCOL": "String 1"}, - {"INTCOL": 2, "STRINGCOL": "String 2"}, - ] - self.assertEqual(ref_cursor.fetchall(), expected_value) - - def test_3927(self): - "3927 - test using a subclassed string as bind parameter keys" - - class my_str(str): - pass - - self.cursor.execute("truncate table TestTempTable") - keys = {my_str("str_val"): oracledb.DB_TYPE_VARCHAR} - self.cursor.setinputsizes(**keys) - values = { - my_str("int_val"): 3927, - my_str("str_val"): "3927 - String Value", - } - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - """, - values, - ) - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual( - self.cursor.fetchall(), [(3927, "3927 - String Value")] - ) - - def test_3928(self): - "3928 - test using a sequence of parameters other than a list or tuple" - - class MySeq(collections.abc.Sequence): - def __init__(self, *data): - self.data = data - def __len__(self): - return len(self.data) - def __getitem__(self, index): - return self.data[index] - - values_to_insert = [MySeq(1, "String 1"), MySeq(2, "String 2")] - expected_data = [tuple(value) for value in values_to_insert] - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - """, - values_to_insert, +def test_3921(cursor): + "3921 - test binding by name with double quotes" + data = {'"_value1"': 1, '"VaLue_2"': 2, '"3VALUE"': 3} + cursor.execute( + 'select :"_value1" + :"VaLue_2" + :"3VALUE" from dual', + data, + ) + (result,) = cursor.fetchone() + assert result == 6 + + +def test_3922(cursor): + "3922 - test executing a statement with different input 
buffer sizes" + sql = """ + insert into TestTempTable (IntCol, StringCol1, StringCol2) + values (:int_col, :str_val1, :str_val2) returning IntCol + into :ret_data""" + values1 = {"int_col": 1, "str_val1": '{"a", "b"}', "str_val2": None} + values2 = {"int_col": 2, "str_val1": None, "str_val2": '{"a", "b"}'} + values3 = {"int_col": 3, "str_val1": '{"a"}', "str_val2": None} + + cursor.execute("truncate table TestTempTable") + ret_bind = cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) + cursor.setinputsizes(ret_data=ret_bind) + cursor.execute(sql, values1) + assert ret_bind.values == [["1"]] + + ret_bind = cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) + cursor.setinputsizes(ret_data=ret_bind) + cursor.execute(sql, values2) + assert ret_bind.values == [["2"]] + + ret_bind = cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) + cursor.setinputsizes(ret_data=ret_bind) + cursor.execute(sql, values3) + assert ret_bind.values == [["3"]] + + +def test_3923(conn, cursor): + "3923 - test using rowfactory" + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'Test 1') + """ + ) + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + column_names = [col[0] for col in cursor.description] + + def rowfactory(*row): + return dict(zip(column_names, row)) + + cursor.rowfactory = rowfactory + assert cursor.rowfactory == rowfactory + assert cursor.fetchall() == [{"INTCOL": 1, "STRINGCOL1": "Test 1"}] + + +def test_3924(conn, cursor): + "3924 - test executing same query after setting rowfactory" + cursor.execute("truncate table TestTempTable") + data = [(1, "Test 1"), (2, "Test 2")] + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + column_names = [col[0] for col in cursor.description] + cursor.rowfactory = lambda *row: dict(zip(column_names, row)) + results1 = cursor.fetchall() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + results2 = cursor.fetchall() + assert results1 == results2 + + +def test_3925(conn, cursor): + "3925 - test executing different query after setting rowfactory" + cursor.execute("truncate table TestTempTable") + data = [(1, "Test 1"), (2, "Test 2")] + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + column_names = [col[0] for col in cursor.description] + cursor.rowfactory = lambda *row: dict(zip(column_names, row)) + cursor.execute( + """ + select IntCol, StringCol + from TestSTrings + where IntCol between 1 and 3 order by IntCol + """ + ) + expected_data = [(1, "String 1"), (2, "String 2"), (3, "String 3")] + assert cursor.fetchall() == expected_data + + +def test_3926(conn): + "3926 - test setting rowfactory on a REF cursor" + with conn.cursor() as cursor: + sql_function = "pkg_TestRefCursors.TestReturnCursor" + ref_cursor = cursor.callfunc( + sql_function, oracledb.DB_TYPE_CURSOR, [2] ) - self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchall(), expected_data) - - def test_3929(self): - "3929 - test an output type handler with prefetch > arraysize" - - def type_handler(cursor, metadata): - return cursor.var(metadata.type_code, arraysize=cursor.arraysize) - - self.cursor.arraysize = 2 - 
self.cursor.prefetchrows = 3 - self.cursor.outputtypehandler = type_handler - self.cursor.execute("select level from dual connect by level <= 5") - self.assertEqual( - self.cursor.fetchall(), [(1,), (2,), (3,), (4,), (5,)] - ) - - def test_3930(self): - "3930 - test setinputsizes() but without binding" - self.cursor.setinputsizes(None, int) - sql = "select :1, : 2 from dual" - with self.assertRaisesFullCode("ORA-01008", "DPY-4010"): - self.cursor.execute(sql, []) - - def test_3931(self): - "3931 - test getting FetchInfo attributes" - type_obj = self.conn.gettype("UDT_OBJECT") - varchar_ratio, _ = test_env.get_charset_ratios() - test_values = [ - ( - "select IntCol from TestObjects", - 10, - None, - False, - "INTCOL", - False, - 9, - 0, - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_NUMBER, - ), - ( - "select ObjectCol from TestObjects", - None, - None, - False, - "OBJECTCOL", - True, - None, - None, - type_obj, - oracledb.DB_TYPE_OBJECT, - ), - ( - "select JsonVarchar from TestJsonCols", - 4000, - 4000 * varchar_ratio, - True, - "JSONVARCHAR", - False, - None, - None, - oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_VARCHAR, - ), - ( - "select FLOATCOL from TestNumbers", - 127, - None, - False, - "FLOATCOL", - False, - 126, - -127, - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_NUMBER, - ), + column_names = [col[0] for col in ref_cursor.description] + ref_cursor.rowfactory = lambda *row: dict(zip(column_names, row)) + expected_value = [ + {"INTCOL": 1, "STRINGCOL": "String 1"}, + {"INTCOL": 2, "STRINGCOL": "String 2"}, ] - for ( - sql, - display_size, - internal_size, - is_json, - name, - null_ok, - precision, - scale, - typ, - type_code, - ) in test_values: - self.cursor.execute(sql) - (fetch_info,) = self.cursor.description - self.assertIsInstance(fetch_info, oracledb.FetchInfo) - self.assertEqual(fetch_info.display_size, display_size) - self.assertEqual(fetch_info.internal_size, internal_size) - if test_env.has_server_version(12, 2): - self.assertEqual(fetch_info.is_json, is_json) - self.assertEqual(fetch_info.name, name) - self.assertEqual(fetch_info.null_ok, null_ok) - self.assertEqual(fetch_info.precision, precision) - self.assertEqual(fetch_info.scale, scale) - self.assertEqual(fetch_info.type, typ) - self.assertEqual(fetch_info.type_code, type_code) - self.assertIsNone(fetch_info.vector_dimensions) - self.assertIsNone(fetch_info.vector_format) - - def test_3932(self): - "3932 - test FetchInfo repr() and str()" - self.cursor.execute("select IntCol from TestObjects") - (fetch_info,) = self.cursor.description - self.assertEqual( - str(fetch_info), - "('INTCOL', , 10, None, 9, 0, False)", - ) - self.assertEqual( - repr(fetch_info), - "('INTCOL', , 10, None, 9, 0, False)", - ) - - def test_3933(self): - "3933 - test slicing FetchInfo" - self.cursor.execute("select IntCol from TestObjects") - (fetch_info,) = self.cursor.description - self.assertEqual(fetch_info[1:3], (oracledb.DB_TYPE_NUMBER, 10)) - - def test_3934(self): - "3934 - test rowcount is zero for PL/SQL" - self.cursor.execute("begin null; end;") - self.assertEqual(self.cursor.rowcount, 0) - self.cursor.execute("select user from dual") - self.cursor.fetchall() - self.assertEqual(self.cursor.rowcount, 1) - self.cursor.execute("begin null; end;") - self.assertEqual(self.cursor.rowcount, 0) - - def test_3935(self): - "3935 - test raising no_data_found in PL/SQL" - with self.assertRaisesFullCode("ORA-01403"): - self.cursor.execute("begin raise no_data_found; end;") - - def test_3936(self): - "3936 - test executing an empty statement" - 
with self.assertRaisesFullCode("DPY-2066"): - self.cursor.execute("") - with self.assertRaisesFullCode("DPY-2066"): - self.cursor.execute(" ") - - -if __name__ == "__main__": - test_env.run_test_cases() + assert ref_cursor.fetchall() == expected_value + + +def test_3927(cursor): + "3927 - test using a subclassed string as bind parameter keys" + + class my_str(str): + pass + + cursor.execute("truncate table TestTempTable") + keys = {my_str("str_val"): oracledb.DB_TYPE_VARCHAR} + cursor.setinputsizes(**keys) + values = { + my_str("int_val"): 3927, + my_str("str_val"): "3927 - String Value", + } + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + """, + values, + ) + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [(3927, "3927 - String Value")] + + +def test_3928(cursor): + "3928 - test using a sequence of parameters other than a list or tuple" + + class MySeq(collections.abc.Sequence): + def __init__(self, *data): + self.data = data + + def __len__(self): + return len(self.data) + + def __getitem__(self, index): + return self.data[index] + + values_to_insert = [MySeq(1, "String 1"), MySeq(2, "String 2")] + expected_data = [tuple(value) for value in values_to_insert] + cursor.execute("truncate table TestTempTable") + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + """, + values_to_insert, + ) + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + assert cursor.fetchall() == expected_data + + +def test_3929(cursor): + "3929 - test an output type handler with prefetch > arraysize" + + def type_handler(cursor, metadata): + return cursor.var(metadata.type_code, arraysize=cursor.arraysize) + + cursor.arraysize = 2 + cursor.prefetchrows = 3 + cursor.outputtypehandler = type_handler + cursor.execute("select level from dual connect by level <= 5") + assert cursor.fetchall() == [(1,), (2,), (3,), (4,), (5,)] + + +def test_3930(cursor, test_env): + "3930 - test setinputsizes() but without binding" + cursor.setinputsizes(None, int) + sql = "select :1, : 2 from dual" + with test_env.assert_raises_full_code("ORA-01008", "DPY-4010"): + cursor.execute(sql, []) + + +def test_3931(conn, cursor, test_env): + "3931 - test getting FetchInfo attributes" + type_obj = conn.gettype("UDT_OBJECT") + varchar_ratio, _ = test_env.charset_ratios + test_values = [ + ( + "select IntCol from TestObjects", + 10, + None, + False, + "INTCOL", + False, + 9, + 0, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_NUMBER, + ), + ( + "select ObjectCol from TestObjects", + None, + None, + False, + "OBJECTCOL", + True, + None, + None, + type_obj, + oracledb.DB_TYPE_OBJECT, + ), + ( + "select JsonVarchar from TestJsonCols", + 4000, + 4000 * varchar_ratio, + True, + "JSONVARCHAR", + False, + None, + None, + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_VARCHAR, + ), + ( + "select FLOATCOL from TestNumbers", + 127, + None, + False, + "FLOATCOL", + False, + 126, + -127, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_NUMBER, + ), + ] + for ( + sql, + display_size, + internal_size, + is_json, + name, + null_ok, + precision, + scale, + typ, + type_code, + ) in test_values: + cursor.execute(sql) + (fetch_info,) = cursor.description + assert isinstance(fetch_info, oracledb.FetchInfo) + assert fetch_info.display_size == display_size + assert fetch_info.internal_size == internal_size + if test_env.has_server_version(12, 2): + assert 
fetch_info.is_json == is_json + assert fetch_info.name == name + assert fetch_info.null_ok == null_ok + assert fetch_info.precision == precision + assert fetch_info.scale == scale + assert fetch_info.type == typ + assert fetch_info.type_code == type_code + assert fetch_info.vector_dimensions is None + assert fetch_info.vector_format is None + + +def test_3932(cursor): + "3932 - test FetchInfo repr() and str()" + cursor.execute("select IntCol from TestObjects") + (fetch_info,) = cursor.description + expected = "('INTCOL', , 10, None, 9, 0, False)" + assert str(fetch_info) == expected + assert repr(fetch_info) == expected + + +def test_3933(cursor): + "3933 - test slicing FetchInfo" + cursor.execute("select IntCol from TestObjects") + (fetch_info,) = cursor.description + assert fetch_info[1:3] == (oracledb.DB_TYPE_NUMBER, 10) + + +def test_3934(cursor): + "3934 - test rowcount is zero for PL/SQL" + cursor.execute("begin null; end;") + assert cursor.rowcount == 0 + cursor.execute("select user from dual") + cursor.fetchall() + assert cursor.rowcount == 1 + cursor.execute("begin null; end;") + assert cursor.rowcount == 0 + + +def test_3935(cursor, test_env): + "3935 - test raising no_data_found in PL/SQL" + with test_env.assert_raises_full_code("ORA-01403"): + cursor.execute("begin raise no_data_found; end;") + + +def test_3936(cursor, test_env): + "3936 - test executing an empty statement" + with test_env.assert_raises_full_code("DPY-2066"): + cursor.execute("") + with test_env.assert_raises_full_code("DPY-2066"): + cursor.execute(" ") diff --git a/tests/test_4000_cursor_executemany.py b/tests/test_4000_cursor_executemany.py index f9c1a69a..2db8c8a3 100644 --- a/tests/test_4000_cursor_executemany.py +++ b/tests/test_4000_cursor_executemany.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2024, Oracle and/or its affiliates. +# Copyright (c) 2020, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,241 +29,279 @@ import decimal import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_4000(self): - "4000 - test executing a statement multiple times (named args)" - self.cursor.execute("truncate table TestTempTable") - rows = [{"value": n} for n in range(250)] - self.cursor.arraysize = 100 - self.cursor.executemany( - "insert into TestTempTable (IntCol) values (:value)", - rows, - ) - self.conn.commit() - self.cursor.execute("select count(*) from TestTempTable") - (count,) = self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - def test_4001(self): - "4001 - test executing a statement multiple times (positional args)" - self.cursor.execute("truncate table TestTempTable") - rows = [[n] for n in range(230)] - self.cursor.arraysize = 100 - self.cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", - rows, - ) - self.conn.commit() - self.cursor.execute("select count(*) from TestTempTable") - (count,) = self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - def test_4002(self): - "4002 - test executing a statement multiple times (with prepare)" - self.cursor.execute("truncate table TestTempTable") - rows = [[n] for n in range(225)] - self.cursor.arraysize = 100 - self.cursor.prepare("insert into TestTempTable (IntCol) values (:1)") - self.cursor.executemany(None, rows) - self.conn.commit() - self.cursor.execute("select count(*) from TestTempTable") - (count,) = self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - def test_4003(self): - "4003 - test executing a statement multiple times (with rebind)" - self.cursor.execute("truncate table TestTempTable") - rows = [[n] for n in range(235)] - self.cursor.arraysize = 100 - statement = "insert into TestTempTable (IntCol) values (:1)" - self.cursor.executemany(statement, rows[:50]) - self.cursor.executemany(statement, rows[50:]) - self.conn.commit() - self.cursor.execute("select count(*) from TestTempTable") - (count,) = self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - def test_4004(self): - "4004 - test executing multiple times (with input sizes wrong)" - cursor = self.conn.cursor() - cursor.setinputsizes(oracledb.NUMBER) - data = [[decimal.Decimal("25.8")], [decimal.Decimal("30.0")]] - cursor.executemany("declare t number; begin t := :1; end;", data) - - def test_4005(self): - "4005 - test executing multiple times (with multiple batches)" - self.cursor.execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - self.cursor.executemany(sql, [(1, None), (2, None)]) - self.cursor.executemany(sql, [(3, None), (4, "Testing")]) - - def test_4006(self): - "4006 - test executemany() with various numeric types" - self.cursor.execute("truncate table TestTempTable") - data = [ - (1, 5), - (2, 7.0), - (3, 6.5), - (4, 2**65), - (5, decimal.Decimal("24.5")), - ] - self.cursor.executemany( - "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", - data, - ) - self.cursor.execute( - "select IntCol, NumberCol from TestTempTable order by IntCol" - ) - self.assertEqual(self.cursor.fetchall(), data) - - def test_4007(self): - "4007 - test executing a statement multiple times (with resize)" - self.cursor.execute("truncate table TestTempTable") - rows = [ - (1, "First"), - (2, "Second"), - (3, "Third"), - (4, "Fourth"), - (5, "Fifth"), - (6, 
"Sixth"), - (7, "Seventh and the longest one"), - ] - self.cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - rows, - ) - self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - self.assertEqual(self.cursor.fetchall(), rows) - - def test_4008(self): - "4008 - test executing a statement multiple times (with exception)" - self.cursor.execute("truncate table TestTempTable") - rows = [{"value": n} for n in (1, 2, 3, 2, 5)] - statement = "insert into TestTempTable (IntCol) values (:value)" - with self.assertRaisesFullCode("ORA-00001"): - self.cursor.executemany(statement, rows) - self.assertEqual(self.cursor.rowcount, 3) - - def test_4009(self): - "4009 - test calling executemany() with invalid parameters" - sql = """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2)""" - with self.assertRaisesFullCode("DPY-2004"): - self.cursor.executemany(sql, "These are not valid parameters") - - def test_4010(self): - "4010 - test calling executemany() without any bind parameters" - num_rows = 5 - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( - """ - declare - t_Id number; - begin - select nvl(count(*), 0) + 1 into t_Id - from TestTempTable; - - insert into TestTempTable (IntCol, StringCol1) - values (t_Id, 'Test String ' || t_Id); - end; - """, - num_rows, - ) - self.assertEqual(self.cursor.rowcount, 0) - self.cursor.execute("select count(*) from TestTempTable") - (count,) = self.cursor.fetchone() - self.assertEqual(count, num_rows) - - def test_4011(self): - "4011 - test calling executemany() with binds performed earlier" - num_rows = 9 - self.cursor.execute("truncate table TestTempTable") - var = self.cursor.var(int, arraysize=num_rows) - self.cursor.setinputsizes(var) - self.cursor.executemany( - """ - declare - t_Id number; - begin - select nvl(count(*), 0) + 1 into t_Id - from TestTempTable; - - insert into TestTempTable (IntCol, StringCol1) - values (t_Id, 'Test String ' || t_Id); - - select sum(IntCol) into :1 - from TestTempTable; - end; - """, - num_rows, - ) - self.assertEqual(self.cursor.rowcount, 0) - expected_data = [1, 3, 6, 10, 15, 21, 28, 36, 45] - self.assertEqual(var.values, expected_data) - - def test_4012(self): - "4012 - test executing plsql statements multiple times (with binds)" - var = self.cursor.var(int, arraysize=5) - self.cursor.setinputsizes(var) - data = [[25], [30], [None], [35], [None]] - exepected_data = [25, 30, None, 35, None] - self.cursor.executemany("declare t number; begin t := :1; end;", data) - self.assertEqual(var.values, exepected_data) - - def test_4013(self): - "4013 - test executemany with incorrect parameters" - with self.assertRaisesFullCode("DPY-2004"): - self.cursor.executemany("select :1 from dual", [1]) - - def test_4014(self): - "4014 - test executemany with mixed binds (pos first)" - rows = [["test"], {"value": 1}] - with self.assertRaisesFullCode("DPY-2006"): - self.cursor.executemany("select :1 from dual", rows) - - def test_4015(self): - "4015 - test executemany with mixed binds (name first)" - rows = [{"value": 1}, ["test"]] - with self.assertRaisesFullCode("DPY-2006"): - self.cursor.executemany("select :value from dual", rows) - - def test_4016(self): - "4016 - test executemany() with a pl/sql statement with dml returning" - num_rows = 5 - self.cursor.execute("truncate table TestTempTable") - out_var = self.cursor.var(oracledb.NUMBER, arraysize=5) - self.cursor.setinputsizes(out_var) - self.cursor.executemany( - """ - declare 
- t_Id number; - begin - select nvl(count(*), 0) + 1 into t_Id - from TestTempTable; - - insert into TestTempTable (IntCol, StringCol1) - values (t_Id, 'Test String ' || t_Id) - returning IntCol into :out_bind; - end; - """, - num_rows, - ) - self.assertEqual(out_var.values, [1, 2, 3, 4, 5]) - - def test_4017(self): - "4017 - test executemany() with pl/sql in binds and out binds" - self.cursor.execute("truncate table TestTempTable") - values = [5, 8, 17, 24, 6] - data = [(i, f"Test {i}") for i in values] - out_bind = self.cursor.var(oracledb.NUMBER, arraysize=5) - self.cursor.setinputsizes(None, None, out_bind) - self.cursor.executemany( +import pytest + + +@pytest.fixture +def empty_tab(cursor): + cursor.execute("truncate table TestTempTable") + + +def test_4000(conn, cursor, empty_tab): + "4000 - test executing a statement multiple times (named args)" + rows = [{"value": n} for n in range(250)] + cursor.arraysize = 100 + cursor.executemany( + "insert into TestTempTable (IntCol) values (:value)", + rows, + ) + conn.commit() + cursor.execute("select count(*) from TestTempTable") + (count,) = cursor.fetchone() + assert count == len(rows) + + +def test_4001(conn, cursor, empty_tab): + "4001 - test executing a statement multiple times (positional args)" + rows = [[n] for n in range(230)] + cursor.arraysize = 100 + cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", + rows, + ) + conn.commit() + cursor.execute("select count(*) from TestTempTable") + (count,) = cursor.fetchone() + assert count == len(rows) + + +def test_4002(conn, cursor, empty_tab): + "4002 - test executing a statement multiple times (with prepare)" + rows = [[n] for n in range(225)] + cursor.arraysize = 100 + cursor.prepare("insert into TestTempTable (IntCol) values (:1)") + cursor.executemany(None, rows) + conn.commit() + cursor.execute("select count(*) from TestTempTable") + (count,) = cursor.fetchone() + assert count == len(rows) + + +def test_4003(conn, cursor, empty_tab): + "4003 - test executing a statement multiple times (with rebind)" + rows = [[n] for n in range(235)] + cursor.arraysize = 100 + statement = "insert into TestTempTable (IntCol) values (:1)" + cursor.executemany(statement, rows[:50]) + cursor.executemany(statement, rows[50:]) + conn.commit() + cursor.execute("select count(*) from TestTempTable") + (count,) = cursor.fetchone() + assert count == len(rows) + + +def test_4004(conn): + "4004 - test executing multiple times (with input sizes wrong)" + cursor = conn.cursor() + cursor.setinputsizes(oracledb.NUMBER) + data = [[decimal.Decimal("25.8")], [decimal.Decimal("30.0")]] + cursor.executemany("declare t number; begin t := :1; end;", data) + + +def test_4005(cursor, empty_tab): + "4005 - test executing multiple times (with multiple batches)" + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + cursor.executemany(sql, [(1, None), (2, None)]) + cursor.executemany(sql, [(3, None), (4, "Testing")]) + + +def test_4006(cursor, empty_tab): + "4006 - test executemany() with various numeric types" + data = [ + (1, 5), + (2, 7.0), + (3, 6.5), + (4, 2**65), + (5, decimal.Decimal("24.5")), + ] + cursor.executemany( + "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", + data, + ) + cursor.execute( + "select IntCol, NumberCol from TestTempTable order by IntCol" + ) + assert cursor.fetchall() == data + + +def test_4007(cursor, empty_tab): + "4007 - test executing a statement multiple times (with resize)" + rows = [ + (1, "First"), + (2, "Second"), + (3, "Third"), + 
(4, "Fourth"), + (5, "Fifth"), + (6, "Sixth"), + (7, "Seventh and the longest one"), + ] + cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + rows, + ) + cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert cursor.fetchall() == rows + + +def test_4008(cursor, empty_tab, test_env): + "4008 - test executing a statement multiple times (with exception)" + rows = [{"value": n} for n in (1, 2, 3, 2, 5)] + statement = "insert into TestTempTable (IntCol) values (:value)" + with test_env.assert_raises_full_code("ORA-00001"): + cursor.executemany(statement, rows) + assert cursor.rowcount == 3 + + +def test_4009(cursor, test_env): + "4009 - test calling executemany() with invalid parameters" + sql = """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2)""" + with test_env.assert_raises_full_code("DPY-2004"): + cursor.executemany(sql, "These are not valid parameters") + + +def test_4010(cursor, empty_tab): + "4010 - test calling executemany() without any bind parameters" + num_rows = 5 + cursor.executemany( + """ + declare + t_Id number; + begin + select nvl(count(*), 0) + 1 into t_Id + from TestTempTable; + + insert into TestTempTable (IntCol, StringCol1) + values (t_Id, 'Test String ' || t_Id); + end; + """, + num_rows, + ) + assert cursor.rowcount == 0 + cursor.execute("select count(*) from TestTempTable") + (count,) = cursor.fetchone() + assert count == num_rows + + +def test_4011(cursor, empty_tab): + "4011 - test calling executemany() with binds performed earlier" + num_rows = 9 + var = cursor.var(int, arraysize=num_rows) + cursor.setinputsizes(var) + cursor.executemany( + """ + declare + t_Id number; + begin + select nvl(count(*), 0) + 1 into t_Id + from TestTempTable; + + insert into TestTempTable (IntCol, StringCol1) + values (t_Id, 'Test String ' || t_Id); + + select sum(IntCol) into :1 + from TestTempTable; + end; + """, + num_rows, + ) + assert cursor.rowcount == 0 + expected_data = [1, 3, 6, 10, 15, 21, 28, 36, 45] + assert var.values == expected_data + + +def test_4012(cursor): + "4012 - test executing plsql statements multiple times (with binds)" + var = cursor.var(int, arraysize=5) + cursor.setinputsizes(var) + data = [[25], [30], [None], [35], [None]] + exepected_data = [25, 30, None, 35, None] + cursor.executemany("declare t number; begin t := :1; end;", data) + assert var.values == exepected_data + + +def test_4013(cursor, test_env): + "4013 - test executemany with incorrect parameters" + with test_env.assert_raises_full_code("DPY-2004"): + cursor.executemany("select :1 from dual", [1]) + + +def test_4014(cursor, test_env): + "4014 - test executemany with mixed binds (pos first)" + rows = [["test"], {"value": 1}] + with test_env.assert_raises_full_code("DPY-2006"): + cursor.executemany("select :1 from dual", rows) + + +def test_4015(cursor, test_env): + "4015 - test executemany with mixed binds (name first)" + rows = [{"value": 1}, ["test"]] + with test_env.assert_raises_full_code("DPY-2006"): + cursor.executemany("select :value from dual", rows) + + +def test_4016(cursor, empty_tab): + "4016 - test executemany() with a pl/sql statement with dml returning" + num_rows = 5 + out_var = cursor.var(oracledb.NUMBER, arraysize=5) + cursor.setinputsizes(out_var) + cursor.executemany( + """ + declare + t_Id number; + begin + select nvl(count(*), 0) + 1 into t_Id + from TestTempTable; + + insert into TestTempTable (IntCol, StringCol1) + values (t_Id, 'Test String ' || t_Id) + returning IntCol into 
:out_bind; + end; + """, + num_rows, + ) + assert out_var.values == [1, 2, 3, 4, 5] + + +def test_4017(cursor, empty_tab): + "4017 - test executemany() with pl/sql in binds and out binds" + values = [5, 8, 17, 24, 6] + data = [(i, f"Test {i}") for i in values] + out_bind = cursor.var(oracledb.NUMBER, arraysize=5) + cursor.setinputsizes(None, None, out_bind) + cursor.executemany( + """ + begin + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol into :out_bind; + end; + """, + data, + ) + assert out_bind.values == values + + +def test_4018(cursor, empty_tab): + "4018 - test executemany() with pl/sql outbinds" + out_bind = cursor.var(oracledb.NUMBER, arraysize=5) + cursor.setinputsizes(out_bind) + cursor.executemany("begin :out_var := 5; end;", 5) + assert out_bind.values == [5, 5, 5, 5, 5] + + +def test_4019(cursor): + "4019 - test re-executemany() with pl/sql in binds and out binds" + values = [5, 8, 17, 24, 6] + data = [(i, f"Test {i}") for i in values] + for i in range(2): + cursor.execute("truncate table TestTempTable") + out_bind = cursor.var(oracledb.NUMBER, arraysize=len(values)) + cursor.setinputsizes(None, None, out_bind) + cursor.executemany( """ begin insert into TestTempTable (IntCol, StringCol1) @@ -273,181 +311,152 @@ def test_4017(self): """, data, ) - self.assertEqual(out_bind.values, values) - - def test_4018(self): - "4018 - test executemany() with pl/sql outbinds" - self.cursor.execute("truncate table TestTempTable") - out_bind = self.cursor.var(oracledb.NUMBER, arraysize=5) - self.cursor.setinputsizes(out_bind) - self.cursor.executemany("begin :out_var := 5; end;", 5) - self.assertEqual(out_bind.values, [5, 5, 5, 5, 5]) - - def test_4019(self): - "4019 - test re-executemany() with pl/sql in binds and out binds" - values = [5, 8, 17, 24, 6] - data = [(i, f"Test {i}") for i in values] - for i in range(2): - self.cursor.execute("truncate table TestTempTable") - out_bind = self.cursor.var(oracledb.NUMBER, arraysize=len(values)) - self.cursor.setinputsizes(None, None, out_bind) - self.cursor.executemany( - """ - begin - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol into :out_bind; - end; - """, - data, - ) - self.assertEqual(out_bind.values, values) - - def test_4020(self): - "4020 - test PL/SQL statement with single row bind" - value = 4020 - var = self.cursor.var(int) - self.cursor.executemany("begin :1 := :2; end;", [[var, value]]) - self.assertEqual(var.values, [value]) - - def test_4021(self): - "4021 - test deferral of type assignment" - self.cursor.execute("truncate table TestTempTable") - data = [(1, None), (2, 25)] - self.cursor.executemany( - """ - insert into TestTempTable - (IntCol, NumberCol) - values (:1, :2) - """, - data, - ) - self.conn.commit() - self.cursor.execute( - """ - select IntCol, NumberCol - from TestTempTable - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchall(), data) - - def test_4022(self): - "4022 - test PL/SQL with a lerge number of binds" - parts = [] - bind_names = [] - all_bind_values = [] - out_binds = [] - for i in range(5): - all_bind_values.append([]) - for i in range(350): - n = len(parts) + 1 - bind_names.extend([f"v_out_{n}_0", f"a_{n}", f"b_{n}", f"c_{n}"]) - parts.append(f":v_out{n} := :a_{n} + :b_{n} + :c_{n};") - out_binds.append( - self.cursor.var(int, arraysize=len(all_bind_values)) - ) - for j, bind_values in enumerate(all_bind_values): - bind_values.extend( - [out_binds[-1], n * 1 + j, n * 2 + j, n * 3 + j] - ) - lf 
= "\n" - sql = f"begin{lf}{lf.join(parts)}{lf}end;" - self.cursor.executemany(sql, all_bind_values) - init_val = 6 - for var in out_binds: - expected_values = [ - init_val, - init_val + 3, - init_val + 6, - init_val + 9, - init_val + 12, - ] - self.assertEqual(var.values, expected_values) - init_val += 6 - - def test_4023(self): - "3901 - test executing a None statement" - cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-2001"): - cursor.executemany(None, [(1,), (2,)]) - - def test_4024(self): + assert out_bind.values == values + + +def test_4020(cursor): + "4020 - test PL/SQL statement with single row bind" + value = 4020 + var = cursor.var(int) + cursor.executemany("begin :1 := :2; end;", [[var, value]]) + assert var.values == [value] + + +def test_4021(conn, cursor, empty_tab): + "4021 - test deferral of type assignment" + data = [(1, None), (2, 25)] + cursor.executemany( """ - 4024 - test executemany with number of iterations - (previous bind values) + insert into TestTempTable + (IntCol, NumberCol) + values (:1, :2) + """, + data, + ) + conn.commit() + cursor.execute( """ - data = [(2,), (3,), (4,)] - for num_iterations in range(1, len(data) + 1): - self.cursor.execute("truncate table TestLongs") - self.cursor.executemany( - "insert into TestLongs (IntCol) values (:1)", data - ) - self.cursor.executemany(None, num_iterations) - self.cursor.execute("select IntCol from TestLongs") - expected_value = data + data[:num_iterations] - self.assertEqual(self.cursor.fetchall(), expected_value) - - def test_4025(self): - "4025 - test executemany with empty lists and number of iterations" - values = [[] for _ in range(5)] - for num_iterations in (4, 6): - self.cursor.execute("truncate table TestLongs") - self.cursor.executemany( - "insert into TestLongs (IntCol) values (67)", - values, - ) - self.cursor.executemany(None, num_iterations) - self.cursor.execute("select IntCol from TestLongs") - expected_value = [(67,)] * (len(values) + num_iterations) - self.assertEqual(self.cursor.fetchall(), expected_value) - - def test_4026(self): - "4026 - test executemany error offset returned correctly" - data = [(i,) for i in range(1, 11)] - with self.assertRaises(oracledb.Error) as cm: - self.cursor.executemany( - """ - declare - t_Value number; - begin - t_Value := 10 / (4 - :1); - end; - """, - data, + select IntCol, NumberCol + from TestTempTable + order by IntCol + """ + ) + assert cursor.fetchall() == data + + +def test_4022(cursor): + "4022 - test PL/SQL with a lerge number of binds" + parts = [] + bind_names = [] + all_bind_values = [] + out_binds = [] + for i in range(5): + all_bind_values.append([]) + for i in range(350): + n = len(parts) + 1 + bind_names.extend([f"v_out_{n}_0", f"a_{n}", f"b_{n}", f"c_{n}"]) + parts.append(f":v_out{n} := :a_{n} + :b_{n} + :c_{n};") + out_binds.append(cursor.var(int, arraysize=len(all_bind_values))) + for j, bind_values in enumerate(all_bind_values): + bind_values.extend( + [out_binds[-1], n * 1 + j, n * 2 + j, n * 3 + j] ) - (error_obj,) = cm.exception.args - self.assertEqual(error_obj.offset, 3) - - def test_4027(self): - "4027 - test executemany with number of iterations too small" - data = [[1], [2], [3]] - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( + lf = "\n" + sql = f"begin{lf}{lf.join(parts)}{lf}end;" + cursor.executemany(sql, all_bind_values) + init_val = 6 + for var in out_binds: + expected_values = [ + init_val, + init_val + 3, + init_val + 6, + init_val + 9, + init_val + 12, + ] + assert var.values == 
expected_values + init_val += 6 + + +def test_4023(cursor, test_env): + "3901 - test executing a None statement" + with test_env.assert_raises_full_code("DPY-2001"): + cursor.executemany(None, [(1,), (2,)]) + + +def test_4024(cursor): + """ + 4024 - test executemany with number of iterations + (previous bind values) + """ + data = [(2,), (3,), (4,)] + for num_iterations in range(1, len(data) + 1): + cursor.execute("truncate table TestLongs") + cursor.executemany("insert into TestLongs (IntCol) values (:1)", data) + cursor.executemany(None, num_iterations) + cursor.execute("select IntCol from TestLongs") + expected_value = data + data[:num_iterations] + assert cursor.fetchall() == expected_value + + +def test_4025(cursor): + "4025 - test executemany with empty lists and number of iterations" + values = [[] for _ in range(5)] + for num_iterations in (4, 6): + cursor.execute("truncate table TestLongs") + cursor.executemany( + "insert into TestLongs (IntCol) values (67)", + values, + ) + cursor.executemany(None, num_iterations) + cursor.execute("select IntCol from TestLongs") + expected_value = [(67,)] * (len(values) + num_iterations) + assert cursor.fetchall() == expected_value + + +def test_4026(cursor): + "4026 - test executemany error offset returned correctly" + data = [(i,) for i in range(1, 11)] + with pytest.raises(oracledb.Error) as excinfo: + cursor.executemany( """ declare - t_Value number; + t_Value number; begin - t_Value := :1; + t_Value := 10 / (4 - :1); end; """, data, ) - self.cursor.executemany(None, 2) - with self.assertRaisesFullCode("DPY-2016"): - self.cursor.executemany(None, 4) + (error_obj,) = excinfo.value.args + assert error_obj.offset == 3 - def test_4028(self): - "4028 - test executemany with empty parameter set" - self.cursor.executemany("insert into TestTempTable values (:1)", []) - def test_4029(self): - "4029 - test executemany with an empty statement" - with self.assertRaisesFullCode("DPY-2066"): - self.cursor.executemany("", 5) - with self.assertRaisesFullCode("DPY-2066"): - self.cursor.executemany(" ", 5) - - -if __name__ == "__main__": - test_env.run_test_cases() +def test_4027(cursor, empty_tab, test_env): + "4027 - test executemany with number of iterations too small" + data = [[1], [2], [3]] + cursor.executemany( + """ + declare + t_Value number; + begin + t_Value := :1; + end; + """, + data, + ) + cursor.executemany(None, 2) + with test_env.assert_raises_full_code("DPY-2016"): + cursor.executemany(None, 4) + + +def test_4028(cursor): + "4028 - test executemany with empty parameter set" + cursor.executemany("insert into TestTempTable values (:1)", []) + + +def test_4029(cursor, test_env): + "4029 - test executemany with an empty statement" + with test_env.assert_raises_full_code("DPY-2066"): + cursor.executemany("", 5) + with test_env.assert_raises_full_code("DPY-2066"): + cursor.executemany(" ", 5) diff --git a/tests/test_4100_cursor_callproc.py b/tests/test_4100_cursor_callproc.py index ed1fe1eb..2a2826e5 100644 --- a/tests/test_4100_cursor_callproc.py +++ b/tests/test_4100_cursor_callproc.py @@ -28,436 +28,424 @@ """ import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_4100(self): - "4100 - test executing a stored procedure" - var = self.cursor.var(oracledb.NUMBER) - results = self.cursor.callproc("proc_Test", ("hi", 5, var)) - self.assertEqual(results, ["hi", 10, 2.0]) - - def test_4101(self): - "4101 - test executing a stored procedure with all args keyword args" - inout_value = self.cursor.var(oracledb.NUMBER) - 
inout_value.setvalue(0, 5) - out_value = self.cursor.var(oracledb.NUMBER) - kwargs = dict( - a_InOutValue=inout_value, a_InValue="hi", a_OutValue=out_value - ) - results = self.cursor.callproc("proc_Test", [], kwargs) - self.assertEqual(results, []) - self.assertEqual(inout_value.getvalue(), 10) - self.assertEqual(out_value.getvalue(), 2.0) - - def test_4102(self): - "4102 - test executing a stored procedure with last arg as keyword arg" - out_value = self.cursor.var(oracledb.NUMBER) - kwargs = dict(a_OutValue=out_value) - results = self.cursor.callproc("proc_Test", ("hi", 5), kwargs) - self.assertEqual(results, ["hi", 10]) - self.assertEqual(out_value.getvalue(), 2.0) - - def test_4103(self): - "4103 - test executing a stored procedure, repeated keyword arg" - kwargs = dict( - a_InValue="hi", a_OutValue=self.cursor.var(oracledb.NUMBER) - ) - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callproc("proc_Test", ("hi", 5), kwargs) - - def test_4104(self): - "4104 - test executing a stored procedure without any arguments" - results = self.cursor.callproc("proc_TestNoArgs") - self.assertEqual(results, []) - - def test_4105(self): - "4105 - test executing a stored function" - results = self.cursor.callfunc("func_Test", oracledb.NUMBER, ("hi", 5)) - self.assertEqual(results, 7) - - def test_4106(self): - "4106 - test executing a stored function without any arguments" - results = self.cursor.callfunc("func_TestNoArgs", oracledb.NUMBER) - self.assertEqual(results, 712) - - def test_4107(self): - "4107 - test executing a stored function with wrong parameters" - func_name = "func_Test" - with self.assertRaisesFullCode("DPY-2007"): - self.cursor.callfunc(oracledb.NUMBER, func_name, ("hi", 5)) - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callfunc(func_name, oracledb.NUMBER, ("hi", 5, 7)) - with self.assertRaisesFullCode("DPY-2012"): - self.cursor.callfunc(func_name, oracledb.NUMBER, "hi", 7) - with self.assertRaisesFullCode("ORA-06502"): - self.cursor.callfunc(func_name, oracledb.NUMBER, [5, "hi"]) - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callfunc(func_name, oracledb.NUMBER) - with self.assertRaisesFullCode("DPY-2012"): - self.cursor.callfunc(func_name, oracledb.NUMBER, 5) - - def test_4108(self): - "4108 - test to verify keywordParameters is deprecated" - out_value = self.cursor.var(oracledb.NUMBER) - kwargs = dict(a_OutValue=out_value) - with self.assertRaisesFullCode("DPY-2014"): - self.cursor.callproc( - "proc_Test", ("hi", 5), kwargs, keywordParameters=kwargs - ) - extra_amount = self.cursor.var(oracledb.NUMBER) - extra_amount.setvalue(0, 5) - kwargs = dict(a_ExtraAmount=extra_amount, a_String="hi") - with self.assertRaisesFullCode("DPY-2014"): - self.cursor.callfunc( - "func_Test", - oracledb.NUMBER, - [], - kwargs, - keywordParameters=kwargs, - ) - def test_4109(self): - "4109 - test error for keyword args with invalid type" - kwargs = [5] - with self.assertRaisesFullCode("DPY-2013"): - self.cursor.callproc("proc_Test", [], kwargs) - with self.assertRaisesFullCode("DPY-2013"): - self.cursor.callfunc("func_Test", oracledb.NUMBER, [], kwargs) - - def test_4110(self): - "4110 - test to verify that deprecated keywordParameters works" - extra_amount = self.cursor.var(oracledb.DB_TYPE_NUMBER) - extra_amount.setvalue(0, 5) - kwargs = dict(a_ExtraAmount=extra_amount, a_String="hi") - results = self.cursor.callfunc( - "func_Test", oracledb.DB_TYPE_NUMBER, keywordParameters=kwargs - ) - self.assertEqual(results, 7) - out_value = 
self.cursor.var(oracledb.DB_TYPE_NUMBER) - kwargs = dict(a_OutValue=out_value) - results = self.cursor.callproc( - "proc_Test", ("hi", 5), keywordParameters=kwargs +def test_4100(cursor): + "4100 - test executing a stored procedure" + var = cursor.var(oracledb.NUMBER) + results = cursor.callproc("proc_Test", ("hi", 5, var)) + assert results == ["hi", 10, 2.0] + + +def test_4101(cursor): + "4101 - test executing a stored procedure with all args keyword args" + inout_value = cursor.var(oracledb.NUMBER) + inout_value.setvalue(0, 5) + out_value = cursor.var(oracledb.NUMBER) + kwargs = dict( + a_InOutValue=inout_value, a_InValue="hi", a_OutValue=out_value + ) + results = cursor.callproc("proc_Test", [], kwargs) + assert results == [] + assert inout_value.getvalue() == 10 + assert out_value.getvalue() == 2.0 + + +def test_4102(cursor): + "4102 - test executing a stored procedure with last arg as keyword arg" + out_value = cursor.var(oracledb.NUMBER) + kwargs = dict(a_OutValue=out_value) + results = cursor.callproc("proc_Test", ("hi", 5), kwargs) + assert results == ["hi", 10] + assert out_value.getvalue() == 2.0 + + +def test_4103(cursor, test_env): + "4103 - test executing a stored procedure, repeated keyword arg" + kwargs = dict(a_InValue="hi", a_OutValue=cursor.var(oracledb.NUMBER)) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callproc("proc_Test", ("hi", 5), kwargs) + + +def test_4104(cursor): + "4104 - test executing a stored procedure without any arguments" + results = cursor.callproc("proc_TestNoArgs") + assert results == [] + + +def test_4105(cursor): + "4105 - test executing a stored function" + results = cursor.callfunc("func_Test", oracledb.NUMBER, ("hi", 5)) + assert results == 7 + + +def test_4106(cursor): + "4106 - test executing a stored function without any arguments" + results = cursor.callfunc("func_TestNoArgs", oracledb.NUMBER) + assert results == 712 + + +def test_4107(cursor, test_env): + "4107 - test executing a stored function with wrong parameters" + func_name = "func_Test" + with test_env.assert_raises_full_code("DPY-2007"): + cursor.callfunc(oracledb.NUMBER, func_name, ("hi", 5)) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callfunc(func_name, oracledb.NUMBER, ("hi", 5, 7)) + with test_env.assert_raises_full_code("DPY-2012"): + cursor.callfunc(func_name, oracledb.NUMBER, "hi", 7) + with test_env.assert_raises_full_code("ORA-06502"): + cursor.callfunc(func_name, oracledb.NUMBER, [5, "hi"]) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callfunc(func_name, oracledb.NUMBER) + with test_env.assert_raises_full_code("DPY-2012"): + cursor.callfunc(func_name, oracledb.NUMBER, 5) + + +def test_4108(cursor, test_env): + "4108 - test to verify keywordParameters is deprecated" + out_value = cursor.var(oracledb.NUMBER) + kwargs = dict(a_OutValue=out_value) + with test_env.assert_raises_full_code("DPY-2014"): + cursor.callproc( + "proc_Test", ("hi", 5), kwargs, keywordParameters=kwargs ) - self.assertEqual(results, ["hi", 10]) - self.assertEqual(out_value.getvalue(), 2.0) - - def test_4111(self): - "4111 - test callproc with setinputsizes" - out_value = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - self.cursor.setinputsizes( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value + extra_amount = cursor.var(oracledb.NUMBER) + extra_amount.setvalue(0, 5) + kwargs = dict(a_ExtraAmount=extra_amount, a_String="hi") + with test_env.assert_raises_full_code("DPY-2014"): + cursor.callfunc( + "func_Test", + oracledb.NUMBER, + [], + 
kwargs, + keywordParameters=kwargs, ) - results = self.cursor.callproc("proc_Test2", ("hi", 5, out_value)) - self.assertEqual(results, ["hi", 10, True]) - self.assertTrue(out_value.getvalue()) - def test_4112(self): - "4112 - test callfunc with setinputsizes" - self.cursor.setinputsizes( + +def test_4109(cursor, test_env): + "4109 - test error for keyword args with invalid type" + kwargs = [5] + with test_env.assert_raises_full_code("DPY-2013"): + cursor.callproc("proc_Test", [], kwargs) + with test_env.assert_raises_full_code("DPY-2013"): + cursor.callfunc("func_Test", oracledb.NUMBER, [], kwargs) + + +def test_4110(cursor): + "4110 - test to verify that deprecated keywordParameters works" + extra_amount = cursor.var(oracledb.DB_TYPE_NUMBER) + extra_amount.setvalue(0, 5) + kwargs = dict(a_ExtraAmount=extra_amount, a_String="hi") + results = cursor.callfunc( + "func_Test", oracledb.DB_TYPE_NUMBER, keywordParameters=kwargs + ) + assert results == 7 + + out_value = cursor.var(oracledb.DB_TYPE_NUMBER) + kwargs = dict(a_OutValue=out_value) + results = cursor.callproc("proc_Test", ("hi", 5), keywordParameters=kwargs) + assert results == ["hi", 10] + assert out_value.getvalue() == 2.0 + + +def test_4111(cursor): + "4111 - test callproc with setinputsizes" + out_value = cursor.var(oracledb.DB_TYPE_BOOLEAN) + cursor.setinputsizes( + oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value + ) + results = cursor.callproc("proc_Test2", ("hi", 5, out_value)) + assert results == ["hi", 10, True] + assert out_value.getvalue() + + +def test_4112(cursor): + "4112 - test callfunc with setinputsizes" + cursor.setinputsizes( + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_BOOLEAN, + ) + results = cursor.callfunc("func_Test2", oracledb.NUMBER, ("hi", 5, True)) + assert results == 7 + + +def test_4113(cursor): + "4113 - test callproc with setinputsizes with kwargs" + out_value = cursor.var(oracledb.DB_TYPE_BOOLEAN) + cursor.setinputsizes( + oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value + ) + kwargs = dict(a_OutValue=out_value) + results = cursor.callproc("proc_Test2", ("hi", 5), kwargs) + assert results == ["hi", 10] + assert out_value.getvalue() + + out_value = cursor.var(oracledb.DB_TYPE_BOOLEAN) + cursor.setinputsizes( + oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value + ) + kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) + results = cursor.callproc("proc_Test2", [], kwargs) + assert results == [] + assert out_value.getvalue() + + cursor.setinputsizes( + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_BOOLEAN, + ) + kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) + results = cursor.callproc("proc_Test2", [], kwargs) + assert results == [] + assert out_value.getvalue() + + +def test_4114(cursor, test_env): + "4114 - test callproc with setinputsizes with kwargs in mixed order" + out_value = cursor.var(oracledb.DB_TYPE_BOOLEAN) + cursor.setinputsizes( + oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value + ) + kwargs = dict(a_OutValue=out_value, a_InValue="hi", a_InOutValue=5) + with test_env.assert_raises_full_code("ORA-06550"): + results = cursor.callproc("proc_Test2", keyword_parameters=kwargs) + assert results == [] + assert out_value.getvalue() + + cursor.setinputsizes( + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_BOOLEAN, + ) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callproc("proc_Test2", 
keyword_parameters=kwargs) + + +def test_4115(cursor): + "4115 - test callfunc with setinputsizes with kwargs" + extra_amount = cursor.var(oracledb.DB_TYPE_NUMBER) + extra_amount.setvalue(0, 5) + test_values = [ + (["hi"], dict(a_ExtraAmount=extra_amount, a_Boolean=True)), + ( + [], + dict(a_String="hi", a_ExtraAmount=extra_amount, a_Boolean=True), + ), + ] + for args, kwargs in test_values: + cursor.setinputsizes( oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_BOOLEAN, ) - results = self.cursor.callfunc( - "func_Test2", oracledb.NUMBER, ("hi", 5, True) + results = cursor.callfunc( + "func_Test2", oracledb.DB_TYPE_NUMBER, args, kwargs ) - self.assertEqual(results, 7) - - def test_4113(self): - "4113 - test callproc with setinputsizes with kwargs" - out_value = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - self.cursor.setinputsizes( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value - ) - kwargs = dict(a_OutValue=out_value) - results = self.cursor.callproc("proc_Test2", ("hi", 5), kwargs) - self.assertEqual(results, ["hi", 10]) - self.assertTrue(out_value.getvalue()) - - out_value = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - self.cursor.setinputsizes( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value - ) - kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) - results = self.cursor.callproc("proc_Test2", [], kwargs) - self.assertEqual(results, []) - self.assertTrue(out_value.getvalue()) - - self.cursor.setinputsizes( + assert results == 7 + + +def test_4116(cursor, test_env): + "4116 - test callproc with setinputsizes with extra arguments" + out_value = cursor.var(oracledb.DB_TYPE_BOOLEAN) + test_values = [ + (("hi", 5, out_value), None), + (("hi",), dict(a_InOutValue=5, a_OutValue=out_value)), + ([], dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value)), + ] + for args, kwargs in test_values: + cursor.setinputsizes( oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_BOOLEAN, - ) - kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) - results = self.cursor.callproc("proc_Test2", [], kwargs) - self.assertEqual(results, []) - self.assertTrue(out_value.getvalue()) - - def test_4114(self): - "4114 - test callproc with setinputsizes with kwargs in mixed order" - out_value = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - self.cursor.setinputsizes( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, out_value + oracledb.NUMBER, + out_value, + oracledb.DB_TYPE_VARCHAR, # extra argument ) - kwargs = dict(a_OutValue=out_value, a_InValue="hi", a_InOutValue=5) - with self.assertRaisesFullCode("ORA-06550"): - results = self.cursor.callproc( - "proc_Test2", keyword_parameters=kwargs - ) - self.assertEqual(results, []) - self.assertTrue(out_value.getvalue()) - - self.cursor.setinputsizes( + with test_env.assert_raises_full_code("ORA-01036", "DPY-4009"): + cursor.callproc("proc_Test2", args, kwargs) + + +def test_4117(cursor, test_env): + "4117 - test callfunc with setinputsizes with extra arguments" + extra_amount = cursor.var(oracledb.DB_TYPE_NUMBER) + extra_amount.setvalue(0, 5) + test_values = [ + (["hi", extra_amount], None), + (["hi"], dict(a_ExtraAmount=extra_amount)), + ([], dict(a_ExtraAmount=extra_amount, a_String="hi")), + ] + for args, kwargs in test_values: + cursor.setinputsizes( + oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER, oracledb.DB_TYPE_BOOLEAN, + oracledb.DB_TYPE_VARCHAR, # extra argument ) - with 
self.assertRaisesFullCode("ORA-06550"): - self.cursor.callproc("proc_Test2", keyword_parameters=kwargs) - - def test_4115(self): - "4115 - test callfunc with setinputsizes with kwargs" - extra_amount = self.cursor.var(oracledb.DB_TYPE_NUMBER) - extra_amount.setvalue(0, 5) - test_values = [ - (["hi"], dict(a_ExtraAmount=extra_amount, a_Boolean=True)), - ( - [], - dict( - a_String="hi", a_ExtraAmount=extra_amount, a_Boolean=True - ), - ), - ] - for args, kwargs in test_values: - self.cursor.setinputsizes( - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_BOOLEAN, - ) - results = self.cursor.callfunc( + with test_env.assert_raises_full_code("ORA-01036", "DPY-4009"): + cursor.callfunc( "func_Test2", oracledb.DB_TYPE_NUMBER, args, kwargs ) - self.assertEqual(results, 7) - - def test_4116(self): - "4116 - test callproc with setinputsizes with extra arguments" - out_value = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - test_values = [ - (("hi", 5, out_value), None), - (("hi",), dict(a_InOutValue=5, a_OutValue=out_value)), - ([], dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value)), - ] - for args, kwargs in test_values: - self.cursor.setinputsizes( - oracledb.DB_TYPE_VARCHAR, - oracledb.NUMBER, - out_value, - oracledb.DB_TYPE_VARCHAR, # extra argument - ) - with self.assertRaisesFullCode("ORA-01036", "DPY-4009"): - self.cursor.callproc("proc_Test2", args, kwargs) - - def test_4117(self): - "4117 - test callfunc with setinputsizes with extra arguments" - extra_amount = self.cursor.var(oracledb.DB_TYPE_NUMBER) - extra_amount.setvalue(0, 5) - test_values = [ - (["hi", extra_amount], None), - (["hi"], dict(a_ExtraAmount=extra_amount)), - ([], dict(a_ExtraAmount=extra_amount, a_String="hi")), - ] - for args, kwargs in test_values: - self.cursor.setinputsizes( - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_BOOLEAN, - oracledb.DB_TYPE_VARCHAR, # extra argument - ) - with self.assertRaisesFullCode("ORA-01036", "DPY-4009"): - self.cursor.callfunc( - "func_Test2", oracledb.DB_TYPE_NUMBER, args, kwargs - ) - - def test_4118(self): - "4118 - test callproc with setinputsizes with too few parameters" - out_value = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - - # setinputsizes for 2 args (missed 1 args) - self.cursor.setinputsizes( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER - ) - results = self.cursor.callproc("proc_Test2", ("hi", 5, out_value)) - self.assertEqual(results, ["hi", 10, out_value.getvalue()]) - self.assertTrue(out_value.getvalue()) - # setinputsizes for 2 args (missed 1 kwargs) - self.cursor.setinputsizes( - oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER - ) - kwargs = dict(a_OutValue=out_value) - results = self.cursor.callproc("proc_Test2", ("hi", 5), kwargs) - self.assertEqual(results, ["hi", 10]) - self.assertTrue(out_value.getvalue()) - - # setinputsizes for 1 args (missed 2 args) - self.cursor.setinputsizes(oracledb.DB_TYPE_VARCHAR) - results = self.cursor.callproc("proc_Test2", ("hi", 5, out_value)) - self.assertEqual(results, ["hi", 10, out_value.getvalue()]) - self.assertTrue(out_value.getvalue()) - - # setinputsizes for 1 args (missed 1 args and 1 kwargs) - self.cursor.setinputsizes(oracledb.DB_TYPE_VARCHAR) - kwargs = dict(a_OutValue=out_value) - results = self.cursor.callproc("proc_Test2", ("hi", 5), kwargs) - self.assertEqual(results, ["hi", 10]) - self.assertTrue(out_value.getvalue()) - - # setinputsizes for 2 kwargs (missed 1 kwargs) - self.cursor.setinputsizes( - 
oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER - ) - kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) - results = self.cursor.callproc("proc_Test2", [], kwargs) - self.assertEqual(results, []) - self.assertTrue(out_value.getvalue()) - - def test_4119(self): - """ - 4119 - test callproc with setinputsizes with wrong order of parameters - """ - # setinputsizes for 2 args (missed 1 kwargs) - out_value = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - self.cursor.setinputsizes(bool, oracledb.DB_TYPE_VARCHAR) - kwargs = dict(a_OutValue=out_value) - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callproc("proc_Test2", ["hi", 5], kwargs) - - # setinputsizes for 2 kwargs (missed 1 kwargs) - self.cursor.setinputsizes(bool, oracledb.DB_TYPE_VARCHAR) - kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callproc("proc_Test2", [], kwargs) - - def test_4120(self): - "4120 - test callfunc with setinputsizes with too few parameters" - # setinputsizes for return_type and 1 kwargs (missed 2 kwargs) - bool_var = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - bool_var.setvalue(0, False) - kwargs = dict(a_Boolean=bool_var, a_String="hi", a_ExtraAmount=3) - self.cursor.setinputsizes(oracledb.NUMBER, oracledb.DB_TYPE_VARCHAR) - results = self.cursor.callfunc( - "func_Test2", oracledb.NUMBER, [], kwargs - ) - self.assertEqual(results, -1) - - # setinputsizes for return_type (missed 3 kwargs) - bool_var.setvalue(0, False) - kwargs = dict(a_Boolean=bool_var, a_String="hi", a_ExtraAmount=1) - self.cursor.setinputsizes(oracledb.NUMBER) - results = self.cursor.callfunc( - "func_Test2", oracledb.NUMBER, [], kwargs - ) - self.assertEqual(results, 1) - - # setinputsizes for return_type (missed 3 args) - bool_var.setvalue(0, True) - self.cursor.setinputsizes(oracledb.NUMBER) - results = self.cursor.callfunc( - "func_Test2", oracledb.NUMBER, ["hi", 2, bool_var] - ) - self.assertEqual(results, 4) - - def test_4121(self): - """ - 4121 - test callfunc with setinputsizes with wrong order of parameters - """ - # setinputsizes for 2 args (missed 2 kwargs) - bool_var = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - bool_var.setvalue(0, True) - self.cursor.setinputsizes(oracledb.NUMBER, oracledb.DB_TYPE_BOOLEAN) - kwargs = dict(a_Boolean=bool_var) - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callfunc( - "func_Test2", oracledb.NUMBER, ["hi", bool_var], kwargs - ) - - def test_4122(self): - "4122 - test callfunc with setinputsizes without type for return_type" - # setinputsizes for 1 args and 1 kwargs - bool_var = self.cursor.var(oracledb.DB_TYPE_BOOLEAN) - bool_var.setvalue(0, False) - self.cursor.setinputsizes(oracledb.NUMBER, oracledb.DB_TYPE_BOOLEAN) - kwargs = dict(a_Boolean=bool_var) - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callfunc( - "func_Test2", oracledb.DB_TYPE_NUMBER, ["hi"], kwargs - ) - - # setinputsizes for 2 kwargs (missed 1 kwargs) - bool_var.setvalue(0, False) - kwargs = dict(a_Boolean=bool_var, a_String="hi", a_ExtraAmount=0) - self.cursor.setinputsizes( - oracledb.DB_TYPE_BOOLEAN, oracledb.DB_TYPE_VARCHAR - ) - results = self.cursor.callfunc( - "func_Test2", oracledb.DB_TYPE_NUMBER, [], kwargs - ) - self.assertEqual(results, 2) - # setinputsizes for 2 args and 1 kwargs - bool_var.setvalue(0, False) - self.cursor.setinputsizes( - oracledb.DB_TYPE_BOOLEAN, oracledb.DB_TYPE_NUMBER - ) - kwargs = dict(a_Boolean=bool_var) - results = self.cursor.callfunc( - "func_Test2", 
oracledb.DB_TYPE_NUMBER, ["Bye", 2], kwargs - ) - self.assertEqual(results, 1) - - # setinputsizes for 2 args (missed 1 args) - bool_var.setvalue(0, False) - self.cursor.setinputsizes( - oracledb.DB_TYPE_BOOLEAN, oracledb.DB_TYPE_NUMBER - ) - kwargs = dict(a_Boolean=bool_var) - results = self.cursor.callfunc( - "func_Test2", oracledb.DB_TYPE_NUMBER, ["Light", -1, bool_var] +def test_4118(cursor): + "4118 - test callproc with setinputsizes with too few parameters" + out_value = cursor.var(oracledb.DB_TYPE_BOOLEAN) + + # setinputsizes for 2 args (missed 1 args) + cursor.setinputsizes(oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER) + results = cursor.callproc("proc_Test2", ("hi", 5, out_value)) + assert results == ["hi", 10, out_value.getvalue()] + assert out_value.getvalue() + + # setinputsizes for 2 args (missed 1 kwargs) + cursor.setinputsizes(oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER) + kwargs = dict(a_OutValue=out_value) + results = cursor.callproc("proc_Test2", ("hi", 5), kwargs) + assert results == ["hi", 10] + assert out_value.getvalue() + + # setinputsizes for 1 args (missed 2 args) + cursor.setinputsizes(oracledb.DB_TYPE_VARCHAR) + results = cursor.callproc("proc_Test2", ("hi", 5, out_value)) + assert results == ["hi", 10, out_value.getvalue()] + assert out_value.getvalue() + + # setinputsizes for 1 args (missed 1 args and 1 kwargs) + cursor.setinputsizes(oracledb.DB_TYPE_VARCHAR) + kwargs = dict(a_OutValue=out_value) + results = cursor.callproc("proc_Test2", ("hi", 5), kwargs) + assert results == ["hi", 10] + assert out_value.getvalue() + + # setinputsizes for 2 kwargs (missed 1 kwargs) + cursor.setinputsizes(oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_NUMBER) + kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) + results = cursor.callproc("proc_Test2", [], kwargs) + assert results == [] + assert out_value.getvalue() + + +def test_4119(cursor, test_env): + """ + 4119 - test callproc with setinputsizes with wrong order of parameters + """ + # setinputsizes for 2 args (missed 1 kwargs) + out_value = cursor.var(oracledb.DB_TYPE_BOOLEAN) + cursor.setinputsizes(bool, oracledb.DB_TYPE_VARCHAR) + kwargs = dict(a_OutValue=out_value) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callproc("proc_Test2", ["hi", 5], kwargs) + + # setinputsizes for 2 kwargs (missed 1 kwargs) + cursor.setinputsizes(bool, oracledb.DB_TYPE_VARCHAR) + kwargs = dict(a_InValue="hi", a_InOutValue=5, a_OutValue=out_value) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callproc("proc_Test2", [], kwargs) + + +def test_4120(cursor): + "4120 - test callfunc with setinputsizes with too few parameters" + # setinputsizes for return_type and 1 kwargs (missed 2 kwargs) + bool_var = cursor.var(oracledb.DB_TYPE_BOOLEAN) + bool_var.setvalue(0, False) + kwargs = dict(a_Boolean=bool_var, a_String="hi", a_ExtraAmount=3) + cursor.setinputsizes(oracledb.NUMBER, oracledb.DB_TYPE_VARCHAR) + results = cursor.callfunc("func_Test2", oracledb.NUMBER, [], kwargs) + assert results == -1 + + # setinputsizes for return_type (missed 3 kwargs) + bool_var.setvalue(0, False) + kwargs = dict(a_Boolean=bool_var, a_String="hi", a_ExtraAmount=1) + cursor.setinputsizes(oracledb.NUMBER) + results = cursor.callfunc("func_Test2", oracledb.NUMBER, [], kwargs) + assert results == 1 + + # setinputsizes for return_type (missed 3 args) + bool_var.setvalue(0, True) + cursor.setinputsizes(oracledb.NUMBER) + results = cursor.callfunc( + "func_Test2", oracledb.NUMBER, ["hi", 2, bool_var] + ) + assert results 
== 4 + + +def test_4121(cursor, test_env): + "4121 - test callfunc with setinputsizes with wrong order of parameters" + # setinputsizes for 2 args (missed 2 kwargs) + bool_var = cursor.var(oracledb.DB_TYPE_BOOLEAN) + bool_var.setvalue(0, True) + cursor.setinputsizes(oracledb.NUMBER, oracledb.DB_TYPE_BOOLEAN) + kwargs = dict(a_Boolean=bool_var) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callfunc( + "func_Test2", oracledb.NUMBER, ["hi", bool_var], kwargs ) - self.assertEqual(results, 6) - - def test_4123(self): - "4123 - test executing a procedure with callfunc" - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callfunc( - "proc_Test2", oracledb.NUMBER, ("hello", 3, False) - ) - - def test_4124(self): - "4124 - test executing a function with callproc" - with self.assertRaisesFullCode("ORA-06550"): - self.cursor.callproc("func_Test2", ("hello", 5, True)) - - def test_4125(self): - "4125 - test calling a procedure with a string > 32767 characters" - data = "4125" * 16000 - size_var = self.cursor.var(int) - self.cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) - self.assertEqual(size_var.getvalue(), len(data)) - - def test_4126(self): - "4125 - test calling a procedure with raw data > 32767 bytes" - data = b"4126" * 16250 - size_var = self.cursor.var(int) - self.cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) - self.assertEqual(size_var.getvalue(), len(data)) -if __name__ == "__main__": - test_env.run_test_cases() +def test_4122(cursor, test_env): + "4122 - test callfunc with setinputsizes without type for return_type" + # setinputsizes for 1 args and 1 kwargs + bool_var = cursor.var(oracledb.DB_TYPE_BOOLEAN) + bool_var.setvalue(0, False) + cursor.setinputsizes(oracledb.NUMBER, oracledb.DB_TYPE_BOOLEAN) + kwargs = dict(a_Boolean=bool_var) + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callfunc("func_Test2", oracledb.DB_TYPE_NUMBER, ["hi"], kwargs) + + # setinputsizes for 2 kwargs (missed 1 kwargs) + bool_var.setvalue(0, False) + kwargs = dict(a_Boolean=bool_var, a_String="hi", a_ExtraAmount=0) + cursor.setinputsizes(oracledb.DB_TYPE_BOOLEAN, oracledb.DB_TYPE_VARCHAR) + results = cursor.callfunc( + "func_Test2", oracledb.DB_TYPE_NUMBER, [], kwargs + ) + assert results == 2 + + # setinputsizes for 2 args and 1 kwargs + bool_var.setvalue(0, False) + cursor.setinputsizes(oracledb.DB_TYPE_BOOLEAN, oracledb.DB_TYPE_NUMBER) + kwargs = dict(a_Boolean=bool_var) + results = cursor.callfunc( + "func_Test2", oracledb.DB_TYPE_NUMBER, ["Bye", 2], kwargs + ) + assert results == 1 + + # setinputsizes for 2 args (missed 1 args) + bool_var.setvalue(0, False) + cursor.setinputsizes(oracledb.DB_TYPE_BOOLEAN, oracledb.DB_TYPE_NUMBER) + kwargs = dict(a_Boolean=bool_var) + results = cursor.callfunc( + "func_Test2", oracledb.DB_TYPE_NUMBER, ["Light", -1, bool_var] + ) + assert results == 6 + + +def test_4123(cursor, test_env): + "4123 - test executing a procedure with callfunc" + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callfunc("proc_Test2", oracledb.NUMBER, ("hello", 3, False)) + + +def test_4124(cursor, test_env): + "4124 - test executing a function with callproc" + with test_env.assert_raises_full_code("ORA-06550"): + cursor.callproc("func_Test2", ("hello", 5, True)) + + +def test_4125(cursor): + "4125 - test calling a procedure with a string > 32767 characters" + data = "4125" * 16000 + size_var = cursor.var(int) + cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) + assert size_var.getvalue() == len(data) + + +def 
test_4126(cursor): + "4125 - test calling a procedure with raw data > 32767 bytes" + data = b"4126" * 16250 + size_var = cursor.var(int) + cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) + assert size_var.getvalue() == len(data) diff --git a/tests/test_4200_cursor_scrollable.py b/tests/test_4200_cursor_scrollable.py index 897fb7b5..aa09685d 100644 --- a/tests/test_4200_cursor_scrollable.py +++ b/tests/test_4200_cursor_scrollable.py @@ -26,201 +26,195 @@ 4200 - Module for testing scrollable cursors """ -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_4200(self): - "4200 - test creating a scrollable cursor" - cursor = self.conn.cursor() - self.assertEqual(cursor.scrollable, False) - cursor = self.conn.cursor(True) - self.assertEqual(cursor.scrollable, True) - cursor = self.conn.cursor(scrollable=True) - self.assertEqual(cursor.scrollable, True) - cursor.scrollable = False - self.assertEqual(cursor.scrollable, False) - - def test_4201(self): - "4201 - test scrolling absolute yields an exception (after result set)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - with self.assertRaisesFullCode("DPY-2063"): - cursor.scroll(12, "absolute") - - def test_4202(self): - "4202 - test scrolling absolute (when in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.prefetchrows = 0 - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.fetchmany() - self.assertTrue( - cursor.arraysize > 1, - "array size must exceed 1 for this test to work correctly", - ) + +def test_4200(conn): + "4200 - test creating a scrollable cursor" + cursor = conn.cursor() + assert not cursor.scrollable + cursor = conn.cursor(True) + assert cursor.scrollable + cursor = conn.cursor(scrollable=True) + assert cursor.scrollable + cursor.scrollable = False + assert not cursor.scrollable + + +def test_4201(conn, test_env): + "4201 - test scrolling absolute yields an exception (after result set)" + cursor = conn.cursor(scrollable=True) + cursor.execute("select NumberCol from TestNumbers order by IntCol") + with test_env.assert_raises_full_code("DPY-2063"): + cursor.scroll(12, "absolute") + + +def test_4202(conn): + "4202 - test scrolling absolute (when in buffers)" + cursor = conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchmany() + assert ( + cursor.arraysize > 1 + ), "array size must exceed 1 for this test to work correctly" + cursor.scroll(1, mode="absolute") + (value,) = cursor.fetchone() + assert value == 1.25 + assert cursor.rowcount == 1 + + +def test_4203(conn): + "4203 - test scrolling absolute (when not in buffers)" + cursor = conn.cursor(scrollable=True) + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.scroll(6, mode="absolute") + (value,) = cursor.fetchone() + assert value == 7.5 + assert cursor.rowcount == 6 + + +def test_4204(conn): + "4204 - test scrolling to first row in result set (in buffers)" + cursor = conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchmany() + cursor.scroll(mode="first") + (value,) = cursor.fetchone() + assert value == 1.25 + assert cursor.rowcount == 1 + + +def test_4205(conn): + "4205 - test scrolling to first row in result set (not in buffers)" + cursor = 
conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchmany() + cursor.fetchmany() + cursor.scroll(mode="first") + (value,) = cursor.fetchone() + assert value == 1.25 + assert cursor.rowcount == 1 + + +def test_4206(conn): + "4206 - test scrolling to last row in result set" + cursor = conn.cursor(scrollable=True) + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.scroll(mode="last") + (value,) = cursor.fetchone() + assert value == 12.5 + assert cursor.rowcount == 10 + + +def test_4207(conn, test_env): + "4207 - test scrolling relative yields an exception (after result set)" + cursor = conn.cursor(scrollable=True) + cursor.execute("select NumberCol from TestNumbers order by IntCol") + with test_env.assert_raises_full_code("DPY-2063"): + cursor.scroll(15) + + +def test_4208(conn, test_env): + "4208 - test scrolling relative yields exception (before result set)" + cursor = conn.cursor(scrollable=True) + cursor.execute("select NumberCol from TestNumbers order by IntCol") + with test_env.assert_raises_full_code("DPY-2063"): + cursor.scroll(-5) + + +def test_4209(conn): + "4209 - test scrolling relative (when in buffers)" + cursor = conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchmany() + message = "array size must exceed 1 for this test to work correctly" + assert cursor.arraysize > 1, message + cursor.scroll(2 - cursor.rowcount) + (value,) = cursor.fetchone() + assert value == 2.5 + assert cursor.rowcount == 2 + + +def test_4210(conn): + "4210 - test scrolling relative (when not in buffers)" + cursor = conn.cursor(scrollable=True) + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchmany() + cursor.fetchmany() + message = "array size must exceed 1 for this test to work correctly" + assert cursor.arraysize > 1, message + cursor.scroll(3 - cursor.rowcount) + (value,) = cursor.fetchone() + assert value == 3.75 + assert cursor.rowcount == 3 + + +def test_4211(conn, cursor, test_env): + "4211 - test scrolling when there are no rows" + cursor.execute("truncate table TestTempTable") + cursor = conn.cursor(scrollable=True) + cursor.execute("select * from TestTempTable") + cursor.scroll(mode="last") + assert cursor.fetchall() == [] + cursor.scroll(mode="first") + assert cursor.fetchall() == [] + with test_env.assert_raises_full_code("DPY-2063"): cursor.scroll(1, mode="absolute") - (value,) = cursor.fetchone() - self.assertEqual(value, 1.25) - self.assertEqual(cursor.rowcount, 1) - - def test_4203(self): - "4203 - test scrolling absolute (when not in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.scroll(6, mode="absolute") - (value,) = cursor.fetchone() - self.assertEqual(value, 7.5) - self.assertEqual(cursor.rowcount, 6) - - def test_4204(self): - "4204 - test scrolling to first row in result set (in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.prefetchrows = 0 - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.fetchmany() - cursor.scroll(mode="first") - (value,) = cursor.fetchone() - self.assertEqual(value, 1.25) - self.assertEqual(cursor.rowcount, 1) - - def test_4205(self): - "4205 - test scrolling to first row in result set (not in buffers)" - cursor = 
self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.prefetchrows = 0 - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.fetchmany() - cursor.fetchmany() - cursor.scroll(mode="first") - (value,) = cursor.fetchone() - self.assertEqual(value, 1.25) - self.assertEqual(cursor.rowcount, 1) - - def test_4206(self): - "4206 - test scrolling to last row in result set" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.scroll(mode="last") - (value,) = cursor.fetchone() - self.assertEqual(value, 12.5) - self.assertEqual(cursor.rowcount, 10) - - def test_4207(self): - "4207 - test scrolling relative yields an exception (after result set)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - with self.assertRaisesFullCode("DPY-2063"): - cursor.scroll(15) - - def test_4208(self): - "4208 - test scrolling relative yields exception (before result set)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - with self.assertRaisesFullCode("DPY-2063"): - cursor.scroll(-5) - - def test_4209(self): - "4209 - test scrolling relative (when in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.prefetchrows = 0 - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.fetchmany() - message = "array size must exceed 1 for this test to work correctly" - self.assertTrue(cursor.arraysize > 1, message) - cursor.scroll(2 - cursor.rowcount) - (value,) = cursor.fetchone() - self.assertEqual(value, 2.5) - self.assertEqual(cursor.rowcount, 2) - - def test_4210(self): - "4210 - test scrolling relative (when not in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.fetchmany() - cursor.fetchmany() - message = "array size must exceed 1 for this test to work correctly" - self.assertTrue(cursor.arraysize > 1, message) - cursor.scroll(3 - cursor.rowcount) - (value,) = cursor.fetchone() - self.assertEqual(value, 3.75) - self.assertEqual(cursor.rowcount, 3) - - def test_4211(self): - "4211 - test scrolling when there are no rows" - self.cursor.execute("truncate table TestTempTable") - cursor = self.conn.cursor(scrollable=True) - cursor.execute("select * from TestTempTable") - cursor.scroll(mode="last") - self.assertEqual(cursor.fetchall(), []) - cursor.scroll(mode="first") - self.assertEqual(cursor.fetchall(), []) - with self.assertRaisesFullCode("DPY-2063"): - cursor.scroll(1, mode="absolute") - - def test_4212(self): - "4212 - test scrolling with differing array and fetch array sizes" - self.cursor.execute("truncate table TestTempTable") - for i in range(30): - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, null) - """, - [i + 1], - ) - for arraysize in range(1, 6): - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = arraysize - cursor.execute("select IntCol from TestTempTable order by IntCol") - for num_rows in range(1, arraysize + 1): - cursor.scroll(15, "absolute") - rows = cursor.fetchmany(num_rows) - self.assertEqual(rows[0][0], 15) - self.assertEqual(cursor.rowcount, 15 + num_rows - 1) 
- cursor.scroll(9) - rows = cursor.fetchmany(num_rows) - num_rows_fetched = len(rows) - self.assertEqual(rows[0][0], 15 + num_rows + 8) - self.assertEqual( - cursor.rowcount, 15 + num_rows + num_rows_fetched + 7 - ) - cursor.scroll(-12) - rows = cursor.fetchmany(num_rows) - count = 15 + num_rows + num_rows_fetched - 5 - self.assertEqual(rows[0][0], count) - count = 15 + num_rows + num_rows_fetched + num_rows - 6 - self.assertEqual(cursor.rowcount, count) - - def test_4213(self): - "4213 - test calling scroll() with invalid mode" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.fetchmany() - with self.assertRaisesFullCode("DPY-2009"): - cursor.scroll(mode="middle") - - def test_4214(self): - "4214 - test scroll after fetching all rows" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = 5 - cursor.prefetchrows = 0 - cursor.execute("select NumberCol from TestNumbers order by IntCol") - cursor.fetchall() - cursor.scroll(5, mode="absolute") - (value,) = cursor.fetchone() - self.assertEqual(value, 6.25) - self.assertEqual(cursor.rowcount, 5) - - -if __name__ == "__main__": - test_env.run_test_cases() + + +def test_4212(conn, cursor): + "4212 - test scrolling with differing array and fetch array sizes" + cursor.execute("truncate table TestTempTable") + for i in range(30): + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, null) + """, + [i + 1], + ) + conn.commit() + for arraysize in range(1, 6): + cursor = conn.cursor(scrollable=True) + cursor.arraysize = arraysize + cursor.execute("select IntCol from TestTempTable order by IntCol") + for num_rows in range(1, arraysize + 1): + cursor.scroll(15, "absolute") + rows = cursor.fetchmany(num_rows) + assert rows[0][0] == 15 + assert cursor.rowcount == 15 + num_rows - 1 + cursor.scroll(9) + rows = cursor.fetchmany(num_rows) + num_rows_fetched = len(rows) + assert rows[0][0] == 15 + num_rows + 8 + assert cursor.rowcount == 15 + num_rows + num_rows_fetched + 7 + cursor.scroll(-12) + rows = cursor.fetchmany(num_rows) + count = 15 + num_rows + num_rows_fetched - 5 + assert rows[0][0] == count + count = 15 + num_rows + num_rows_fetched + num_rows - 6 + assert cursor.rowcount == count + + +def test_4213(conn, test_env): + "4213 - test calling scroll() with invalid mode" + cursor = conn.cursor(scrollable=True) + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchmany() + with test_env.assert_raises_full_code("DPY-2009"): + cursor.scroll(mode="middle") + + +def test_4214(conn): + "4214 - test scroll after fetching all rows" + cursor = conn.cursor(scrollable=True) + cursor.arraysize = 5 + cursor.prefetchrows = 0 + cursor.execute("select NumberCol from TestNumbers order by IntCol") + cursor.fetchall() + cursor.scroll(5, mode="absolute") + (value,) = cursor.fetchone() + assert value == 6.25 + assert cursor.rowcount == 5 diff --git a/tests/test_4300_cursor_other.py b/tests/test_4300_cursor_other.py index 57a70e06..a2b59022 100644 --- a/tests/test_4300_cursor_other.py +++ b/tests/test_4300_cursor_other.py @@ -29,996 +29,1032 @@ import decimal import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_4300(self): - "4300 - test preparing a statement and executing it multiple times" - cursor = self.conn.cursor() - self.assertEqual(cursor.statement, None) - statement = "begin :value := :value + 5; end;" - cursor.prepare(statement) - var = 
cursor.var(oracledb.NUMBER) - self.assertEqual(cursor.statement, statement) - var.setvalue(0, 2) - cursor.execute(None, value=var) - self.assertEqual(var.getvalue(), 7) - cursor.execute(None, value=var) - self.assertEqual(var.getvalue(), 12) - cursor.execute("begin :value2 := 3; end;", value2=var) - self.assertEqual(var.getvalue(), 3) - - def test_4301(self): - "4301 - confirm an exception is raised after closing a cursor" - self.cursor.close() - with self.assertRaisesFullCode("DPY-1006"): - self.cursor.execute("select 1 from dual") - - def test_4302(self): - "4302 - test iterators" - self.cursor.execute( - """ - select IntCol - from TestNumbers - where IntCol between 1 and 3 - order by IntCol - """ - ) - rows = [v for v, in self.cursor] - self.assertEqual(rows, [1, 2, 3]) - def test_4303(self): - "4303 - test iterators (with intermediate execute)" - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute( - """ - select IntCol - from TestNumbers - where IntCol between 1 and 3 - order by IntCol - """ - ) - test_iter = iter(self.cursor) - (value,) = next(test_iter) - self.cursor.execute("insert into TestTempTable (IntCol) values (1)") - with self.assertRaisesFullCode("DPY-1003"): - next(test_iter) - - def test_4304(self): - "4304 - test that bindnames() works correctly." - cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-2002"): - cursor.bindnames() - cursor.prepare("begin null; end;") - self.assertEqual(cursor.bindnames(), []) - cursor.prepare("begin :retval := :inval + 5; end;") - self.assertEqual(cursor.bindnames(), ["RETVAL", "INVAL"]) - cursor.prepare("begin :retval := :a * :a + :b * :b; end;") - self.assertEqual(cursor.bindnames(), ["RETVAL", "A", "B"]) - cursor.prepare( - """ - begin - :a := :b + :c + :d + :e + :f + :g + :h + :i + :j + :k + :l; - end; - """ - ) - names = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L"] - self.assertEqual(cursor.bindnames(), names) - cursor.prepare("select :a * :a + :b * :b from dual") - self.assertEqual(cursor.bindnames(), ["A", "B"]) - cursor.prepare("select :value1 + :VaLue_2 from dual") - self.assertEqual(cursor.bindnames(), ["VALUE1", "VALUE_2"]) - cursor.prepare("select :élevé, :fenêtre from dual") - self.assertEqual(cursor.bindnames(), ["ÉLEVÉ", "FENÊTRE"]) - - def test_4305(self): - "4305 - test cursor.setinputsizes() with invalid parameters" - val = decimal.Decimal(5) - with self.assertRaisesFullCode("DPY-2005"): - self.cursor.setinputsizes(val, x=val) - with self.assertRaisesFullCode("DPY-2007"): - self.cursor.setinputsizes(val) - - def test_4306(self): - "4306 - test setting input sizes without any parameters" - self.cursor.setinputsizes() - self.cursor.execute("select :val from dual", val="Test Value") - self.assertEqual(self.cursor.fetchall(), [("Test Value",)]) - - def test_4307(self): - "4307 - test setting input sizes with an empty dictionary" - empty_dict = {} - self.cursor.prepare("select 236 from dual") - self.cursor.setinputsizes(**empty_dict) - self.cursor.execute(None, empty_dict) - self.assertEqual(self.cursor.fetchall(), [(236,)]) - - def test_4308(self): - "4308 - test setting input sizes with an empty list" - empty_list = [] - self.cursor.prepare("select 239 from dual") - self.cursor.setinputsizes(*empty_list) - self.cursor.execute(None, empty_list) - self.assertEqual(self.cursor.fetchall(), [(239,)]) - - def test_4309(self): - "4309 - test setting input sizes with positional args" - var = self.cursor.var(oracledb.STRING, 100) - self.cursor.setinputsizes(None, 5, None, 10, None, 
oracledb.NUMBER) - self.cursor.execute( - """ - begin - :1 := :2 || to_char(:3) || :4 || to_char(:5) || to_char(:6); - end; - """, - [var, "test_", 5, "_second_", 3, 7], - ) - self.assertEqual(var.getvalue(), "test_5_second_37") - - def test_4310(self): - "4310 - test Cursor repr()" - expected_value = f"" - self.assertEqual(str(self.cursor), expected_value) - self.assertEqual(repr(self.cursor), expected_value) - - def test_4311(self): - "4311 - test parsing query statements" - sql = "select LongIntCol from TestNumbers where IntCol = :val" - self.cursor.parse(sql) - self.assertEqual(self.cursor.statement, sql) - self.assertEqual( - self.cursor.description, - [("LONGINTCOL", oracledb.DB_TYPE_NUMBER, 17, None, 16, 0, 0)], - ) - def test_4312(self): - "4312 - test cursor.setoutputsize() does not fail (but does nothing)" - self.cursor.setoutputsize(100, 2) - - def test_4313(self): - "4313 - test cursor.var() with invalid parameters" - with self.assertRaisesFullCode("DPY-2007"): - self.cursor.var(5) - - def test_4314(self): - "4314 - test cursor.arrayvar() with invalid parameters" - with self.assertRaisesFullCode("DPY-2007"): - self.cursor.arrayvar(5, 1) - - def test_4315(self): - "4315 - test binding boolean data without the use of PL/SQL" - self.cursor.execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - self.cursor.execute(sql, (False, "Value should be 0")) - self.cursor.execute(sql, (True, "Value should be 1")) - self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - expected_value = [(0, "Value should be 0"), (1, "Value should be 1")] - self.assertEqual(self.cursor.fetchall(), expected_value) - - def test_4316(self): - "4316 - test using a cursor as a context manager" - with self.cursor as cursor: - cursor.execute("truncate table TestTempTable") - cursor.execute("select count(*) from TestTempTable") - (count,) = cursor.fetchone() - self.assertEqual(count, 0) - with self.assertRaisesFullCode("DPY-1006"): - self.cursor.close() - - def test_4317(self): - "4317 - test that rowcount attribute is reset to zero on query execute" - for num in [0, 1, 1, 0]: - self.cursor.execute("select * from dual where 1 = :s", [num]) - self.cursor.fetchone() - self.assertEqual(self.cursor.rowcount, num) - - def test_4318(self): - "4318 - test that the typename attribute can be passed a value of None" - value_to_set = 5 - var = self.cursor.var(int, typename=None) - var.setvalue(0, value_to_set) - self.assertEqual(var.getvalue(), value_to_set) - - def test_4319(self): - "4319 - test that an object type can be used as type in cursor.var()" - obj_type = self.conn.gettype("UDT_OBJECT") - var = self.cursor.var(obj_type) - self.cursor.callproc( - "pkg_TestBindObject.BindObjectOut", (28, "Bind obj out", var) +def test_4300(cursor): + "4300 - test preparing a statement and executing it multiple times" + assert cursor.statement is None + statement = "begin :value := :value + 5; end;" + cursor.prepare(statement) + var = cursor.var(oracledb.NUMBER) + assert cursor.statement == statement + var.setvalue(0, 2) + cursor.execute(None, value=var) + assert var.getvalue() == 7 + cursor.execute(None, value=var) + assert var.getvalue() == 12 + cursor.execute("begin :value2 := 3; end;", value2=var) + assert var.getvalue() == 3 + + +def test_4301(conn, test_env): + "4301 - confirm an exception is raised after closing a cursor" + cursor = conn.cursor() + cursor.close() + with test_env.assert_raises_full_code("DPY-1006"): + 
cursor.execute("select 1 from dual") + + +def test_4302(cursor): + "4302 - test iterators" + cursor.execute( + """ + select IntCol + from TestNumbers + where IntCol between 1 and 3 + order by IntCol + """ + ) + rows = [v for v, in cursor] + assert rows == [1, 2, 3] + + +def test_4303(cursor, test_env): + "4303 - test iterators (with intermediate execute)" + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + select IntCol + from TestNumbers + where IntCol between 1 and 3 + order by IntCol + """ + ) + test_iter = iter(cursor) + (value,) = next(test_iter) + cursor.execute("insert into TestTempTable (IntCol) values (1)") + with test_env.assert_raises_full_code("DPY-1003"): + next(test_iter) + + +def test_4304(cursor, test_env): + "4304 - test that bindnames() works correctly." + with test_env.assert_raises_full_code("DPY-2002"): + cursor.bindnames() + cursor.prepare("begin null; end;") + assert cursor.bindnames() == [] + cursor.prepare("begin :retval := :inval + 5; end;") + assert cursor.bindnames() == ["RETVAL", "INVAL"] + cursor.prepare("begin :retval := :a * :a + :b * :b; end;") + assert cursor.bindnames() == ["RETVAL", "A", "B"] + cursor.prepare( + """ + begin + :a := :b + :c + :d + :e + :f + :g + :h + :i + :j + :k + :l; + end; + """ + ) + names = ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L"] + assert cursor.bindnames() == names + cursor.prepare("select :a * :a + :b * :b from dual") + assert cursor.bindnames() == ["A", "B"] + cursor.prepare("select :value1 + :VaLue_2 from dual") + assert cursor.bindnames() == ["VALUE1", "VALUE_2"] + cursor.prepare("select :élevé, :fenêtre from dual") + assert cursor.bindnames() == ["ÉLEVÉ", "FENÊTRE"] + + +def test_4305(cursor, test_env): + "4305 - test cursor.setinputsizes() with invalid parameters" + val = decimal.Decimal(5) + with test_env.assert_raises_full_code("DPY-2005"): + cursor.setinputsizes(val, x=val) + with test_env.assert_raises_full_code("DPY-2007"): + cursor.setinputsizes(val) + + +def test_4306(cursor): + "4306 - test setting input sizes without any parameters" + cursor.setinputsizes() + cursor.execute("select :val from dual", val="Test Value") + assert cursor.fetchall() == [("Test Value",)] + + +def test_4307(cursor): + "4307 - test setting input sizes with an empty dictionary" + empty_dict = {} + cursor.prepare("select 236 from dual") + cursor.setinputsizes(**empty_dict) + cursor.execute(None, empty_dict) + assert cursor.fetchall() == [(236,)] + + +def test_4308(cursor): + "4308 - test setting input sizes with an empty list" + empty_list = [] + cursor.prepare("select 239 from dual") + cursor.setinputsizes(*empty_list) + cursor.execute(None, empty_list) + assert cursor.fetchall() == [(239,)] + + +def test_4309(cursor): + "4309 - test setting input sizes with positional args" + var = cursor.var(oracledb.STRING, 100) + cursor.setinputsizes(None, 5, None, 10, None, oracledb.NUMBER) + cursor.execute( + """ + begin + :1 := :2 || to_char(:3) || :4 || to_char(:5) || to_char(:6); + end; + """, + [var, "test_", 5, "_second_", 3, 7], + ) + assert var.getvalue() == "test_5_second_37" + + +def test_4310(conn, cursor): + "4310 - test Cursor repr()" + expected_value = f"" + assert str(cursor) == expected_value + assert repr(cursor) == expected_value + + +def test_4311(cursor): + "4311 - test parsing query statements" + sql = "select LongIntCol from TestNumbers where IntCol = :val" + cursor.parse(sql) + assert cursor.statement == sql + assert cursor.description == [ + ("LONGINTCOL", oracledb.DB_TYPE_NUMBER, 17, None, 
+    ]
+
+
+def test_4312(cursor):
+    "4312 - test cursor.setoutputsize() does not fail (but does nothing)"
+    cursor.setoutputsize(100, 2)
+
+
+def test_4313(cursor, test_env):
+    "4313 - test cursor.var() with invalid parameters"
+    with test_env.assert_raises_full_code("DPY-2007"):
+        cursor.var(5)
+
+
+def test_4314(cursor, test_env):
+    "4314 - test cursor.arrayvar() with invalid parameters"
+    with test_env.assert_raises_full_code("DPY-2007"):
+        cursor.arrayvar(5, 1)
+
+
+def test_4315(cursor):
+    "4315 - test binding boolean data without the use of PL/SQL"
+    cursor.execute("truncate table TestTempTable")
+    sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)"
+    cursor.execute(sql, (False, "Value should be 0"))
+    cursor.execute(sql, (True, "Value should be 1"))
+    cursor.execute(
+        "select IntCol, StringCol1 from TestTempTable order by IntCol"
+    )
+    expected_value = [(0, "Value should be 0"), (1, "Value should be 1")]
+    assert cursor.fetchall() == expected_value
+
+
+def test_4316(conn, test_env):
+    "4316 - test using a cursor as a context manager"
+    with conn.cursor() as cursor:
+        cursor.execute("truncate table TestTempTable")
+        cursor.execute("select count(*) from TestTempTable")
+        (count,) = cursor.fetchone()
+        assert count == 0
+    with test_env.assert_raises_full_code("DPY-1006"):
+        cursor.close()
+
+
+def test_4317(cursor):
+    "4317 - test that rowcount attribute is reset to zero on query execute"
+    for num in [0, 1, 1, 0]:
+        cursor.execute("select * from dual where 1 = :s", [num])
+        cursor.fetchone()
+        assert cursor.rowcount == num
+
+
+def test_4318(cursor):
+    "4318 - test that the typename attribute can be passed a value of None"
+    value_to_set = 5
+    var = cursor.var(int, typename=None)
+    var.setvalue(0, value_to_set)
+    assert var.getvalue() == value_to_set
+
+
+def test_4319(conn, cursor):
+    "4319 - test that an object type can be used as type in cursor.var()"
+    obj_type = conn.gettype("UDT_OBJECT")
+    var = cursor.var(obj_type)
+    cursor.callproc(
+        "pkg_TestBindObject.BindObjectOut", (28, "Bind obj out", var)
+    )
+    obj = var.getvalue()
+    result = cursor.callfunc("pkg_TestBindObject.GetStringRep", str, (obj,))
+    exp = "udt_Object(28, 'Bind obj out', null, null, null, null, null)"
+    assert result == exp
+
+
+def test_4320(cursor):
+    "4320 - test that fetching an XMLType returns a string"
+    int_val = 5
+    label = "IntCol"
+    expected_result = f"<{label}>{int_val}</{label}>"
+    cursor.execute(
+        f"""
+        select XMLElement("{label}", IntCol)
+        from TestStrings
+        where IntCol = :int_val
+        """,
+        int_val=int_val,
+    )
+    (result,) = cursor.fetchone()
+    assert result == expected_result
+
+
+def test_4321(cursor):
+    "4321 - test last rowid"
+
+    # no statement executed: no rowid
+    assert cursor.lastrowid is None
+
+    # DDL statement executed: no rowid
+    cursor.execute("truncate table TestTempTable")
+    assert cursor.lastrowid is None
+
+    # statement prepared: no rowid
+    cursor.prepare("insert into TestTempTable (IntCol) values (:1)")
+    assert cursor.lastrowid is None
+
+    # multiple rows inserted: rowid of last row inserted
+    rows = [(n,) for n in range(225)]
+    cursor.executemany(None, rows)
+    rowid = cursor.lastrowid
+    cursor.execute(
+        """
+        select rowid
+        from TestTempTable
+        where IntCol = :1
+        """,
+        rows[-1],
+    )
+    assert cursor.fetchone()[0] == rowid
+
+    # statement executed but no rows updated: no rowid
+    cursor.execute("delete from TestTempTable where 1 = 0")
+    assert cursor.lastrowid is None
+
+    # statement executed with one row updated: rowid of updated row
+    cursor.execute(
+        """
+ update TestTempTable set StringCol1 = 'Modified' + where IntCol = :1 + """, + rows[-2], + ) + rowid = cursor.lastrowid + cursor.execute( + "select rowid from TestTempTable where IntCol = :1", + rows[-2], + ) + assert cursor.fetchone()[0] == rowid + + # statement executed with many rows updated: rowid of last updated row + cursor.execute( + """ + update TestTempTable set + StringCol1 = 'Row ' || to_char(IntCol) + where IntCol = :1 + """, + rows[-3], + ) + rowid = cursor.lastrowid + cursor.execute( + "select StringCol1 from TestTempTable where rowid = :1", + [rowid], + ) + assert cursor.fetchone()[0] == "Row %s" % rows[-3] + + +def test_4322(conn, round_trip_checker): + "4322 - test prefetch rows" + + # perform simple query and verify only one round trip is needed + with conn.cursor() as cursor: + cursor.execute("select sysdate from dual").fetchall() + assert round_trip_checker.get_value() == 1 + + # set prefetchrows to 1 and verify that two round trips are now needed + with conn.cursor() as cursor: + cursor.prefetchrows = 1 + assert cursor.prefetchrows == 1 + cursor.execute("select sysdate from dual").fetchall() + assert round_trip_checker.get_value() == 2 + + # simple DDL only requires a single round trip + with conn.cursor() as cursor: + cursor.execute("truncate table TestTempTable") + assert round_trip_checker.get_value() == 1 + + # array execution only requires a single round trip + num_rows = 590 + with conn.cursor() as cursor: + data = [(n + 1,) for n in range(num_rows)] + cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", + data, ) - obj = var.getvalue() - result = self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, (obj,) + assert round_trip_checker.get_value() == 1 + + # setting prefetch and array size to 1 requires a round-trip for each + # row + with conn.cursor() as cursor: + cursor.arraysize = 1 + cursor.prefetchrows = 1 + assert cursor.prefetchrows == 1 + cursor.execute("select IntCol from TestTempTable").fetchall() + assert round_trip_checker.get_value() == num_rows + 1 + + # setting prefetch and array size to 300 requires 2 round-trips + with conn.cursor() as cursor: + cursor.arraysize = 300 + cursor.prefetchrows = 300 + assert cursor.prefetchrows == 300 + cursor.execute("select IntCol from TestTempTable").fetchall() + assert round_trip_checker.get_value() == 2 + + +def test_4323(conn, round_trip_checker): + "4323 - test prefetch rows using existing cursor" + + # Set prefetch rows on an existing cursor + num_rows = 590 + with conn.cursor() as cursor: + cursor.execute("truncate table TestTempTable") + assert round_trip_checker.get_value() == 1 + data = [(n + 1,) for n in range(num_rows)] + cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", + data, ) - exp = "udt_Object(28, 'Bind obj out', null, null, null, null, null)" - self.assertEqual(result, exp) - - def test_4320(self): - "4320 - test that fetching an XMLType returns a string" - int_val = 5 - label = "IntCol" - expected_result = f"<{label}>{int_val}" - self.cursor.execute( - f""" - select XMLElement("{label}", IntCol) - from TestStrings - where IntCol = :int_val - """, - int_val=int_val, + assert round_trip_checker.get_value() == 1 + cursor.prefetchrows = 30 + cursor.arraysize = 100 + cursor.execute("select IntCol from TestTempTable").fetchall() + assert round_trip_checker.get_value() == 7 + + +def test_4324(cursor): + "4324 - test parsing plsql statements" + sql = "begin :value := 5; end;" + cursor.parse(sql) + assert cursor.statement == sql + assert 
cursor.description is None + + +def test_4325(cursor): + "4325 - test parsing ddl statements" + sql = "truncate table TestTempTable" + cursor.parse(sql) + assert cursor.statement == sql + assert cursor.description is None + + +def test_4326(cursor): + "4326 - test parsing dml statements" + sql = "insert into TestTempTable (IntCol) values (1)" + cursor.parse(sql) + assert cursor.statement == sql + assert cursor.description is None + + +def test_4327(cursor, test_env): + "4327 - test to verify encodingErrors is deprecated" + errors = "strict" + with test_env.assert_raises_full_code("DPY-2014"): + cursor.var( + oracledb.NUMBER, encoding_errors=errors, encodingErrors=errors ) - (result,) = self.cursor.fetchone() - self.assertEqual(result, expected_result) - def test_4321(self): - "4321 - test last rowid" - # no statement executed: no rowid - self.assertIsNone(self.cursor.lastrowid) +def test_4328(cursor, test_env): + "4328 - test arrays of arrays not supported" + simple_var = cursor.arrayvar(oracledb.NUMBER, 3) + with test_env.assert_raises_full_code("DPY-3005"): + simple_var.setvalue(1, [1, 2, 3]) - # DDL statement executed: no rowid - self.cursor.execute("truncate table TestTempTable") - self.assertIsNone(self.cursor.lastrowid) - # statement prepared: no rowid - self.cursor.prepare("insert into TestTempTable (IntCol) values (:1)") - self.assertIsNone(self.cursor.lastrowid) +def test_4329(cursor, test_env): + "4329 - test cursor.setinputsizes() with invalid list parameters" + with test_env.assert_raises_full_code("DPY-2011"): + cursor.setinputsizes([int, 2, 10]) - # multiple rows inserted: rowid of last row inserted - rows = [(n,) for n in range(225)] - self.cursor.executemany(None, rows) - rowid = self.cursor.lastrowid - self.cursor.execute( - """ - select rowid - from TestTempTable - where IntCol = :1 - """, - rows[-1], - ) - self.assertEqual(self.cursor.fetchone()[0], rowid) - # statement executed but no rows updated: no rowid - self.cursor.execute("delete from TestTempTable where 1 = 0") - self.assertIsNone(self.cursor.lastrowid) +def test_4330(cursor, test_env): + "4330 - test unsupported python type on cursor" + with test_env.assert_raises_full_code("DPY-3003"): + cursor.var(list) - # stetement executed with one row updated: rowid of updated row - self.cursor.execute( - """ - update TestTempTable set StringCol1 = 'Modified' - where IntCol = :1 - """, - rows[-2], - ) - rowid = self.cursor.lastrowid - self.cursor.execute( - "select rowid from TestTempTable where IntCol = :1", - rows[-2], - ) - self.assertEqual(self.cursor.fetchone()[0], rowid) - # statement executed with many rows updated: rowid of last updated row - self.cursor.execute( - """ - update TestTempTable set - StringCol1 = 'Row ' || to_char(IntCol) - where IntCol = :1 - """, - rows[-3], - ) - rowid = self.cursor.lastrowid - self.cursor.execute( - "select StringCol1 from TestTempTable where rowid = :1", - [rowid], - ) - self.assertEqual(self.cursor.fetchone()[0], "Row %s" % rows[-3]) - - def test_4322(self): - "4322 - test prefetch rows" - self.setup_round_trip_checker() - - # perform simple query and verify only one round trip is needed - with self.conn.cursor() as cursor: - cursor.execute("select sysdate from dual").fetchall() - self.assertRoundTrips(1) - - # set prefetchrows to 1 and verify that two round trips are now needed - with self.conn.cursor() as cursor: - cursor.prefetchrows = 1 - self.assertEqual(cursor.prefetchrows, 1) - cursor.execute("select sysdate from dual").fetchall() - self.assertRoundTrips(2) - - # simple 
DDL only requires a single round trip - with self.conn.cursor() as cursor: - cursor.execute("truncate table TestTempTable") - self.assertRoundTrips(1) - - # array execution only requires a single round trip - num_rows = 590 - with self.conn.cursor() as cursor: - data = [(n + 1,) for n in range(num_rows)] - cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", - data, - ) - self.assertRoundTrips(1) +def test_4331(cursor): + "4331 - test binding by name with leading colon" + params = {":arg1": 5} + cursor.execute("select :arg1 from dual", params) + (result,) = cursor.fetchone() + assert result == params[":arg1"] - # setting prefetch and array size to 1 requires a round-trip for each - # row - with self.conn.cursor() as cursor: - cursor.arraysize = 1 - cursor.prefetchrows = 1 - self.assertEqual(cursor.prefetchrows, 1) - cursor.execute("select IntCol from TestTempTable").fetchall() - self.assertRoundTrips(num_rows + 1) - - # setting prefetch and array size to 300 requires 2 round-trips - with self.conn.cursor() as cursor: - cursor.arraysize = 300 - cursor.prefetchrows = 300 - self.assertEqual(cursor.prefetchrows, 300) - cursor.execute("select IntCol from TestTempTable").fetchall() - self.assertRoundTrips(2) - - def test_4323(self): - "4323 - test prefetch rows using existing cursor" - self.setup_round_trip_checker() - - # Set prefetch rows on an existing cursor - num_rows = 590 - with self.conn.cursor() as cursor: - cursor.execute("truncate table TestTempTable") - self.assertRoundTrips(1) - data = [(n + 1,) for n in range(num_rows)] - cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", - data, - ) - self.assertRoundTrips(1) - cursor.prefetchrows = 30 - cursor.arraysize = 100 - cursor.execute("select IntCol from TestTempTable").fetchall() - self.assertRoundTrips(7) - - def test_4324(self): - "4324 - test parsing plsql statements" - sql = "begin :value := 5; end;" - self.cursor.parse(sql) - self.assertEqual(self.cursor.statement, sql) - self.assertIsNone(self.cursor.description) - - def test_4325(self): - "4325 - test parsing ddl statements" - sql = "truncate table TestTempTable" - self.cursor.parse(sql) - self.assertEqual(self.cursor.statement, sql) - self.assertIsNone(self.cursor.description) - - def test_4326(self): - "4326 - test parsing dml statements" - sql = "insert into TestTempTable (IntCol) values (1)" - self.cursor.parse(sql) - self.assertEqual(self.cursor.statement, sql) - self.assertIsNone(self.cursor.description) - - def test_4327(self): - "4327 - test to verify encodingErrors is deprecated" - errors = "strict" - with self.assertRaisesFullCode("DPY-2014"): - self.cursor.var( - oracledb.NUMBER, encoding_errors=errors, encodingErrors=errors - ) - def test_4328(self): - "4328 - test arrays of arrays not supported" - simple_var = self.cursor.arrayvar(oracledb.NUMBER, 3) - with self.assertRaisesFullCode("DPY-3005"): - simple_var.setvalue(1, [1, 2, 3]) - - def test_4329(self): - "4329 - test cursor.setinputsizes() with invalid list parameters" - with self.assertRaisesFullCode("DPY-2011"): - self.cursor.setinputsizes([int, 2, 10]) - - def test_4330(self): - "4330 - test unsupported python type on cursor" - with self.assertRaisesFullCode("DPY-3003"): - self.cursor.var(list) - - def test_4331(self): - "4331 - test binding by name with leading colon" - params = {":arg1": 5} - self.cursor.execute("select :arg1 from dual", params) - (result,) = self.cursor.fetchone() - self.assertEqual(result, params[":arg1"]) - - def test_4332(self): - "4332 - test 
binding mixed null and not null values in a PL/SQL block" - out_vars = [self.cursor.var(str) for i in range(4)] - self.cursor.execute( - """ - begin - :1 := null; - :2 := 'Value 1'; - :3 := null; - :4 := 'Value 2'; - end; - """, - out_vars, - ) - values = [var.getvalue() for var in out_vars] - self.assertEqual(values, [None, "Value 1", None, "Value 2"]) - - def test_4333(self): - "4333 - test excluding statement from statement cache" - num_iters = 10 - sql = "select user from dual" - self.setup_parse_count_checker() - - # with statement cache enabled, only one parse should take place - for i in range(num_iters): - with self.conn.cursor() as cursor: - cursor.execute(sql) - self.assertParseCount(1) - - # with statement cache disabled for the statement, parse count should - # be the same as the number of iterations - for i in range(num_iters): - with self.conn.cursor() as cursor: - cursor.prepare(sql, cache_statement=False) - cursor.execute(None) - self.assertParseCount(num_iters - 1) - - def test_4334(self): - "4334 - test repeated DDL" - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute("insert into TestTempTable (IntCol) values (1)") - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute("insert into TestTempTable (IntCol) values (1)") - - def test_4335(self): - "4335 - test executing SQL with non-ASCII characters" - self.cursor.execute("select 'FÖÖ' from dual") - (result,) = self.cursor.fetchone() - self.assertIn(result, ("FÖÖ", "F¿¿")) - - def test_4336(self): - "4336 - test case sensitivity of unquoted bind names" - self.cursor.execute("select :test from dual", {"TEST": "a"}) - (result,) = self.cursor.fetchone() - self.assertEqual(result, "a") - - def test_4337(self): - "4337 - test case sensitivity of quoted bind names" - with self.assertRaisesFullCode("ORA-01036", "DPY-4008"): - self.cursor.execute('select :"test" from dual', {'"TEST"': "a"}) - - def test_4338(self): - "4338 - test using a reserved keywords as a bind name" - sql = "select :ROWID from dual" - with self.assertRaisesFullCode("ORA-01745"): - self.cursor.parse(sql) - - def test_4339(self): - "4339 - test array size less than prefetch rows" - for i in range(2): - with self.conn.cursor() as cursor: - cursor.arraysize = 1 - cursor.execute("select 1 from dual union select 2 from dual") - self.assertEqual(cursor.fetchall(), [(1,), (2,)]) - - def test_4340(self): - "4340 - test re-executing a query with blob as bytes" - - def type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_BLOB: - return cursor.var(bytes, arraysize=cursor.arraysize) - - self.conn.outputtypehandler = type_handler - blob_data = b"An arbitrary set of blob data for test case 4348" - self.cursor.execute("delete from TestBLOBs") - self.cursor.execute( - "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", - [blob_data], - ) - self.cursor.execute("select IntCol, BlobCol from TestBLOBs") - self.assertEqual(self.cursor.fetchall(), [(1, blob_data)]) +def test_4332(cursor): + "4332 - test binding mixed null and not null values in a PL/SQL block" + out_vars = [cursor.var(str) for i in range(4)] + cursor.execute( + """ + begin + :1 := null; + :2 := 'Value 1'; + :3 := null; + :4 := 'Value 2'; + end; + """, + out_vars, + ) + values = [var.getvalue() for var in out_vars] + assert values == [None, "Value 1", None, "Value 2"] - self.cursor.execute("delete from TestBLOBs") - self.cursor.execute( - "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", - [blob_data], - ) - 
self.cursor.execute("select IntCol, BlobCol from TestBLOBs") - self.assertEqual(self.cursor.fetchall(), [(1, blob_data)]) - - def test_4341(self): - "4341 - test re-executing a statement after raising an error" - sql = "select * from TestFakeTable" - with self.assertRaisesFullCode("ORA-00942"): - self.cursor.execute(sql) - with self.assertRaisesFullCode("ORA-00942"): - self.cursor.execute(sql) - - sql = "insert into TestStrings (StringCol) values (NULL)" - with self.assertRaisesFullCode("ORA-01400"): - self.cursor.execute(sql) - with self.assertRaisesFullCode("ORA-01400"): - self.cursor.execute(sql) - - def test_4342(self): - "4342 - test executing a statement that raises ORA-01007" - with self.conn.cursor() as cursor: - cursor.execute( - """ - create or replace view ora_1007 as - select 1 as SampleNumber, 'String' as SampleString, - 'Another String' as AnotherString - from dual - """ - ) - with self.conn.cursor() as cursor: - cursor.execute("select * from ora_1007") - self.assertEqual( - cursor.fetchone(), (1, "String", "Another String") - ) - with self.conn.cursor() as cursor: - cursor.execute( - """ - create or replace view ora_1007 as - select 1 as SampleNumber, - 'Another String' as AnotherString - from dual - """ - ) - with self.conn.cursor() as cursor: - cursor.execute("select * from ora_1007") - self.assertEqual(cursor.fetchone(), (1, "Another String")) - - def test_4343(self): - "4343 - test updating an empty row" - int_var = self.cursor.var(int) - self.cursor.execute("truncate table TestTempTable") - self.cursor.execute( - """ - begin - update TestTempTable set IntCol = :1 - where StringCol1 = :2 - returning IntCol into :3; - end; - """, - [1, "test string 4352", int_var], - ) - self.assertEqual(int_var.values, [None]) - - def test_4344(self): - "4344 - fetch duplicate data from query in statement cache" - sql = """ - select 'A', 'B', 'C' from dual - union all - select 'A', 'B', 'C' from dual - union all - select 'A', 'B', 'C' from dual""" - expected_data = [("A", "B", "C")] * 3 - with self.conn.cursor() as cursor: - cursor.prefetchrows = 0 - cursor.execute(sql) - self.assertEqual(cursor.fetchall(), expected_data) - with self.conn.cursor() as cursor: - cursor.prefetchrows = 0 + +def test_4333(conn, parse_count_checker): + "4333 - test excluding statement from statement cache" + num_iters = 10 + sql = "select user from dual" + + # with statement cache enabled, only one parse should take place + for i in range(num_iters): + with conn.cursor() as cursor: cursor.execute(sql) - self.assertEqual(cursor.fetchall(), expected_data) + assert parse_count_checker.get_value() == 1 + + # with statement cache disabled for the statement, parse count should + # be the same as the number of iterations + for i in range(num_iters): + with conn.cursor() as cursor: + cursor.prepare(sql, cache_statement=False) + cursor.execute(None) + assert parse_count_checker.get_value() == num_iters - 1 + + +def test_4334(cursor): + "4334 - test repeated DDL" + cursor.execute("truncate table TestTempTable") + cursor.execute("insert into TestTempTable (IntCol) values (1)") + cursor.execute("truncate table TestTempTable") + cursor.execute("insert into TestTempTable (IntCol) values (1)") + + +def test_4335(cursor): + "4335 - test executing SQL with non-ASCII characters" + cursor.execute("select 'FÖÖ' from dual") + (result,) = cursor.fetchone() + assert result in ("FÖÖ", "F¿¿") + + +def test_4336(cursor): + "4336 - test case sensitivity of unquoted bind names" + cursor.execute("select :test from dual", {"TEST": "a"}) + 
(result,) = cursor.fetchone() + assert result == "a" + + +def test_4337(cursor, test_env): + "4337 - test case sensitivity of quoted bind names" + with test_env.assert_raises_full_code("ORA-01036", "DPY-4008"): + cursor.execute('select :"test" from dual', {'"TEST"': "a"}) - def test_4345(self): - "4345 - fetch duplicate data with outconverter" - def out_converter(value): - self.assertIs(type(value), str) - return int(value) +def test_4338(cursor, test_env): + "4338 - test using a reserved keywords as a bind name" + sql = "select :ROWID from dual" + with test_env.assert_raises_full_code("ORA-01745"): + cursor.parse(sql) - def type_handler(cursor, metadata): - if metadata.name == "COL_3": - return cursor.var( - str, arraysize=cursor.arraysize, outconverter=out_converter - ) - self.cursor.outputtypehandler = type_handler - self.cursor.execute( +def test_4339(conn): + "4339 - test array size less than prefetch rows" + for i in range(2): + with conn.cursor() as cursor: + cursor.arraysize = 1 + cursor.execute("select 1 from dual union select 2 from dual") + assert cursor.fetchall() == [(1,), (2,)] + + +def test_4340(conn, cursor): + "4340 - test re-executing a query with blob as bytes" + + def type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_BLOB: + return cursor.var(bytes, arraysize=cursor.arraysize) + + conn.outputtypehandler = type_handler + blob_data = b"An arbitrary set of blob data for test case 4348" + cursor.execute("delete from TestBLOBs") + cursor.execute( + "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", + [blob_data], + ) + cursor.execute("select IntCol, BlobCol from TestBLOBs") + assert cursor.fetchall() == [(1, blob_data)] + + cursor.execute("delete from TestBLOBs") + cursor.execute( + "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", + [blob_data], + ) + cursor.execute("select IntCol, BlobCol from TestBLOBs") + assert cursor.fetchall() == [(1, blob_data)] + + +def test_4341(cursor, test_env): + "4341 - test re-executing a statement after raising an error" + sql = "select * from TestFakeTable" + with test_env.assert_raises_full_code("ORA-00942"): + cursor.execute(sql) + with test_env.assert_raises_full_code("ORA-00942"): + cursor.execute(sql) + + sql = "insert into TestStrings (StringCol) values (NULL)" + with test_env.assert_raises_full_code("ORA-01400"): + cursor.execute(sql) + with test_env.assert_raises_full_code("ORA-01400"): + cursor.execute(sql) + + +def test_4342(conn): + "4342 - test executing a statement that raises ORA-01007" + with conn.cursor() as cursor: + cursor.execute( """ - select 'A' as col_1, 2 as col_2, 3 as col_3 from dual - union all - select 'A' as col_1, 2 as col_2, 3 as col_3 from dual - union all - select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + create or replace view ora_1007 as + select 1 as SampleNumber, 'String' as SampleString, + 'Another String' as AnotherString + from dual """ ) - expected_data = [("A", 2, 3)] * 3 - self.assertEqual(self.cursor.fetchall(), expected_data) - - def test_4346(self): - "4346 - test setinputsizes() with defaults specified" - self.cursor.setinputsizes(None, str) - self.assertIsNone(self.cursor.bindvars[0]) - self.assertIsInstance(self.cursor.bindvars[1], oracledb.Var) - self.cursor.setinputsizes(a=None, b=str) - self.assertIsNone(self.cursor.bindvars.get("a")) - self.assertIsInstance(self.cursor.bindvars["b"], oracledb.Var) - - @test_env.skip_if_drcp() - def test_4347(self): - "4547 - kill connection with open cursor" - admin_conn = test_env.get_admin_connection() 
- conn = test_env.get_connection() - self.assertEqual(conn.is_healthy(), True) - sid, serial = self.get_sid_serial(conn) - with admin_conn.cursor() as admin_cursor: - sql = f"alter system kill session '{sid},{serial}'" - admin_cursor.execute(sql) - with self.assertRaisesFullCode("DPY-4011"): - with conn.cursor() as cursor: - cursor.execute("select user from dual") - self.assertFalse(conn.is_healthy()) - - @test_env.skip_if_drcp() - def test_4348(self): - "4348 - kill connection in cursor context manager" - admin_conn = test_env.get_admin_connection() - conn = test_env.get_connection() - self.assertEqual(conn.is_healthy(), True) - sid, serial = self.get_sid_serial(conn) - with admin_conn.cursor() as admin_cursor: - admin_cursor.execute(f"alter system kill session '{sid},{serial}'") - with self.assertRaisesFullCode("DPY-4011"): - with conn.cursor() as cursor: - cursor.execute("select user from dual") - self.assertEqual(conn.is_healthy(), False) - - def test_4349(self): - "4349 - fetchmany() with and without parameters" - sql_part = "select user from dual" - sql = " union all ".join([sql_part] * 10) - with self.conn.cursor() as cursor: - cursor.arraysize = 6 - cursor.execute(sql) - rows = cursor.fetchmany() - self.assertEqual(len(rows), cursor.arraysize) - cursor.execute(sql) - rows = cursor.fetchmany(size=2) - self.assertEqual(len(rows), 2) - cursor.execute(sql) - rows = cursor.fetchmany(numRows=4) - self.assertEqual(len(rows), 4) - cursor.execute(sql) - with self.assertRaisesFullCode("DPY-2014"): - cursor.fetchmany(size=2, numRows=4) - - def test_4350(self): - "4350 - access cursor.rowcount after closing cursor" - with self.conn.cursor() as cursor: - cursor.execute("select user from dual") - cursor.fetchall() - self.assertEqual(cursor.rowcount, 1) - self.assertEqual(cursor.rowcount, -1) - - def test_4351(self): - "4351 - changing bind type with define needed" - self.cursor.execute("delete from TestClobs") - row_for_1 = (1, "Short value 1") - row_for_56 = (56, "Short value 56") - for data in (row_for_1, row_for_56): - self.cursor.execute( - "insert into TestClobs (IntCol, ClobCol) values (:1, :2)", - data, - ) - sql = "select IntCol, ClobCol from TestClobs where IntCol = :int_col" - with test_env.DefaultsContextManager("fetch_lobs", False): - self.cursor.execute(sql, int_col="1") - self.assertEqual(self.cursor.fetchone(), row_for_1) - self.cursor.execute(sql, int_col="56") - self.assertEqual(self.cursor.fetchone(), row_for_56) - self.cursor.execute(sql, int_col=1) - self.assertEqual(self.cursor.fetchone(), row_for_1) - - def test_4352(self): - "4352 - test calling cursor.parse() twice with the same statement" - self.cursor.execute("truncate table TestTempTable") - data = (4363, "Value for test 4363") - self.cursor.execute( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - data, - ) - sql = "update TestTempTable set StringCol1 = :v where IntCol = :i" - for i in range(2): - self.cursor.parse(sql) - self.cursor.execute(sql, ("Updated value", data[0])) - - def test_4353(self): - "4353 - test addition of column to cached query" - table_name = "test_4365" - try: - self.cursor.execute(f"drop table {table_name}") - except oracledb.DatabaseError: - pass - data = ("val 1", "val 2") - self.cursor.execute(f"create table {table_name} (col1 varchar2(10))") - self.cursor.execute(f"insert into {table_name} values (:1)", [data[0]]) - self.conn.commit() - self.cursor.execute(f"select * from {table_name}") - self.assertEqual(self.cursor.fetchall(), [(data[0],)]) - 
self.cursor.execute(f"alter table {table_name} add col2 varchar2(10)") - self.cursor.execute(f"update {table_name} set col2 = :1", [data[1]]) - self.conn.commit() - self.cursor.execute(f"select * from {table_name}") - self.assertEqual(self.cursor.fetchall(), [data]) - - def test_4354(self): - "4354 - test population of array var with too many elements" - var = self.cursor.arrayvar(int, 3) - with self.assertRaisesFullCode("DPY-2016"): - var.setvalue(0, [1, 2, 3, 4]) - - def test_4355(self): - "4355 - test executemany() with PL/SQL and increasing data lengths" - sql = "begin :1 := length(:2); end;" - var = self.cursor.var(int, arraysize=3) - self.cursor.executemany( - sql, [(var, "one"), (var, "two"), (var, "end")] - ) - self.assertEqual(var.values, [3, 3, 3]) - self.cursor.executemany( - sql, [(var, "three"), (var, "four"), (var, "end")] - ) - self.assertEqual(var.values, [5, 4, 3]) - self.cursor.executemany( - sql, [(var, "five"), (var, "six"), (var, "end")] - ) - self.assertEqual(var.values, [4, 3, 3]) - - def test_4356(self): - "4356 - test cursor.rowcount values for queries" - max_rows = 93 - self.cursor.arraysize = 10 - self.cursor.execute( - "select rownum as id from dual connect by rownum <= :1", - [max_rows], + with conn.cursor() as cursor: + cursor.execute("select * from ora_1007") + assert cursor.fetchone() == (1, "String", "Another String") + with conn.cursor() as cursor: + cursor.execute( + """ + create or replace view ora_1007 as + select 1 as SampleNumber, + 'Another String' as AnotherString + from dual + """ ) - self.assertEqual(self.cursor.rowcount, 0) - batch_num = 1 - while True: - rows = self.cursor.fetchmany() - if not rows: - break - expected_value = min(max_rows, batch_num * self.cursor.arraysize) - self.assertEqual(self.cursor.rowcount, expected_value) - batch_num += 1 - self.cursor.fetchall() - self.assertEqual(self.cursor.rowcount, max_rows) - - def test_4357(self): - "4357 - test bind order for PL/SQL" - self.cursor.execute("delete from TestClobs") - sql = """ - insert into TestClobs (IntCol, CLOBCol, ExtraNumCol1) - values (:1, :2, :3)""" - data = "x" * 9000 - rows = [(1, data, 5), (2, data, 6)] - self.cursor.execute(sql, rows[0]) - plsql = f"begin {sql}; end;" - self.cursor.execute(plsql, rows[1]) - self.conn.commit() - with test_env.DefaultsContextManager("fetch_lobs", False): - self.cursor.execute( - """ - select IntCol, CLOBCol, ExtraNumCol1 - from TestCLOBs - order by IntCol - """ + with conn.cursor() as cursor: + cursor.execute("select * from ora_1007") + assert cursor.fetchone() == (1, "Another String") + + +def test_4343(cursor): + "4343 - test updating an empty row" + int_var = cursor.var(int) + cursor.execute("truncate table TestTempTable") + cursor.execute( + """ + begin + update TestTempTable set IntCol = :1 + where StringCol1 = :2 + returning IntCol into :3; + end; + """, + [1, "test string 4352", int_var], + ) + assert int_var.values == [None] + + +def test_4344(conn): + "4344 - fetch duplicate data from query in statement cache" + sql = """ + select 'A', 'B', 'C' from dual + union all + select 'A', 'B', 'C' from dual + union all + select 'A', 'B', 'C' from dual""" + expected_data = [("A", "B", "C")] * 3 + with conn.cursor() as cursor: + cursor.prefetchrows = 0 + cursor.execute(sql) + assert cursor.fetchall() == expected_data + with conn.cursor() as cursor: + cursor.prefetchrows = 0 + cursor.execute(sql) + assert cursor.fetchall() == expected_data + + +def test_4345(cursor): + "4345 - fetch duplicate data with outconverter" + + def 
out_converter(value): + assert isinstance(value, str) + return int(value) + + def type_handler(cursor, metadata): + if metadata.name == "COL_3": + return cursor.var( + str, arraysize=cursor.arraysize, outconverter=out_converter ) - self.assertEqual(self.cursor.fetchall(), rows) - - def test_4358(self): - "4358 - test rebuild of table with LOB in cached query (as string)" - table_name = "test_4370" - drop_sql = f"drop table {table_name} purge" - create_sql = f""" - create table {table_name} ( - Col1 number(9) not null, - Col2 clob not null - )""" - insert_sql = f"insert into {table_name} values (:1, :2)" - query_sql = f"select * from {table_name} order by Col1" - data = [(1, "CLOB value 1"), (2, "CLOB value 2")] - try: - self.cursor.execute(drop_sql) - except oracledb.DatabaseError: - pass - with test_env.DefaultsContextManager("fetch_lobs", False): - self.cursor.execute(create_sql) - self.cursor.executemany(insert_sql, data) - self.cursor.execute(query_sql) - self.assertEqual(self.cursor.fetchall(), data) - self.cursor.execute(query_sql) - self.assertEqual(self.cursor.fetchall(), data) - self.cursor.execute(drop_sql) - self.cursor.execute(create_sql) - self.cursor.executemany(insert_sql, data) - self.cursor.execute(query_sql) - self.assertEqual(self.cursor.fetchall(), data) - - def test_4359(self): - "4359 - test rebuild of table with LOB in cached query (as LOB)" - table_name = "test_4371" - drop_sql = f"drop table {table_name} purge" - create_sql = f""" - create table {table_name} ( - Col1 number(9) not null, - Col2 clob not null)""" - insert_sql = f"insert into {table_name} values (:1, :2)" - query_sql = f"select * from {table_name} order by Col1" - data = [(1, "CLOB value 1"), (2, "CLOB value 2")] - try: - self.cursor.execute(drop_sql) - except oracledb.DatabaseError: - pass - self.cursor.execute(create_sql) - self.cursor.executemany(insert_sql, data) - self.cursor.execute(query_sql) - fetched_data = [(n, c.read()) for n, c in self.cursor] - self.assertEqual(fetched_data, data) - self.cursor.execute(query_sql) - fetched_data = [(n, c.read()) for n, c in self.cursor] - self.assertEqual(fetched_data, data) - self.cursor.execute(drop_sql) - self.cursor.execute(create_sql) - self.cursor.executemany(insert_sql, data) - self.cursor.execute(query_sql) - fetched_data = [(n, c.read()) for n, c in self.cursor] - self.assertEqual(fetched_data, data) - - @test_env.skip_unless_json_supported() - def test_4360(self): - "4360 - fetch JSON columns as Python objects" - expected_data = [ - (1, [1, 2, 3], [4, 5, 6], [7, 8, 9]), - (2, None, None, None), - ] - self.cursor.execute("select * from TestJsonCols order by IntCol") - self.assertEqual(self.cursor.fetchall(), expected_data) - - @test_env.skip_unless_domains_supported() - def test_4361(self): - "4361 - fetch table with domain and annotations" - self.cursor.execute("select * from TableWithDomainAndAnnotations") - self.assertEqual(self.cursor.fetchall(), [(1, 25)]) - column_1 = self.cursor.description[0] - self.assertIsNone(column_1.domain_schema) - self.assertIsNone(column_1.domain_name) - self.assertIsNone(column_1.annotations) - column_2 = self.cursor.description[1] - self.assertEqual( - column_2.domain_schema, test_env.get_main_user().upper() - ) - self.assertEqual(column_2.domain_name, "SIMPLEDOMAIN") - expected_annotations = { - "ANNO_1": "first annotation", - "ANNO_2": "second annotation", - "ANNO_3": "", - } - self.assertEqual(column_2.annotations, expected_annotations) - - def test_4362(self): - "4362 - test getting statement after it was 
executed" - cursor = self.conn.cursor() - sql = "select 1 from dual" + + cursor.outputtypehandler = type_handler + cursor.execute( + """ + select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + union all + select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + union all + select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + """ + ) + expected_data = [("A", 2, 3)] * 3 + assert cursor.fetchall() == expected_data + + +def test_4346(cursor): + "4346 - test setinputsizes() with defaults specified" + cursor.setinputsizes(None, str) + assert cursor.bindvars[0] is None + assert isinstance(cursor.bindvars[1], oracledb.Var) + cursor.setinputsizes(a=None, b=str) + assert cursor.bindvars.get("a") is None + assert isinstance(cursor.bindvars["b"], oracledb.Var) + + +def test_4347(skip_if_drcp, test_env): + "4547 - kill connection with open cursor" + admin_conn = test_env.get_admin_connection() + conn = test_env.get_connection() + assert conn.is_healthy() + sid, serial = test_env.get_sid_serial(conn) + with admin_conn.cursor() as admin_cursor: + sql = f"alter system kill session '{sid},{serial}'" + admin_cursor.execute(sql) + with test_env.assert_raises_full_code("DPY-4011"): + with conn.cursor() as cursor: + cursor.execute("select user from dual") + assert not conn.is_healthy() + + +def test_4348(skip_if_drcp, test_env): + "4348 - kill connection in cursor context manager" + admin_conn = test_env.get_admin_connection() + conn = test_env.get_connection() + assert conn.is_healthy() + sid, serial = test_env.get_sid_serial(conn) + with admin_conn.cursor() as admin_cursor: + admin_cursor.execute(f"alter system kill session '{sid},{serial}'") + with test_env.assert_raises_full_code("DPY-4011"): + with conn.cursor() as cursor: + cursor.execute("select user from dual") + assert not conn.is_healthy() + + +def test_4349(conn, test_env): + "4349 - fetchmany() with and without parameters" + sql_part = "select user from dual" + sql = " union all ".join([sql_part] * 10) + with conn.cursor() as cursor: + cursor.arraysize = 6 + cursor.execute(sql) + rows = cursor.fetchmany() + assert len(rows) == cursor.arraysize + cursor.execute(sql) + rows = cursor.fetchmany(size=2) + assert len(rows) == 2 cursor.execute(sql) - self.assertEqual(cursor.statement, sql) + rows = cursor.fetchmany(numRows=4) + assert len(rows) == 4 + cursor.execute(sql) + with test_env.assert_raises_full_code("DPY-2014"): + cursor.fetchmany(size=2, numRows=4) - def test_4363(self): - "4363 - test getting cursor fetchvars" - cursor = self.conn.cursor() - self.assertIsNone(cursor.fetchvars) - cursor.execute("truncate table TestTempTable") +def test_4350(conn): + "4350 - access cursor.rowcount after closing cursor" + with conn.cursor() as cursor: + cursor.execute("select user from dual") + cursor.fetchall() + assert cursor.rowcount == 1 + assert cursor.rowcount == -1 + + +def test_4351(disable_fetch_lobs, cursor, test_env): + "4351 - changing bind type with define needed" + cursor.execute("delete from TestClobs") + row_for_1 = (1, "Short value 1") + row_for_56 = (56, "Short value 56") + for data in (row_for_1, row_for_56): cursor.execute( - "insert into TestTempTable (IntCol, StringCol1) values (1, '12')", + "insert into TestClobs (IntCol, ClobCol) values (:1, :2)", + data, ) - cursor.execute("select IntCol, StringCol1 from TestTempTable") + sql = "select IntCol, ClobCol from TestClobs where IntCol = :int_col" + cursor.execute(sql, int_col="1") + assert cursor.fetchone() == row_for_1 + cursor.execute(sql, int_col="56") + assert cursor.fetchone() == 
row_for_56 + cursor.execute(sql, int_col=1) + assert cursor.fetchone() == row_for_1 + + +def test_4352(cursor): + "4352 - test calling cursor.parse() twice with the same statement" + cursor.execute("truncate table TestTempTable") + data = (4363, "Value for test 4363") + cursor.execute( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + data, + ) + sql = "update TestTempTable set StringCol1 = :v where IntCol = :i" + for i in range(2): + cursor.parse(sql) + cursor.execute(sql, ("Updated value", data[0])) + + +def test_4353(conn, cursor): + "4353 - test addition of column to cached query" + table_name = "test_4365" + try: + cursor.execute(f"drop table {table_name}") + except oracledb.DatabaseError: + pass + data = ("val 1", "val 2") + cursor.execute(f"create table {table_name} (col1 varchar2(10))") + cursor.execute(f"insert into {table_name} values (:1)", [data[0]]) + conn.commit() + cursor.execute(f"select * from {table_name}") + assert cursor.fetchall() == [(data[0],)] + cursor.execute(f"alter table {table_name} add col2 varchar2(10)") + cursor.execute(f"update {table_name} set col2 = :1", [data[1]]) + conn.commit() + cursor.execute(f"select * from {table_name}") + assert cursor.fetchall() == [data] + + +def test_4354(cursor, test_env): + "4354 - test population of array var with too many elements" + var = cursor.arrayvar(int, 3) + with test_env.assert_raises_full_code("DPY-2016"): + var.setvalue(0, [1, 2, 3, 4]) + + +def test_4355(cursor): + "4355 - test executemany() with PL/SQL and increasing data lengths" + sql = "begin :1 := length(:2); end;" + var = cursor.var(int, arraysize=3) + cursor.executemany(sql, [(var, "one"), (var, "two"), (var, "end")]) + assert var.values == [3, 3, 3] + cursor.executemany(sql, [(var, "three"), (var, "four"), (var, "end")]) + assert var.values == [5, 4, 3] + cursor.executemany(sql, [(var, "five"), (var, "six"), (var, "end")]) + assert var.values == [4, 3, 3] + + +def test_4356(cursor): + "4356 - test cursor.rowcount values for queries" + max_rows = 93 + cursor.arraysize = 10 + cursor.execute( + "select rownum as id from dual connect by rownum <= :1", + [max_rows], + ) + assert cursor.rowcount == 0 + batch_num = 1 + while True: + rows = cursor.fetchmany() + if not rows: + break + expected_value = min(max_rows, batch_num * cursor.arraysize) + assert cursor.rowcount == expected_value + batch_num += 1 + cursor.fetchall() + assert cursor.rowcount == max_rows + + +def test_4357(disable_fetch_lobs, conn, cursor, test_env): + "4357 - test bind order for PL/SQL" + cursor.execute("delete from TestClobs") + sql = """ + insert into TestClobs (IntCol, CLOBCol, ExtraNumCol1) + values (:1, :2, :3)""" + data = "x" * 9000 + rows = [(1, data, 5), (2, data, 6)] + cursor.execute(sql, rows[0]) + plsql = f"begin {sql}; end;" + cursor.execute(plsql, rows[1]) + conn.commit() + cursor.execute( + """ + select IntCol, CLOBCol, ExtraNumCol1 + from TestCLOBs + order by IntCol + """ + ) + assert cursor.fetchall() == rows + + +def test_4358(disable_fetch_lobs, cursor, test_env): + "4358 - test rebuild of table with LOB in cached query (as string)" + table_name = "test_4370" + drop_sql = f"drop table {table_name} purge" + create_sql = f""" + create table {table_name} ( + Col1 number(9) not null, + Col2 clob not null + )""" + insert_sql = f"insert into {table_name} values (:1, :2)" + query_sql = f"select * from {table_name} order by Col1" + data = [(1, "CLOB value 1"), (2, "CLOB value 2")] + try: + cursor.execute(drop_sql) + except oracledb.DatabaseError: + pass + 
cursor.execute(create_sql) + cursor.executemany(insert_sql, data) + cursor.execute(query_sql) + assert cursor.fetchall() == data + cursor.execute(query_sql) + assert cursor.fetchall() == data + cursor.execute(drop_sql) + cursor.execute(create_sql) + cursor.executemany(insert_sql, data) + cursor.execute(query_sql) + assert cursor.fetchall() == data + + +def test_4359(cursor): + "4359 - test rebuild of table with LOB in cached query (as LOB)" + table_name = "test_4371" + drop_sql = f"drop table {table_name} purge" + create_sql = f""" + create table {table_name} ( + Col1 number(9) not null, + Col2 clob not null)""" + insert_sql = f"insert into {table_name} values (:1, :2)" + query_sql = f"select * from {table_name} order by Col1" + data = [(1, "CLOB value 1"), (2, "CLOB value 2")] + try: + cursor.execute(drop_sql) + except oracledb.DatabaseError: + pass + cursor.execute(create_sql) + cursor.executemany(insert_sql, data) + cursor.execute(query_sql) + fetched_data = [(n, c.read()) for n, c in cursor] + assert fetched_data == data + cursor.execute(query_sql) + fetched_data = [(n, c.read()) for n, c in cursor] + assert fetched_data == data + cursor.execute(drop_sql) + cursor.execute(create_sql) + cursor.executemany(insert_sql, data) + cursor.execute(query_sql) + fetched_data = [(n, c.read()) for n, c in cursor] + assert fetched_data == data + + +def test_4360(skip_unless_json_supported, cursor): + "4360 - fetch JSON columns as Python objects" + expected_data = [ + (1, [1, 2, 3], [4, 5, 6], [7, 8, 9]), + (2, None, None, None), + ] + cursor.execute("select * from TestJsonCols order by IntCol") + assert cursor.fetchall() == expected_data + + +def test_4361(skip_unless_domains_supported, cursor, test_env): + "4361 - fetch table with domain and annotations" + cursor.execute("select * from TableWithDomainAndAnnotations") + assert cursor.fetchall() == [(1, 25)] + column_1 = cursor.description[0] + assert column_1.domain_schema is None + assert column_1.domain_name is None + assert column_1.annotations is None + column_2 = cursor.description[1] + assert column_2.domain_schema == test_env.main_user.upper() + assert column_2.domain_name == "SIMPLEDOMAIN" + expected_annotations = { + "ANNO_1": "first annotation", + "ANNO_2": "second annotation", + "ANNO_3": "", + } + assert column_2.annotations == expected_annotations + + +def test_4362(cursor): + "4362 - test getting statement after it was executed" + sql = "select 1 from dual" + cursor.execute(sql) + assert cursor.statement == sql + + +def test_4363(cursor): + "4363 - test getting cursor fetchvars" + assert cursor.fetchvars is None + + cursor.execute("truncate table TestTempTable") + cursor.execute( + "insert into TestTempTable (IntCol, StringCol1) values (1, '12')", + ) + cursor.execute("select IntCol, StringCol1 from TestTempTable") + cursor.fetchall() + assert len(cursor.fetchvars) == 2 + assert cursor.fetchvars[0].getvalue() == 1 + assert cursor.fetchvars[1].getvalue() == "12" + + +def test_4364(cursor): + "4364 - test fetchmany() with non-default cursor.arraysize" + cursor.arraysize = 20 + values = [(i,) for i in range(30)] + cursor.execute("truncate table TestTempTable") + cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", values + ) + cursor.execute("select IntCol from TestTempTable order by IntCol") + # fetch first 20 elements + fetched_values = cursor.fetchmany() + assert fetched_values == values[: cursor.arraysize] + + # fetch missing elements + fetched_values = cursor.fetchmany() + assert fetched_values == 
values[cursor.arraysize :] + + +def test_4365(cursor, test_env): + "4365 - negative tests for cursor.arraysize" + with test_env.assert_raises_full_code("DPY-2045"): + cursor.arraysize = 0 + with test_env.assert_raises_full_code("DPY-2045"): + cursor.arraysize = -1 + with test_env.assert_raises_full_code("DPY-2045"): + cursor.arraysize = "not valid" + + +def test_4366(cursor, test_env): + "4366 - test fetching LOBs after an error" + sql = """ + select + to_clob(:val), + 1 / (dbms_lob.getlength(to_clob(:val)) - 1) + from dual""" + with test_env.assert_raises_full_code("ORA-01476"): + cursor.execute(sql, val="a") cursor.fetchall() - self.assertEqual(len(cursor.fetchvars), 2) - self.assertEqual(cursor.fetchvars[0].getvalue(), 1) - self.assertEqual(cursor.fetchvars[1].getvalue(), "12") - - def test_4364(self): - "4364 - test fetchmany() with non-default cursor.arraysize" - self.cursor.arraysize = 20 - values = [(i,) for i in range(30)] - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", values - ) - self.cursor.execute("select IntCol from TestTempTable order by IntCol") - # fetch first 20 elements - fetched_values = self.cursor.fetchmany() - self.assertEqual(fetched_values, values[: self.cursor.arraysize]) - - # fetch missing elements - fetched_values = self.cursor.fetchmany() - self.assertEqual(fetched_values, values[self.cursor.arraysize :]) - - def test_4365(self): - "4365 - negative tests for cursor.arraysize" - with self.assertRaisesFullCode("DPY-2045"): - self.cursor.arraysize = 0 - with self.assertRaisesFullCode("DPY-2045"): - self.cursor.arraysize = -1 - with self.assertRaisesFullCode("DPY-2045"): - self.cursor.arraysize = "not valid" - - def test_4366(self): - "4366 - test fetching LOBs after an error" - sql = """ - select - to_clob(:val), - 1 / (dbms_lob.getlength(to_clob(:val)) - 1) - from dual""" - with self.assertRaisesFullCode("ORA-01476"): - self.cursor.execute(sql, val="a") - self.cursor.fetchall() - self.cursor.execute(sql, val="bb") - lob, num_val = self.cursor.fetchone() - self.assertEqual(lob.read(), "bb") - self.assertEqual(num_val, 1) - - def test_4367(self): - "4367 - test parse() with autocommit enabled" - conn = test_env.get_connection() - conn.autocommit = True + cursor.execute(sql, val="bb") + lob, num_val = cursor.fetchone() + assert lob.read() == "bb" + assert num_val == 1 + + +def test_4367(conn): + "4367 - test parse() with autocommit enabled" + conn.autocommit = True + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + cursor.parse("insert into TestTempTable (IntCol) values (:1)") + cursor.execute(None, [1]) + + +def test_4368(cursor, test_env): + "4368 - test cursor.setinputsizes() with early failed execute" + cursor.setinputsizes(a=int, b=str) + with test_env.assert_raises_full_code("DPY-2006"): + cursor.execute("select :c from dual", [5]) + value = 4368 + cursor.execute("select :d from dual", [value]) + (fetched_value,) = cursor.fetchone() + assert fetched_value == value + + +def test_4369(test_env): + "4369 - access cursor.rowcount after closing connection" + with test_env.get_connection() as conn: cursor = conn.cursor() - cursor.execute("truncate table TestTempTable") - cursor.parse("insert into TestTempTable (IntCol) values (:1)") - cursor.execute(None, [1]) - - def test_4368(self): - "4368 - test cursor.setinputsizes() with early failed execute" - self.cursor.setinputsizes(a=int, b=str) - with self.assertRaisesFullCode("DPY-2006"): - 
self.cursor.execute("select :c from dual", [5]) - value = 4368 - self.cursor.execute("select :d from dual", [value]) - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(fetched_value, value) - - def test_4369(self): - "4369 - access cursor.rowcount after closing connection" - with test_env.get_connection() as conn: - cursor = conn.cursor() - self.assertEqual(cursor.rowcount, -1) - - def test_4370(self): - "4370 - execute PL/SQL with out vars after query with duplicate data" - self.cursor.execute("truncate table TestTempTable") - self.cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - [(i + 1, "test_4370") for i in range(20)], - ) - self.conn.commit() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - var = self.cursor.var(int) - self.cursor.execute("begin :1 := 4370; end;", [var]) - self.assertEqual(var.getvalue(), 4370) - - def test_4371(self): - "4371 - test cursor with fetch_decimals=True specified" - value = 4371 - self.cursor.execute( - "select :1 from dual", [value], fetch_decimals=True - ) - rows = self.cursor.fetchall() - self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) - - -if __name__ == "__main__": - test_env.run_test_cases() + assert cursor.rowcount == -1 + + +def test_4370(conn, cursor): + "4370 - execute PL/SQL with out vars after query with duplicate data" + cursor.execute("truncate table TestTempTable") + cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + [(i + 1, "test_4370") for i in range(20)], + ) + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + var = cursor.var(int) + cursor.execute("begin :1 := 4370; end;", [var]) + assert var.getvalue() == 4370 + + +def test_4371(cursor): + "4371 - test cursor with fetch_decimals=True specified" + value = 4371 + cursor.execute("select :1 from dual", [value], fetch_decimals=True) + rows = cursor.fetchall() + assert isinstance(rows[0][0], decimal.Decimal) diff --git a/tests/test_4400_tpc.py b/tests/test_4400_tpc.py index f096e489..a08b4963 100644 --- a/tests/test_4400_tpc.py +++ b/tests/test_4400_tpc.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2021, 2024, Oracle and/or its affiliates. +# Copyright (c) 2021, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,325 +27,336 @@ """ import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_4400(self): - "4400 - test begin, prepare, roll back global transaction" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(3900, b"txn3900", b"branchId") - self.conn.tpc_begin(xid) - self.assertEqual(self.conn.tpc_prepare(), False) - self.conn.tpc_begin(xid) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ - ) - self.assertEqual(self.conn.tpc_prepare(), True) - self.conn.tpc_rollback() - self.cursor.execute("select count(*) from TestTempTable") - (count,) = self.cursor.fetchone() - self.assertEqual(count, 0) - - def test_4401(self): - "4401 - test begin, prepare, commit global transaction" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(3901, "txn3901", "branchId") - self.conn.tpc_begin(xid) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ - ) - self.assertEqual(self.conn.tpc_prepare(), True) - self.conn.tpc_commit() - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(self.cursor.fetchall(), [(1, "tesName")]) - - def test_4402(self): - "4402 - test multiple global transactions on the same connection" - self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(3902, "txn3902", "branch1") - xid2 = self.conn.xid(3902, b"txn3902", b"branch2") - self.conn.tpc_begin(xid1) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ - ) - self.conn.tpc_end() - self.conn.tpc_begin(xid2) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (2, 'tesName') - """ - ) - self.conn.tpc_end() - needs_commit1 = self.conn.tpc_prepare(xid1) - needs_commit2 = self.conn.tpc_prepare(xid2) - if needs_commit1: - self.conn.tpc_commit(xid1) - if needs_commit2: - self.conn.tpc_commit(xid2) - self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - expected_rows = [(1, "tesName"), (2, "tesName")] - self.assertEqual(self.cursor.fetchall(), expected_rows) - - def test_4403(self): - "4403 - test rollback with parameter xid" - self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(3901, b"txn3901", b"branch1") - xid2 = self.conn.xid(3902, "txn3902", "branch2") - for count, xid in enumerate([xid1, xid2]): - self.conn.tpc_begin(xid) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:id, 'tesName') - """, - id=count, - ) - self.conn.tpc_end() - self.conn.tpc_rollback(xid1) - - with self.assertRaisesFullCode("ORA-24756"): - self.conn.tpc_prepare(xid1) - needs_commit = self.conn.tpc_prepare(xid2) - if needs_commit: - self.conn.tpc_commit(xid2) - self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - self.assertEqual(self.cursor.fetchall(), [(1, "tesName")]) - - def test_4404(self): - "4404 - test resuming a transaction" - self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(3939, "txn3939", "branch39") - xid2 = self.conn.xid(3940, "txn3940", "branch40") - values = [[xid1, (1, "User Info")], [xid2, (2, "Other User Info")]] - for xid, data in values: - self.conn.tpc_begin(xid) - self.cursor.execute( - """ - 
insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data, - ) - self.conn.tpc_end() - for xid, data in values: - self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME) - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - (res,) = self.cursor.fetchall() - self.assertEqual(res, data) - self.conn.tpc_rollback(xid) - - def test_4405(self): - "4405 - test promoting a local transaction to a tpc transaction" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(3941, "txn3941", "branch41") - values = (1, "String 1") - self.cursor.execute( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - values, - ) - with self.assertRaisesFullCode("ORA-24776"): - self.conn.tpc_begin(xid) - self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_PROMOTE) - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - (res,) = self.cursor.fetchall() - self.assertEqual(res, values) - self.conn.tpc_rollback(xid) - - def test_4406(self): - "4406 - test ending a transaction with parameter xid" - self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(4406, "txn4406a", "branch3") - xid2 = self.conn.xid(4406, b"txn4406b", b"branch4") - self.conn.tpc_begin(xid1) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'test4406a') - """ - ) - self.conn.tpc_begin(xid2) - with self.assertRaisesFullCode("ORA-24758"): - self.conn.tpc_end(xid1) - self.cursor.execute( +import pytest + + +def test_4400(conn, cursor): + "4400 - test begin, prepare, roll back global transaction" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(3900, b"txn3900", b"branchId") + conn.tpc_begin(xid) + assert not conn.tpc_prepare() + conn.tpc_begin(xid) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'tesName') + """ + ) + assert conn.tpc_prepare() + conn.tpc_rollback() + cursor.execute("select count(*) from TestTempTable") + (count,) = cursor.fetchone() + assert count == 0 + + +def test_4401(conn, cursor): + "4401 - test begin, prepare, commit global transaction" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(3901, "txn3901", "branchId") + conn.tpc_begin(xid) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'tesName') + """ + ) + assert conn.tpc_prepare() + conn.tpc_commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [(1, "tesName")] + + +def test_4402(conn, cursor): + "4402 - test multiple global transactions on the same connection" + cursor.execute("truncate table TestTempTable") + xid1 = conn.xid(3902, "txn3902", "branch1") + xid2 = conn.xid(3902, b"txn3902", b"branch2") + conn.tpc_begin(xid1) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'tesName') + """ + ) + conn.tpc_end() + conn.tpc_begin(xid2) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (2, 'tesName') + """ + ) + conn.tpc_end() + needs_commit1 = conn.tpc_prepare(xid1) + needs_commit2 = conn.tpc_prepare(xid2) + if needs_commit1: + conn.tpc_commit(xid1) + if needs_commit2: + conn.tpc_commit(xid2) + cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + expected_rows = [(1, "tesName"), (2, "tesName")] + assert cursor.fetchall() == expected_rows + + +def test_4403(conn, cursor, test_env): + "4403 - test rollback with parameter xid" + cursor.execute("truncate table TestTempTable") + xid1 = 
conn.xid(3901, b"txn3901", b"branch1") + xid2 = conn.xid(3902, "txn3902", "branch2") + for count, xid in enumerate([xid1, xid2]): + conn.tpc_begin(xid) + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) - values (2, 'test4406b') - """ + values (:id, 'tesName') + """, + id=count, ) - self.conn.tpc_end(xid2) - with self.assertRaisesFullCode("ORA-25352"): - self.conn.tpc_end(xid1) - self.conn.tpc_rollback(xid1) - self.conn.tpc_rollback(xid2) - - def test_4407(self): - "4407 - test tpc_recover()" - self.cursor.execute("truncate table TestTempTable") - n_xids = 10 - for i in range(n_xids): - xid = self.conn.xid(4407 + i, f"txn4407{i}", f"branch{i}") - self.conn.tpc_begin(xid) - self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, 'test4407') - """, - [i + 1], - ) - self.conn.tpc_prepare(xid) - recovers = self.conn.tpc_recover() - self.assertEqual(len(recovers), n_xids) - - self.cursor.execute("select * from DBA_PENDING_TRANSACTIONS") - self.assertEqual(self.cursor.fetchall(), recovers) - - for xid in recovers: - if xid.format_id % 2 == 0: - self.conn.tpc_commit(xid) - recovers = self.conn.tpc_recover() - self.assertEqual(len(recovers), n_xids // 2) - - for xid in recovers: - self.conn.tpc_rollback(xid) - recovers = self.conn.tpc_recover() - self.assertEqual(len(recovers), 0) - - def test_4408(self): - "4408 - test tpc_recover() with read-only transaction" - self.cursor.execute("truncate table TestTempTable") - for i in range(4): - xid = self.conn.xid(4408 + i, f"txn4408{i}", f"branch{i}") - self.conn.tpc_begin(xid) - self.cursor.execute("select * from TestTempTable") - self.conn.tpc_prepare(xid) - recovers = self.conn.tpc_recover() - self.assertEqual(len(recovers), 0) - - def test_4409(self): - "4409 - test tpc_commit() with one_phase parameter" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(4409, "txn4409", "branch1") - self.conn.tpc_begin(xid) - values = (1, "test4409") - self.cursor.execute( + conn.tpc_end() + conn.tpc_rollback(xid1) + + with test_env.assert_raises_full_code("ORA-24756"): + conn.tpc_prepare(xid1) + needs_commit = conn.tpc_prepare(xid2) + if needs_commit: + conn.tpc_commit(xid2) + cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert cursor.fetchall() == [(1, "tesName")] + + +def test_4404(conn, cursor): + "4404 - test resuming a transaction" + cursor.execute("truncate table TestTempTable") + xid1 = conn.xid(3939, "txn3939", "branch39") + xid2 = conn.xid(3940, "txn3940", "branch40") + values = [[xid1, (1, "User Info")], [xid2, (2, "Other User Info")]] + for xid, data in values: + conn.tpc_begin(xid) + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, - values, + data, ) - self.cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.conn.tpc_commit(xid, one_phase=True) - self.assertEqual(self.cursor.fetchall(), [values]) - - def test_4410(self): - "4410 - test negative cases for tpc_commit()" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(4410, "txn4410", "branch1") - self.conn.tpc_begin(xid) - self.cursor.execute( + conn.tpc_end() + for xid, data in values: + conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME) + cursor.execute("select IntCol, StringCol1 from TestTempTable") + (res,) = cursor.fetchall() + assert res == data + conn.tpc_rollback(xid) + + +def test_4405(conn, cursor, test_env): + "4405 - test promoting a local transaction to a tpc transaction" + 
cursor.execute("truncate table TestTempTable") + xid = conn.xid(3941, "txn3941", "branch41") + values = (1, "String 1") + cursor.execute( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + values, + ) + with test_env.assert_raises_full_code("ORA-24776"): + conn.tpc_begin(xid) + conn.tpc_begin(xid, oracledb.TPC_BEGIN_PROMOTE) + cursor.execute("select IntCol, StringCol1 from TestTempTable") + (res,) = cursor.fetchall() + assert res == values + conn.tpc_rollback(xid) + + +def test_4406(conn, cursor, test_env): + "4406 - test ending a transaction with parameter xid" + cursor.execute("truncate table TestTempTable") + xid1 = conn.xid(4406, "txn4406a", "branch3") + xid2 = conn.xid(4406, b"txn4406b", b"branch4") + conn.tpc_begin(xid1) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'test4406a') + """ + ) + conn.tpc_begin(xid2) + with test_env.assert_raises_full_code("ORA-24758"): + conn.tpc_end(xid1) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (2, 'test4406b') + """ + ) + conn.tpc_end(xid2) + with test_env.assert_raises_full_code("ORA-25352"): + conn.tpc_end(xid1) + conn.tpc_rollback(xid1) + conn.tpc_rollback(xid2) + + +def test_4407(conn, cursor): + "4407 - test tpc_recover()" + cursor.execute("truncate table TestTempTable") + n_xids = 10 + for i in range(n_xids): + xid = conn.xid(4407 + i, f"txn4407{i}", f"branch{i}") + conn.tpc_begin(xid) + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) - values (1, 'test4410') - """ + values (:1, 'test4407') + """, + [i + 1], ) - self.assertRaises(TypeError, self.conn.tpc_commit, "invalid xid") - self.conn.tpc_prepare(xid) - with self.assertRaisesFullCode("ORA-02053"): - self.conn.tpc_commit(xid, one_phase=True) - with self.assertRaisesFullCode("ORA-24756"): - self.conn.tpc_commit(xid) - - def test_4411(self): - "4411 - test starting an already created transaction" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(4411, "txn4411", "branch1") - self.conn.tpc_begin(xid) - with self.assertRaisesFullCode("ORA-24757"): - self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_NEW) - with self.assertRaisesFullCode("ORA-24797"): - self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_PROMOTE) - self.conn.tpc_end() - for flag in [oracledb.TPC_BEGIN_NEW, oracledb.TPC_BEGIN_PROMOTE]: - with self.assertRaisesFullCode("ORA-24757"): - self.conn.tpc_begin(xid, flag) - self.conn.tpc_rollback(xid) - - def test_4412(self): - "4412 - test resuming a prepared transaction" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(4412, "txn4412", "branch1") - self.conn.tpc_begin(xid) - self.conn.tpc_prepare(xid) - with self.assertRaisesFullCode("ORA-24756"): - self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME) - - def test_4413(self): - "4413 - test tpc_begin and tpc_end with invalid parameters" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(4413, "txn4413", "branch1") - test_values = [ - (self.conn.tpc_begin, "DPY-2050"), - (self.conn.tpc_end, "DPY-2051"), - ] - for tpc_function, error_code in test_values: - self.assertRaises(TypeError, tpc_function, "invalid xid") - with self.assertRaisesFullCode(error_code): - tpc_function(xid, "invalid flag") - with self.assertRaisesFullCode(error_code): - tpc_function(xid, 70) - - def test_4414(self): - "4414 - test commiting transaction without tpc_commit" - xid = self.conn.xid(4414, "txn4409", "branch1") - self.conn.tpc_begin(xid) - with 
self.assertRaisesFullCode("ORA-02089"): - self.cursor.execute("truncate table TestTempTable") - - def test_4415(self): - "4415 - test tpc_commit when a commit is not needed" - xid = self.conn.xid(4416, "txn4416", "branch1") - self.conn.tpc_begin(xid) - self.cursor.execute("select * from TestTempTable") - self.conn.tpc_end(xid) - self.conn.tpc_prepare(xid) - with self.assertRaisesFullCode("ORA-24756"): - self.conn.tpc_commit(xid) - - def test_4416(self): - "4416 - test transaction_in_progress" - self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(4415, "txn4415", "branch1") - self.assertFalse(self.conn.transaction_in_progress) - - self.conn.tpc_begin(xid) - self.assertTrue(self.conn.transaction_in_progress) - self.cursor.execute("insert into TestTempTable (IntCol) values (2)") - - self.conn.tpc_end(xid) - self.assertFalse(self.conn.transaction_in_progress) - - self.conn.tpc_prepare(xid) - self.assertFalse(self.conn.transaction_in_progress) - - self.conn.tpc_commit(xid) - self.assertFalse(self.conn.transaction_in_progress) - - -if __name__ == "__main__": - test_env.run_test_cases() + conn.tpc_prepare(xid) + recovers = conn.tpc_recover() + assert len(recovers) == n_xids + + cursor.execute("select * from DBA_PENDING_TRANSACTIONS") + assert cursor.fetchall() == recovers + + for xid in recovers: + if xid.format_id % 2 == 0: + conn.tpc_commit(xid) + recovers = conn.tpc_recover() + assert len(recovers) == n_xids // 2 + + for xid in recovers: + conn.tpc_rollback(xid) + recovers = conn.tpc_recover() + assert len(recovers) == 0 + + +def test_4408(conn, cursor): + "4408 - test tpc_recover() with read-only transaction" + cursor.execute("truncate table TestTempTable") + for i in range(4): + xid = conn.xid(4408 + i, f"txn4408{i}", f"branch{i}") + conn.tpc_begin(xid) + cursor.execute("select * from TestTempTable") + conn.tpc_prepare(xid) + recovers = conn.tpc_recover() + assert len(recovers) == 0 + + +def test_4409(conn, cursor): + "4409 - test tpc_commit() with one_phase parameter" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(4409, "txn4409", "branch1") + conn.tpc_begin(xid) + values = (1, "test4409") + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + values, + ) + cursor.execute("select IntCol, StringCol1 from TestTempTable") + conn.tpc_commit(xid, one_phase=True) + assert cursor.fetchall() == [values] + + +def test_4410(conn, cursor, test_env): + "4410 - test negative cases for tpc_commit()" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(4410, "txn4410", "branch1") + conn.tpc_begin(xid) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'test4410') + """ + ) + pytest.raises(TypeError, conn.tpc_commit, "invalid xid") + conn.tpc_prepare(xid) + with test_env.assert_raises_full_code("ORA-02053"): + conn.tpc_commit(xid, one_phase=True) + with test_env.assert_raises_full_code("ORA-24756"): + conn.tpc_commit(xid) + + +def test_4411(conn, cursor, test_env): + "4411 - test starting an already created transaction" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(4411, "txn4411", "branch1") + conn.tpc_begin(xid) + with test_env.assert_raises_full_code("ORA-24757"): + conn.tpc_begin(xid, oracledb.TPC_BEGIN_NEW) + with test_env.assert_raises_full_code("ORA-24797"): + conn.tpc_begin(xid, oracledb.TPC_BEGIN_PROMOTE) + conn.tpc_end() + for flag in [oracledb.TPC_BEGIN_NEW, oracledb.TPC_BEGIN_PROMOTE]: + with test_env.assert_raises_full_code("ORA-24757"): + 
conn.tpc_begin(xid, flag) + conn.tpc_rollback(xid) + + +def test_4412(conn, cursor, test_env): + "4412 - test resuming a prepared transaction" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(4412, "txn4412", "branch1") + conn.tpc_begin(xid) + conn.tpc_prepare(xid) + with test_env.assert_raises_full_code("ORA-24756"): + conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME) + + +def test_4413(conn, cursor, test_env): + "4413 - test tpc_begin and tpc_end with invalid parameters" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(4413, "txn4413", "branch1") + test_values = [ + (conn.tpc_begin, "DPY-2050"), + (conn.tpc_end, "DPY-2051"), + ] + for tpc_function, error_code in test_values: + pytest.raises(TypeError, tpc_function, "invalid xid") + with test_env.assert_raises_full_code(error_code): + tpc_function(xid, "invalid flag") + with test_env.assert_raises_full_code(error_code): + tpc_function(xid, 70) + + +def test_4414(conn, cursor, test_env): + "4414 - test commiting transaction without tpc_commit" + xid = conn.xid(4414, "txn4409", "branch1") + conn.tpc_begin(xid) + with test_env.assert_raises_full_code("ORA-02089"): + cursor.execute("truncate table TestTempTable") + + +def test_4415(conn, cursor, test_env): + "4415 - test tpc_commit when a commit is not needed" + xid = conn.xid(4416, "txn4416", "branch1") + conn.tpc_begin(xid) + cursor.execute("select * from TestTempTable") + conn.tpc_end(xid) + conn.tpc_prepare(xid) + with test_env.assert_raises_full_code("ORA-24756"): + conn.tpc_commit(xid) + + +def test_4416(conn, cursor): + "4416 - test transaction_in_progress" + cursor.execute("truncate table TestTempTable") + xid = conn.xid(4415, "txn4415", "branch1") + assert not conn.transaction_in_progress + + conn.tpc_begin(xid) + assert conn.transaction_in_progress + cursor.execute("insert into TestTempTable (IntCol) values (2)") + + conn.tpc_end(xid) + assert not conn.transaction_in_progress + + conn.tpc_prepare(xid) + assert not conn.transaction_in_progress + + conn.tpc_commit(xid) + assert not conn.transaction_in_progress diff --git a/tests/test_4500_connect_params.py b/tests/test_4500_connect_params.py index b0e798dd..bc745135 100644 --- a/tests/test_4500_connect_params.py +++ b/tests/test_4500_connect_params.py @@ -31,1675 +31,1724 @@ import ssl import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - requires_connection = False - - def __test_writable_parameter(self, name, value): - """ - Tests that a writable parameter can be written to and the modified - value read back successfully. - """ - params = oracledb.ConnectParams() - orig_value = getattr(params, name) - copied_params = params.copy() +import pytest + + +def _test_writable_parameter(name, value): + """ + Tests that a writable parameter can be written to and the modified + value read back successfully. + """ + params = oracledb.ConnectParams() + orig_value = getattr(params, name) + copied_params = params.copy() + args = {} + args[name] = value + params.set(**args) + assert getattr(params, name) == value + assert getattr(copied_params, name) == orig_value + args[name] = None + params.set(**args) + assert getattr(params, name) == value + + +def _verify_network_name_attr(test_env, name): + """ + Verify that a network name attribute is handled properly in both valid + and invalid cases. 
+ """ + cp = oracledb.ConnectParams() + assert getattr(cp, name) == getattr(oracledb.defaults, name) + for value, ok in [ + ("valid_value", True), + ("'contains_quotes'", False), + ('"contains_double_quotes"', False), + ("contains_opening_paren (", False), + ("contains_closing_paren )", False), + ("contains_equals =", False), + ("contains_trailing_slash\\", False), + ]: args = {} args[name] = value - params.set(**args) - self.assertEqual(getattr(params, name), value) - self.assertEqual(getattr(copied_params, name), orig_value) - args[name] = None - params.set(**args) - self.assertEqual(getattr(params, name), value) - - def __verify_network_name_attr(self, name): - """ - Verify that a network name attribute is handled properly in both valid - and invalid cases. - """ - cp = oracledb.ConnectParams() - self.assertEqual(getattr(cp, name), getattr(oracledb.defaults, name)) - for value, ok in [ - ("valid_value", True), - ("'contains_quotes'", False), - ('"contains_double_quotes"', False), - ("contains_opening_paren (", False), - ("contains_closing_paren )", False), - ("contains_equals =", False), - ("contains_trailing_slash\\", False), - ]: - args = {} - args[name] = value - if ok: - cp = oracledb.ConnectParams(**args) - self.assertEqual(getattr(cp, name), value) - else: - with self.assertRaisesFullCode("DPY-3029"): - oracledb.ConnectParams(**args) - - def test_4500(self): - "4500 - test simple EasyConnect string parsing with port specified" - params = oracledb.ConnectParams() - params.parse_connect_string("my_host:1578/my_service_name") - self.assertEqual(params.host, "my_host") - self.assertEqual(params.port, 1578) - self.assertEqual(params.service_name, "my_service_name") + if ok: + cp = oracledb.ConnectParams(**args) + assert getattr(cp, name) == value + else: + with test_env.assert_raises_full_code("DPY-3029"): + oracledb.ConnectParams(**args) + + +def test_4500(): + "4500 - test simple EasyConnect string parsing with port specified" + params = oracledb.ConnectParams() + params.parse_connect_string("my_host:1578/my_service_name") + assert params.host == "my_host" + assert params.port == 1578 + assert params.service_name == "my_service_name" + + +def test_4501(): + "4501 - test simple Easy Connect string parsing with no port specified" + params = oracledb.ConnectParams() + params.parse_connect_string("my_host2/my_service_name2") + assert params.host == "my_host2" + assert params.port == 1521 + assert params.service_name == "my_service_name2" + + +def test_4502(): + "4502 - test simple EasyConnect string parsing with DRCP enabled" + params = oracledb.ConnectParams() + params.parse_connect_string("my_host3.org/my_service_name3:pooled") + assert params.host == "my_host3.org" + assert params.service_name == "my_service_name3" + assert params.server_type == "pooled" + params.parse_connect_string("my_host3/my_service_name3:ShArEd") + assert params.server_type == "shared" + + +def test_4503(): + "4503 - test simple name-value pair format connect string" + connect_string = """ + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host4)(PORT=1589)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name4)))""" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.host == "my_host4" + assert params.port == 1589 + assert params.service_name == "my_service_name4" + + +def test_4504(): + "4504 - test EasyConnect with protocol" + params = oracledb.ConnectParams() + params.parse_connect_string("tcps://my_host6/my_service_name6") + assert params.host == "my_host6" + assert 
params.service_name == "my_service_name6" + assert params.protocol == "tcps" + + +def test_4505(test_env): + "4505 - test EasyConnect with invalid protocol" + params = oracledb.ConnectParams() + with test_env.assert_raises_full_code("DPY-4021"): + params.parse_connect_string( + "invalid_proto://my_host7/my_service_name7" + ) - def test_4501(self): - "4501 - test simple Easy Connect string parsing with no port specified" - params = oracledb.ConnectParams() - params.parse_connect_string("my_host2/my_service_name2") - self.assertEqual(params.host, "my_host2") - self.assertEqual(params.port, 1521) - self.assertEqual(params.service_name, "my_service_name2") - def test_4502(self): - "4502 - test simple EasyConnect string parsing with DRCP enabled" - params = oracledb.ConnectParams() - params.parse_connect_string("my_host3.org/my_service_name3:pooled") - self.assertEqual(params.host, "my_host3.org") - self.assertEqual(params.service_name, "my_service_name3") - self.assertEqual(params.server_type, "pooled") - params.parse_connect_string("my_host3/my_service_name3:ShArEd") - self.assertEqual(params.server_type, "shared") - - def test_4503(self): - "4503 - test simple name-value pair format connect string" - connect_string = """ - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host4)(PORT=1589)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name4)))""" - params = oracledb.ConnectParams() +def test_4506(test_env): + "4506 - confirm an exception is raised if using ipc protocol" + connect_string = """ + (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=my_view8)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name8)))""" + params = oracledb.ConnectParams() + with test_env.assert_raises_full_code("DPY-4021"): params.parse_connect_string(connect_string) - self.assertEqual(params.host, "my_host4") - self.assertEqual(params.port, 1589) - self.assertEqual(params.service_name, "my_service_name4") - def test_4504(self): - "4504 - test EasyConnect with protocol" - params = oracledb.ConnectParams() - params.parse_connect_string("tcps://my_host6/my_service_name6") - self.assertEqual(params.host, "my_host6") - self.assertEqual(params.service_name, "my_service_name6") - self.assertEqual(params.protocol, "tcps") - def test_4505(self): - "4505 - test EasyConnect with invalid protocol" +def test_4507(): + "4507 - connect descriptor with retry count and retry delay" + connect_string = """ + (DESCRIPTION=(RETRY_COUNT=6)(RETRY_DELAY=5) + (ADDRESS=(PROTOCOL=TCP)(HOST=my_host9)(PORT=1593)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name9)))""" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.retry_count == 6 + assert params.retry_delay == 5 + + +def test_4508(): + "4508 - connect descriptor with expire_time setting" + connect_string = """ + (DESCRIPTION=(EXPIRE_TIME=12) + (ADDRESS=(PROTOCOL=TCP)(HOST=my_host11)(PORT=1594)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name11))) + """ + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.expire_time == 12 + + +def test_4509(): + "4509 - connect descriptor with purity parameters" + for purity in oracledb.Purity: + if purity is oracledb.Purity.DEFAULT: + continue + cclass = f"cclass_4510_{purity.name}" + connect_string = f""" + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host12)(PORT=694)) + (CONNECT_DATA=(SERVICE_NAME=service_4510) + (POOL_CONNECTION_CLASS={cclass}) + (POOL_PURITY={purity.name})))""" params = oracledb.ConnectParams() - with self.assertRaisesFullCode("DPY-4021"): - params.parse_connect_string( 
- "invalid_proto://my_host7/my_service_name7" - ) + params.parse_connect_string(connect_string) + assert params.cclass == cclass + assert params.purity is purity + gen_connect_string = params.get_connect_string() + gen_params = oracledb.ConnectParams() + gen_params.parse_connect_string(gen_connect_string) + assert gen_params.cclass == cclass + assert gen_params.purity is purity + + +def test_4510(test_env): + "4510 - connect descriptor with invalid pool purity" + connect_string = """ + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host13)(PORT=695)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name13) + (POOL_CONNECTION_CLASS=cclass_13)(POOL_PURITY=INVALID)))""" + params = oracledb.ConnectParams() + with test_env.assert_raises_full_code("DPY-4022"): + params.parse_connect_string(connect_string) - def test_4506(self): - "4506 - confirm an exception is raised if using ipc protocol" - connect_string = """ - (DESCRIPTION=(ADDRESS=(PROTOCOL=ipc)(KEY=my_view8)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name8)))""" - params = oracledb.ConnectParams() - with self.assertRaisesFullCode("DPY-4021"): - params.parse_connect_string(connect_string) - def test_4507(self): - "4507 - connect descriptor with retry count and retry delay" - connect_string = """ - (DESCRIPTION=(RETRY_COUNT=6)(RETRY_DELAY=5) - (ADDRESS=(PROTOCOL=TCP)(HOST=my_host9)(PORT=1593)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name9)))""" - params = oracledb.ConnectParams() +def test_4511(): + "4511 - connect descriptor with transport connect timeout values" + connect_string = """ + (DESCRIPTION=(TRANSPORT_CONNECT_TIMEOUT=500 ms) + (ADDRESS=(PROTOCOL=TCP)(HOST=my_host14)(PORT=695)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name14)))""" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.tcp_connect_timeout == 0.5 + connect_string = connect_string.replace("500 ms", "15 SEC") + params.parse_connect_string(connect_string) + assert params.tcp_connect_timeout == 15 + connect_string = connect_string.replace("15 SEC", "5 min") + params.parse_connect_string(connect_string) + assert params.tcp_connect_timeout == 300 + connect_string = connect_string.replace("5 min", "34") + params.parse_connect_string(connect_string) + assert params.tcp_connect_timeout == 34 + + +def test_4512(): + "4512 - test EasyConnect string parsing with no service name specified" + params = oracledb.ConnectParams() + params.parse_connect_string("my_host15:1578/") + assert params.host == "my_host15" + assert params.port == 1578 + assert params.service_name is None + + +def test_4513(): + "4513 - test EasyConnect string parsing with port value missing" + params = oracledb.ConnectParams() + params.parse_connect_string("my_host17:/my_service_name17") + assert params.host == "my_host17" + assert params.port == 1521 + assert params.service_name == "my_service_name17" + + +def test_4514(test_env): + "4514 - test connect descriptor with invalid number" + params = oracledb.ConnectParams() + connect_string = """ + (DESCRIPTION=(RETRY_COUNT=wrong)(RETRY_DELAY=5) + (ADDRESS=(PROTOCOL=TCP)(HOST=my_host18)(PORT=1598)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name18)))""" + with test_env.assert_raises_full_code("DPY-4018"): params.parse_connect_string(connect_string) - self.assertEqual(params.retry_count, 6) - self.assertEqual(params.retry_delay, 5) - - def test_4508(self): - "4508 - connect descriptor with expire_time setting" - connect_string = """ - (DESCRIPTION=(EXPIRE_TIME=12) - (ADDRESS=(PROTOCOL=TCP)(HOST=my_host11)(PORT=1594)) - 
(CONNECT_DATA=(SERVICE_NAME=my_service_name11))) - """ + + +def test_4515(): + "4515 - test connect descriptor with security options" + options = [ + ("CN=unknown19a", "/tmp/wallet_loc19a", "On", True), + ("CN=unknown19b", "/tmp/wallet_loc19b", "False", False), + ("CN=unknown19c", "/tmp/wallet_loc19c", "Off", False), + ("CN=unknown19d", "/tmp/wallet_loc19d", "True", True), + ("CN=unknown19e", "/tmp/wallet_loc19e", "yes", True), + ("CN=unknown19f", "/tmp/wallet_loc19f", "no", False), + ] + for dn, wallet_loc, match_option, match_value in options: params = oracledb.ConnectParams() + connect_string = f""" + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host19)(PORT=872)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name19)) + (SECURITY=(SSL_SERVER_CERT_DN="{dn}") + (SSL_SERVER_DN_MATCH={match_option}) + (MY_WALLET_DIRECTORY="{wallet_loc}")))""" params.parse_connect_string(connect_string) - self.assertEqual(params.expire_time, 12) - - def test_4509(self): - "4509 - connect descriptor with purity parameters" - for purity in oracledb.Purity: - if purity is oracledb.Purity.DEFAULT: - continue - cclass = f"cclass_4510_{purity.name}" - connect_string = f""" - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host12)(PORT=694)) - (CONNECT_DATA=(SERVICE_NAME=service_4510) - (POOL_CONNECTION_CLASS={cclass}) - (POOL_PURITY={purity.name})))""" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.cclass, cclass) - self.assertIs(params.purity, purity) - gen_connect_string = params.get_connect_string() - gen_params = oracledb.ConnectParams() - gen_params.parse_connect_string(gen_connect_string) - self.assertEqual(gen_params.cclass, cclass) - self.assertIs(gen_params.purity, purity) - - def test_4510(self): - "4510 - connect descriptor with invalid pool purity" - connect_string = """ - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host13)(PORT=695)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name13) - (POOL_CONNECTION_CLASS=cclass_13)(POOL_PURITY=INVALID)))""" + assert params.ssl_server_cert_dn == dn + assert params.wallet_location == wallet_loc + assert params.ssl_server_dn_match == match_value + + +def test_4516(): + "4516 - test easy connect string with security options" + options = [ + ("CN=unknown20a", "/tmp/wallet_loc20a", "On", True), + ("CN=unknown20b", "/tmp/wallet_loc20b", "False", False), + ("CN=unknown20c", "/tmp/wallet_loc20c", "Off", False), + ("CN=unknown20d", "/tmp/wallet_loc20d", "True", True), + ("CN=unknown20e", "/tmp/wallet_loc20e", "yes", True), + ("CN=unknown20f", "/tmp/wallet_loc20f", "no", False), + ] + for dn, wallet_loc, match_option, match_value in options: params = oracledb.ConnectParams() - with self.assertRaisesFullCode("DPY-4022"): - params.parse_connect_string(connect_string) - - def test_4511(self): - "4511 - connect descriptor with transport connect timeout values" - connect_string = """ - (DESCRIPTION=(TRANSPORT_CONNECT_TIMEOUT=500 ms) - (ADDRESS=(PROTOCOL=TCP)(HOST=my_host14)(PORT=695)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name14)))""" + connect_string = f""" + my_host20/my_server_name20? 
+ ssl_server_cert_dn="{dn}"& + ssl_server_dn_match= {match_option} & + wallet_location = "{wallet_loc}" """ params = oracledb.ConnectParams() params.parse_connect_string(connect_string) - self.assertEqual(params.tcp_connect_timeout, 0.5) - connect_string = connect_string.replace("500 ms", "15 SEC") + assert params.ssl_server_cert_dn == dn + assert params.ssl_server_dn_match == match_value + assert params.wallet_location == wallet_loc + + +def test_4517(): + "4517 - test easy connect string with description options" + params = oracledb.ConnectParams() + connect_string = """ + my_host21/my_server_name21? + expire_time=5& + retry_delay=10& + retry_count=12& + transport_connect_timeout=2.5""" + params.parse_connect_string(connect_string) + assert params.expire_time == 5 + assert params.retry_delay == 10 + assert params.retry_count == 12 + assert params.tcp_connect_timeout == 2.5 + + +def test_4518(test_env): + "4518 - test easy connect string with invalid parameters" + params = oracledb.ConnectParams() + connect_string_prefix = "my_host22/my_server_name22?" + suffixes = ["expire_time=invalid", "expire_time"] + for suffix in suffixes: + with test_env.assert_raises_full_code("DPY-4018"): + params.parse_connect_string(connect_string_prefix + suffix) + + +def test_4519(): + "4519 - test connect string containing spaces and newlines" + params = oracledb.ConnectParams() + connect_string = """ + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP) \n(HOST=my_host23)\n + (PORT=1560))(CONNECT_DATA= (SERVICE_NAME=my_service_name23)) + (SECURITY=(MY_WALLET_DIRECTORY="my wallet dir 23")))""" + params.parse_connect_string(connect_string) + assert params.host == "my_host23" + assert params.port == 1560 + assert params.service_name == "my_service_name23" + assert params.wallet_location == "my wallet dir 23" + + +def test_4520(test_env): + "4520 - test missing configuration directory" + params = oracledb.ConnectParams(config_dir="/missing") + with test_env.assert_raises_full_code("DPY-4026"): + params.parse_connect_string("tns_alias") + + +def test_4521(): + "4521 - test connect string with an address list" + params = oracledb.ConnectParams() + connect_string = ( + "(DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=5)(RETRY_DELAY=2)" + "(ADDRESS_LIST=(LOAD_BALANCE=ON)" + "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host25)(PORT=4521))" + "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host26)(PORT=4522)))" + "(CONNECT_DATA=(SERVICE_NAME=my_service_name25)))" + ) + params.parse_connect_string(connect_string) + assert params.host == ["my_host25", "my_host26"] + assert params.port == [4521, 4522] + assert params.protocol == ["tcp", "tcp"] + assert params.service_name == "my_service_name25" + assert params.retry_count == 5 + assert params.retry_delay == 2 + assert params.get_connect_string() == connect_string + + +def test_4522(): + "4522 - test connect string with multiple address lists" + params = oracledb.ConnectParams() + connect_string = """ + (DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=5)(RETRY_DELAY=2) + (ADDRESS_LIST=(LOAD_BALANCE=ON) + (ADDRESS=(PROTOCOL=tcp)(PORT=1521)(HOST=my_host26)) + (ADDRESS=(PROTOCOL=tcp)(PORT=222)(HOST=my_host27))) + (ADDRESS_LIST=(LOAD_BALANCE=ON) + (ADDRESS=(PROTOCOL=tcps)(PORT=5555)(HOST=my_host28)) + (ADDRESS=(PROTOCOL=tcps)(PORT=444)(HOST=my_host29))) + (CONNECT_DATA=(SERVICE_NAME=my_service_name26)))""" + params.parse_connect_string(connect_string) + hosts = ["my_host26", "my_host27", "my_host28", "my_host29"] + assert params.host == hosts + assert params.port == [1521, 222, 5555, 444] + assert params.protocol == ["tcp", 
"tcp", "tcps", "tcps"] + assert params.service_name == "my_service_name26" + assert params.retry_count == 5 + assert params.retry_delay == 2 + + +def test_4523(): + "4523 - test connect string with multiple descriptions" + params = oracledb.ConnectParams() + connect_string = """ + (DESCRIPTION_LIST=(FAIL_OVER=ON)(LOAD_BALANCE=OFF) + (DESCRIPTION=(LOAD_BALANCE=OFF)(RETRY_COUNT=1)(RETRY_DELAY=1) + (ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(PORT=5001) + (HOST=my_host30)) + (ADDRESS=(PROTOCOL=tcp)(PORT=1521)(HOST=my_host31))) + (ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(PORT=5002) + (HOST=my_host32)) + (ADDRESS=(PROTOCOL=tcp)(PORT=5003)(HOST=my_host33))) + (CONNECT_DATA=(SERVICE_NAME=my_service_name27))) + (DESCRIPTION=(LOAD_BALANCE=OFF)(RETRY_COUNT=2)(RETRY_DELAY=3) + (ADDRESS_LIST = (ADDRESS=(PROTOCOL=tcp)(PORT=5001) + (HOST=my_host34)) + (ADDRESS=(PROTOCOL=tcp)(PORT=5001)(HOST=my_host35))) + (ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(PORT=5001) + (HOST=my_host36)) + (ADDRESS=(PROTOCOL=tcps)(HOST=my_host37)(PORT=1521))) + (CONNECT_DATA=(SERVICE_NAME=my_service_name28))))""" + params.parse_connect_string(connect_string) + hosts = [ + "my_host30", + "my_host31", + "my_host32", + "my_host33", + "my_host34", + "my_host35", + "my_host36", + "my_host37", + ] + ports = [5001, 1521, 5002, 5003, 5001, 5001, 5001, 1521] + protocols = ["tcp", "tcp", "tcp", "tcp", "tcp", "tcp", "tcp", "tcps"] + service_names = ["my_service_name27", "my_service_name28"] + assert params.host == hosts + assert params.port == ports + assert params.protocol == protocols + assert params.service_name == service_names + assert params.retry_count == [1, 2] + assert params.retry_delay == [1, 3] + + +def test_4524(): + "4524 - test connect strings with https_proxy defined" + params = oracledb.ConnectParams() + connect_string = """ + (DESCRIPTION= + (ADDRESS=(HTTPS_PROXY=proxy_4528a)(HTTPS_PROXY_PORT=4528) + (PROTOCOL=TCP)(HOST=my_host4528a)(PORT=8528)) + (CONNECT_DATA=(SERVICE_NAME=my_service_name4528a)))""" + params.parse_connect_string(connect_string) + assert params.https_proxy == "proxy_4528a" + assert params.https_proxy_port == 4528 + connect_string = """ + tcps://my_host_4528b/my_service_name_4528b? 
+ https_proxy=proxy_4528b&https_proxy_port=9528""" + params.parse_connect_string(connect_string) + assert params.https_proxy == "proxy_4528b" + assert params.https_proxy_port == 9528 + + +def test_4525(test_env): + "4525 - test connect strings with server_type defined" + params = oracledb.ConnectParams() + connect_string = """ + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host4529)(PORT=4529)) + (CONNECT_DATA=(SERVER=DEDICATED) + (SERVICE_NAME=my_service_name4529)))""" + params.parse_connect_string(connect_string) + assert params.server_type == "dedicated" + connect_string = connect_string.replace("DEDICATED", "INVALID") + with test_env.assert_raises_full_code("DPY-4028"): params.parse_connect_string(connect_string) - self.assertEqual(params.tcp_connect_timeout, 15) - connect_string = connect_string.replace("15 SEC", "5 min") + + +def test_4526(): + "4526 - test writable parameters" + _test_writable_parameter("appcontext", [("a", "b", "c")]) + _test_writable_parameter("config_dir", "config_dir_4530") + _test_writable_parameter("disable_oob", True) + _test_writable_parameter("edition", "edition_4530") + _test_writable_parameter("events", True) + _test_writable_parameter("matchanytag", True) + _test_writable_parameter("mode", oracledb.AUTH_MODE_SYSDBA) + _test_writable_parameter("shardingkey", [1, 2, 3]) + _test_writable_parameter("stmtcachesize", 25) + _test_writable_parameter("supershardingkey", [1, 2, 3]) + _test_writable_parameter("tag", "tag_4530") + _test_writable_parameter("debug_jdwp", "host=host;port=4530") + _test_writable_parameter("externalauth", True) + _test_writable_parameter("user", "USER_1") + _test_writable_parameter("proxy_user", "PROXY_USER_1") + + +def test_4527(): + "4527 - test building connect string with TCP connect timeout" + host = "my_host4531" + service_name = "my_service4531" + options = [ + (25, "25"), + (120, "2min"), + (2.5, "2500ms"), + (3.4328, "3432ms"), + ] + for in_val, out_val in options: + params = oracledb.ConnectParams( + host=host, + service_name=service_name, + tcp_connect_timeout=in_val, + retry_delay=0, + ) + tcp_timeout_val = f"(TRANSPORT_CONNECT_TIMEOUT={out_val})" + connect_string = ( + f"(DESCRIPTION={tcp_timeout_val}" + "(ADDRESS=(PROTOCOL=tcp)" + f"(HOST={host})(PORT=1521))(CONNECT_DATA=" + f"(SERVICE_NAME={service_name})))" + ) + assert params.get_connect_string() == connect_string + + +def test_4528(): + "4528 - test EasyConnect with pool parameters" + options = [ + ("cclass_33a", "self", oracledb.PURITY_SELF), + ("cclass_33b", "new", oracledb.PURITY_NEW), + ] + for cclass, purity_str, purity_int in options: + connect_string = f""" + my_host_33/my_service_name_33:pooled? 
+ pool_connection_class={cclass}& + pool_purity={purity_str}""" + params = oracledb.ConnectParams() params.parse_connect_string(connect_string) - self.assertEqual(params.tcp_connect_timeout, 300) - connect_string = connect_string.replace("5 min", "34") + assert params.host == "my_host_33" + assert params.service_name == "my_service_name_33" + assert params.port == 1521 + assert params.server_type == "pooled" + assert params.cclass == cclass + assert params.purity == purity_int + + +def test_4529(): + "4529 - test connect descriptor with different containers (small 1st)" + connect_string = """ + (DESCRIPTION= + (ADDRESS=(PROTOCOL=tcp)(HOST=host1)(PORT=1521)) + (ADDRESS_LIST= + (ADDRESS=(PROTOCOL=tcp)(HOST=host2a)(PORT=1522)) + (ADDRESS=(PROTOCOL=tcp)(HOST=host2b)(PORT=1523))) + (ADDRESS=(PROTOCOL=tcp)(HOST=host3)(PORT=1524)) + (CONNECT_DATA=(SERVICE_NAME=my_service_34)))""" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.host == ["host1", "host2a", "host2b", "host3"] + + +def test_4530(): + "4530 - test connect descriptor with different containers (small 2nd)" + connect_string = """ + (DESCRIPTION= + (ADDRESS_LIST= + (ADDRESS=(PROTOCOL=tcp)(HOST=host1a)(PORT=1532)) + (ADDRESS=(PROTOCOL=tcp)(HOST=host1b)(PORT=1533))) + (ADDRESS=(PROTOCOL=tcp)(HOST=host2)(PORT=1534)) + (ADDRESS_LIST= + (ADDRESS=(PROTOCOL=tcp)(HOST=host3a)(PORT=1535)) + (ADDRESS=(PROTOCOL=tcp)(HOST=host3b)(PORT=1536))) + (CONNECT_DATA=(SERVICE_NAME=my_service_34)))""" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.host == ["host1a", "host1b", "host2", "host3a", "host3b"] + + +def test_4531(): + "4531 - test building connect string with source route designation" + options = [ + ("on", True), + ("off", False), + ("true", True), + ("false", False), + ("yes", True), + ("no", False), + ] + + for in_val, has_section in options: + connect_string = f""" + (DESCRIPTION= + (RETRY_DELAY=0) + (SOURCE_ROUTE={in_val}) + (ADDRESS=(PROTOCOL=tcp)(HOST=host1)(PORT=1521)) + (ADDRESS=(PROTOCOL=tcp)(HOST=host2)(PORT=1522)) + (CONNECT_DATA=(SERVICE_NAME=my_service_35)))""" + params = oracledb.ConnectParams() params.parse_connect_string(connect_string) - self.assertEqual(params.tcp_connect_timeout, 34) + source_route_clause = "(SOURCE_ROUTE=ON)" if has_section else "" + connect_string = ( + f"(DESCRIPTION={source_route_clause}" + f"(ADDRESS_LIST={source_route_clause}" + "(ADDRESS=(PROTOCOL=tcp)(HOST=host1)(PORT=1521))" + "(ADDRESS=(PROTOCOL=tcp)(HOST=host2)(PORT=1522)))" + "(CONNECT_DATA=(SERVICE_NAME=my_service_35)))" + ) + assert params.get_connect_string() == connect_string - def test_4512(self): - "4512 - test EasyConnect string parsing with no service name specified" - params = oracledb.ConnectParams() - params.parse_connect_string("my_host15:1578/") - self.assertEqual(params.host, "my_host15") - self.assertEqual(params.port, 1578) - self.assertEqual(params.service_name, None) - def test_4513(self): - "4513 - test EasyConnect string parsing with port value missing" - params = oracledb.ConnectParams() - params.parse_connect_string("my_host17:/my_service_name17") - self.assertEqual(params.host, "my_host17") - self.assertEqual(params.port, 1521) - self.assertEqual(params.service_name, "my_service_name17") +def test_4532(): + "4532 - test connect parameters which generate no connect string" + params = oracledb.ConnectParams() + assert params.get_connect_string() is None + params.set(mode=oracledb.SYSDBA) + assert params.get_connect_string() is None 
- def test_4514(self): - "4514 - test connect descriptor with invalid number" - params = oracledb.ConnectParams() - connect_string = """ - (DESCRIPTION=(RETRY_COUNT=wrong)(RETRY_DELAY=5) - (ADDRESS=(PROTOCOL=TCP)(HOST=my_host18)(PORT=1598)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name18)))""" - with self.assertRaisesFullCode("DPY-4018"): - params.parse_connect_string(connect_string) - def test_4515(self): - "4515 - test connect descriptor with security options" - options = [ - ("CN=unknown19a", "/tmp/wallet_loc19a", "On", True), - ("CN=unknown19b", "/tmp/wallet_loc19b", "False", False), - ("CN=unknown19c", "/tmp/wallet_loc19c", "Off", False), - ("CN=unknown19d", "/tmp/wallet_loc19d", "True", True), - ("CN=unknown19e", "/tmp/wallet_loc19e", "yes", True), - ("CN=unknown19f", "/tmp/wallet_loc19f", "no", False), - ] - for dn, wallet_loc, match_option, match_value in options: - params = oracledb.ConnectParams() - connect_string = f""" - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host19)(PORT=872)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name19)) - (SECURITY=(SSL_SERVER_CERT_DN="{dn}") - (SSL_SERVER_DN_MATCH={match_option}) - (MY_WALLET_DIRECTORY="{wallet_loc}")))""" - params.parse_connect_string(connect_string) - self.assertEqual(params.ssl_server_cert_dn, dn) - self.assertEqual(params.wallet_location, wallet_loc) - self.assertEqual(params.ssl_server_dn_match, match_value) - - def test_4516(self): - "4516 - test easy connect string with security options" - options = [ - ("CN=unknown20a", "/tmp/wallet_loc20a", "On", True), - ("CN=unknown20b", "/tmp/wallet_loc20b", "False", False), - ("CN=unknown20c", "/tmp/wallet_loc20c", "Off", False), - ("CN=unknown20d", "/tmp/wallet_loc20d", "True", True), - ("CN=unknown20e", "/tmp/wallet_loc20e", "yes", True), - ("CN=unknown20f", "/tmp/wallet_loc20f", "no", False), - ] - for dn, wallet_loc, match_option, match_value in options: - params = oracledb.ConnectParams() - connect_string = f""" - my_host20/my_server_name20? - ssl_server_cert_dn="{dn}"& - ssl_server_dn_match= {match_option} & - wallet_location = "{wallet_loc}" """ - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.ssl_server_cert_dn, dn) - self.assertEqual(params.ssl_server_dn_match, match_value) - self.assertEqual(params.wallet_location, wallet_loc) +def test_4533(): + "4533 - test parsing a DSN with credentials and a connect string" + params = oracledb.ConnectParams() + dsn = "my_user4538/my_password4538@localhost:1525/my_service_name" + user, password, dsn = params.parse_dsn_with_credentials(dsn) + assert user == "my_user4538" + assert password == "my_password4538" + assert dsn == "localhost:1525/my_service_name" - def test_4517(self): - "4517 - test easy connect string with description options" - params = oracledb.ConnectParams() - connect_string = """ - my_host21/my_server_name21? - expire_time=5& - retry_delay=10& - retry_count=12& - transport_connect_timeout=2.5""" - params.parse_connect_string(connect_string) - self.assertEqual(params.expire_time, 5) - self.assertEqual(params.retry_delay, 10) - self.assertEqual(params.retry_count, 12) - self.assertEqual(params.tcp_connect_timeout, 2.5) - def test_4518(self): - "4518 - test easy connect string with invalid parameters" - params = oracledb.ConnectParams() - connect_string_prefix = "my_host22/my_server_name22?" 
- suffixes = ["expire_time=invalid", "expire_time"] - for suffix in suffixes: - with self.assertRaisesFullCode("DPY-4018"): - params.parse_connect_string(connect_string_prefix + suffix) - - def test_4519(self): - "4519 - test connect string containing spaces and newlines" - params = oracledb.ConnectParams() - connect_string = """ - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP) \n(HOST=my_host23)\n - (PORT=1560))(CONNECT_DATA= (SERVICE_NAME=my_service_name23)) - (SECURITY=(MY_WALLET_DIRECTORY="my wallet dir 23")))""" - params.parse_connect_string(connect_string) - self.assertEqual(params.host, "my_host23") - self.assertEqual(params.port, 1560) - self.assertEqual(params.service_name, "my_service_name23") - self.assertEqual(params.wallet_location, "my wallet dir 23") - - def test_4520(self): - "4520 - test missing configuration directory" - params = oracledb.ConnectParams(config_dir="/missing") - with self.assertRaisesFullCode("DPY-4026"): - params.parse_connect_string("tns_alias") - - def test_4521(self): - "4521 - test connect string with an address list" +def test_4534(): + "4534 - test parsing a DSN with only credentials" + params = oracledb.ConnectParams() + dsn = "my_user4539/my_password4539" + user, password, dsn = params.parse_dsn_with_credentials(dsn) + assert user == "my_user4539" + assert password == "my_password4539" + assert dsn is None + + +def test_4535(): + "4535 - test parsing a DSN with empty credentials" + for dsn in ("", "/"): params = oracledb.ConnectParams() + user, password, dsn = params.parse_dsn_with_credentials(dsn) + assert user is None + assert password is None + assert dsn is None + + +def test_4536(): + "4536 - test parsing a DSN with no credentials" + dsn_in = "my_alias_4561" + params = oracledb.ConnectParams() + user, password, dsn_out = params.parse_dsn_with_credentials(dsn_in) + assert user is None + assert password is None + assert dsn_out == dsn_in + + +def test_4537(): + "4537 - test connect strings with connection_id_prefix defined" + params = oracledb.ConnectParams() + connect_string = """ + (DESCRIPTION= + (ADDRESS=(PROTOCOL=TCP)(HOST=my_host4562a)(PORT=4562)) + (CONNECT_DATA=(CONNECTION_ID_PREFIX=prefix4562a) + (SERVICE_NAME=my_service_name4562a)))""" + params.parse_connect_string(connect_string) + assert params.connection_id_prefix == "prefix4562a" + params = oracledb.ConnectParams() + params.set(connection_id_prefix="prefix4562b") + params.parse_connect_string("my_host4562b/my_service_name_4562b") + assert params.connection_id_prefix == "prefix4562b" + + +def test_4538(): + "4538 - test overriding parameters" + params = oracledb.ConnectParams() + host = "my_host_4538" + port = 3578 + service_name = "my_service_name_4538" + connect_string = f"{host}:{port}/{service_name}" + params.parse_connect_string(connect_string) + assert params.service_name == service_name + assert params.port == port + new_service_name = "new_service_name_4538" + new_port = 613 + params.set(service_name=new_service_name, port=new_port) + assert params.service_name == new_service_name + assert params.port == new_port + + +def test_4539(): + "4539 - test ConnectParams repr()" + values = [ + ("user", "USER_1"), + ("proxy_user", "PROXY_USER_1"), + ("host", "my_host_1"), + ("port", 1521), + ("protocol", "tcp"), + ("https_proxy", "proxy_a"), + ("https_proxy_port", 4528), + ("service_name", "my_service_name1"), + ("instance_name", "my_instance_name"), + ("sid", "my_sid1"), + ("server_type", "dedicated"), + ("cclass", "cclass_1"), + ("purity", oracledb.PURITY_SELF), + ("expire_time", 60), + 
("retry_count", 6), + ("retry_delay", 10), + ("tcp_connect_timeout", 40.0), + ("ssl_server_dn_match", False), + ("ssl_server_cert_dn", "CN=unknown19a"), + ("wallet_location", "/tmp/wallet_loc1a"), + ("events", True), + ("externalauth", True), + ("mode", oracledb.AUTH_MODE_SYSDBA), + ("disable_oob", True), + ("stmtcachesize", 25), + ("edition", "edition_4"), + ("tag", "tag4"), + ("matchanytag", True), + ("config_dir", "config_dir_4"), + ("appcontext", [("a", "b", "c")]), + ("shardingkey", [1, 2, 3]), + ("supershardingkey", [4]), + ("debug_jdwp", "host=host;port=4538"), + ("connection_id_prefix", "prefix4564"), + ("ssl_context", None), + ("sdu", 16384), + ("pool_boundary", "statement"), + ("use_tcp_fast_open", True), + ("ssl_version", ssl.TLSVersion.TLSv1_2), + ("program", "my_program"), + ("machine", "my_machine"), + ("terminal", "my_terminal"), + ("osuser", "me"), + ("driver_name", "custom_driver"), + ("use_sni", True), + ("thick_mode_dsn_passthrough", True), + ("extra_auth_params", dict(extra1="A", extra2="B")), + ("pool_name", "my_pool"), + ] + params = oracledb.ConnectParams(**dict(values)) + parts = [f"{name}={value!r}" for name, value in values] + expected_value = f"ConnectParams({', '.join(parts)})" + assert repr(params) == expected_value + assert params.purity is oracledb.Purity.SELF + assert params.mode is oracledb.AuthMode.SYSDBA + new_values = [ + ("user", "USER_NEW"), + ("proxy_user", "PROXY_USER_NEW"), + ("host", "my_host_new"), + ("port", 1621), + ("protocol", "tcps"), + ("https_proxy", "proxy_b"), + ("https_proxy_port", 4529), + ("service_name", "my_service_name_new"), + ("instance_name", "my_instance_name_new"), + ("sid", "my_sid_new"), + ("server_type", "pooled"), + ("cclass", "cclass_new"), + ("purity", oracledb.PURITY_NEW), + ("expire_time", 90), + ("retry_count", 8), + ("retry_delay", 15), + ("tcp_connect_timeout", 15.0), + ("ssl_server_dn_match", True), + ("ssl_server_cert_dn", "CN=unknown19_new"), + ("wallet_location", "/tmp/wallet_loc1_new"), + ("events", False), + ("externalauth", False), + ("mode", oracledb.AUTH_MODE_SYSDGD), + ("disable_oob", False), + ("stmtcachesize", 35), + ("edition", "edition_new"), + ("tag", "tag_new"), + ("matchanytag", False), + ("config_dir", "config_dir_new"), + ("appcontext", [("a", "b", "c"), ("d", "e", "f")]), + ("shardingkey", [1, 2, 3, 4]), + ("supershardingkey", [6]), + ("debug_jdwp", "host=host;port=4638"), + ("connection_id_prefix", "prefix4664"), + ("ssl_context", ssl.create_default_context()), + ("sdu", 32768), + ("pool_boundary", "transaction"), + ("use_tcp_fast_open", False), + ("ssl_version", ssl.TLSVersion.TLSv1_2), + ("program", "modified_program"), + ("machine", "modified_machine"), + ("terminal", "modified_terminal"), + ("osuser", "modified_osuser"), + ("driver_name", "modified_driver_name"), + ("use_sni", False), + ("thick_mode_dsn_passthrough", False), + ("extra_auth_params", dict(extra1="X", extra2="Y")), + ("pool_name", "my_second_pool"), + ] + params.set(**dict(new_values)) + parts = [f"{name}={value!r}" for name, value in new_values] + expected_value = f"ConnectParams({', '.join(parts)})" + assert repr(params) == expected_value + cs_values = dict( + host="my_host_final", + service_name="my_service_final", + ) + connect_string = f"{cs_values['host']}/{cs_values['service_name']}" + params.parse_connect_string(connect_string) + final_values = [(n, cs_values.get(n, v)) for n, v in new_values] + parts = [f"{name}={value!r}" for name, value in final_values] + expected_value = f"ConnectParams({', '.join(parts)})" + assert 
repr(params) == expected_value + + +def test_4540(): + "4540 - connect descriptor with SDU" + connect_string = """ + (DESCRIPTION=(SDU=65535)(ADDRESS=(PROTOCOL=TCP) + (HOST=my_host1)(PORT=1589)))""" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.sdu == 65535 + + +def test_4541(): + "4541 - test that SDU is set correctly with invalid sizes" + params = oracledb.ConnectParams() + params.set(sdu=random.randint(0, 511)) + assert params.sdu == 512 + params.set(sdu=2097153) + assert params.sdu == 2097152 + + +def test_4542(): + "4542 - test empty connection class" + params = oracledb.ConnectParams() + assert params.cclass is None + params.set(cclass="") + assert params.cclass is None + + +def test_4543(): + "4543 - test easy connect string with protocol specified" + protocol = "tcp" + host = "my_host_4568" + port = 1668 + service_name = "my_service_4568" + connect_string = f"{protocol}://{host}:{port}/{service_name}" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.protocol == protocol + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_4544(): + "4544 - calling set() doesn't clear object parameters" + sharding_key = [1, 2, 3] + super_sharding_key = [4, 5, 6] + app_context = [("NAMESPACE", "KEY", "VALUE")] + ssl_context = ssl.create_default_context() + params = oracledb.ConnectParams( + shardingkey=sharding_key, + supershardingkey=super_sharding_key, + appcontext=app_context, + ssl_context=ssl_context, + ) + assert params.appcontext == app_context + assert params.shardingkey == sharding_key + assert params.supershardingkey == super_sharding_key + assert params.ssl_context == ssl_context + user = "user_4571" + params.set(user=user) + assert params.user == user + assert params.appcontext == app_context + assert params.shardingkey == sharding_key + assert params.supershardingkey == super_sharding_key + assert params.ssl_context == ssl_context + + +def test_4545(): + "4545 - test that use_tcp_fast_open is set correctly" + params = oracledb.ConnectParams() + params.set(use_tcp_fast_open=True) + assert params.use_tcp_fast_open + params.set(use_tcp_fast_open=False) + assert not params.use_tcp_fast_open + params.set(use_tcp_fast_open="True") + assert params.use_tcp_fast_open + params.set(use_tcp_fast_open="False") + assert not params.use_tcp_fast_open + params.set(use_tcp_fast_open=None) + assert not params.use_tcp_fast_open + params.set(use_tcp_fast_open=1) + assert params.use_tcp_fast_open + + +def test_4546(test_env): + "4546 - test connect descriptor without addresses defined" + params = oracledb.ConnectParams() + host = "host_4546" + port = 4546 + service_name = "service_name_4546" + ok_container_names = ("DESCRIPTION", "ADDRESS") + options = [ + ("DESRIPTION", "ADDRESS"), + ok_container_names, + ("DESCRIPTION", "ADRESS"), + ] + for option in options: + desc_name, addr_name = option connect_string = ( - "(DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=5)(RETRY_DELAY=2)" - "(ADDRESS_LIST=(LOAD_BALANCE=ON)" - "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host25)(PORT=4521))" - "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host26)(PORT=4522)))" - "(CONNECT_DATA=(SERVICE_NAME=my_service_name25)))" + f"({desc_name}=({addr_name}=(PROTOCOL=TCP)(HOST={host})" + f"(PORT={port}))(CONNECT_DATA=(SERVICE_NAME={service_name})))" ) - params.parse_connect_string(connect_string) - self.assertEqual(params.host, ["my_host25", "my_host26"]) - self.assertEqual(params.port, [4521, 4522]) - 
self.assertEqual(params.protocol, ["tcp", "tcp"]) - self.assertEqual(params.service_name, "my_service_name25") - self.assertEqual(params.retry_count, 5) - self.assertEqual(params.retry_delay, 2) - self.assertEqual(params.get_connect_string(), connect_string) - - def test_4522(self): - "4522 - test connect string with multiple address lists" - params = oracledb.ConnectParams() - connect_string = """ - (DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=5)(RETRY_DELAY=2) - (ADDRESS_LIST=(LOAD_BALANCE=ON) - (ADDRESS=(PROTOCOL=tcp)(PORT=1521)(HOST=my_host26)) - (ADDRESS=(PROTOCOL=tcp)(PORT=222)(HOST=my_host27))) - (ADDRESS_LIST=(LOAD_BALANCE=ON) - (ADDRESS=(PROTOCOL=tcps)(PORT=5555)(HOST=my_host28)) - (ADDRESS=(PROTOCOL=tcps)(PORT=444)(HOST=my_host29))) - (CONNECT_DATA=(SERVICE_NAME=my_service_name26)))""" - params.parse_connect_string(connect_string) - hosts = ["my_host26", "my_host27", "my_host28", "my_host29"] - self.assertEqual(params.host, hosts) - self.assertEqual(params.port, [1521, 222, 5555, 444]) - self.assertEqual(params.protocol, ["tcp", "tcp", "tcps", "tcps"]) - self.assertEqual(params.service_name, "my_service_name26") - self.assertEqual(params.retry_count, 5) - self.assertEqual(params.retry_delay, 2) - - def test_4523(self): - "4523 - test connect string with multiple descriptions" - params = oracledb.ConnectParams() - connect_string = """ - (DESCRIPTION_LIST=(FAIL_OVER=ON)(LOAD_BALANCE=OFF) - (DESCRIPTION=(LOAD_BALANCE=OFF)(RETRY_COUNT=1)(RETRY_DELAY=1) - (ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(PORT=5001) - (HOST=my_host30)) - (ADDRESS=(PROTOCOL=tcp)(PORT=1521)(HOST=my_host31))) - (ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(PORT=5002) - (HOST=my_host32)) - (ADDRESS=(PROTOCOL=tcp)(PORT=5003)(HOST=my_host33))) - (CONNECT_DATA=(SERVICE_NAME=my_service_name27))) - (DESCRIPTION=(LOAD_BALANCE=OFF)(RETRY_COUNT=2)(RETRY_DELAY=3) - (ADDRESS_LIST = (ADDRESS=(PROTOCOL=tcp)(PORT=5001) - (HOST=my_host34)) - (ADDRESS=(PROTOCOL=tcp)(PORT=5001)(HOST=my_host35))) - (ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(PORT=5001) - (HOST=my_host36)) - (ADDRESS=(PROTOCOL=tcps)(HOST=my_host37)(PORT=1521))) - (CONNECT_DATA=(SERVICE_NAME=my_service_name28))))""" - params.parse_connect_string(connect_string) - hosts = [ - "my_host30", - "my_host31", - "my_host32", - "my_host33", - "my_host34", - "my_host35", - "my_host36", - "my_host37", - ] - ports = [5001, 1521, 5002, 5003, 5001, 5001, 5001, 1521] - protocols = ["tcp", "tcp", "tcp", "tcp", "tcp", "tcp", "tcp", "tcps"] - service_names = ["my_service_name27", "my_service_name28"] - self.assertEqual(params.host, hosts) - self.assertEqual(params.port, ports) - self.assertEqual(params.protocol, protocols) - self.assertEqual(params.service_name, service_names) - self.assertEqual(params.retry_count, [1, 2]) - self.assertEqual(params.retry_delay, [1, 3]) - - def test_4524(self): - "4524 - test connect strings with https_proxy defined" params = oracledb.ConnectParams() - connect_string = """ - (DESCRIPTION= - (ADDRESS=(HTTPS_PROXY=proxy_4528a)(HTTPS_PROXY_PORT=4528) - (PROTOCOL=TCP)(HOST=my_host4528a)(PORT=8528)) - (CONNECT_DATA=(SERVICE_NAME=my_service_name4528a)))""" - params.parse_connect_string(connect_string) - self.assertEqual(params.https_proxy, "proxy_4528a") - self.assertEqual(params.https_proxy_port, 4528) - connect_string = """ - tcps://my_host_4528b/my_service_name_4528b? 
- https_proxy=proxy_4528b&https_proxy_port=9528""" + if option == ok_container_names: + params.parse_connect_string(connect_string) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + else: + with test_env.assert_raises_full_code("DPY-2049"): + params.parse_connect_string(connect_string) + + +def test_4547(): + "4547 - test simple EasyConnect string parsing with IPv6 address" + host = "::1" + port = 4547 + service_name = "service_name_4547" + connect_string = f"[{host}]:{port}/{service_name}" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_4548(): + "4548 - test easy connect string with multiple hosts, different ports" + connect_string = ( + "host4548a,host4548b:4548,host4548c,host4548d:4549/" + "service_name_4548" + ) + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.host == ["host4548a", "host4548b", "host4548c", "host4548d"] + assert params.port == [4548, 4548, 4549, 4549] + assert params.service_name == "service_name_4548" + + +def test_4549(): + "4549 - test easy connect string with multiple address lists" + connect_string = ( + "host4549a;host4549b,host4549c:4549;host4549d/service_name_4549" + ) + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.host == ["host4549a", "host4549b", "host4549c", "host4549d"] + assert params.port == [1521, 4549, 4549, 1521] + assert params.service_name == "service_name_4549" + expected_conn_string = ( + "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=host4549a)(PORT=1521))" + "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=host4549b)(PORT=4549))" + "(ADDRESS=(PROTOCOL=tcp)(HOST=host4549c)(PORT=4549)))" + "(ADDRESS=(PROTOCOL=tcp)(HOST=host4549d)(PORT=1521))" + "(CONNECT_DATA=(SERVICE_NAME=service_name_4549)))" + ) + assert params.get_connect_string() == expected_conn_string + + +def test_4550(test_env): + "4550 - test connect descriptor with mixed complex and simple data" + connect_string = ( + "(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=localhost)(PORT=1521))" + "(CONNECT_DATA=(SERVER=DEDICATED) SERVICE_NAME=orclpdb1))" + ) + params = oracledb.ConnectParams() + with test_env.assert_raises_full_code("DPY-4017"): params.parse_connect_string(connect_string) - self.assertEqual(params.https_proxy, "proxy_4528b") - self.assertEqual(params.https_proxy_port, 9528) - def test_4525(self): - "4525 - test connect strings with server_type defined" + +def test_4551(test_env): + "4551 - test connect descriptor with simple data for containers" + container_names = [ + "address", + "address_list", + "connect_data", + "description", + "description_list", + "security", + ] + for name in container_names: + connect_string = f"({name}=5)" params = oracledb.ConnectParams() - connect_string = """ - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=my_host4529)(PORT=4529)) - (CONNECT_DATA=(SERVER=DEDICATED) - (SERVICE_NAME=my_service_name4529)))""" - params.parse_connect_string(connect_string) - self.assertEqual(params.server_type, "dedicated") - connect_string = connect_string.replace("DEDICATED", "INVALID") - with self.assertRaisesFullCode("DPY-4028"): + with test_env.assert_raises_full_code("DPY-4017"): params.parse_connect_string(connect_string) - def test_4526(self): - "4526 - test writable parameters" - self.__test_writable_parameter("appcontext", [("a", "b", "c")]) - 
self.__test_writable_parameter("config_dir", "config_dir_4530") - self.__test_writable_parameter("disable_oob", True) - self.__test_writable_parameter("edition", "edition_4530") - self.__test_writable_parameter("events", True) - self.__test_writable_parameter("matchanytag", True) - self.__test_writable_parameter("mode", oracledb.AUTH_MODE_SYSDBA) - self.__test_writable_parameter("shardingkey", [1, 2, 3]) - self.__test_writable_parameter("stmtcachesize", 25) - self.__test_writable_parameter("supershardingkey", [1, 2, 3]) - self.__test_writable_parameter("tag", "tag_4530") - self.__test_writable_parameter("debug_jdwp", "host=host;port=4530") - self.__test_writable_parameter("externalauth", True) - self.__test_writable_parameter("user", "USER_1") - self.__test_writable_parameter("proxy_user", "PROXY_USER_1") - - def test_4527(self): - "4527 - test building connect string with TCP connect timeout" - host = "my_host4531" - service_name = "my_service4531" - options = [ - (25, "25"), - (120, "2min"), - (2.5, "2500ms"), - (3.4328, "3432ms"), - ] - for in_val, out_val in options: - params = oracledb.ConnectParams( - host=host, - service_name=service_name, - tcp_connect_timeout=in_val, - retry_delay=0, - ) - tcp_timeout_val = f"(TRANSPORT_CONNECT_TIMEOUT={out_val})" - connect_string = ( - f"(DESCRIPTION={tcp_timeout_val}" - "(ADDRESS=(PROTOCOL=tcp)" - f"(HOST={host})(PORT=1521))(CONNECT_DATA=" - f"(SERVICE_NAME={service_name})))" - ) - self.assertEqual(params.get_connect_string(), connect_string) - - def test_4528(self): - "4528 - test EasyConnect with pool parameters" - options = [ - ("cclass_33a", "self", oracledb.PURITY_SELF), - ("cclass_33b", "new", oracledb.PURITY_NEW), - ] - for cclass, purity_str, purity_int in options: - connect_string = f""" - my_host_33/my_service_name_33:pooled? 
- pool_connection_class={cclass}& - pool_purity={purity_str}""" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.host, "my_host_33") - self.assertEqual(params.service_name, "my_service_name_33") - self.assertEqual(params.port, 1521) - self.assertEqual(params.server_type, "pooled") - self.assertEqual(params.cclass, cclass) - self.assertEqual(params.purity, purity_int) - - def test_4529(self): - "4529 - test connect descriptor with different containers (small 1st)" - connect_string = """ - (DESCRIPTION= - (ADDRESS=(PROTOCOL=tcp)(HOST=host1)(PORT=1521)) - (ADDRESS_LIST= - (ADDRESS=(PROTOCOL=tcp)(HOST=host2a)(PORT=1522)) - (ADDRESS=(PROTOCOL=tcp)(HOST=host2b)(PORT=1523))) - (ADDRESS=(PROTOCOL=tcp)(HOST=host3)(PORT=1524)) - (CONNECT_DATA=(SERVICE_NAME=my_service_34)))""" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.host, ["host1", "host2a", "host2b", "host3"]) - def test_4530(self): - "4530 - test connect descriptor with different containers (small 2nd)" - connect_string = """ - (DESCRIPTION= - (ADDRESS_LIST= - (ADDRESS=(PROTOCOL=tcp)(HOST=host1a)(PORT=1532)) - (ADDRESS=(PROTOCOL=tcp)(HOST=host1b)(PORT=1533))) - (ADDRESS=(PROTOCOL=tcp)(HOST=host2)(PORT=1534)) - (ADDRESS_LIST= - (ADDRESS=(PROTOCOL=tcp)(HOST=host3a)(PORT=1535)) - (ADDRESS=(PROTOCOL=tcp)(HOST=host3b)(PORT=1536))) - (CONNECT_DATA=(SERVICE_NAME=my_service_34)))""" +def test_4552(): + "4552 - test easy connect string with degenerate protocol" + host = "host_4552" + port = 4552 + service_name = "service_name_4552" + connect_string = f"//{host}:{port}/{service_name}" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_4553(): + "4553 - test easy connect string with registered protocol" + protocol = "proto-test" + protocol_arg = "args/for/proto4553" + host = "host_4553" + service_name = "service_name_4553" + connect_string = f"{protocol}://{protocol_arg}" + + def hook(passed_protocol, passed_protocol_arg, passed_params): + assert passed_protocol == protocol + assert passed_protocol_arg == protocol_arg + new_connect_string = f"{host}/{service_name}" + passed_params.parse_connect_string(new_connect_string) + + try: + oracledb.register_protocol(protocol, hook) params = oracledb.ConnectParams() params.parse_connect_string(connect_string) - self.assertEqual( - params.host, ["host1a", "host1b", "host2", "host3a", "host3b"] - ) + assert params.host == host + assert params.service_name == service_name + finally: + oracledb.register_protocol(protocol, None) - def test_4531(self): - "4531 - test building connect string with source route designation" - options = [ - ("on", True), - ("off", False), - ("true", True), - ("false", False), - ("yes", True), - ("no", False), - ] - - for in_val, has_section in options: - connect_string = f""" - (DESCRIPTION= - (RETRY_DELAY=0) - (SOURCE_ROUTE={in_val}) - (ADDRESS=(PROTOCOL=tcp)(HOST=host1)(PORT=1521)) - (ADDRESS=(PROTOCOL=tcp)(HOST=host2)(PORT=1522)) - (CONNECT_DATA=(SERVICE_NAME=my_service_35)))""" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - source_route_clause = "(SOURCE_ROUTE=ON)" if has_section else "" - connect_string = ( - f"(DESCRIPTION={source_route_clause}" - f"(ADDRESS_LIST={source_route_clause}" - "(ADDRESS=(PROTOCOL=tcp)(HOST=host1)(PORT=1521))" - 
"(ADDRESS=(PROTOCOL=tcp)(HOST=host2)(PORT=1522)))" - "(CONNECT_DATA=(SERVICE_NAME=my_service_35)))" - ) - self.assertEqual(params.get_connect_string(), connect_string) - def test_4532(self): - "4532 - test connect parameters which generate no connect string" - params = oracledb.ConnectParams() - self.assertEqual(params.get_connect_string(), None) - params.set(mode=oracledb.SYSDBA) - self.assertEqual(params.get_connect_string(), None) +def test_4554(): + "4554 - test parsing a DSN with a protocol specified" + dsn_in = "my-protocol://some_arguments_to_protocol" + params = oracledb.ConnectParams() + user, password, dsn_out = params.parse_dsn_with_credentials(dsn_in) + assert user is None + assert password is None + assert dsn_out == dsn_in - def test_4533(self): - "4533 - test parsing a DSN with credentials and a connect string" - params = oracledb.ConnectParams() - dsn = "my_user4538/my_password4538@localhost:1525/my_service_name" - user, password, dsn = params.parse_dsn_with_credentials(dsn) - self.assertEqual(user, "my_user4538") - self.assertEqual(password, "my_password4538") - self.assertEqual(dsn, "localhost:1525/my_service_name") - def test_4534(self): - "4534 - test parsing a DSN with only credentials" - params = oracledb.ConnectParams() - dsn = "my_user4539/my_password4539" - user, password, dsn = params.parse_dsn_with_credentials(dsn) - self.assertEqual(user, "my_user4539") - self.assertEqual(password, "my_password4539") - self.assertEqual(dsn, None) +def test_4555(test_env): + "4555 - test program attribute" + _verify_network_name_attr(test_env, "program") - def test_4535(self): - "4535 - test parsing a DSN with empty credentials" - for dsn in ("", "/"): - params = oracledb.ConnectParams() - user, password, dsn = params.parse_dsn_with_credentials(dsn) - self.assertEqual(user, None) - self.assertEqual(password, None) - self.assertEqual(dsn, None) - - def test_4536(self): - "4536 - test parsing a DSN with no credentials" - dsn_in = "my_alias_4561" - params = oracledb.ConnectParams() - user, password, dsn_out = params.parse_dsn_with_credentials(dsn_in) - self.assertEqual(user, None) - self.assertEqual(password, None) - self.assertEqual(dsn_out, dsn_in) - def test_4537(self): - "4537 - test connect strings with connection_id_prefix defined" - params = oracledb.ConnectParams() - connect_string = """ - (DESCRIPTION= - (ADDRESS=(PROTOCOL=TCP)(HOST=my_host4562a)(PORT=4562)) - (CONNECT_DATA=(CONNECTION_ID_PREFIX=prefix4562a) - (SERVICE_NAME=my_service_name4562a)))""" - params.parse_connect_string(connect_string) - self.assertEqual(params.connection_id_prefix, "prefix4562a") - params = oracledb.ConnectParams() - params.set(connection_id_prefix="prefix4562b") - params.parse_connect_string("my_host4562b/my_service_name_4562b") - self.assertEqual(params.connection_id_prefix, "prefix4562b") +def test_4556(test_env): + "4556 - test machine attribute" + _verify_network_name_attr(test_env, "machine") - def test_4538(self): - "4538 - test overriding parameters" - params = oracledb.ConnectParams() - host = "my_host_4538" - port = 3578 - service_name = "my_service_name_4538" - connect_string = f"{host}:{port}/{service_name}" - params.parse_connect_string(connect_string) - self.assertEqual(params.service_name, service_name) - self.assertEqual(params.port, port) - new_service_name = "new_service_name_4538" - new_port = 613 - params.set(service_name=new_service_name, port=new_port) - self.assertEqual(params.service_name, new_service_name) - self.assertEqual(params.port, new_port) - - def test_4539(self): 
- "4539 - test ConnectParams repr()" - values = [ - ("user", "USER_1"), - ("proxy_user", "PROXY_USER_1"), - ("host", "my_host_1"), - ("port", 1521), - ("protocol", "tcp"), - ("https_proxy", "proxy_a"), - ("https_proxy_port", 4528), - ("service_name", "my_service_name1"), - ("instance_name", "my_instance_name"), - ("sid", "my_sid1"), - ("server_type", "dedicated"), - ("cclass", "cclass_1"), - ("purity", oracledb.PURITY_SELF), - ("expire_time", 60), - ("retry_count", 6), - ("retry_delay", 10), - ("tcp_connect_timeout", 40.0), - ("ssl_server_dn_match", False), - ("ssl_server_cert_dn", "CN=unknown19a"), - ("wallet_location", "/tmp/wallet_loc1a"), - ("events", True), - ("externalauth", True), - ("mode", oracledb.AUTH_MODE_SYSDBA), - ("disable_oob", True), - ("stmtcachesize", 25), - ("edition", "edition_4"), - ("tag", "tag4"), - ("matchanytag", True), - ("config_dir", "config_dir_4"), - ("appcontext", [("a", "b", "c")]), - ("shardingkey", [1, 2, 3]), - ("supershardingkey", [4]), - ("debug_jdwp", "host=host;port=4538"), - ("connection_id_prefix", "prefix4564"), - ("ssl_context", None), - ("sdu", 16384), - ("pool_boundary", "statement"), - ("use_tcp_fast_open", True), - ("ssl_version", ssl.TLSVersion.TLSv1_2), - ("program", "my_program"), - ("machine", "my_machine"), - ("terminal", "my_terminal"), - ("osuser", "me"), - ("driver_name", "custom_driver"), - ("use_sni", True), - ("thick_mode_dsn_passthrough", True), - ("extra_auth_params", dict(extra1="A", extra2="B")), - ("pool_name", "my_pool"), - ] - params = oracledb.ConnectParams(**dict(values)) - parts = [f"{name}={value!r}" for name, value in values] - expected_value = f"ConnectParams({', '.join(parts)})" - self.assertEqual(repr(params), expected_value) - self.assertIs(params.purity, oracledb.Purity.SELF) - self.assertIs(params.mode, oracledb.AuthMode.SYSDBA) - new_values = [ - ("user", "USER_NEW"), - ("proxy_user", "PROXY_USER_NEW"), - ("host", "my_host_new"), - ("port", 1621), - ("protocol", "tcps"), - ("https_proxy", "proxy_b"), - ("https_proxy_port", 4529), - ("service_name", "my_service_name_new"), - ("instance_name", "my_instance_name_new"), - ("sid", "my_sid_new"), - ("server_type", "pooled"), - ("cclass", "cclass_new"), - ("purity", oracledb.PURITY_NEW), - ("expire_time", 90), - ("retry_count", 8), - ("retry_delay", 15), - ("tcp_connect_timeout", 15.0), - ("ssl_server_dn_match", True), - ("ssl_server_cert_dn", "CN=unknown19_new"), - ("wallet_location", "/tmp/wallet_loc1_new"), - ("events", False), - ("externalauth", False), - ("mode", oracledb.AUTH_MODE_SYSDGD), - ("disable_oob", False), - ("stmtcachesize", 35), - ("edition", "edition_new"), - ("tag", "tag_new"), - ("matchanytag", False), - ("config_dir", "config_dir_new"), - ("appcontext", [("a", "b", "c"), ("d", "e", "f")]), - ("shardingkey", [1, 2, 3, 4]), - ("supershardingkey", [6]), - ("debug_jdwp", "host=host;port=4638"), - ("connection_id_prefix", "prefix4664"), - ("ssl_context", ssl.create_default_context()), - ("sdu", 32768), - ("pool_boundary", "transaction"), - ("use_tcp_fast_open", False), - ("ssl_version", ssl.TLSVersion.TLSv1_2), - ("program", "modified_program"), - ("machine", "modified_machine"), - ("terminal", "modified_terminal"), - ("osuser", "modified_osuser"), - ("driver_name", "modified_driver_name"), - ("use_sni", False), - ("thick_mode_dsn_passthrough", False), - ("extra_auth_params", dict(extra1="X", extra2="Y")), - ("pool_name", "my_second_pool"), - ] - params.set(**dict(new_values)) - parts = [f"{name}={value!r}" for name, value in new_values] - expected_value 
= f"ConnectParams({', '.join(parts)})" - self.assertEqual(repr(params), expected_value) - cs_values = dict( - host="my_host_final", - service_name="my_service_final", - ) - connect_string = f"{cs_values['host']}/{cs_values['service_name']}" - params.parse_connect_string(connect_string) - final_values = [(n, cs_values.get(n, v)) for n, v in new_values] - parts = [f"{name}={value!r}" for name, value in final_values] - expected_value = f"ConnectParams({', '.join(parts)})" - self.assertEqual(repr(params), expected_value) - - def test_4540(self): - "4540 - connect descriptor with SDU" - connect_string = """ - (DESCRIPTION=(SDU=65535)(ADDRESS=(PROTOCOL=TCP) - (HOST=my_host1)(PORT=1589)))""" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.sdu, 65535) - def test_4541(self): - "4541 - test that SDU is set correctly with invalid sizes" - params = oracledb.ConnectParams() - params.set(sdu=random.randint(0, 511)) - self.assertEqual(params.sdu, 512) - params.set(sdu=2097153) - self.assertEqual(params.sdu, 2097152) +def test_4557(test_env): + "4557 - test osuser attribute" + _verify_network_name_attr(test_env, "osuser") - def test_4542(self): - "4542 - test empty connection class" - params = oracledb.ConnectParams() - self.assertEqual(params.cclass, None) - params.set(cclass="") - self.assertEqual(params.cclass, None) - - def test_4543(self): - "4543 - test easy connect string with protocol specified" - protocol = "tcp" - host = "my_host_4568" - port = 1668 - service_name = "my_service_4568" - connect_string = f"{protocol}://{host}:{port}/{service_name}" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.protocol, protocol) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - - def test_4544(self): - "4544 - calling set() doesn't clear object parameters" - sharding_key = [1, 2, 3] - super_sharding_key = [4, 5, 6] - app_context = [("NAMESPACE", "KEY", "VALUE")] - ssl_context = ssl.create_default_context() - params = oracledb.ConnectParams( - shardingkey=sharding_key, - supershardingkey=super_sharding_key, - appcontext=app_context, - ssl_context=ssl_context, - ) - self.assertEqual(params.appcontext, app_context) - self.assertEqual(params.shardingkey, sharding_key) - self.assertEqual(params.supershardingkey, super_sharding_key) - self.assertEqual(params.ssl_context, ssl_context) - user = "user_4571" - params.set(user=user) - self.assertEqual(params.user, user) - self.assertEqual(params.appcontext, app_context) - self.assertEqual(params.shardingkey, sharding_key) - self.assertEqual(params.supershardingkey, super_sharding_key) - self.assertEqual(params.ssl_context, ssl_context) - - def test_4545(self): - "4545 - test that use_tcp_fast_open is set correctly" - params = oracledb.ConnectParams() - params.set(use_tcp_fast_open=True) - self.assertTrue(params.use_tcp_fast_open) - params.set(use_tcp_fast_open=False) - self.assertFalse(params.use_tcp_fast_open) - params.set(use_tcp_fast_open="True") - self.assertTrue(params.use_tcp_fast_open) - params.set(use_tcp_fast_open="False") - self.assertFalse(params.use_tcp_fast_open) - params.set(use_tcp_fast_open=None) - self.assertFalse(params.use_tcp_fast_open) - params.set(use_tcp_fast_open=1) - self.assertTrue(params.use_tcp_fast_open) - - def test_4546(self): - "4546 - test connect descriptor without addresses defined" - params = oracledb.ConnectParams() - host = "host_4546" 
- port = 4546 - service_name = "service_name_4546" - ok_container_names = ("DESCRIPTION", "ADDRESS") - options = [ - ("DESRIPTION", "ADDRESS"), - ok_container_names, - ("DESCRIPTION", "ADRESS"), - ] - for option in options: - desc_name, addr_name = option - connect_string = ( - f"({desc_name}=({addr_name}=(PROTOCOL=TCP)(HOST={host})" - f"(PORT={port}))(CONNECT_DATA=(SERVICE_NAME={service_name})))" - ) - params = oracledb.ConnectParams() - if option == ok_container_names: - params.parse_connect_string(connect_string) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - else: - with self.assertRaisesFullCode("DPY-2049"): - params.parse_connect_string(connect_string) - - def test_4547(self): - "4547 - test simple EasyConnect string parsing with IPv6 address" - host = "::1" - port = 4547 - service_name = "service_name_4547" - connect_string = f"[{host}]:{port}/{service_name}" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - def test_4548(self): - "4548 - test easy connect string with multiple hosts, different ports" - connect_string = ( - "host4548a,host4548b:4548,host4548c,host4548d:4549/" - "service_name_4548" - ) - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual( - params.host, ["host4548a", "host4548b", "host4548c", "host4548d"] - ) - self.assertEqual(params.port, [4548, 4548, 4549, 4549]) - self.assertEqual(params.service_name, "service_name_4548") +def test_4558(): + "4558 - test terminal attribute" + params = oracledb.ConnectParams() + assert params.terminal == oracledb.defaults.terminal + value = "myterminal" + params = oracledb.ConnectParams(terminal=value) + assert params.terminal == value - def test_4549(self): - "4549 - test easy connect string with multiple address lists" - connect_string = ( - "host4549a;host4549b,host4549c:4549;host4549d/service_name_4549" - ) - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual( - params.host, ["host4549a", "host4549b", "host4549c", "host4549d"] - ) - self.assertEqual(params.port, [1521, 4549, 4549, 1521]) - self.assertEqual(params.service_name, "service_name_4549") - expected_conn_string = ( - "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=host4549a)(PORT=1521))" - "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=host4549b)(PORT=4549))" - "(ADDRESS=(PROTOCOL=tcp)(HOST=host4549c)(PORT=4549)))" - "(ADDRESS=(PROTOCOL=tcp)(HOST=host4549d)(PORT=1521))" - "(CONNECT_DATA=(SERVICE_NAME=service_name_4549)))" - ) - self.assertEqual(params.get_connect_string(), expected_conn_string) - def test_4550(self): - "4550 - test connect descriptor with mixed complex and simple data" - connect_string = ( - "(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=localhost)(PORT=1521))" - "(CONNECT_DATA=(SERVER=DEDICATED) SERVICE_NAME=orclpdb1))" - ) - params = oracledb.ConnectParams() - with self.assertRaisesFullCode("DPY-4017"): - params.parse_connect_string(connect_string) +def test_4559(): + "4559 - test driver_name attribute" + params = oracledb.ConnectParams() + assert params.driver_name == oracledb.defaults.driver_name + value = "newdriver" + params = oracledb.ConnectParams(driver_name=value) + assert params.driver_name == value - def test_4551(self): - "4551 - test connect descriptor with simple data for containers" - container_names = [ - 
"address", - "address_list", - "connect_data", - "description", - "description_list", - "security", - ] - for name in container_names: - with self.subTest(name=name): - connect_string = f"({name}=5)" - params = oracledb.ConnectParams() - with self.assertRaisesFullCode("DPY-4017"): - params.parse_connect_string(connect_string) - - def test_4552(self): - "4552 - test easy connect string with degenerate protocol" - host = "host_4552" - port = 4552 - service_name = "service_name_4552" - connect_string = f"//{host}:{port}/{service_name}" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - - def test_4553(self): - "4553 - test easy connect string with registered protocol" - protocol = "proto-test" - protocol_arg = "args/for/proto4553" - host = "host_4553" - service_name = "service_name_4553" - connect_string = f"{protocol}://{protocol_arg}" - - def hook(passed_protocol, passed_protocol_arg, passed_params): - self.assertEqual(passed_protocol, protocol) - self.assertEqual(passed_protocol_arg, protocol_arg) - new_connect_string = f"{host}/{service_name}" - passed_params.parse_connect_string(new_connect_string) - try: +def test_4560(test_env): + "4560 - test register_protocol with invalid hook type" + + def hook1(protocol, protocol_arg, params, extra_invalid_param): + pass + + def hook2(passed_protocol): + pass + + protocol = "proto-test" + try: + for hook in [hook1, hook2]: oracledb.register_protocol(protocol, hook) params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.host, host) - self.assertEqual(params.service_name, service_name) - finally: - oracledb.register_protocol(protocol, None) + with test_env.assert_raises_full_code("DPY-2056"): + params.parse_connect_string(f"{protocol}://args") + finally: + oracledb.register_protocol(protocol, None) - def test_4554(self): - "4554 - test parsing a DSN with a protocol specified" - dsn_in = "my-protocol://some_arguments_to_protocol" - params = oracledb.ConnectParams() - user, password, dsn_out = params.parse_dsn_with_credentials(dsn_in) - self.assertEqual(user, None) - self.assertEqual(password, None) - self.assertEqual(dsn_out, dsn_in) - - def test_4555(self): - "4555 - test program attribute" - self.__verify_network_name_attr("program") - def test_4556(self): - "4556 - test machine attribute" - self.__verify_network_name_attr("machine") +def test_4561(): + "4561 - test register_protocol with invalid protocol type" + with pytest.raises(TypeError): + oracledb.register_protocol(1, lambda: None) + with pytest.raises(TypeError): + oracledb.register_protocol("proto", 5) - def test_4557(self): - "4557 - test osuser attribute" - self.__verify_network_name_attr("osuser") - def test_4558(self): - "4558 - test terminal attribute" - params = oracledb.ConnectParams() - self.assertEqual(params.terminal, oracledb.defaults.terminal) - value = "myterminal" - params = oracledb.ConnectParams(terminal=value) - self.assertEqual(params.terminal, value) +def test_4562(): + "4562 - test removing unregistered protocol" + with pytest.raises(KeyError): + oracledb.register_protocol("unregistered-protocol", None) - def test_4559(self): - "4559 - test driver_name attribute" - params = oracledb.ConnectParams() - self.assertEqual(params.driver_name, oracledb.defaults.driver_name) - value = "newdriver" - params = oracledb.ConnectParams(driver_name=value) - 
self.assertEqual(params.driver_name, value) - def test_4560(self): - "4560 - test register_protocol with invalid hook type" +def test_4563(): + "4563 - test restoring pre-registered protocols (tcp and tcps)" - def hook1(protocol, protocol_arg, params, extra_invalid_param): - pass + host = "host_4563" + port = 4563 + service_name = "service_4563" + user = "user_4563" - def hook2(passed_protocol): - pass + def hook(passed_protocol, passed_protocol_arg, passed_params): + passed_params.set(user=user) - protocol = "proto-test" + for protocol in ["tcp", "tcps"]: try: - for hook in [hook1, hook2]: - oracledb.register_protocol(protocol, hook) - params = oracledb.ConnectParams() - with self.assertRaisesFullCode("DPY-2056"): - params.parse_connect_string(f"{protocol}://args") - finally: - oracledb.register_protocol(protocol, None) - - def test_4561(self): - "4561 - test register_protocol with invalid protocol type" - with self.assertRaises(TypeError): - oracledb.register_protocol(1, lambda: None) - with self.assertRaises(TypeError): - oracledb.register_protocol("proto", 5) - - def test_4562(self): - "4562 - test removing unregistered protocol" - with self.assertRaises(KeyError): - oracledb.register_protocol("unregistered-protocol", None) - - def test_4563(self): - "4563 - test restoring pre-registered protocols (tcp and tcps)" - - host = "host_4565" - port = 4565 - service_name = "service_4565" - user = "user_4565" - - def hook(passed_protocol, passed_protocol_arg, passed_params): - passed_params.set(user=user) - - for protocol in ["tcp", "tcps"]: - try: - oracledb.register_protocol(protocol, hook) - connect_string = f"{protocol}://{host}:{port}/{service_name}" - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.user, user) - self.assertEqual(params.service_name, None) - finally: - oracledb.register_protocol(protocol, None) + oracledb.register_protocol(protocol, hook) + connect_string = f"{protocol}://{host}:{port}/{service_name}" params = oracledb.ConnectParams() params.parse_connect_string(connect_string) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - - def test_4564(self): - "4564 - test extended connect strings for ConnectParams" - test_scenarios = [ - ("cclass", "test_cclass", "test_cclass"), - ("connection_id_prefix", "cid_prefix", "cid_prefix"), - ("disable_oob", "true", True), - ("disable_oob", "off", False), - ("driver_name", "test_driver_name", "test_driver_name"), - ("edition", "test_edition", "test_edition"), - ("events", "on", True), - ("events", "false", False), - ("expire_time", "10", 10), - ("externalauth", "yes", True), - ("externalauth", "no", False), - ("https_proxy", "test_proxy", "test_proxy"), - ("https_proxy_port", "80", 80), - ("machine", "test_machine", "test_machine"), - ("machine", "test_machine", "test_machine"), - ("mode", "SYSDBA", oracledb.AUTH_MODE_SYSDBA), - ("osuser", "test_osuser", "test_osuser"), - ("pool_boundary", "statement", "statement"), - ("program", "test_program", "test_program"), - ("purity", "NEW", oracledb.PURITY_NEW), - ("retry_count", "5", 5), - ("retry_delay", "3", 3), - ("sdu", "16384", 16384), - ("ssl_server_cert_dn", "test_dn", "test_dn"), - ("ssl_server_dn_match", "on", True), - ("ssl_server_dn_match", "false", False), - ("stmtcachesize", "25", 25), - ("tcp_connect_timeout", "15", 15), - ("terminal", "test_terminal", "test_terminal"), - ("use_tcp_fast_open", "true", True), - ("use_tcp_fast_open", "off", 
False), - ("wallet_location", "test_location", "test_location"), - ] - host = "host_4564" - service_name = "service_4564" - for name, str_value, actual_value in test_scenarios: - conn_string = f"{host}/{service_name}?pyo.{name}={str_value}" - with self.subTest(name=name, value=str_value): - params = oracledb.ConnectParams() - params.parse_connect_string(conn_string) - self.assertEqual(params.host, host) - self.assertEqual(params.service_name, service_name) - self.assertEqual(getattr(params, name), actual_value) - - def test_4565(self): - "4565 - test set_from_config() with no user and password set" - user = "user_4565" - password = test_env.get_random_string() - options = [ - ("a", user, password), - ("b", user, None), - ("c", None, None), - ] - for option, user, password in options: - with self.subTest(option=option): - host = f"host_4565{option}" - service_name = f"service_4565{option}" - connect_string = f"{host}/{service_name}" - config = dict(connect_descriptor=connect_string) - if user is not None: - config["user"] = user - if password is not None: - config["password"] = dict( - type="base64", - value=base64.b64encode(password.encode()).decode(), - ) - params = oracledb.ConnectParams() - params.set_from_config(config) - self.assertEqual(params.host, host) - self.assertEqual(params.service_name, service_name) - if user is not None: - self.assertEqual(params.user, user) - - def test_4566(self): - "4566 - test set_from_config() with user and password already set" - host = "host_4566" - service_name = "service_4566" - connect_string = f"{host}/{service_name}" - user = "user_4566" - password = test_env.get_random_string() - config_user = "user_4566_in_config" - config_password = test_env.get_random_string() - config = dict( - connect_descriptor=connect_string, - user=config_user, - password=dict( - type="base64", - value=base64.b64encode(config_password.encode()).decode(), - ), - ) - params = oracledb.ConnectParams(user=user, password=password) - params.set_from_config(config) - self.assertEqual(params.host, host) - self.assertEqual(params.service_name, service_name) - self.assertEqual(params.user, user) - - def test_4567(self): - "4567 - test set_from_config() without connect_descriptor" + assert params.user == user + assert params.service_name is None + finally: + oracledb.register_protocol(protocol, None) params = oracledb.ConnectParams() - with self.assertRaisesFullCode("DPY-2059"): - params.set_from_config(dict(connect_descriptor_missing="missing")) - - def test_4568(self): - "4568 - test set_from_config() with extended parameters" - host = "host_4566" - service_name = "service_4566" + params.parse_connect_string(connect_string) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_4564(): + "4564 - test extended connect strings for ConnectParams" + test_scenarios = [ + ("cclass", "test_cclass", "test_cclass"), + ("connection_id_prefix", "cid_prefix", "cid_prefix"), + ("disable_oob", "true", True), + ("disable_oob", "off", False), + ("driver_name", "test_driver_name", "test_driver_name"), + ("edition", "test_edition", "test_edition"), + ("events", "on", True), + ("events", "false", False), + ("expire_time", "10", 10), + ("externalauth", "yes", True), + ("externalauth", "no", False), + ("https_proxy", "test_proxy", "test_proxy"), + ("https_proxy_port", "80", 80), + ("machine", "test_machine", "test_machine"), + ("machine", "test_machine", "test_machine"), + ("mode", "SYSDBA", oracledb.AUTH_MODE_SYSDBA), + ("osuser", 
"test_osuser", "test_osuser"), + ("pool_boundary", "statement", "statement"), + ("program", "test_program", "test_program"), + ("purity", "NEW", oracledb.PURITY_NEW), + ("retry_count", "5", 5), + ("retry_delay", "3", 3), + ("sdu", "16384", 16384), + ("ssl_server_cert_dn", "test_dn", "test_dn"), + ("ssl_server_dn_match", "on", True), + ("ssl_server_dn_match", "false", False), + ("stmtcachesize", "25", 25), + ("tcp_connect_timeout", "15", 15), + ("terminal", "test_terminal", "test_terminal"), + ("use_tcp_fast_open", "true", True), + ("use_tcp_fast_open", "off", False), + ("wallet_location", "test_location", "test_location"), + ] + host = "host_4564" + service_name = "service_4564" + for name, str_value, actual_value in test_scenarios: + conn_string = f"{host}/{service_name}?pyo.{name}={str_value}" + params = oracledb.ConnectParams() + params.parse_connect_string(conn_string) + assert params.host == host + assert params.service_name == service_name + assert getattr(params, name) == actual_value + + +@pytest.mark.filterwarnings("ignore:base64 encoded") +def test_4565(test_env): + "4565 - test set_from_config() with no user and password set" + user = "user_4565" + password = test_env.get_random_string() + options = [ + ("a", user, password), + ("b", user, None), + ("c", None, None), + ] + for option, user, password in options: + host = f"host_4565{option}" + service_name = f"service_4565{option}" connect_string = f"{host}/{service_name}" - stmtcachesize = 35 - user = "user_4566" - password = test_env.get_random_string() - config = dict( - connect_descriptor=connect_string, - user=user, - password=dict( + config = dict(connect_descriptor=connect_string) + if user is not None: + config["user"] = user + if password is not None: + config["password"] = dict( type="base64", value=base64.b64encode(password.encode()).decode(), - ), - pyo=dict(stmtcachesize=stmtcachesize), - ) - params = oracledb.ConnectParams(user=user, password=password) - params.set_from_config(config) - self.assertEqual(params.host, host) - self.assertEqual(params.service_name, service_name) - self.assertEqual(params.user, user) - self.assertEqual(params.stmtcachesize, stmtcachesize) - - def test_4569(self): - "4569 - test USE_SNI in connect string" - options = [("on", True), ("off", False)] - service_name = "service_4569" - host = "host_4569" - port = 4569 - for str_val, val in options: - easy_connect = f"{host}:{port}/{service_name}?use_sni={str_val}" - descriptor_part = f"(USE_SNI={str_val.upper()})" if val else "" - connect_descriptor = ( - f"(DESCRIPTION={descriptor_part}" - f"(ADDRESS=(PROTOCOL=tcp)(HOST={host})" - f"(PORT={port}))(CONNECT_DATA=(SERVICE_NAME={service_name})))" ) - for connect_string in (easy_connect, connect_descriptor): - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual(params.use_sni, val) - self.assertEqual(params.get_connect_string(), connect_descriptor) - - def test_4570(self): - "4570 - test passing through unrecognized parameters in CONNECT_DATA" - options = [ - "(SIMPLE_KEY=SIMPLE_VALUE)", - "(COMPLEX_KEY=(SUB_VALUE_A=5)(SUB_VALUE_B=6))", - "(COMPLEX_KEY=(SUB_VALUE_A=5)(SUB_VALUE_B=(SUB_SUB_A=6)))", - ] - for option in options: - with self.subTest(option=option): - connect_string = ( - "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=host4570)" - "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service4570)" - f"{option}))" - ) - params = 
oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.get_connect_string(), connect_string) - - def test_4571(self): - "4571 - test INSTANCE_NAME in connect string" - service_name = "service_4571" - instance_name = "instance_4571" - host = "host_4571" - port = 4571 - easy_connect = f"{host}:{port}/{service_name}/{instance_name}" + params = oracledb.ConnectParams() + params.set_from_config(config) + assert params.host == host + assert params.service_name == service_name + if user is not None: + assert params.user == user + + +def test_4566(test_env): + "4566 - test set_from_config() with user and password already set" + host = "host_4566" + service_name = "service_4566" + connect_string = f"{host}/{service_name}" + user = "user_4566" + password = test_env.get_random_string() + config_user = "user_4566_in_config" + config_password = test_env.get_random_string() + config = dict( + connect_descriptor=connect_string, + user=config_user, + password=dict( + type="base64", + value=base64.b64encode(config_password.encode()).decode(), + ), + ) + params = oracledb.ConnectParams(user=user, password=password) + params.set_from_config(config) + assert params.host == host + assert params.service_name == service_name + assert params.user == user + + +def test_4567(test_env): + "4567 - test set_from_config() without connect_descriptor" + params = oracledb.ConnectParams() + with test_env.assert_raises_full_code("DPY-2059"): + params.set_from_config(dict(connect_descriptor_missing="missing")) + + +def test_4568(test_env): + "4568 - test set_from_config() with extended parameters" + host = "host_4566" + service_name = "service_4566" + connect_string = f"{host}/{service_name}" + stmtcachesize = 35 + user = "user_4566" + password = test_env.get_random_string() + config = dict( + connect_descriptor=connect_string, + user=user, + password=dict( + type="base64", + value=base64.b64encode(password.encode()).decode(), + ), + pyo=dict(stmtcachesize=stmtcachesize), + ) + params = oracledb.ConnectParams(user=user, password=password) + params.set_from_config(config) + assert params.host == host + assert params.service_name == service_name + assert params.user == user + assert params.stmtcachesize == stmtcachesize + + +def test_4569(): + "4569 - test USE_SNI in connect string" + options = [("on", True), ("off", False)] + service_name = "service_4569" + host = "host_4569" + port = 4569 + for str_val, val in options: + easy_connect = f"{host}:{port}/{service_name}?use_sni={str_val}" + descriptor_part = f"(USE_SNI={str_val.upper()})" if val else "" connect_descriptor = ( - f"(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST={host})(PORT={port}))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name})" - f"(INSTANCE_NAME={instance_name})))" + f"(DESCRIPTION={descriptor_part}" + f"(ADDRESS=(PROTOCOL=tcp)(HOST={host})" + f"(PORT={port}))(CONNECT_DATA=(SERVICE_NAME={service_name})))" ) for connect_string in (easy_connect, connect_descriptor): params = oracledb.ConnectParams() params.parse_connect_string(connect_string) - self.assertEqual(params.service_name, service_name) - self.assertEqual(params.instance_name, instance_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.get_connect_string(), connect_descriptor) - - def test_4572(self): - "4572 - test passing through unrecognized parameters in SECURITY" - options = [ - "(SIMPLE_KEY=SIMPLE_VALUE)", - "(COMPLEX_KEY=(SUB_VALUE_A=23)(SUB_VALUE_B=27))", - 
"(COMPLEX_KEY=(SUB_VALUE_A=A)(SUB_VALUE_B=(SUB_SUB_A=B)))", - ] - for option in options: - with self.subTest(option=option): - connect_string = ( - "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcps)(HOST=host4572)" - "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service4572))" - f"(SECURITY=(SSL_SERVER_DN_MATCH=ON){option}))" - ) - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.get_connect_string(), connect_string) - - def test_4573(self): - "4573 - test passing through unrecognized parameters in DESCRIPTION" - options = [ - "(SIMPLE_KEY=SIMPLE_VALUE)", - "(COMPLEX_KEY=(SUB_VALUE_1=1)(SUB_VALUE_B=2))", - "(COMPLEX_KEY=(SUB_VALUE_2=S)(SUB_VALUE_B=(SUB_SUB_A=T)))", - ] - for option in options: - with self.subTest(option=option): - connect_string = ( - "(DESCRIPTION_LIST=" - f"(DESCRIPTION={option}(ADDRESS=(PROTOCOL=tcp)" - "(HOST=host4573a)(PORT=1521))" - "(CONNECT_DATA=(SERVICE_NAME=service4573)))" - f"(DESCRIPTION={option}(ADDRESS=(PROTOCOL=tcp)" - "(HOST=host4573b)(PORT=1521))" - "(CONNECT_DATA=(SERVICE_NAME=service4573))))" - ) - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.get_connect_string(), connect_string) - - def test_4574(self): - "4574 - test passing through specific unsupported parameters" - easy_connect = ( - "host_4574/service_4574?" - "enable=broken&recv_buf_size=1024&send_buf_size=2048" + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.use_sni == val + assert params.get_connect_string() == connect_descriptor + + +def test_4570(): + "4570 - test passing through unrecognized parameters in CONNECT_DATA" + options = [ + "(SIMPLE_KEY=SIMPLE_VALUE)", + "(COMPLEX_KEY=(SUB_VALUE_A=5)(SUB_VALUE_B=6))", + "(COMPLEX_KEY=(SUB_VALUE_A=5)(SUB_VALUE_B=(SUB_SUB_A=6)))", + ] + for option in options: + connect_string = ( + "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=host4570)" + "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service4570)" + f"{option}))" ) - connect_descriptor = ( - "(DESCRIPTION=(ENABLE=broken)(RECV_BUF_SIZE=1024)" - "(SEND_BUF_SIZE=2048)(ADDRESS=(PROTOCOL=tcp)(HOST=host_4574)" - "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service_4574)))" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.get_connect_string() == connect_string + + +def test_4571(): + "4571 - test INSTANCE_NAME in connect string" + service_name = "service_4571" + instance_name = "instance_4571" + host = "host_4571" + port = 4571 + easy_connect = f"{host}:{port}/{service_name}/{instance_name}" + connect_descriptor = ( + f"(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST={host})(PORT={port}))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name})" + f"(INSTANCE_NAME={instance_name})))" + ) + for connect_string in (easy_connect, connect_descriptor): + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.service_name == service_name + assert params.instance_name == instance_name + assert params.host == host + assert params.port == port + assert params.get_connect_string() == connect_descriptor + + +def test_4572(): + "4572 - test passing through unrecognized parameters in SECURITY" + options = [ + "(SIMPLE_KEY=SIMPLE_VALUE)", + "(COMPLEX_KEY=(SUB_VALUE_A=23)(SUB_VALUE_B=27))", + "(COMPLEX_KEY=(SUB_VALUE_A=A)(SUB_VALUE_B=(SUB_SUB_A=B)))", + ] + for option in options: + connect_string = ( + "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcps)(HOST=host4572)" + 
"(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service4572))" + f"(SECURITY=(SSL_SERVER_DN_MATCH=ON){option}))" ) params = oracledb.ConnectParams() - params.parse_connect_string(easy_connect) - self.assertEqual(params.get_connect_string(), connect_descriptor) - - def test_4575(self): - "4575 - test syntax rule for keywords" - for value, ok in [ - ("(SIMPLE_KEY=SIMPLE_VALUE)", True), - ("(KEY_CONTAINS SPACE=SIMPLE_VALUE)", False), - ("(∆KEY✓🚀=SIMPLE_VALUE)", False), - ("(§∞ホスト🔑=SIMPLE_VALUE)", False), - ("(^MY_KEY_NAME=SIMPLE_VALUE)", False), - ("(KEY_CONTAINS TAB=SIMPLE_VALUE)", False), - ("(KEY_CONTAINS_QUOTES_''=SIMPLE_VALUE)", False), - ("(KEY_CONTAINS'\r'=SIMPLE_VALUE)", False), - ("(KEY_CONTAINS'\n'=SIMPLE_VALUE)", False), - ]: - with self.subTest(value=value): - connect_string = ( - "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=host4573)" - + "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service4573)" - + f"{value}))" - ) - if ok: - params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual( - params.get_connect_string(), connect_string - ) - else: - with self.assertRaisesFullCode("DPY-4017"): - params.parse_connect_string(connect_string) - - def test_4576(self): - "4576 - test syntax rule for keywords in easy connect string" - for value, ok in [ - ("simple_key=simple_value", True), - ("key_contains space=simple_value", False), - ("∆key✓🚀=simple_value", False), - ("^my_key_name=simple_value", False), - ("key_contains tab=simple_value", False), - ("key_contains_quotes_''=simple_value", False), - ("key_contains'r'=simple_value", False), - ("key_contains'\n'=simple_value", False), - ]: - with self.subTest(value=value): - easy_connect = f"""host4574:1589/service4574?{value}""" - connect_string_exp = ( - "(DESCRIPTION=" - + "(ADDRESS=(PROTOCOL=tcp)(HOST=host4574)(PORT=1589))" - + "(CONNECT_DATA=(SERVICE_NAME=service4574)))" - ) - if ok: - params = oracledb.ConnectParams() - params.parse_connect_string(easy_connect) - self.assertEqual(params.host, "host4574") - self.assertEqual(params.port, 1589) - self.assertEqual(params.service_name, "service4574") - self.assertEqual( - params.get_connect_string(), connect_string_exp - ) - else: - with self.assertRaisesFullCode("DPY-4018"): - params.parse_connect_string(easy_connect) - - def test_4577(self): - "4577 - test for DESCRIPTION_LIST with FAILOVER" + params.parse_connect_string(connect_string) + assert params.get_connect_string() == connect_string + + +def test_4573(): + "4573 - test passing through unrecognized parameters in DESCRIPTION" + options = [ + "(SIMPLE_KEY=SIMPLE_VALUE)", + "(COMPLEX_KEY=(SUB_VALUE_1=1)(SUB_VALUE_B=2))", + "(COMPLEX_KEY=(SUB_VALUE_2=S)(SUB_VALUE_B=(SUB_SUB_A=T)))", + ] + for option in options: connect_string = ( - "(DESCRIPTION_LIST=(FAILOVER=OFF)(LOAD_BALANCE=ON)" - "(DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=1)(RETRY_DELAY=1)" - "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host30)(PORT=5001))" - "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host31)(PORT=1521)))" - "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host32)(PORT=5002))" - "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host32)(PORT=5003)))" - "(CONNECT_DATA=(SERVICE_NAME=my_service_name27)))" - "(DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=2)(RETRY_DELAY=3)" - "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host34)(PORT=5002))" - "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host35)(PORT=5001)))" - "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host36)(PORT=5002))" - "(ADDRESS=(PROTOCOL=tcps)(HOST=my_host37)(PORT=1521)))" - "(SECURITY=(SSL_SERVER_DN_MATCH=ON))))" + 
"(DESCRIPTION_LIST=" + f"(DESCRIPTION={option}(ADDRESS=(PROTOCOL=tcp)" + "(HOST=host4573a)(PORT=1521))" + "(CONNECT_DATA=(SERVICE_NAME=service4573)))" + f"(DESCRIPTION={option}(ADDRESS=(PROTOCOL=tcp)" + "(HOST=host4573b)(PORT=1521))" + "(CONNECT_DATA=(SERVICE_NAME=service4573))))" ) params = oracledb.ConnectParams() params.parse_connect_string(connect_string) - self.assertEqual(params.get_connect_string(), connect_string) - - def test_4578(self): - "4578 - test for descriptor parameters in connect descriptor" - options = [ - ("(FAILOVER=on)", ""), - ("(FAILOVER=off)", "(FAILOVER=OFF)"), - ("(FAILOVER=true)", ""), - ("(FAILOVER=false)", "(FAILOVER=OFF)"), - ("(FAILOVER=yes)", ""), - ("(FAILOVER=no)", "(FAILOVER=OFF)"), - ("(FAILOVER=unsupported_value)", "(FAILOVER=OFF)"), - ("(FAILOVER=1700)", "(FAILOVER=OFF)"), - ("(ENABLE=broken)", "(ENABLE=broken)"), - ("(LOAD_BALANCE=on)", "(LOAD_BALANCE=ON)"), - ("(LOAD_BALANCE=off)", ""), - ("(LOAD_BALANCE=true)", "(LOAD_BALANCE=ON)"), - ("(LOAD_BALANCE=false)", ""), - ("(LOAD_BALANCE=yes)", "(LOAD_BALANCE=ON)"), - ("(LOAD_BALANCE=no)", ""), - ("(LOAD_BALANCE=unsupported_value)", ""), - ("(LOAD_BALANCE=1700)", ""), - ("(RECV_BUF_SIZE=87300)", "(RECV_BUF_SIZE=87300)"), - ("(RECV_BUF_SIZE=11784)", "(RECV_BUF_SIZE=11784)"), - ("(SEND_BUF_SIZE=87300)", "(SEND_BUF_SIZE=87300)"), - ("(SEND_BUF_SIZE=11784)", "(SEND_BUF_SIZE=11784)"), - ("(RECV_TIMEOUT=10)", "(RECV_TIMEOUT=10)"), - ("(RECV_TIMEOUT=10ms)", "(RECV_TIMEOUT=10ms)"), - ("(RECV_TIMEOUT=10 ms)", "(RECV_TIMEOUT=10 ms)"), - ("(RECV_TIMEOUT=10 hr)", "(RECV_TIMEOUT=10 hr)"), - ("(RECV_TIMEOUT=10 min)", "(RECV_TIMEOUT=10 min)"), - ("(RECV_TIMEOUT=10 sec)", "(RECV_TIMEOUT=10 sec)"), - ("(COMPRESSION=on)", "(COMPRESSION=on)"), - ("(COMPRESSION=off)", "(COMPRESSION=off)"), - ( - "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=low))", - "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=low))", - ), - ( - "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=high))", - "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=high))", - ), - ( - "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=wrong))", - "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=wrong))", - ), - ] - - service_name = "service_4576" - host1 = "host_4576_1" - host2 = "host_4576_2" - port1 = 45761 - port2 = 45762 - for str_val, exp_val in options: - with self.subTest(str_val=str_val): - descriptor_part = str_val - descriptor_part_exp = exp_val - connect_descriptor = ( - f"(DESCRIPTION={descriptor_part}(ADDRESS_LIST=" - f"(ADDRESS=(PROTOCOL=tcp)(HOST={host1})(PORT={port1}))" - f"(ADDRESS=(PROTOCOL=tcp)(HOST={host2})(PORT={port2})))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name})))" - ) - params = oracledb.ConnectParams() - params.parse_connect_string(connect_descriptor) - - connect_descriptor_exp = ( - f"(DESCRIPTION={descriptor_part_exp}(ADDRESS_LIST=" - f"(ADDRESS=(PROTOCOL=tcp)(HOST={host1})(PORT={port1}))" - f"(ADDRESS=(PROTOCOL=tcp)(HOST={host2})(PORT={port2})))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name})))" - ) - - self.assertEqual(params.host, [host1, host2]) - self.assertEqual(params.port, [port1, port2]) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_connect_string(), connect_descriptor_exp - ) - - def test_4579(self): - "4579 - test for connect data parameters in connect descriptor" - options = [ - "(COLOCATION_TAG=ColocationTag4577)", - "(COLOCATION_TAG=ColocationTag_4577)", - "(FAILOVER_MODE=(BACKUP=bhost)(TYPE=session)(METHOD=basic))", - "(FAILOVER_MODE=(BACKUP=bhost)(TYPE=select)(METHOD=preconnect))", - 
"(FAILOVER_MODE=(TYPE=select)(METHOD=basic)(RETRIES=2)(DELAY=15))", - "(HS=ok)", - "(TUNNEL_SERVICE_NAME=south)", - "(POOL_NAME=pool_name_4579)", - ] - - service_name = "service_4577" - host = "host_4577" - port = 4577 - for str_val in options: - with self.subTest(str_val=str_val): - connect_data_part = str_val - connect_descriptor = ( - f"(DESCRIPTION=" - f"(ADDRESS=(PROTOCOL=tcp)(HOST={host})(PORT={port}))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name})" - f"{connect_data_part}))" - ) - params = oracledb.ConnectParams() - params.parse_connect_string(connect_descriptor) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_connect_string(), connect_descriptor - ) - - def test_4580(self): - "4580 - test for security parameters in connect descriptor" - - security_options = { - # IGNORE_ANO_ENCRYPTION_FOR_TCPS variations - "(SECURITY=(IGNORE_ANO_ENCRYPTION_FOR_TCPS=TRUE))": ( - "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" - "(IGNORE_ANO_ENCRYPTION_FOR_TCPS=TRUE))" - ), - "(SECURITY=(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))": ( - "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" - "(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))" - ), - "(SECURITY=(SSL_SERVER_DN_MATCH=false)" - "(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))": ( - "(SECURITY=(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))" - ), - # KERBEROS5_CC_NAME and KERBEROS5_PRINCIPAL variations - "(SECURITY=(KERBEROS5_CC_NAME=/tmp/krbuser2/krb.cc)" - "(KERBEROS5_PRINCIPAL=krbprinc2@example.com))": ( - "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" - "(KERBEROS5_CC_NAME=/tmp/krbuser2/krb.cc)" - "(KERBEROS5_PRINCIPAL=krbprinc2@example.com))" - ), - # SSL_SERVER_CERT_DN and SSL_SERVER_DN_MATCH variations - "(SECURITY=(SSL_SERVER_DN_MATCH=on)" - "(SSL_SERVER_CERT_DN=CN=unknown19a)" - "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))": ( - "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" - "(SSL_SERVER_CERT_DN=CN=unknown19a)" - "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))" - ), - "(SECURITY=(SSL_SERVER_DN_MATCH=false)" - "(SSL_SERVER_CERT_DN=CN=unknown19a)" - "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))": ( - "(SECURITY=(SSL_SERVER_CERT_DN=CN=unknown19a)" - "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))" - ), - "(SECURITY=(SSL_SERVER_DN_MATCH=wrong)" - "(SSL_SERVER_CERT_DN=CN=unknown19a)" - "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))": ( - "(SECURITY=(SSL_SERVER_CERT_DN=CN=unknown19a)" - "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))" - ), - } - - service_name = "service_4578" - host = "host_4578" - port = 4578 - for str_val, exp_val in security_options.items(): - with self.subTest(str_val=str_val): - security_part = str_val - security_part_exp = exp_val - connect_descriptor = ( - f"(DESCRIPTION=" - f"(ADDRESS=(PROTOCOL=tcps)(HOST={host})(PORT={port}))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name}))" - f"{security_part})" - ) - params = oracledb.ConnectParams() - params.parse_connect_string(connect_descriptor) - connect_descriptor_exp = ( - f"(DESCRIPTION=" - f"(ADDRESS=(PROTOCOL=tcps)(HOST={host})(PORT={port}))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name}))" - f"{security_part_exp})" - ) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_connect_string(), connect_descriptor_exp - ) - - def test_4581(self): - "4581 - test for parameters supported in easy connect descriptor" - options = [ - ("retry_count=3&retry_delay=6", "(RETRY_COUNT=3)(RETRY_DELAY=6)"), - ("enable=broken", "(ENABLE=broken)"), - 
("failover=on", ""), - ("failover=off", "(FAILOVER=OFF)"), - ("failover=true", ""), - ("failover=false", "(FAILOVER=OFF)"), - ("failover=yes", ""), - ("failover=no", "(FAILOVER=OFF)"), - ("failover=unsupported_value", "(FAILOVER=OFF)"), - ("failover=1700", "(FAILOVER=OFF)"), - ("load_balance=on", "(LOAD_BALANCE=ON)"), - ("load_balance=off", ""), - ("load_balance=true", "(LOAD_BALANCE=ON)"), - ("load_balance=false", ""), - ("load_balance=yes", "(LOAD_BALANCE=ON)"), - ("load_balance=no", ""), - ("load_balance=unsupported_value", ""), - ("load_balance=1700", ""), - ("recv_buf_size=87300", "(RECV_BUF_SIZE=87300)"), - ("send_buf_size=11786", "(SEND_BUF_SIZE=11786)"), - ("sdu=16384", "(SDU=16384)"), - ("retry_count=6", "(RETRY_COUNT=6)(RETRY_DELAY=1)"), - ("source_route=on", "(SOURCE_ROUTE=ON)"), - ("source_route=true", "(SOURCE_ROUTE=ON)"), - ("source_route=yes", "(SOURCE_ROUTE=ON)"), - ("source_route=off", ""), - ("source_route=false", ""), - ("source_route=no", ""), - ("source_route=wrong", ""), - ( - "transport_connect_timeout=100", - "(TRANSPORT_CONNECT_TIMEOUT=100)", - ), - ( - "transport_connect_timeout=500ms", - "(TRANSPORT_CONNECT_TIMEOUT=500ms)", - ), - ] - - service_name = "service_4579" - host = "host_4579" - port = 4579 - for str_val, exp_str in options: - with self.subTest(str_val=str_val): - descriptor_part = exp_str - easy_connect = f"""{host}:{port}/{service_name}?{str_val}""" - connect_descriptor_exp = ( - f"(DESCRIPTION={descriptor_part}" - f"(ADDRESS=(PROTOCOL=tcp)(HOST={host})(PORT={port}))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name})))" - ) - params = oracledb.ConnectParams() + assert params.get_connect_string() == connect_string + + +def test_4574(): + "4574 - test passing through specific unsupported parameters" + easy_connect = ( + "host_4574/service_4574?" 
+ "enable=broken&recv_buf_size=1024&send_buf_size=2048" + ) + connect_descriptor = ( + "(DESCRIPTION=(ENABLE=broken)(RECV_BUF_SIZE=1024)" + "(SEND_BUF_SIZE=2048)(ADDRESS=(PROTOCOL=tcp)(HOST=host_4574)" + "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service_4574)))" + ) + params = oracledb.ConnectParams() + params.parse_connect_string(easy_connect) + assert params.get_connect_string() == connect_descriptor + + +def test_4575(test_env): + "4575 - test syntax rule for keywords" + for value, ok in [ + ("(SIMPLE_KEY=SIMPLE_VALUE)", True), + ("(KEY_CONTAINS SPACE=SIMPLE_VALUE)", False), + ("(∆KEY✓🚀=SIMPLE_VALUE)", False), + ("(§∞ホスト🔑=SIMPLE_VALUE)", False), + ("(^MY_KEY_NAME=SIMPLE_VALUE)", False), + ("(KEY_CONTAINS TAB=SIMPLE_VALUE)", False), + ("(KEY_CONTAINS_QUOTES_''=SIMPLE_VALUE)", False), + ("(KEY_CONTAINS'\r'=SIMPLE_VALUE)", False), + ("(KEY_CONTAINS'\n'=SIMPLE_VALUE)", False), + ]: + connect_string = ( + "(DESCRIPTION=(ADDRESS=(PROTOCOL=tcp)(HOST=host4573)" + + "(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=service4573)" + + f"{value}))" + ) + if ok: + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.get_connect_string() == connect_string + else: + with test_env.assert_raises_full_code("DPY-4017"): + params.parse_connect_string(connect_string) + + +def test_4576(test_env): + "4576 - test syntax rule for keywords in easy connect string" + for value, ok in [ + ("simple_key=simple_value", True), + ("key_contains space=simple_value", False), + ("∆key✓🚀=simple_value", False), + ("^my_key_name=simple_value", False), + ("key_contains tab=simple_value", False), + ("key_contains_quotes_''=simple_value", False), + ("key_contains'r'=simple_value", False), + ("key_contains'\n'=simple_value", False), + ]: + easy_connect = f"""host4574:1589/service4574?{value}""" + connect_string_exp = ( + "(DESCRIPTION=" + + "(ADDRESS=(PROTOCOL=tcp)(HOST=host4574)(PORT=1589))" + + "(CONNECT_DATA=(SERVICE_NAME=service4574)))" + ) + if ok: + params = oracledb.ConnectParams() + params.parse_connect_string(easy_connect) + assert params.host == "host4574" + assert params.port == 1589 + assert params.service_name == "service4574" + assert params.get_connect_string() == connect_string_exp + else: + with test_env.assert_raises_full_code("DPY-4018"): params.parse_connect_string(easy_connect) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_connect_string(), connect_descriptor_exp - ) - - def test_4582(self): - "4582 - test for security parameters in easy connect descriptor" - service_name = "service_4580" - srvc_str = ( - "ssl_server_dn_match=true" - "&ssl_server_cert_dn='cn=sales,cn=OracleContext," - "dc=us,dc=example,dc=com'" - "&wallet_location='/tmp/oracle'" + + +def test_4577(): + "4577 - test for DESCRIPTION_LIST with FAILOVER" + connect_string = ( + "(DESCRIPTION_LIST=(FAILOVER=OFF)(LOAD_BALANCE=ON)" + "(DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=1)(RETRY_DELAY=1)" + "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host30)(PORT=5001))" + "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host31)(PORT=1521)))" + "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host32)(PORT=5002))" + "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host32)(PORT=5003)))" + "(CONNECT_DATA=(SERVICE_NAME=my_service_name27)))" + "(DESCRIPTION=(LOAD_BALANCE=ON)(RETRY_COUNT=2)(RETRY_DELAY=3)" + "(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host34)(PORT=5002))" + "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host35)(PORT=5001)))" + 
"(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=my_host36)(PORT=5002))" + "(ADDRESS=(PROTOCOL=tcps)(HOST=my_host37)(PORT=1521)))" + "(SECURITY=(SSL_SERVER_DN_MATCH=ON))))" + ) + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.get_connect_string() == connect_string + + +def test_4578(): + "4578 - test for descriptor parameters in connect descriptor" + options = [ + ("(FAILOVER=on)", ""), + ("(FAILOVER=off)", "(FAILOVER=OFF)"), + ("(FAILOVER=true)", ""), + ("(FAILOVER=false)", "(FAILOVER=OFF)"), + ("(FAILOVER=yes)", ""), + ("(FAILOVER=no)", "(FAILOVER=OFF)"), + ("(FAILOVER=unsupported_value)", "(FAILOVER=OFF)"), + ("(FAILOVER=1700)", "(FAILOVER=OFF)"), + ("(ENABLE=broken)", "(ENABLE=broken)"), + ("(LOAD_BALANCE=on)", "(LOAD_BALANCE=ON)"), + ("(LOAD_BALANCE=off)", ""), + ("(LOAD_BALANCE=true)", "(LOAD_BALANCE=ON)"), + ("(LOAD_BALANCE=false)", ""), + ("(LOAD_BALANCE=yes)", "(LOAD_BALANCE=ON)"), + ("(LOAD_BALANCE=no)", ""), + ("(LOAD_BALANCE=unsupported_value)", ""), + ("(LOAD_BALANCE=1700)", ""), + ("(RECV_BUF_SIZE=87300)", "(RECV_BUF_SIZE=87300)"), + ("(RECV_BUF_SIZE=11784)", "(RECV_BUF_SIZE=11784)"), + ("(SEND_BUF_SIZE=87300)", "(SEND_BUF_SIZE=87300)"), + ("(SEND_BUF_SIZE=11784)", "(SEND_BUF_SIZE=11784)"), + ("(RECV_TIMEOUT=10)", "(RECV_TIMEOUT=10)"), + ("(RECV_TIMEOUT=10ms)", "(RECV_TIMEOUT=10ms)"), + ("(RECV_TIMEOUT=10 ms)", "(RECV_TIMEOUT=10 ms)"), + ("(RECV_TIMEOUT=10 hr)", "(RECV_TIMEOUT=10 hr)"), + ("(RECV_TIMEOUT=10 min)", "(RECV_TIMEOUT=10 min)"), + ("(RECV_TIMEOUT=10 sec)", "(RECV_TIMEOUT=10 sec)"), + ("(COMPRESSION=on)", "(COMPRESSION=on)"), + ("(COMPRESSION=off)", "(COMPRESSION=off)"), + ( + "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=low))", + "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=low))", + ), + ( + "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=high))", + "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=high))", + ), + ( + "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=wrong))", + "(COMPRESSION=on)(COMPRESSION_LEVELS=(LEVEL=wrong))", + ), + ] + + service_name = "service_4576" + host1 = "host_4576_1" + host2 = "host_4576_2" + port1 = 45761 + port2 = 45762 + for str_val, exp_val in options: + descriptor_part = str_val + descriptor_part_exp = exp_val + connect_descriptor = ( + f"(DESCRIPTION={descriptor_part}(ADDRESS_LIST=" + f"(ADDRESS=(PROTOCOL=tcp)(HOST={host1})(PORT={port1}))" + f"(ADDRESS=(PROTOCOL=tcp)(HOST={host2})(PORT={port2})))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name})))" ) - host = "host_4580" - port = 4580 - easy_connect = f"tcps://{host}:{port}/{service_name}?{srvc_str}" + params = oracledb.ConnectParams() + params.parse_connect_string(connect_descriptor) + connect_descriptor_exp = ( - f"(DESCRIPTION=" - f"(ADDRESS=(PROTOCOL=tcps)(HOST={host})" - f"(PORT={port}))" - f"(CONNECT_DATA=(SERVICE_NAME={service_name}))" - "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" - "(SSL_SERVER_CERT_DN='cn=sales,cn=OracleContext," - "dc=us,dc=example,dc=com')" - "(MY_WALLET_DIRECTORY='/tmp/oracle')))" + f"(DESCRIPTION={descriptor_part_exp}(ADDRESS_LIST=" + f"(ADDRESS=(PROTOCOL=tcp)(HOST={host1})(PORT={port1}))" + f"(ADDRESS=(PROTOCOL=tcp)(HOST={host2})(PORT={port2})))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name})))" ) - params = oracledb.ConnectParams() - params.parse_connect_string(easy_connect) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual(params.get_connect_string(), connect_descriptor_exp) - def test_4583(self): - "4583 - test for 
TYPE_OF_SERVICE, RDB_DATABASE, GLOBAL_NAME parameters" - connect_string = ( - "(DESCRIPTION_LIST=" - "(DESCRIPTION=(TYPE_OF_SERVICE=rdb_database)" - "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host94_1)(PORT=5002))" - "(CONNECT_DATA=" - "(SERVICE_NAME=generic)" - "(RDB_DATABASE=[.mf]mf_personal.rdb)" - "(GLOBAL_NAME=alpha5)))" - "(DESCRIPTION=(TYPE_OF_SERVICE=oracle11_database)" - "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host94_2)(PORT=5003))" - "(CONNECT_DATA=" - "(SERVICE_NAME=sales.us.example.com))))" + assert params.host == [host1, host2] + assert params.port == [port1, port2] + assert params.service_name == service_name + assert params.get_connect_string() == connect_descriptor_exp + + +def test_4579(): + "4579 - test for connect data parameters in connect descriptor" + options = [ + "(COLOCATION_TAG=ColocationTag4577)", + "(COLOCATION_TAG=ColocationTag_4577)", + "(FAILOVER_MODE=(BACKUP=bhost)(TYPE=session)(METHOD=basic))", + "(FAILOVER_MODE=(BACKUP=bhost)(TYPE=select)(METHOD=preconnect))", + "(FAILOVER_MODE=(TYPE=select)(METHOD=basic)(RETRIES=2)(DELAY=15))", + "(HS=ok)", + "(TUNNEL_SERVICE_NAME=south)", + "(POOL_NAME=pool_name_4579)", + ] + + service_name = "service_4577" + host = "host_4577" + port = 4577 + for str_val in options: + connect_data_part = str_val + connect_descriptor = ( + f"(DESCRIPTION=" + f"(ADDRESS=(PROTOCOL=tcp)(HOST={host})(PORT={port}))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name})" + f"{connect_data_part}))" ) params = oracledb.ConnectParams() - params.parse_connect_string(connect_string) - self.assertEqual(params.get_connect_string(), connect_string) + params.parse_connect_string(connect_descriptor) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_connect_string() == connect_descriptor + +def test_4580(): + "4580 - test for security parameters in connect descriptor" -if __name__ == "__main__": - test_env.run_test_cases() + security_options = { + # IGNORE_ANO_ENCRYPTION_FOR_TCPS variations + "(SECURITY=(IGNORE_ANO_ENCRYPTION_FOR_TCPS=TRUE))": ( + "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" + "(IGNORE_ANO_ENCRYPTION_FOR_TCPS=TRUE))" + ), + "(SECURITY=(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))": ( + "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" + "(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))" + ), + "(SECURITY=(SSL_SERVER_DN_MATCH=false)" + "(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))": ( + "(SECURITY=(IGNORE_ANO_ENCRYPTION_FOR_TCPS=FALSE))" + ), + # KERBEROS5_CC_NAME and KERBEROS5_PRINCIPAL variations + "(SECURITY=(KERBEROS5_CC_NAME=/tmp/krbuser2/krb.cc)" + "(KERBEROS5_PRINCIPAL=krbprinc2@example.com))": ( + "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" + "(KERBEROS5_CC_NAME=/tmp/krbuser2/krb.cc)" + "(KERBEROS5_PRINCIPAL=krbprinc2@example.com))" + ), + # SSL_SERVER_CERT_DN and SSL_SERVER_DN_MATCH variations + "(SECURITY=(SSL_SERVER_DN_MATCH=on)" + "(SSL_SERVER_CERT_DN=CN=unknown19a)" + "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))": ( + "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" + "(SSL_SERVER_CERT_DN=CN=unknown19a)" + "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))" + ), + "(SECURITY=(SSL_SERVER_DN_MATCH=false)" + "(SSL_SERVER_CERT_DN=CN=unknown19a)" + "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))": ( + "(SECURITY=(SSL_SERVER_CERT_DN=CN=unknown19a)" + "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))" + ), + "(SECURITY=(SSL_SERVER_DN_MATCH=wrong)" + "(SSL_SERVER_CERT_DN=CN=unknown19a)" + "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))": ( + "(SECURITY=(SSL_SERVER_CERT_DN=CN=unknown19a)" + "(MY_WALLET_DIRECTORY=/tmp/wallet_loc19a))" + ), + } + + service_name = 
"service_4578" + host = "host_4578" + port = 4578 + for str_val, exp_val in security_options.items(): + security_part = str_val + security_part_exp = exp_val + connect_descriptor = ( + f"(DESCRIPTION=" + f"(ADDRESS=(PROTOCOL=tcps)(HOST={host})(PORT={port}))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name}))" + f"{security_part})" + ) + params = oracledb.ConnectParams() + params.parse_connect_string(connect_descriptor) + connect_descriptor_exp = ( + f"(DESCRIPTION=" + f"(ADDRESS=(PROTOCOL=tcps)(HOST={host})(PORT={port}))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name}))" + f"{security_part_exp})" + ) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_connect_string() == connect_descriptor_exp + + +def test_4581(): + "4581 - test for parameters supported in easy connect descriptor" + options = [ + ("retry_count=3&retry_delay=6", "(RETRY_COUNT=3)(RETRY_DELAY=6)"), + ("enable=broken", "(ENABLE=broken)"), + ("failover=on", ""), + ("failover=off", "(FAILOVER=OFF)"), + ("failover=true", ""), + ("failover=false", "(FAILOVER=OFF)"), + ("failover=yes", ""), + ("failover=no", "(FAILOVER=OFF)"), + ("failover=unsupported_value", "(FAILOVER=OFF)"), + ("failover=1700", "(FAILOVER=OFF)"), + ("load_balance=on", "(LOAD_BALANCE=ON)"), + ("load_balance=off", ""), + ("load_balance=true", "(LOAD_BALANCE=ON)"), + ("load_balance=false", ""), + ("load_balance=yes", "(LOAD_BALANCE=ON)"), + ("load_balance=no", ""), + ("load_balance=unsupported_value", ""), + ("load_balance=1700", ""), + ("recv_buf_size=87300", "(RECV_BUF_SIZE=87300)"), + ("send_buf_size=11786", "(SEND_BUF_SIZE=11786)"), + ("sdu=16384", "(SDU=16384)"), + ("retry_count=6", "(RETRY_COUNT=6)(RETRY_DELAY=1)"), + ("source_route=on", "(SOURCE_ROUTE=ON)"), + ("source_route=true", "(SOURCE_ROUTE=ON)"), + ("source_route=yes", "(SOURCE_ROUTE=ON)"), + ("source_route=off", ""), + ("source_route=false", ""), + ("source_route=no", ""), + ("source_route=wrong", ""), + ( + "transport_connect_timeout=100", + "(TRANSPORT_CONNECT_TIMEOUT=100)", + ), + ( + "transport_connect_timeout=500ms", + "(TRANSPORT_CONNECT_TIMEOUT=500ms)", + ), + ] + + service_name = "service_4579" + host = "host_4579" + port = 4579 + for str_val, exp_str in options: + descriptor_part = exp_str + easy_connect = f"""{host}:{port}/{service_name}?{str_val}""" + connect_descriptor_exp = ( + f"(DESCRIPTION={descriptor_part}" + f"(ADDRESS=(PROTOCOL=tcp)(HOST={host})(PORT={port}))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name})))" + ) + params = oracledb.ConnectParams() + params.parse_connect_string(easy_connect) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_connect_string() == connect_descriptor_exp + + +def test_4582(): + "4582 - test for security parameters in easy connect descriptor" + service_name = "service_4580" + srvc_str = ( + "ssl_server_dn_match=true" + "&ssl_server_cert_dn='cn=sales,cn=OracleContext," + "dc=us,dc=example,dc=com'" + "&wallet_location='/tmp/oracle'" + ) + host = "host_4580" + port = 4580 + easy_connect = f"tcps://{host}:{port}/{service_name}?{srvc_str}" + connect_descriptor_exp = ( + f"(DESCRIPTION=" + f"(ADDRESS=(PROTOCOL=tcps)(HOST={host})" + f"(PORT={port}))" + f"(CONNECT_DATA=(SERVICE_NAME={service_name}))" + "(SECURITY=(SSL_SERVER_DN_MATCH=ON)" + "(SSL_SERVER_CERT_DN='cn=sales,cn=OracleContext," + "dc=us,dc=example,dc=com')" + "(MY_WALLET_DIRECTORY='/tmp/oracle')))" + ) + params = oracledb.ConnectParams() + 
params.parse_connect_string(easy_connect) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_connect_string() == connect_descriptor_exp + + +def test_4583(): + "4583 - test for TYPE_OF_SERVICE, RDB_DATABASE, GLOBAL_NAME parameters" + connect_string = ( + "(DESCRIPTION_LIST=" + "(DESCRIPTION=(TYPE_OF_SERVICE=rdb_database)" + "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host94_1)(PORT=5002))" + "(CONNECT_DATA=" + "(SERVICE_NAME=generic)" + "(RDB_DATABASE=[.mf]mf_personal.rdb)" + "(GLOBAL_NAME=alpha5)))" + "(DESCRIPTION=(TYPE_OF_SERVICE=oracle11_database)" + "(ADDRESS=(PROTOCOL=tcp)(HOST=my_host94_2)(PORT=5003))" + "(CONNECT_DATA=" + "(SERVICE_NAME=sales.us.example.com))))" + ) + params = oracledb.ConnectParams() + params.parse_connect_string(connect_string) + assert params.get_connect_string() == connect_string diff --git a/tests/test_4600_type_changes.py b/tests/test_4600_type_changes.py index 8f86cfe6..dd53b087 100644 --- a/tests/test_4600_type_changes.py +++ b/tests/test_4600_type_changes.py @@ -29,310 +29,356 @@ import datetime import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def __test_type_change( - self, - query_frag_1, - query_value_1, - query_frag_2, - query_value_2, - table_name="dual", - type_handler=None, - ): - if test_env.get_is_implicit_pooling(): - self.skipTest("sessions can change with implicit pooling") - orig_type_handler = self.conn.outputtypehandler - self.conn.outputtypehandler = type_handler - try: - self.cursor.execute( - f""" - create or replace view TestTypesChanged as - select {query_frag_1} as value - from {table_name} - """ - ) - self.cursor.execute("select * from TestTypesChanged") - self.assertEqual(self.cursor.fetchall(), [(query_value_1,)]) - self.cursor.execute( - f""" - create or replace view TestTypesChanged as - select {query_frag_2} as value - from dual - """ - ) - self.cursor.execute("select * from TestTypesChanged") - self.assertEqual(self.cursor.fetchall(), [(query_value_2,)]) - finally: - self.conn.outputtypehandler = orig_type_handler - - @test_env.skip_unless_thin_mode() - def test_4600(self): - "4600 - test data type changing from VARCHAR to CLOB" - self.__test_type_change( - "cast('string_4600' as VARCHAR2(15))", - "string_4600", - "to_clob('clob_4600')", - "clob_4600", - ) - - @test_env.skip_unless_thin_mode() - def test_4601(self): - "4601 - test data type changing from CHAR to CLOB" - self.__test_type_change( - "cast('string_4601' as CHAR(11))", - "string_4601", - "to_clob('clob_4601')", - "clob_4601", - ) - - @test_env.skip_unless_thin_mode() - def test_4602(self): - "4602 - test data type changing from LONG to CLOB" - self.cursor.execute("truncate table TestLongs") - self.cursor.execute("insert into TestLongs values (1, 'string_4602')") - self.__test_type_change( - "LongCol", - "string_4602", - "to_clob('clob_4602')", - "clob_4602", - "TestLongs", - ) - - @test_env.skip_unless_thin_mode() - def test_4603(self): - "4603 - test data type changing from NVARCHAR to CLOB" - self.__test_type_change( - "cast('string_4603' as NVARCHAR2(15))", - "string_4603", - "to_clob('clob_4603')", - "clob_4603", - ) - - @test_env.skip_unless_thin_mode() - def test_4604(self): - "4604 - test data type changing from NCHAR to CLOB" - self.__test_type_change( - "cast('string_4604' as NCHAR(11))", - "string_4604", - "to_clob('clob_4604')", - "clob_4604", - ) - - @test_env.skip_unless_thin_mode() - def test_4605(self): - "4605 - test data type changing from RAW to BLOB" - 
self.__test_type_change( - "utl_raw.cast_to_raw('string_4605')", - b"string_4605", - "to_blob(utl_raw.cast_to_raw('blob_4605'))", - b"blob_4605", +import pytest + + +def _test_type_change( + conn, + test_env, + query_frag_1, + query_value_1, + query_frag_2, + query_value_2, + table_name="dual", + type_handler=None, +): + if test_env.is_implicit_pooling: + pytest.skip("sessions can change with implicit pooling") + orig_type_handler = conn.outputtypehandler + conn.outputtypehandler = type_handler + cursor = conn.cursor() + try: + cursor.execute( + f""" + create or replace view TestTypesChanged as + select {query_frag_1} as value + from {table_name} + """ ) - - @test_env.skip_unless_thin_mode() - def test_4606(self): - "4606 - test data type changing from LONGRAW to BLOB" - self.cursor.execute("truncate table TestLongRaws") - data = [1, b"string_4606"] - self.cursor.execute("insert into TestLongRaws values (:1, :2)", data) - self.__test_type_change( - "LongRawCol", - b"string_4606", - "to_blob(utl_raw.cast_to_raw('blob_4606'))", - b"blob_4606", - "TestLongRaws", - ) - - @test_env.skip_unless_thin_mode() - def test_4607(self): - "4607 - test data type changing from VARCHAR to NCLOB" - self.__test_type_change( - "cast('string_4607' as VARCHAR2(15))", - "string_4607", - "to_nclob('nclob_4607')", - "nclob_4607", - ) - - @test_env.skip_unless_thin_mode() - def test_4608(self): - "4608 - test data type changing from CHAR to NCLOB" - self.__test_type_change( - "cast('string_4608' as CHAR(11))", - "string_4608", - "to_nclob('nclob_4608')", - "nclob_4608", - ) - - @test_env.skip_unless_thin_mode() - def test_4609(self): - "4609 - test data type changing from LONG to NCLOB" - self.cursor.execute("truncate table TestLongs") - self.cursor.execute("insert into TestLongs values (1, 'string_4609')") - self.__test_type_change( - "LongCol", - "string_4609", - "to_nclob('nclob_4609')", - "nclob_4609", - "TestLongs", - ) - - @test_env.skip_unless_thin_mode() - def test_4610(self): - "4610 - test data type changing from NVARCHAR to NCLOB" - self.__test_type_change( - "cast('string_4610' as NVARCHAR2(15))", - "string_4610", - "to_nclob('nclob_4610')", - "nclob_4610", - ) - - @test_env.skip_unless_thin_mode() - def test_4611(self): - "4611 - test data type changing from NCHAR to NCLOB" - self.__test_type_change( - "cast('string_4611' as NCHAR(11))", - "string_4611", - "to_nclob('nclob_4611')", - "nclob_4611", - ) - - def test_4612(self): - "4612 - test data type changing from VARCHAR to NUMBER" - self.__test_type_change( - "cast('string_4612' as VARCHAR2(15))", - "string_4612", - "to_number('4612')", - 4612, - ) - - def test_4613(self): - "4613 - test data type changing from NUMBER to VARCHAR" - self.__test_type_change( - "to_number('4613')", - 4613, - "cast('string_4613' as VARCHAR2(15))", - "string_4613", - ) - - def test_4614(self): - "4614 - test data type changing from STRING to DATE" - self.__test_type_change( - "cast('string_4614' as VARCHAR2(15))", - "string_4614", - "to_date('04-JAN-2022')", - datetime.datetime(2022, 1, 4, 0, 0), - ) - - def test_4615(self): - "4615 - test data type changing from DATE to STRING" - self.__test_type_change( - "to_date('04-JAN-2022')", - datetime.datetime(2022, 1, 4, 0, 0), - "cast('string_4615' as VARCHAR2(15))", - "string_4615", - ) - - def test_4616(self): - "4616 - test data type changing from NUMBER to DATE" - self.__test_type_change( - "to_number('4616')", - 4616, - "to_date('05-JAN-2022')", - datetime.datetime(2022, 1, 5, 0, 0), - ) - - 
@test_env.skip_unless_thin_mode() - def test_4617(self): - "4617 - test data type changing from CLOB to VARCHAR" - - def type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_CLOB: - return cursor.var( - oracledb.DB_TYPE_VARCHAR, - size=32768, - arraysize=cursor.arraysize, - ) - - self.__test_type_change( - "to_clob('clob_4617')", - "clob_4617", - "cast('string_4617' as VARCHAR2(15))", - "string_4617", - type_handler=type_handler, - ) - - @test_env.skip_unless_thin_mode() - def test_4618(self): - "4618 - test data type changing from NCLOB to NVARCHAR" - - def type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_NCLOB: - return cursor.var( - oracledb.DB_TYPE_NVARCHAR, - size=32768, - arraysize=cursor.arraysize, - ) - - self.__test_type_change( - "to_nclob('nclob_4618')", - "nclob_4618", - "cast('nstring_4618' as NVARCHAR2(15))", - "nstring_4618", - type_handler=type_handler, - ) - - @test_env.skip_unless_thin_mode() - def test_4619(self): - "4619 - test data type changing from CLOB to NVARCHAR" - - def type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_CLOB: - return cursor.var( - oracledb.DB_TYPE_NVARCHAR, - size=32768, - arraysize=cursor.arraysize, - ) - - self.__test_type_change( - "to_clob('clob_4619')", - "clob_4619", - "cast('string_4619' as VARCHAR2(15))", - "string_4619", - type_handler=type_handler, + cursor.execute("select * from TestTypesChanged") + assert cursor.fetchall() == [(query_value_1,)] + cursor.execute( + f""" + create or replace view TestTypesChanged as + select {query_frag_2} as value + from dual + """ ) + cursor.execute("select * from TestTypesChanged") + assert cursor.fetchall() == [(query_value_2,)] + finally: + conn.outputtypehandler = orig_type_handler + + +def test_4600(skip_unless_thin_mode, conn, test_env): + "4600 - test data type changing from VARCHAR to CLOB" + _test_type_change( + conn, + test_env, + "cast('string_4600' as VARCHAR2(15))", + "string_4600", + "to_clob('clob_4600')", + "clob_4600", + ) + + +def test_4601(skip_unless_thin_mode, conn, test_env): + "4601 - test data type changing from CHAR to CLOB" + _test_type_change( + conn, + test_env, + "cast('string_4601' as CHAR(11))", + "string_4601", + "to_clob('clob_4601')", + "clob_4601", + ) + + +def test_4602(skip_unless_thin_mode, conn, cursor, test_env): + "4602 - test data type changing from LONG to CLOB" + cursor.execute("truncate table TestLongs") + cursor.execute("insert into TestLongs values (1, 'string_4602')") + _test_type_change( + conn, + test_env, + "LongCol", + "string_4602", + "to_clob('clob_4602')", + "clob_4602", + "TestLongs", + ) + + +def test_4603(skip_unless_thin_mode, conn, test_env): + "4603 - test data type changing from NVARCHAR to CLOB" + _test_type_change( + conn, + test_env, + "cast('string_4603' as NVARCHAR2(15))", + "string_4603", + "to_clob('clob_4603')", + "clob_4603", + ) + + +def test_4604(skip_unless_thin_mode, conn, test_env): + "4604 - test data type changing from NCHAR to CLOB" + _test_type_change( + conn, + test_env, + "cast('string_4604' as NCHAR(11))", + "string_4604", + "to_clob('clob_4604')", + "clob_4604", + ) + + +def test_4605(skip_unless_thin_mode, conn, test_env): + "4605 - test data type changing from RAW to BLOB" + _test_type_change( + conn, + test_env, + "utl_raw.cast_to_raw('string_4605')", + b"string_4605", + "to_blob(utl_raw.cast_to_raw('blob_4605'))", + b"blob_4605", + ) + + +def test_4606(skip_unless_thin_mode, conn, cursor, test_env): + "4606 - test data type changing from 
LONGRAW to BLOB" + cursor.execute("truncate table TestLongRaws") + data = [1, b"string_4606"] + cursor.execute("insert into TestLongRaws values (:1, :2)", data) + _test_type_change( + conn, + test_env, + "LongRawCol", + b"string_4606", + "to_blob(utl_raw.cast_to_raw('blob_4606'))", + b"blob_4606", + "TestLongRaws", + ) + + +def test_4607(skip_unless_thin_mode, conn, test_env): + "4607 - test data type changing from VARCHAR to NCLOB" + _test_type_change( + conn, + test_env, + "cast('string_4607' as VARCHAR2(15))", + "string_4607", + "to_nclob('nclob_4607')", + "nclob_4607", + ) + + +def test_4608(skip_unless_thin_mode, conn, test_env): + "4608 - test data type changing from CHAR to NCLOB" + _test_type_change( + conn, + test_env, + "cast('string_4608' as CHAR(11))", + "string_4608", + "to_nclob('nclob_4608')", + "nclob_4608", + ) + + +def test_4609(skip_unless_thin_mode, conn, cursor, test_env): + "4609 - test data type changing from LONG to NCLOB" + cursor.execute("truncate table TestLongs") + cursor.execute("insert into TestLongs values (1, 'string_4609')") + _test_type_change( + conn, + test_env, + "LongCol", + "string_4609", + "to_nclob('nclob_4609')", + "nclob_4609", + "TestLongs", + ) + + +def test_4610(skip_unless_thin_mode, conn, test_env): + "4610 - test data type changing from NVARCHAR to NCLOB" + _test_type_change( + conn, + test_env, + "cast('string_4610' as NVARCHAR2(15))", + "string_4610", + "to_nclob('nclob_4610')", + "nclob_4610", + ) + + +def test_4611(skip_unless_thin_mode, conn, test_env): + "4611 - test data type changing from NCHAR to NCLOB" + _test_type_change( + conn, + test_env, + "cast('string_4611' as NCHAR(11))", + "string_4611", + "to_nclob('nclob_4611')", + "nclob_4611", + ) + + +def test_4612(conn, test_env): + "4612 - test data type changing from VARCHAR to NUMBER" + _test_type_change( + conn, + test_env, + "cast('string_4612' as VARCHAR2(15))", + "string_4612", + "to_number('4612')", + 4612, + ) + + +def test_4613(conn, test_env): + "4613 - test data type changing from NUMBER to VARCHAR" + _test_type_change( + conn, + test_env, + "to_number('4613')", + 4613, + "cast('string_4613' as VARCHAR2(15))", + "string_4613", + ) + + +def test_4614(conn, test_env): + "4614 - test data type changing from STRING to DATE" + _test_type_change( + conn, + test_env, + "cast('string_4614' as VARCHAR2(15))", + "string_4614", + "to_date('04-JAN-2022')", + datetime.datetime(2022, 1, 4, 0, 0), + ) + + +def test_4615(conn, test_env): + "4615 - test data type changing from DATE to STRING" + _test_type_change( + conn, + test_env, + "to_date('04-JAN-2022')", + datetime.datetime(2022, 1, 4, 0, 0), + "cast('string_4615' as VARCHAR2(15))", + "string_4615", + ) + + +def test_4616(conn, test_env): + "4616 - test data type changing from NUMBER to DATE" + _test_type_change( + conn, + test_env, + "to_number('4616')", + 4616, + "to_date('05-JAN-2022')", + datetime.datetime(2022, 1, 5, 0, 0), + ) + + +def test_4617(skip_unless_thin_mode, conn, test_env): + "4617 - test data type changing from CLOB to VARCHAR" + + def type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_CLOB: + return cursor.var( + oracledb.DB_TYPE_VARCHAR, + size=32768, + arraysize=cursor.arraysize, + ) - @test_env.skip_unless_thin_mode() - def test_4620(self): - "4620 - test data type changing from BLOB to RAW" - - def type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_BLOB: - return cursor.var( - oracledb.DB_TYPE_RAW, - size=32768, - arraysize=cursor.arraysize, - ) - - 
self.__test_type_change( - "to_blob(utl_raw.cast_to_raw('blob_4620'))", - b"blob_4620", - "utl_raw.cast_to_raw('string_4620')", - b"string_4620", - type_handler=type_handler, - ) + _test_type_change( + conn, + test_env, + "to_clob('clob_4617')", + "clob_4617", + "cast('string_4617' as VARCHAR2(15))", + "string_4617", + type_handler=type_handler, + ) + + +def test_4618(skip_unless_thin_mode, conn, test_env): + "4618 - test data type changing from NCLOB to NVARCHAR" + + def type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_NCLOB: + return cursor.var( + oracledb.DB_TYPE_NVARCHAR, + size=32768, + arraysize=cursor.arraysize, + ) - @test_env.skip_unless_thin_mode() - def test_4621(self): - "4621 - test data type changing from NVARCHAR to CLOB" - self.__test_type_change( - "cast('string_4621' as NVARCHAR2(15))", - "string_4621", - "to_clob('clob_4621')", - "clob_4621", - ) + _test_type_change( + conn, + test_env, + "to_nclob('nclob_4618')", + "nclob_4618", + "cast('nstring_4618' as NVARCHAR2(15))", + "nstring_4618", + type_handler=type_handler, + ) + + +def test_4619(skip_unless_thin_mode, conn, test_env): + "4619 - test data type changing from CLOB to NVARCHAR" + + def type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_CLOB: + return cursor.var( + oracledb.DB_TYPE_NVARCHAR, + size=32768, + arraysize=cursor.arraysize, + ) + _test_type_change( + conn, + test_env, + "to_clob('clob_4619')", + "clob_4619", + "cast('string_4619' as VARCHAR2(15))", + "string_4619", + type_handler=type_handler, + ) + + +def test_4620(skip_unless_thin_mode, conn, test_env): + "4620 - test data type changing from BLOB to RAW" + + def type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_BLOB: + return cursor.var( + oracledb.DB_TYPE_RAW, + size=32768, + arraysize=cursor.arraysize, + ) -if __name__ == "__main__": - test_env.run_test_cases() + _test_type_change( + conn, + test_env, + "to_blob(utl_raw.cast_to_raw('blob_4620'))", + b"blob_4620", + "utl_raw.cast_to_raw('string_4620')", + b"string_4620", + type_handler=type_handler, + ) + + +def test_4621(skip_unless_thin_mode, conn, test_env): + "4621 - test data type changing from NVARCHAR to CLOB" + _test_type_change( + conn, + test_env, + "cast('string_4621' as NVARCHAR2(15))", + "string_4621", + "to_clob('clob_4621')", + "clob_4621", + ) diff --git a/tests/test_4700_pool_params.py b/tests/test_4700_pool_params.py index 62b06362..1c00832a 100644 --- a/tests/test_4700_pool_params.py +++ b/tests/test_4700_pool_params.py @@ -29,150 +29,144 @@ import ssl import oracledb -import test_env -class TestCase(test_env.BaseTestCase): - requires_connection = False +def _test_writable_parameter(name, value, params=None): + """ + Tests that a writable parameter can be written to and the modified + value read back successfully. + """ + if params is None: + params = oracledb.PoolParams() + orig_value = getattr(params, name) + copied_params = params.copy() + args = {} + args[name] = value + params.set(**args) + assert getattr(params, name) == value + assert getattr(copied_params, name) == orig_value + args[name] = None + params.set(**args) + assert getattr(params, name) == value - def __test_writable_parameter(self, name, value, params=None): - """ - Tests that a writable parameter can be written to and the modified - value read back successfully. 
- """ - if params is None: - params = oracledb.PoolParams() - orig_value = getattr(params, name) - copied_params = params.copy() - args = {} - args[name] = value - params.set(**args) - self.assertEqual(getattr(params, name), value) - self.assertEqual(getattr(copied_params, name), orig_value) - args[name] = None - params.set(**args) - self.assertEqual(getattr(params, name), value) - def test_4700(self): - "4700 - test writable parameters" - self.__test_writable_parameter("min", 8, oracledb.PoolParams(max=10)) - self.__test_writable_parameter("max", 12) - self.__test_writable_parameter("increment", 2) - self.__test_writable_parameter("connectiontype", oracledb.Connection) - self.__test_writable_parameter("getmode", oracledb.POOL_GETMODE_NOWAIT) - self.__test_writable_parameter("homogeneous", False) - self.__test_writable_parameter("timeout", 25) - self.__test_writable_parameter("wait_timeout", 45) - self.__test_writable_parameter("max_lifetime_session", 65) - self.__test_writable_parameter("session_callback", lambda c: None) - self.__test_writable_parameter("max_sessions_per_shard", 5) - self.__test_writable_parameter("soda_metadata_cache", True) - self.__test_writable_parameter("ping_interval", 20) - self.__test_writable_parameter("ping_timeout", 3000) +def test_4700(): + "4700 - test writable parameters" + _test_writable_parameter("min", 8, oracledb.PoolParams(max=10)) + _test_writable_parameter("max", 12) + _test_writable_parameter("increment", 2) + _test_writable_parameter("connectiontype", oracledb.Connection) + _test_writable_parameter("getmode", oracledb.POOL_GETMODE_NOWAIT) + _test_writable_parameter("homogeneous", False) + _test_writable_parameter("timeout", 25) + _test_writable_parameter("wait_timeout", 45) + _test_writable_parameter("max_lifetime_session", 65) + _test_writable_parameter("session_callback", lambda c: None) + _test_writable_parameter("max_sessions_per_shard", 5) + _test_writable_parameter("soda_metadata_cache", True) + _test_writable_parameter("ping_interval", 20) + _test_writable_parameter("ping_timeout", 3000) - def test_4701(self): - "4701 - test PoolParams repr()" - values = [ - ("min", 3), - ("max", 10), - ("increment", 4), - ("connectiontype", oracledb.Connection), - ("getmode", oracledb.POOL_GETMODE_WAIT), - ("homogeneous", True), - ("timeout", 60), - ("wait_timeout", 20), - ("max_lifetime_session", 80), - ("session_callback", lambda c: None), - ("max_sessions_per_shard", 4), - ("soda_metadata_cache", False), - ("ping_interval", 50), - ("ping_timeout", 2500), - ("user", test_env.get_main_user()), - ("proxy_user", test_env.get_proxy_user()), - ("host", "my_host1"), - ("port", 1522), - ("protocol", "tcp"), - ("https_proxy", "proxy_4701"), - ("https_proxy_port", 4701), - ("service_name", "my_service_name1"), - ("instance_name", "my_instance_name"), - ("sid", "my_sid1"), - ("server_type", "dedicated"), - ("cclass", "cclass_1"), - ("purity", oracledb.PURITY_SELF), - ("expire_time", 60), - ("retry_count", 6), - ("retry_delay", 10), - ("tcp_connect_timeout", 40.0), - ("ssl_server_dn_match", False), - ("ssl_server_cert_dn", "CN=unknown4701a"), - ("wallet_location", "/tmp/wallet_loc1a"), - ("events", True), - ("externalauth", True), - ("mode", oracledb.AUTH_MODE_SYSDBA), - ("disable_oob", True), - ("stmtcachesize", 25), - ("edition", "edition_4701"), - ("tag", "tag4701"), - ("matchanytag", True), - ("config_dir", "config_dir_4701"), - ("appcontext", [("a", "b", "c")]), - ("shardingkey", [1, 2, 3]), - ("supershardingkey", [4]), - ("debug_jdwp", "host=host;port=1523"), 
- ("connection_id_prefix", "prefix4701"), - ("ssl_context", None), - ("sdu", 16384), - ("pool_boundary", "transaction"), - ("use_tcp_fast_open", True), - ("ssl_version", ssl.TLSVersion.TLSv1_2), - ("program", "my_program"), - ("machine", "my_machine"), - ("terminal", "my_terminal"), - ("osuser", "me"), - ("driver_name", "custom_driver"), - ("use_sni", True), - ("thick_mode_dsn_passthrough", True), - ("extra_auth_params", dict(extra1="A", extra2="B")), - ("pool_name", "my_pool"), - ] - params = oracledb.PoolParams(**dict(values)) - parts = [f"{name}={value!r}" for name, value in values] - expected_value = f"PoolParams({', '.join(parts)})" - self.assertEqual(repr(params), expected_value) - self.assertIs(params.getmode, oracledb.PoolGetMode.WAIT) - def test_4702(self): - "4702 - test extended connect strings for ConnectParams" - test_scenarios = [ - ("getmode", "NOWAIT", oracledb.POOL_GETMODE_NOWAIT), - ("homogeneous", "true", True), - ("homogeneous", "false", False), - ("increment", "2", 2), - ("max", "50", 50), - ("max_lifetime_session", "6000", 6000), - ("max_sessions_per_shard", "5", 5), - ("min", "3", 3), - ("ping_interval", "-1", -1), - ("ping_timeout", "2500", 2500), - ("homogeneous", "on", True), - ("homogeneous", "off", False), - ("timeout", "3000", 3000), - ("wait_timeout", "300", 300), - ] - host = "host_4702" - service_name = "service_4702" - for name, str_value, actual_value in test_scenarios: - conn_string = f"{host}/{service_name}?pyo.{name}={str_value}" - with self.subTest(name=name, value=str_value): - params = oracledb.PoolParams() - if name == "min" and actual_value > params.max: - params.set(max=actual_value) - params.parse_connect_string(conn_string) - self.assertEqual(params.host, host) - self.assertEqual(params.service_name, service_name) - self.assertEqual(getattr(params, name), actual_value) +def test_4701(test_env): + "4701 - test PoolParams repr()" + values = [ + ("min", 3), + ("max", 10), + ("increment", 4), + ("connectiontype", oracledb.Connection), + ("getmode", oracledb.POOL_GETMODE_WAIT), + ("homogeneous", True), + ("timeout", 60), + ("wait_timeout", 20), + ("max_lifetime_session", 80), + ("session_callback", lambda c: None), + ("max_sessions_per_shard", 4), + ("soda_metadata_cache", False), + ("ping_interval", 50), + ("ping_timeout", 2500), + ("user", test_env.main_user), + ("proxy_user", test_env.proxy_user), + ("host", "my_host1"), + ("port", 1522), + ("protocol", "tcp"), + ("https_proxy", "proxy_4701"), + ("https_proxy_port", 4701), + ("service_name", "my_service_name1"), + ("instance_name", "my_instance_name"), + ("sid", "my_sid1"), + ("server_type", "dedicated"), + ("cclass", "cclass_1"), + ("purity", oracledb.PURITY_SELF), + ("expire_time", 60), + ("retry_count", 6), + ("retry_delay", 10), + ("tcp_connect_timeout", 40.0), + ("ssl_server_dn_match", False), + ("ssl_server_cert_dn", "CN=unknown4701a"), + ("wallet_location", "/tmp/wallet_loc1a"), + ("events", True), + ("externalauth", True), + ("mode", oracledb.AUTH_MODE_SYSDBA), + ("disable_oob", True), + ("stmtcachesize", 25), + ("edition", "edition_4701"), + ("tag", "tag4701"), + ("matchanytag", True), + ("config_dir", "config_dir_4701"), + ("appcontext", [("a", "b", "c")]), + ("shardingkey", [1, 2, 3]), + ("supershardingkey", [4]), + ("debug_jdwp", "host=host;port=1523"), + ("connection_id_prefix", "prefix4701"), + ("ssl_context", None), + ("sdu", 16384), + ("pool_boundary", "transaction"), + ("use_tcp_fast_open", True), + ("ssl_version", ssl.TLSVersion.TLSv1_2), + ("program", "my_program"), + 
("machine", "my_machine"), + ("terminal", "my_terminal"), + ("osuser", "me"), + ("driver_name", "custom_driver"), + ("use_sni", True), + ("thick_mode_dsn_passthrough", True), + ("extra_auth_params", dict(extra1="A", extra2="B")), + ("pool_name", "my_pool"), + ] + params = oracledb.PoolParams(**dict(values)) + parts = [f"{name}={value!r}" for name, value in values] + expected_value = f"PoolParams({', '.join(parts)})" + assert repr(params) == expected_value + assert params.getmode is oracledb.PoolGetMode.WAIT -if __name__ == "__main__": - test_env.run_test_cases() +def test_4702(): + "4702 - test extended connect strings for ConnectParams" + test_scenarios = [ + ("getmode", "NOWAIT", oracledb.POOL_GETMODE_NOWAIT), + ("homogeneous", "true", True), + ("homogeneous", "false", False), + ("increment", "2", 2), + ("max", "50", 50), + ("max_lifetime_session", "6000", 6000), + ("max_sessions_per_shard", "5", 5), + ("min", "3", 3), + ("ping_interval", "-1", -1), + ("ping_timeout", "2500", 2500), + ("homogeneous", "on", True), + ("homogeneous", "off", False), + ("timeout", "3000", 3000), + ("wait_timeout", "300", 300), + ] + host = "host_4702" + service_name = "service_4702" + for name, str_value, actual_value in test_scenarios: + conn_string = f"{host}/{service_name}?pyo.{name}={str_value}" + params = oracledb.PoolParams() + if name == "min" and actual_value > params.max: + params.set(max=actual_value) + params.parse_connect_string(conn_string) + assert params.host == host + assert params.service_name == service_name + assert getattr(params, name) == actual_value diff --git a/tests/test_4800_timestamp_ltz_var.py b/tests/test_4800_timestamp_ltz_var.py index 8b9bcd13..867413e8 100644 --- a/tests/test_4800_timestamp_ltz_var.py +++ b/tests/test_4800_timestamp_ltz_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,216 +29,224 @@ import datetime import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - base_date = datetime.datetime(2022, 6, 2) - for i in range(1, 11): - if i % 4 == 0: - tz_hours = i - elif i % 2 == 0: - tz_hours = i + 0.5 - else: - tz_hours = -(i + 0.5) - tz_offset = datetime.timedelta(hours=tz_hours) - microseconds = int(str(i * 50).ljust(6, "0")) +import pytest + + +@pytest.fixture(scope="module") +def module_data(): + data = [] + base_date = datetime.datetime(2022, 6, 2) + for i in range(1, 11): + if i % 4 == 0: + tz_hours = i + elif i % 2 == 0: + tz_hours = i + 0.5 + else: + tz_hours = -(i + 0.5) + tz_offset = datetime.timedelta(hours=tz_hours) + microseconds = int(str(i * 50).ljust(6, "0")) + offset = datetime.timedelta( + days=i, seconds=i * 2, microseconds=microseconds + ) + col = base_date + tz_offset + offset + if i % 2: + tz_offset = datetime.timedelta(hours=6) + microseconds = int(str(i * 125).ljust(6, "0")) offset = datetime.timedelta( - days=i, seconds=i * 2, microseconds=microseconds + days=i + 1, seconds=i * 3, microseconds=microseconds ) - col = base_date + tz_offset + offset - if i % 2: - tz_offset = datetime.timedelta(hours=6) - microseconds = int(str(i * 125).ljust(6, "0")) - offset = datetime.timedelta( - days=i + 1, seconds=i * 3, microseconds=microseconds - ) - nullable_col = base_date + offset - else: - nullable_col = None - precision_col = datetime.datetime(2009, 12, 14) - data_tuple = (i, col, nullable_col, precision_col) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def test_4800(self): - "4800 - test binding in a timestamp" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) - self.cursor.execute( - """ - select * - from TestTimestampLTZs - where TimestampLTZCol = :value - """, - value=datetime.datetime(2022, 6, 6, 18, 30, 10, 250000), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_4801(self): - "4801 - test binding in a null" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) - self.cursor.execute( - """ - select * - from TestTimestampLTZs - where TimestampLTZCol = :value - """, - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_4802(self): - "4802 - test binding out with set input sizes defined" - bv = self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) - self.cursor.execute( - """ - begin - :value := to_timestamp('20220603', 'YYYYMMDD'); - end; - """ - ) - self.assertEqual(bv["value"].getvalue(), datetime.datetime(2022, 6, 3)) - - def test_4803(self): - "4803 - test binding in/out with set input sizes defined" - bv = self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) - self.cursor.execute( - """ - begin - :value := :value + 5.25; - end; - """, - value=datetime.datetime(2022, 5, 10, 12, 0, 0), - ) - self.assertEqual( - bv["value"].getvalue(), datetime.datetime(2022, 5, 15, 18, 0, 0) - ) - - def test_4804(self): - "4804 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_TIMESTAMP_LTZ) - self.cursor.execute( - """ - begin - :value := to_date('20220601 15:38:12', 'YYYYMMDD HH24:MI:SS'); - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2022, 6, 1, 15, 38, 12) - ) - - def 
test_4805(self): - "4805 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_TIMESTAMP_LTZ) - var.setvalue(0, datetime.datetime(2022, 5, 30, 6, 0, 0)) - self.cursor.execute( - """ - begin - :value := :value + 5.25; - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2022, 6, 4, 12, 0, 0) - ) - - def test_4806(self): - "4806 - test cursor description is accurate" - self.cursor.execute("select * from TestTimestampLTZs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "TIMESTAMPLTZCOL", - oracledb.DB_TYPE_TIMESTAMP_LTZ, - 23, - None, - 0, - 6, - False, - ), - ( - "NULLABLECOL", - oracledb.DB_TYPE_TIMESTAMP_LTZ, - 23, - None, - 0, - 6, - True, - ), - ( - "TIMESTAMPLTZPRECISIONCOL", - oracledb.DB_TYPE_TIMESTAMP_LTZ, - 23, - None, - 0, - 5, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_4807(self): - "4807 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestTimestampLTZs order by IntCol") - self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_4808(self): - "4808 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestTimestampLTZs order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_4809(self): - "4809 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestTimestampLTZs - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertEqual(self.cursor.fetchone(), None) - - def test_4810(self): - "4810 - test binding a timestamp with zero fractional seconds" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) - self.cursor.execute( - """ - select * - from TestTimestampLTZs - where trunc(TimestampLTZCol) = :value - """, - value=datetime.datetime(2022, 6, 12), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[10]]) - - def test_4811(self): - "4811 - test binding a timestamp with datetime.date as input" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) - self.cursor.execute( - """ - select * - from TestTimestampLTZs - where trunc(TimestampLTZCol) = :value - """, - value=datetime.date(2022, 6, 12), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[10]]) - - -if __name__ == "__main__": - test_env.run_test_cases() + nullable_col = base_date + offset + else: + nullable_col = None + precision_col = datetime.datetime(2009, 12, 14) + data_tuple = (i, col, nullable_col, precision_col) + data.append(data_tuple) + return data + + +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key + + +def test_4800(cursor, module_data_by_key): + "4800 - test binding in a timestamp" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) + cursor.execute( + """ + select * + from TestTimestampLTZs + where TimestampLTZCol = :value + """, + value=datetime.datetime(2022, 
6, 6, 18, 30, 10, 250000), + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_4801(cursor): + "4801 - test binding in a null" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) + cursor.execute( + """ + select * + from TestTimestampLTZs + where TimestampLTZCol = :value + """, + value=None, + ) + assert cursor.fetchall() == [] + + +def test_4802(cursor): + "4802 - test binding out with set input sizes defined" + bv = cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) + cursor.execute( + """ + begin + :value := to_timestamp('20220603', 'YYYYMMDD'); + end; + """ + ) + assert bv["value"].getvalue() == datetime.datetime(2022, 6, 3) + + +def test_4803(cursor): + "4803 - test binding in/out with set input sizes defined" + bv = cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) + cursor.execute( + """ + begin + :value := :value + 5.25; + end; + """, + value=datetime.datetime(2022, 5, 10, 12, 0, 0), + ) + assert bv["value"].getvalue() == datetime.datetime(2022, 5, 15, 18, 0, 0) + + +def test_4804(cursor): + "4804 - test binding out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_TIMESTAMP_LTZ) + cursor.execute( + """ + begin + :value := to_date('20220601 15:38:12', 'YYYYMMDD HH24:MI:SS'); + end; + """, + value=var, + ) + assert var.getvalue() == datetime.datetime(2022, 6, 1, 15, 38, 12) + + +def test_4805(cursor): + "4805 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_TIMESTAMP_LTZ) + var.setvalue(0, datetime.datetime(2022, 5, 30, 6, 0, 0)) + cursor.execute( + """ + begin + :value := :value + 5.25; + end; + """, + value=var, + ) + assert var.getvalue() == datetime.datetime(2022, 6, 4, 12, 0, 0) + + +def test_4806(cursor): + "4806 - test cursor description is accurate" + cursor.execute("select * from TestTimestampLTZs") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "TIMESTAMPLTZCOL", + oracledb.DB_TYPE_TIMESTAMP_LTZ, + 23, + None, + 0, + 6, + False, + ), + ( + "NULLABLECOL", + oracledb.DB_TYPE_TIMESTAMP_LTZ, + 23, + None, + 0, + 6, + True, + ), + ( + "TIMESTAMPLTZPRECISIONCOL", + oracledb.DB_TYPE_TIMESTAMP_LTZ, + 23, + None, + 0, + 5, + True, + ), + ] + assert cursor.description == expected_value + + +def test_4807(cursor, module_data): + "4807 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestTimestampLTZs order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_4808(cursor, module_data): + "4808 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestTimestampLTZs order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + + +def test_4809(cursor, module_data_by_key): + "4809 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestTimestampLTZs + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None + + +def test_4810(cursor, module_data_by_key): + "4810 - test binding a timestamp with zero fractional seconds" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) + cursor.execute( + """ + select * + from TestTimestampLTZs + where 
trunc(TimestampLTZCol) = :value + """, + value=datetime.datetime(2022, 6, 12), + ) + assert cursor.fetchall() == [module_data_by_key[10]] + + +def test_4811(cursor, module_data_by_key): + "4811 - test binding a timestamp with datetime.date as input" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_LTZ) + cursor.execute( + """ + select * + from TestTimestampLTZs + where trunc(TimestampLTZCol) = :value + """, + value=datetime.date(2022, 6, 12), + ) + assert cursor.fetchall() == [module_data_by_key[10]] diff --git a/tests/test_4900_timestamp_tz_var.py b/tests/test_4900_timestamp_tz_var.py index 197e67e0..b4ac7c67 100644 --- a/tests/test_4900_timestamp_tz_var.py +++ b/tests/test_4900_timestamp_tz_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -29,208 +29,216 @@ import datetime import oracledb -import test_env +import pytest -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - base_date = datetime.datetime(2022, 6, 3) - for i in range(1, 11): - microseconds = int(str(i * 50).ljust(6, "0")) +@pytest.fixture(scope="module") +def module_data(): + data = [] + base_date = datetime.datetime(2022, 6, 3) + for i in range(1, 11): + microseconds = int(str(i * 50).ljust(6, "0")) + offset = datetime.timedelta( + days=i, seconds=i * 2, microseconds=microseconds + ) + col = base_date + offset + if i % 2: + microseconds = int(str(i * 125).ljust(6, "0")) offset = datetime.timedelta( - days=i, seconds=i * 2, microseconds=microseconds + days=i + 1, seconds=i * 3, microseconds=microseconds ) - col = base_date + offset - if i % 2: - microseconds = int(str(i * 125).ljust(6, "0")) - offset = datetime.timedelta( - days=i + 1, seconds=i * 3, microseconds=microseconds - ) - nullable_col = base_date + offset - else: - nullable_col = None - precision_col = datetime.datetime(2009, 12, 14) - data_tuple = (i, col, nullable_col, precision_col) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def test_4900(self): - "4900 - test binding in a timestamp" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) - self.cursor.execute( - """ - select * - from TestTimestampTZs - where TimestampTZCol = :value - """, - value=datetime.datetime(2022, 6, 7, 18, 30, 10, 250000), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_4901(self): - "4901 - test binding in a null" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) - self.cursor.execute( - """ - select * - from TestTimestampTZs - where TimestampTZCol = :value - """, - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_4902(self): - "4902 - test binding out with set input sizes defined" - bv = self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) - self.cursor.execute( - """ - begin - :value := to_timestamp('20220603', 'YYYYMMDD'); - end; - """ - ) - self.assertEqual(bv["value"].getvalue(), datetime.datetime(2022, 6, 3)) - - def test_4903(self): - "4903 - test binding in/out with set input sizes defined" - bv = self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) - self.cursor.execute( - """ - begin - :value := :value + to_dsinterval('5 06:00:00'); - 
end; - """, - value=datetime.datetime(2022, 5, 25), - ) - self.assertEqual( - bv["value"].getvalue(), datetime.datetime(2022, 5, 30, 6, 0, 0) - ) - - def test_4904(self): - "4904 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_TIMESTAMP_TZ) - self.cursor.execute( - """ - begin - :value := to_date('20021231 12:31:00', 'YYYYMMDD HH24:MI:SS'); - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2002, 12, 31, 12, 31, 0) - ) - - def test_4905(self): - "4905 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_TIMESTAMP_TZ) - var.setvalue(0, datetime.datetime(2022, 6, 3, 6, 0, 0)) - self.cursor.execute( - """ - begin - :value := :value + to_dsinterval('5 06:00:00'); - end; - """, - value=var, - ) - self.assertEqual( - var.getvalue(), datetime.datetime(2022, 6, 8, 12, 0, 0) - ) - - def test_4906(self): - "4906 - test cursor description is accurate" - self.cursor.execute("select * from TestTimestampTZs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "TIMESTAMPTZCOL", - oracledb.DB_TYPE_TIMESTAMP_TZ, - 23, - None, - 0, - 6, - False, - ), - ( - "NULLABLECOL", - oracledb.DB_TYPE_TIMESTAMP_TZ, - 23, - None, - 0, - 6, - True, - ), - ( - "TIMESTAMPTZPRECISIONCOL", - oracledb.DB_TYPE_TIMESTAMP_TZ, - 23, - None, - 0, - 7, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_4907(self): - "4907 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestTimestampTZs order by IntCol") - self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_4908(self): - "4908 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestTimestampTZs order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_4909(self): - "4909 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestTimestampTZs - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertEqual(self.cursor.fetchone(), None) - - def test_4910(self): - "4910 - test binding a timestamp with zero fractional seconds" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) - self.cursor.execute( - """ - select * - from TestTimestampTZs - where trunc(TimestampTZCol) = :value - """, - value=datetime.datetime(2022, 6, 8), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - def test_4911(self): - "4911 - test binding a timestamp with datetime.date as input" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) - self.cursor.execute( - """ - select * - from TestTimestampTZs - where trunc(TimestampTZCol) = :value - """, - value=datetime.date(2022, 6, 8), - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[5]]) - - -if __name__ == "__main__": - test_env.run_test_cases() + nullable_col = base_date + offset + else: + nullable_col = None + precision_col = datetime.datetime(2009, 12, 14) + data_tuple = (i, col, 
nullable_col, precision_col) + data.append(data_tuple) + return data + + +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key + + +def test_4900(cursor, module_data_by_key): + "4900 - test binding in a timestamp" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) + cursor.execute( + """ + select * + from TestTimestampTZs + where TimestampTZCol = :value + """, + value=datetime.datetime(2022, 6, 7, 18, 30, 10, 250000), + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_4901(cursor): + "4901 - test binding in a null" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) + cursor.execute( + """ + select * + from TestTimestampTZs + where TimestampTZCol = :value + """, + value=None, + ) + assert cursor.fetchall() == [] + + +def test_4902(cursor): + "4902 - test binding out with set input sizes defined" + bv = cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) + cursor.execute( + """ + begin + :value := to_timestamp('20220603', 'YYYYMMDD'); + end; + """ + ) + assert bv["value"].getvalue() == datetime.datetime(2022, 6, 3) + + +def test_4903(cursor): + "4903 - test binding in/out with set input sizes defined" + bv = cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) + cursor.execute( + """ + begin + :value := :value + to_dsinterval('5 06:00:00'); + end; + """, + value=datetime.datetime(2022, 5, 25), + ) + assert bv["value"].getvalue() == datetime.datetime(2022, 5, 30, 6, 0, 0) + + +def test_4904(cursor): + "4904 - test binding out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_TIMESTAMP_TZ) + cursor.execute( + """ + begin + :value := to_date('20021231 12:31:00', 'YYYYMMDD HH24:MI:SS'); + end; + """, + value=var, + ) + assert var.getvalue() == datetime.datetime(2002, 12, 31, 12, 31, 0) + + +def test_4905(cursor): + "4905 - test binding in/out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_TIMESTAMP_TZ) + var.setvalue(0, datetime.datetime(2022, 6, 3, 6, 0, 0)) + cursor.execute( + """ + begin + :value := :value + to_dsinterval('5 06:00:00'); + end; + """, + value=var, + ) + assert var.getvalue() == datetime.datetime(2022, 6, 8, 12, 0, 0) + + +def test_4906(cursor): + "4906 - test cursor description is accurate" + cursor.execute("select * from TestTimestampTZs") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "TIMESTAMPTZCOL", + oracledb.DB_TYPE_TIMESTAMP_TZ, + 23, + None, + 0, + 6, + False, + ), + ( + "NULLABLECOL", + oracledb.DB_TYPE_TIMESTAMP_TZ, + 23, + None, + 0, + 6, + True, + ), + ( + "TIMESTAMPTZPRECISIONCOL", + oracledb.DB_TYPE_TIMESTAMP_TZ, + 23, + None, + 0, + 7, + True, + ), + ] + assert cursor.description == expected_value + + +def test_4907(cursor, module_data): + "4907 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestTimestampTZs order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_4908(cursor, module_data): + "4908 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestTimestampTZs order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + + +def test_4909(cursor, module_data_by_key): + "4909 - test that fetching a 
single row returns the correct results" + cursor.execute( + """ + select * + from TestTimestampTZs + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None + + +def test_4910(cursor, module_data_by_key): + "4910 - test binding a timestamp with zero fractional seconds" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) + cursor.execute( + """ + select * + from TestTimestampTZs + where trunc(TimestampTZCol) = :value + """, + value=datetime.datetime(2022, 6, 8), + ) + assert cursor.fetchall() == [module_data_by_key[5]] + + +def test_4911(cursor, module_data_by_key): + "4911 - test binding a timestamp with datetime.date as input" + cursor.setinputsizes(value=oracledb.DB_TYPE_TIMESTAMP_TZ) + cursor.execute( + """ + select * + from TestTimestampTZs + where trunc(TimestampTZCol) = :value + """, + value=datetime.date(2022, 6, 8), + ) + assert cursor.fetchall() == [module_data_by_key[5]] diff --git a/tests/test_5000_externalauth.py b/tests/test_5000_externalauth.py index 3ce1e7ea..5f05bfd9 100644 --- a/tests/test_5000_externalauth.py +++ b/tests/test_5000_externalauth.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2022, 2024, Oracle and/or its affiliates. +# Copyright (c) 2022, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -26,202 +26,58 @@ 5000 - Module for testing external authentication """ -import unittest - import oracledb -import test_env - - -@unittest.skipIf( - not test_env.get_external_user(), - "external authentication not supported with this setup", -) -class TestCase(test_env.BaseTestCase): - require_connection = False - - def __verify_connection( - self, connection, expected_user, expected_proxy_user=None - ): - with connection.cursor() as cursor: - cursor.execute( - """ - select - sys_context('userenv', 'session_user'), - sys_context('userenv', 'proxy_user') - from dual - """ - ) - actual_user, actual_proxy_user = cursor.fetchone() - self.assertEqual(actual_user, expected_user.upper()) - self.assertEqual( - actual_proxy_user, - expected_proxy_user and expected_proxy_user.upper(), - ) - - def test_5000(self): - """ - 5000 - test error on creating a pool with user and password specified - and externalauth enabled - """ - with self.assertRaisesFullCode("DPI-1032"): - test_env.get_pool( - min=1, - max=2, - increment=1, - getmode=oracledb.POOL_GETMODE_WAIT, - externalauth=True, - homogeneous=False, - ) - - def test_5001(self): - """ - 5001 - test error on creating a pool without password and with user - specified and externalauth enabled - """ - with self.assertRaisesFullCode("DPI-1032"): - oracledb.create_pool( - user=test_env.get_main_user(), - min=1, - max=2, - increment=1, - getmode=oracledb.POOL_GETMODE_WAIT, - externalauth=True, - homogeneous=False, - ) - - def test_5002(self): - """ - 5002 - test error on creating a pool without user and with password - specified and externalauth enabled - """ - with self.assertRaisesFullCode("DPI-1032"): - oracledb.create_pool( - password=test_env.get_main_password(), - min=1, - max=2, - increment=1, - getmode=oracledb.POOL_GETMODE_WAIT, - externalauth=True, - homogeneous=False, - ) - - def test_5003(self): - """ - 5003 - test creating a pool with user and password specified and - 
externalauth set to False
-        """
-        pool = test_env.get_pool(
+import pytest
+
+
+@pytest.fixture(autouse=True)
+def skip_if_no_external_auth(test_env):
+    if not test_env.external_user:
+        pytest.skip("external authentication not configured")
+
+
+def _verify_connection(conn, expected_user, expected_proxy_user=None):
+    with conn.cursor() as cursor:
+        cursor.execute(
+            """
+            select
+                sys_context('userenv', 'session_user'),
+                sys_context('userenv', 'proxy_user')
+            from dual
+            """
+        )
+        actual_user, actual_proxy_user = cursor.fetchone()
+        assert actual_user == expected_user.upper()
+        assert actual_proxy_user == (
+            expected_proxy_user and expected_proxy_user.upper()
+        )
+
+
+def test_5000(test_env):
+    """
+    5000 - test error on creating a pool with user and password specified
+    and externalauth enabled
+    """
+    with test_env.assert_raises_full_code("DPI-1032"):
+        test_env.get_pool(
             min=1,
             max=2,
             increment=1,
             getmode=oracledb.POOL_GETMODE_WAIT,
-            externalauth=False,
+            externalauth=True,
             homogeneous=False,
         )
-        with pool.acquire() as conn:
-            self.__verify_connection(conn, test_env.get_main_user())
-
-    def test_5004(self):
-        """
-        5004 - test error when connecting with user and password specified
-        and externalauth enabled
-        """
-        with self.assertRaisesFullCode("DPI-1032"):
-            oracledb.connect(
-                user=test_env.get_main_user(),
-                password=test_env.get_main_password(),
-                dsn=test_env.get_connect_string(),
-                externalauth=True,
-            )
-
-    def test_5005(self):
-        """
-        5005 - test error when connecting without username and with password
-        specified and externalauth enabled
-        """
-        with self.assertRaisesFullCode("DPI-1032"):
-            oracledb.connect(
-                password=test_env.get_main_password(),
-                dsn=test_env.get_connect_string(),
-                externalauth=True,
-            )
-
-        # by default externalauth is False
-        with self.assertRaisesFullCode("ORA-01017"):
-            oracledb.connect(
-                password=test_env.get_main_password(),
-                dsn=test_env.get_connect_string(),
-            )
-
-    def test_5006(self):
-        """
-        5006 - test error when connecting without password and with user
-        specified and externalauth enabled
-        """
-        with self.assertRaisesFullCode("ORA-01017"):
-            oracledb.connect(
-                user="[invalid_user]",
-                dsn=test_env.get_connect_string(),
-                externalauth=True,
-            )
-
-        # by default externalauth is False
-        with self.assertRaisesFullCode("ORA-01017"):
-            oracledb.connect(
-                user="[invalid_user]", dsn=test_env.get_connect_string()
-            )
-
-    def test_5007(self):
-        "5007 - test external authentication with invalid proxy user"
-        with self.assertRaisesFullCode("DPI-1069"):
-            oracledb.connect(
-                user=test_env.get_main_user(),
-                dsn=test_env.get_connect_string(),
-                externalauth=True,
-            )
-
-        # by default externalauth is False
-        with self.assertRaisesFullCode("DPY-4001"):
-            oracledb.connect(
-                user=test_env.get_main_user(),
-                dsn=test_env.get_connect_string(),
-            )
-
-    def test_5008(self):
-        """
-        5008 - test creating a connection with user and password specified and
-        externalauth set to False
-        """
-        conn = oracledb.connect(
-            user=test_env.get_main_user(),
-            password=test_env.get_main_password(),
-            dsn=test_env.get_connect_string(),
-            externalauth=False,
-        )
-        self.__verify_connection(conn, test_env.get_main_user())
-
-    def test_5009(self):
-        """
-        5009 - test creating standalone connection with externalauth set to
-        True explicitly
-        """
-        conn = oracledb.connect(
-            dsn=test_env.get_connect_string(), externalauth=True
-        )
-        self.__verify_connection(conn, test_env.get_external_user())
-
-    def test_5010(self):
-        """
-        5010 - test creating standalone connection with 
no user and password - specified and externalauth not set - """ - conn = oracledb.connect(dsn=test_env.get_connect_string()) - self.__verify_connection(conn, test_env.get_external_user()) - - def test_5011(self): - "5011 - test creating a pool with external authentication" - pool = oracledb.create_pool( - dsn=test_env.get_connect_string(), + + +def test_5001(test_env): + """ + 5001 - test error on creating a pool without password and with user + specified and externalauth enabled + """ + with test_env.assert_raises_full_code("DPI-1032"): + oracledb.create_pool( + user=test_env.main_user, min=1, max=2, increment=1, @@ -229,91 +85,237 @@ def test_5011(self): externalauth=True, homogeneous=False, ) - self.assertEqual(pool.opened, 0) - with pool.acquire() as conn: - self.__verify_connection(conn, test_env.get_external_user()) - - def test_5012( - self, - ): - """ - 5012 - test creating a pool without user and password specified and - externalauth not set - """ - pool = oracledb.create_pool( - dsn=test_env.get_connect_string(), + + +def test_5002(test_env): + """ + 5002 - test error on creating a pool without user and with password + specified and externalauth enabled + """ + with test_env.assert_raises_full_code("DPI-1032"): + oracledb.create_pool( + password=test_env.main_password, min=1, max=2, increment=1, getmode=oracledb.POOL_GETMODE_WAIT, - homogeneous=False, - ) - with self.assertRaisesFullCode("ORA-24415"): - pool.acquire() - - def test_5013(self): - "5013 - test pool min is always 0 under external authentication" - pool = oracledb.create_pool( - dsn=test_env.get_connect_string(), - min=5, - max=10, - increment=3, - getmode=oracledb.POOL_GETMODE_WAIT, externalauth=True, homogeneous=False, ) - self.assertEqual(pool.opened, 0) - - def test_5014(self): - "5014 - test pool increment is always 1 under external authentication" - pool = oracledb.create_pool( - dsn=test_env.get_connect_string(), - min=5, - max=10, - increment=3, - getmode=oracledb.POOL_GETMODE_WAIT, + + +def test_5003(test_env): + """ + 5003 - test creating a pool with user and password specified and + externalauth set to False + """ + pool = test_env.get_pool( + min=1, + max=2, + increment=1, + getmode=oracledb.POOL_GETMODE_WAIT, + externalauth=False, + homogeneous=False, + ) + with pool.acquire() as conn: + _verify_connection(conn, test_env.main_user) + + +def test_5004(test_env): + """ + 5004 - test error when connecting with user and password specified + and externalauth enabled + """ + with test_env.assert_raises_full_code("DPI-1032"): + oracledb.connect( + user=test_env.main_user, + password=test_env.main_password, + dsn=test_env.connect_string, externalauth=True, - homogeneous=False, ) - conn1 = pool.acquire() - self.assertEqual(pool.opened, 1) - conn2 = pool.acquire() - self.assertEqual(pool.opened, 2) - conn1.close() - conn2.close() - - def test_5015(self): - "5015 - test external authentication with proxy" - proxy_user = test_env.get_external_user() # proxy user - schema_user = test_env.get_main_user() # schema user - conn1 = oracledb.connect( - user=f"[{schema_user}]", - dsn=test_env.get_connect_string(), + + +def test_5005(test_env): + """ + 5005 - test error when connecting without username and with password + specified and externalauth enabled + """ + with test_env.assert_raises_full_code("DPI-1032"): + oracledb.connect( + password=test_env.main_password, + dsn=test_env.connect_string, externalauth=True, ) - self.__verify_connection(conn1, schema_user, proxy_user) - conn2 = oracledb.connect( - 
user=f"[{schema_user}]", dsn=test_env.get_connect_string() + + # by default externalauth is False + with test_env.assert_raises_full_code("ORA-01017"): + oracledb.connect( + password=test_env.main_password, + dsn=test_env.connect_string, ) - self.__verify_connection(conn2, schema_user, proxy_user) - def test_5016(self): - "5016 - test creating pool using external authentication with proxy" - proxy_user = test_env.get_external_user() - schema_user = test_env.get_main_user() - pool = oracledb.create_pool( + +def test_5006(test_env): + """ + 5006 - test error when connecting without password and with user + specified and externalauth enabled + """ + with test_env.assert_raises_full_code("ORA-01017"): + oracledb.connect( + user="[invalid_user]", + dsn=test_env.connect_string, + externalauth=True, + ) + + # by default externalauth is False + with test_env.assert_raises_full_code("ORA-01017"): + oracledb.connect(user="[invalid_user]", dsn=test_env.connect_string) + + +def test_5007(test_env): + "5007 - test external authentication with invalid proxy user" + with test_env.assert_raises_full_code("DPI-1069"): + oracledb.connect( + user=test_env.main_user, + dsn=test_env.connect_string, externalauth=True, - homogeneous=False, - dsn=test_env.get_connect_string(), - min=1, - max=2, - increment=1, - getmode=oracledb.POOL_GETMODE_WAIT, ) - self.assertEqual(pool.opened, 0) - conn = pool.acquire(user=f"[{schema_user}]") - self.__verify_connection(conn, schema_user, proxy_user) + + # by default externalauth is False + with test_env.assert_raises_full_code("DPY-4001"): + oracledb.connect( + user=test_env.main_user, + dsn=test_env.connect_string, + ) + + +def test_5008(test_env): + """ + 5008 - test creating a connection with user and password specified and + externalauth set to False + """ + conn = oracledb.connect( + user=test_env.main_user, + password=test_env.main_password, + dsn=test_env.connect_string, + externalauth=False, + ) + _verify_connection(conn, test_env.main_user) + + +def test_5009(test_env): + """ + 5009 - test creating standalone connection with externalauth set to + True explicitly + """ + conn = oracledb.connect(dsn=test_env.connect_string, externalauth=True) + _verify_connection(conn, test_env.external_user) + + +def test_5010(test_env): + """ + 5010 - test creating standalone connection with no user and password + specified and externalauth not set + """ + conn = oracledb.connect(dsn=test_env.connect_string) + _verify_connection(conn, test_env.external_user) + + +def test_5011(test_env): + "5011 - test creating a pool with external authentication" + pool = oracledb.create_pool( + dsn=test_env.connect_string, + min=1, + max=2, + increment=1, + getmode=oracledb.POOL_GETMODE_WAIT, + externalauth=True, + homogeneous=False, + ) + assert pool.opened == 0 + with pool.acquire() as conn: + _verify_connection(conn, test_env.external_user) + + +def test_5012(test_env): + """ + 5012 - test creating a pool without user and password specified and + externalauth not set + """ + pool = oracledb.create_pool( + dsn=test_env.connect_string, + min=1, + max=2, + increment=1, + getmode=oracledb.POOL_GETMODE_WAIT, + homogeneous=False, + ) + with test_env.assert_raises_full_code("ORA-24415"): + pool.acquire() + + +def test_5013(test_env): + "5013 - test pool min is always 0 under external authentication" + pool = oracledb.create_pool( + dsn=test_env.connect_string, + min=5, + max=10, + increment=3, + getmode=oracledb.POOL_GETMODE_WAIT, + externalauth=True, + homogeneous=False, + ) + assert pool.opened == 0 + 
+ +def test_5014(test_env): + "5014 - test pool increment is always 1 under external authentication" + pool = oracledb.create_pool( + dsn=test_env.connect_string, + min=5, + max=10, + increment=3, + getmode=oracledb.POOL_GETMODE_WAIT, + externalauth=True, + homogeneous=False, + ) + conn1 = pool.acquire() + assert pool.opened == 1 + conn2 = pool.acquire() + assert pool.opened == 2 + conn1.close() + conn2.close() + + +def test_5015(test_env): + "5015 - test external authentication with proxy" + proxy_user = test_env.external_user + schema_user = test_env.main_user + conn1 = oracledb.connect( + user=f"[{schema_user}]", + dsn=test_env.connect_string, + externalauth=True, + ) + _verify_connection(conn1, schema_user, proxy_user) + conn2 = oracledb.connect( + user=f"[{schema_user}]", dsn=test_env.connect_string + ) + _verify_connection(conn2, schema_user, proxy_user) -if __name__ == "__main__": - test_env.run_test_cases() +def test_5016(test_env): + "5016 - test creating pool using external authentication with proxy" + proxy_user = test_env.external_user + schema_user = test_env.main_user + pool = oracledb.create_pool( + externalauth=True, + homogeneous=False, + dsn=test_env.connect_string, + min=1, + max=2, + increment=1, + getmode=oracledb.POOL_GETMODE_WAIT, + ) + assert pool.opened == 0 + conn = pool.acquire(user=f"[{schema_user}]") + _verify_connection(conn, schema_user, proxy_user) diff --git a/tests/test_5100_arrayvar.py b/tests/test_5100_arrayvar.py index 8b0d012a..665e1fe4 100644 --- a/tests/test_5100_arrayvar.py +++ b/tests/test_5100_arrayvar.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,69 +27,68 @@ """ import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_5100(self): - "5100 - checking the attributes of an array variable" - var = self.cursor.arrayvar(oracledb.DB_TYPE_NUMBER, 1000) - self.assertEqual(var.size, 0) - self.assertEqual(var.values, []) - self.assertEqual(var.num_elements, 1000) - self.assertEqual(var.actual_elements, 0) - - var = self.cursor.arrayvar(oracledb.DB_TYPE_NUMBER, [1, 2]) - self.assertEqual(var.values, [1, 2]) - self.assertEqual(var.num_elements, 2) - self.assertEqual(var.actual_elements, 2) - - def test_5101(self): - "5101 - setting values in an array variable" - var = self.cursor.arrayvar(oracledb.DB_TYPE_VARCHAR, 10, 2000) - self.assertEqual(var.values, []) - self.assertEqual(var.actual_elements, 0) - data = [str(i) for i in range(5)] - var.setvalue(0, data) - self.assertEqual(var.values, data) - self.assertEqual(var.actual_elements, len(data)) - - def test_5102(self): - "5102 - checking the default size of VARCHAR and RAW types" - types = [oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_RAW] - for typ in types: - var = self.cursor.arrayvar(typ, ["ab"]) - self.assertEqual(var.size, 4000) - - def test_5103(self): - "5103 - creating array variables with invalid parameters" - self.assertRaises( - TypeError, self.cursor.arrayvar, oracledb.DB_TYPE_NUMBER, "10", 40 - ) - self.assertRaises( - TypeError, self.cursor.arrayvar, oracledb.DB_TYPE_NUMBER, 10, "40" - ) - self.assertRaises( - TypeError, self.cursor.arrayvar, oracledb.DB_TYPE_NUMBER, 10, [] - ) - - def test_5104(self): - "5104 - declaring an array variable with an incorrect Python type" - with self.assertRaisesFullCode("DPY-3013"): - self.cursor.arrayvar(oracledb.DB_TYPE_NUMBER, [3, "ab"]) - - def test_5105(self): - "5105 - adding more elements than declared to an array variable" - var = self.cursor.arrayvar(oracledb.DB_TYPE_NUMBER, 4) - with self.assertRaisesFullCode("DPY-2016"): - var.setvalue(0, [i for i in range(5)]) - - def test_5106(self): - "5106 - creating an invalid array of arrays" - var = self.cursor.arrayvar(oracledb.DB_TYPE_NUMBER, 4) - with self.assertRaisesFullCode("DPY-3005"): - var.setvalue(1, [1, 2]) - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +def test_5100(cursor): + "5100 - checking the attributes of an array variable" + var = cursor.arrayvar(oracledb.DB_TYPE_NUMBER, 1000) + assert var.size == 0 + assert var.values == [] + assert var.num_elements == 1000 + assert var.actual_elements == 0 + + var = cursor.arrayvar(oracledb.DB_TYPE_NUMBER, [1, 2]) + assert var.values == [1, 2] + assert var.num_elements == 2 + assert var.actual_elements == 2 + + +def test_5101(cursor): + "5101 - setting values in an array variable" + var = cursor.arrayvar(oracledb.DB_TYPE_VARCHAR, 10, 2000) + assert var.values == [] + assert var.actual_elements == 0 + data = [str(i) for i in range(5)] + var.setvalue(0, data) + assert var.values == data + assert var.actual_elements == len(data) + + +def test_5102(cursor): + "5102 - checking the default size of VARCHAR and RAW types" + types = [oracledb.DB_TYPE_VARCHAR, oracledb.DB_TYPE_RAW] + for typ in types: + var = cursor.arrayvar(typ, ["ab"]) + assert var.size == 4000 + + +def test_5103(cursor): + "5103 - creating array variables with invalid parameters" + pytest.raises( + TypeError, cursor.arrayvar, oracledb.DB_TYPE_NUMBER, "10", 
40 + ) + pytest.raises( + TypeError, cursor.arrayvar, oracledb.DB_TYPE_NUMBER, 10, "40" + ) + pytest.raises(TypeError, cursor.arrayvar, oracledb.DB_TYPE_NUMBER, 10, []) + + +def test_5104(cursor, test_env): + "5104 - declaring an array variable with an incorrect Python type" + with test_env.assert_raises_full_code("DPY-3013"): + cursor.arrayvar(oracledb.DB_TYPE_NUMBER, [3, "ab"]) + + +def test_5105(cursor, test_env): + "5105 - adding more elements than declared to an array variable" + var = cursor.arrayvar(oracledb.DB_TYPE_NUMBER, 4) + with test_env.assert_raises_full_code("DPY-2016"): + var.setvalue(0, [i for i in range(5)]) + + +def test_5106(cursor, test_env): + "5106 - creating an invalid array of arrays" + var = cursor.arrayvar(oracledb.DB_TYPE_NUMBER, 4) + with test_env.assert_raises_full_code("DPY-3005"): + var.setvalue(1, [1, 2]) diff --git a/tests/test_5200_sql_parser.py b/tests/test_5200_sql_parser.py index 39c00b32..0628f8e4 100644 --- a/tests/test_5200_sql_parser.py +++ b/tests/test_5200_sql_parser.py @@ -26,211 +26,220 @@ 5200 - Module for testing the SQL parser. """ -import unittest - -import test_env - - -class TestCase(test_env.BaseTestCase): - def test_5200(self): - "5200 - single line comment" - self.cursor.prepare( - "--begin :value2 := :a + :b + :c +:a +3; end;\n" - "begin :value2 := :a + :c +3; end; -- not a :bind_variable" - ) - self.assertEqual(self.cursor.bindnames(), ["VALUE2", "A", "C"]) - - def test_5201(self): - "5201 - multiple line comment" - self.cursor.prepare( - "/*--select * from :a where :a = 1\n" - "select * from table_names where :a = 1*/\n" - "select :table_name, :value from dual" - ) - self.assertEqual(self.cursor.bindnames(), ["TABLE_NAME", "VALUE"]) - - def test_5202(self): - "5202 - constant strings" - statement = """ - begin - :value := to_date('20021231 12:31:00', :format); - end;""" - self.cursor.prepare(statement) - self.assertEqual(self.cursor.bindnames(), ["VALUE", "FORMAT"]) - - def test_5203(self): - "5203 - multiple division operators" - self.cursor.prepare( - """ - select :a / :b, :c / :d - from dual - """ - ) - self.assertEqual(self.cursor.bindnames(), ["A", "B", "C", "D"]) - - def test_5204(self): - "5204 - starting with parentheses" - sql = "(select :a from dual) union (select :b from dual)" - self.cursor.prepare(sql) - self.assertEqual(self.cursor.bindnames(), ["A", "B"]) - - def test_5205(self): - "5205 - invalid quoted bind" - sql = 'select ":test", :a from dual' - self.cursor.prepare(sql) - self.assertEqual(self.cursor.bindnames(), ["A"]) - - def test_5206(self): - "5206 - non-ascii character in the bind name" - sql = "select :méil$ from dual" - self.cursor.prepare(sql) - self.assertEqual(self.cursor.bindnames(), ["MÉIL$"]) - - def test_5207(self): - "5207 - various quoted bind names" - tests = [ - ('select :"percent%" from dual', ["percent%"]), - ('select : "q?marks" from dual', ["q?marks"]), - ('select :"percent%(ens)yah" from dual', ["percent%(ens)yah"]), - ('select : "per % cent" from dual', ["per % cent"]), - ('select :"per cent" from dual', ["per cent"]), - ('select :"par(ens)" from dual', ["par(ens)"]), - ('select :"more/slashes" from dual', ["more/slashes"]), - ('select :"%percent" from dual', ["%percent"]), - ('select :"/slashes/" from dual', ["/slashes/"]), - ('select :"1col:on" from dual', ["1col:on"]), - ('select :"col:ons" from dual', ["col:ons"]), - ('select :"more :: %colons%" from dual', ["more :: %colons%"]), - ('select :"more/slashes" from dual', ["more/slashes"]), - ('select :"spaces % spaces" from dual', 
["spaces % spaces"]), - ('select "col:nns", :"col:ons", :id from dual', ["col:ons", "ID"]), - ] - for sql, expected in tests: - with self.subTest(sql=sql, expected=expected): - self.cursor.prepare(sql) - self.assertEqual(self.cursor.bindnames(), expected) - - def test_5208(self): - "5208 - sql containing quoted identifiers and strings" - sql = 'select "/*_value1" + : "VaLue_2" + :"*/3VALUE" from dual' - self.cursor.prepare(sql) - self.assertEqual(self.cursor.bindnames(), ["VaLue_2", "*/3VALUE"]) - - def test_5209(self): - "5209 - statement containing simple strings" - sql = """select '"string_1"', :bind_1, ':string_2' from dual""" - self.cursor.prepare(sql) - self.assertEqual(self.cursor.bindnames(), ["BIND_1"]) - - def test_5210(self): - "5210 - bind variables between comment blocks" - self.cursor.prepare( - """ - select - /* comment 1 with /* */ - :a, - /* comment 2 with another /* */ - :b - /* comment 3 * * * / */, - :c - from dual - """ - ) - self.assertEqual(self.cursor.bindnames(), ["A", "B", "C"]) - - def test_5211(self): - "5211 - bind variables between q-strings" - self.cursor.prepare( - """ - select - :a, - q'{This contains ' and " and : just fine}', - :b, - q'[This contains ' and " and : just fine]', - :c, - q'', - :d, - q'(This contains ' and " and : just fine)', - :e, - q'$This contains ' and " and : just fine$', - :f - from dual - """ - ) - self.assertEqual( - self.cursor.bindnames(), ["A", "B", "C", "D", "E", "F"] - ) - - @unittest.skipUnless(test_env.has_client_version(19), "unsupported client") - def test_5212(self): - "5212 - bind variables between JSON constants" - self.cursor.prepare( - """ - select - json_object('foo':dummy), - :bv1, - json_object('foo'::bv2), - :bv3, - json { 'key1': 57, 'key2' : 58 }, - :bv4 - from dual - """ - ) - self.assertEqual(self.cursor.bindnames(), ["BV1", "BV2", "BV3", "BV4"]) - - def test_5213(self): - "5213 - multiple line comment with multiple asterisks" - self.cursor.prepare( - "/****--select * from :a where :a = 1\n" - "select * from table_names where :a = 1****/\n" - "select :table_name, :value from dual" - ) - self.assertEqual(self.cursor.bindnames(), ["TABLE_NAME", "VALUE"]) - - def test_5214(self): - "5214 - qstring without a closing quote" - with self.assertRaisesFullCode("DPY-2041"): - self.cursor.prepare("select q'[something from dual") - - def test_5215_different_space_combinations(self): - "5215 - different space combinations with :=" - self.cursor.prepare( - """ - begin :value2 := - :a + :b + :c +:a +3; end; - begin :value2 - := - :a + :c +3; end; - """ - ) - self.assertEqual(self.cursor.bindnames(), ["VALUE2", "A", "B", "C"]) - - def test_5216_binds_between_comment_blocks_with_quotes(self): - "5216 - bind variables between multiple comment blocks with quotes" - self.cursor.prepare( - """ - select - /* ' comment 1 */ - :a, - /* "comment " 2 ' */:b - /* comment 3 '*/, - :c - /* comment 4 ""*/ - from dual - """ - ) - self.assertEqual(self.cursor.bindnames(), ["A", "B", "C"]) - - def test_5217_binds_missing_quote_exception(self): - "5217 - query with a missing end quote" - with self.assertRaisesFullCode("DPY-2041"): - self.cursor.prepare("select 'abc, :a from dual") - - def test_5218_binds_missing_quote_exception(self): - "5218 - q-string with wrong closing symbols" - with self.assertRaisesFullCode("DPY-2041"): - self.cursor.prepare("select q'[abc'], 5 from dual") - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +def test_5200(cursor): + "5200 - single line comment" + cursor.prepare( + "--begin 
:value2 := :a + :b + :c +:a +3; end;\n" + "begin :value2 := :a + :c +3; end; -- not a :bind_variable" + ) + assert cursor.bindnames() == ["VALUE2", "A", "C"] + + +def test_5201(cursor): + "5201 - multiple line comment" + cursor.prepare( + "/*--select * from :a where :a = 1\n" + "select * from table_names where :a = 1*/\n" + "select :table_name, :value from dual" + ) + assert cursor.bindnames() == ["TABLE_NAME", "VALUE"] + + +def test_5202(cursor): + "5202 - constant strings" + statement = """ + begin + :value := to_date('20021231 12:31:00', :format); + end;""" + cursor.prepare(statement) + assert cursor.bindnames() == ["VALUE", "FORMAT"] + + +def test_5203(cursor): + "5203 - multiple division operators" + cursor.prepare( + """ + select :a / :b, :c / :d + from dual + """ + ) + assert cursor.bindnames() == ["A", "B", "C", "D"] + + +def test_5204(cursor): + "5204 - starting with parentheses" + sql = "(select :a from dual) union (select :b from dual)" + cursor.prepare(sql) + assert cursor.bindnames() == ["A", "B"] + + +def test_5205(cursor): + "5205 - invalid quoted bind" + sql = 'select ":test", :a from dual' + cursor.prepare(sql) + assert cursor.bindnames() == ["A"] + + +def test_5206(cursor): + "5206 - non-ascii character in the bind name" + sql = "select :méil$ from dual" + cursor.prepare(sql) + assert cursor.bindnames() == ["MÉIL$"] + + +def test_5207(cursor): + "5207 - various quoted bind names" + tests = [ + ('select :"percent%" from dual', ["percent%"]), + ('select : "q?marks" from dual', ["q?marks"]), + ('select :"percent%(ens)yah" from dual', ["percent%(ens)yah"]), + ('select : "per % cent" from dual', ["per % cent"]), + ('select :"per cent" from dual', ["per cent"]), + ('select :"par(ens)" from dual', ["par(ens)"]), + ('select :"more/slashes" from dual', ["more/slashes"]), + ('select :"%percent" from dual', ["%percent"]), + ('select :"/slashes/" from dual', ["/slashes/"]), + ('select :"1col:on" from dual', ["1col:on"]), + ('select :"col:ons" from dual', ["col:ons"]), + ('select :"more :: %colons%" from dual', ["more :: %colons%"]), + ('select :"more/slashes" from dual', ["more/slashes"]), + ('select :"spaces % spaces" from dual', ["spaces % spaces"]), + ('select "col:nns", :"col:ons", :id from dual', ["col:ons", "ID"]), + ] + for sql, expected in tests: + cursor.prepare(sql) + assert cursor.bindnames() == expected + + +def test_5208(cursor): + "5208 - sql containing quoted identifiers and strings" + sql = 'select "/*_value1" + : "VaLue_2" + :"*/3VALUE" from dual' + cursor.prepare(sql) + assert cursor.bindnames() == ["VaLue_2", "*/3VALUE"] + + +def test_5209(cursor): + "5209 - statement containing simple strings" + sql = """select '"string_1"', :bind_1, ':string_2' from dual""" + cursor.prepare(sql) + assert cursor.bindnames() == ["BIND_1"] + + +def test_5210(cursor): + "5210 - bind variables between comment blocks" + cursor.prepare( + """ + select + /* comment 1 with /* */ + :a, + /* comment 2 with another /* */ + :b + /* comment 3 * * * / */, + :c + from dual + """ + ) + assert cursor.bindnames() == ["A", "B", "C"] + + +def test_5211(cursor): + "5211 - bind variables between q-strings" + cursor.prepare( + """ + select + :a, + q'{This contains ' and " and : just fine}', + :b, + q'[This contains ' and " and : just fine]', + :c, + q'', + :d, + q'(This contains ' and " and : just fine)', + :e, + q'$This contains ' and " and : just fine$', + :f + from dual + """ + ) + assert cursor.bindnames() == ["A", "B", "C", "D", "E", "F"] + + +def test_5212(cursor, test_env): + "5212 - bind 
variables between JSON constants" + if not test_env.has_client_version(19): + pytest.skip("unsupported client") + cursor.prepare( + """ + select + json_object('foo':dummy), + :bv1, + json_object('foo'::bv2), + :bv3, + json { 'key1': 57, 'key2' : 58 }, + :bv4 + from dual + """ + ) + assert cursor.bindnames() == ["BV1", "BV2", "BV3", "BV4"] + + +def test_5213(cursor): + "5213 - multiple line comment with multiple asterisks" + cursor.prepare( + "/****--select * from :a where :a = 1\n" + "select * from table_names where :a = 1****/\n" + "select :table_name, :value from dual" + ) + assert cursor.bindnames() == ["TABLE_NAME", "VALUE"] + + +def test_5214(cursor, test_env): + "5214 - qstring without a closing quote" + with test_env.assert_raises_full_code("DPY-2041"): + cursor.prepare("select q'[something from dual") + + +def test_5215(cursor): + "5215 - different space combinations with :=" + cursor.prepare( + """ + begin :value2 := + :a + :b + :c +:a +3; end; + begin :value2 + := + :a + :c +3; end; + """ + ) + assert cursor.bindnames() == ["VALUE2", "A", "B", "C"] + + +def test_5216(cursor): + "5216 - bind variables between multiple comment blocks with quotes" + cursor.prepare( + """ + select + /* ' comment 1 */ + :a, + /* "comment " 2 ' */:b + /* comment 3 '*/, + :c + /* comment 4 ""*/ + from dual + """ + ) + assert cursor.bindnames() == ["A", "B", "C"] + + +def test_5217(cursor, test_env): + "5217 - query with a missing end quote" + with test_env.assert_raises_full_code("DPY-2041"): + cursor.prepare("select 'abc, :a from dual") + + +def test_5218(cursor, test_env): + "5218 - q-string with wrong closing symbols" + with test_env.assert_raises_full_code("DPY-2041"): + cursor.prepare("select q'[abc'], 5 from dual") diff --git a/tests/test_5300_connection_async.py b/tests/test_5300_connection_async.py index d781b31d..0ea2ab89 100644 --- a/tests/test_5300_connection_async.py +++ b/tests/test_5300_connection_async.py @@ -27,712 +27,670 @@ """ import asyncio -import random -import string import oracledb -import test_env +import pytest -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - requires_connection = False +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass - async def __connect_and_drop(self): - """ - Connect to the database, perform a query and drop the connection. - """ - await asyncio.sleep(0.1) - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute("select count(*) from TestNumbers") - (count,) = await cursor.fetchone() - self.assertEqual(count, 10) - - async def __verify_fetched_data(self, connection): - expected_data = [f"String {i + 1}" for i in range(10)] - sql = "select StringCol from TestStrings order by IntCol" - for i in range(5): - with connection.cursor() as cursor: - await cursor.execute(sql) - fetched_data = [s async for s, in cursor] - self.assertEqual(fetched_data, expected_data) - - async def __verify_attributes(self, connection, attr_name, value, sql): - setattr(connection, attr_name, value) - cursor = connection.cursor() - await cursor.execute(sql) - (result,) = await cursor.fetchone() - self.assertEqual(result, value, f"{attr_name} value mismatch") - async def __verify_connect_arg(self, arg_name, arg_value, sql): - args = {} - args[arg_name] = arg_value - conn = await test_env.get_connection_async(**args) +async def _connect_and_drop(test_env): + """ + Connect to the database, perform a query and drop the connection. 
+ """ + await asyncio.sleep(0.1) + async with test_env.get_connection_async() as conn: cursor = conn.cursor() - await cursor.execute(sql) - (fetched_value,) = await cursor.fetchone() - self.assertEqual(fetched_value, arg_value) - - async def test_5300(self): - "5300 - simple connection to database" - async with test_env.get_connection_async() as conn: - self.assertEqual( - conn.username, test_env.get_main_user(), "user name differs" - ) - self.assertEqual( - conn.dsn, test_env.get_connect_string(), "dsn differs" - ) - self.assertTrue(conn.thin) - - async def test_5303(self): - "5303 - test connection end-to-end tracing attributes" - async with test_env.get_connection_async() as conn: - if not await self.is_on_oracle_cloud(conn): - sql = """select dbop_name from v$sql_monitor - where sid = sys_context('userenv', 'sid') - and status = 'EXECUTING'""" - await self.__verify_attributes( - conn, "dbop", "oracledb_dbop", sql - ) - sql = "select sys_context('userenv', 'action') from dual" - await self.__verify_attributes( - conn, "action", "oracledb_Action", sql - ) - await self.__verify_attributes(conn, "action", None, sql) - sql = "select sys_context('userenv', 'module') from dual" - await self.__verify_attributes( - conn, "module", "oracledb_Module", sql - ) - await self.__verify_attributes(conn, "module", None, sql) - sql = "select sys_context('userenv', 'client_info') from dual" - await self.__verify_attributes( - conn, "clientinfo", "oracledb_cinfo", sql - ) - await self.__verify_attributes(conn, "clientinfo", None, sql) - sql = ( - "select sys_context('userenv', 'client_identifier') from dual" - ) - await self.__verify_attributes( - conn, "client_identifier", "oracledb_cid", sql - ) - await self.__verify_attributes( - conn, "client_identifier", None, sql - ) - - async def test_5304(self): - "5304 - test use of autocommit" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - other_conn = await test_env.get_connection_async() - other_cursor = other_conn.cursor() - await cursor.execute("truncate table TestTempTable") - await cursor.execute( - "insert into TestTempTable (IntCol) values (1)" - ) - await other_cursor.execute("select IntCol from TestTempTable") - self.assertEqual(await other_cursor.fetchall(), []) - conn.autocommit = True - await cursor.execute( - "insert into TestTempTable (IntCol) values (2)" - ) - await other_cursor.execute( - "select IntCol from TestTempTable order by IntCol" - ) - self.assertEqual(await other_cursor.fetchall(), [(1,), (2,)]) - - async def test_5305(self): - "5305 - connection to database with bad connect string" - with self.assertRaisesFullCode( - "DPY-4000", "DPY-4026", "DPY-4027", "ORA-12154" + await cursor.execute("select count(*) from TestNumbers") + (count,) = await cursor.fetchone() + assert count == 10 + + +async def _verify_fetched_data(conn): + expected_data = [f"String {i + 1}" for i in range(10)] + sql = "select StringCol from TestStrings order by IntCol" + for i in range(5): + with conn.cursor() as cursor: + await cursor.execute(sql) + fetched_data = [s async for s, in cursor] + assert fetched_data == expected_data + + +async def _verify_attributes(conn, attr_name, value, sql): + setattr(conn, attr_name, value) + cursor = conn.cursor() + await cursor.execute(sql) + (result,) = await cursor.fetchone() + assert result == value, f"{attr_name} value mismatch" + + +async def _verify_connect_arg(test_env, arg_name, arg_value, sql): + args = {} + args[arg_name] = arg_value + conn = await test_env.get_connection_async(**args) + 
cursor = conn.cursor() + await cursor.execute(sql) + (fetched_value,) = await cursor.fetchone() + assert fetched_value == arg_value + + +async def test_5300(test_env): + "5300 - simple connection to database" + async with test_env.get_connection_async() as conn: + assert conn.username == test_env.main_user, "user name differs" + assert conn.dsn == test_env.connect_string, "dsn differs" + assert conn.thin + + +async def test_5303(test_env): + "5303 - test connection end-to-end tracing attributes" + async with test_env.get_connection_async() as conn: + if not test_env.is_on_oracle_cloud: + sql = """select dbop_name from v$sql_monitor + where sid = sys_context('userenv', 'sid') + and status = 'EXECUTING'""" + await _verify_attributes(conn, "dbop", "oracledb_dbop", sql) + sql = "select sys_context('userenv', 'action') from dual" + await _verify_attributes(conn, "action", "oracledb_Action", sql) + await _verify_attributes(conn, "action", None, sql) + sql = "select sys_context('userenv', 'module') from dual" + await _verify_attributes(conn, "module", "oracledb_Module", sql) + await _verify_attributes(conn, "module", None, sql) + sql = "select sys_context('userenv', 'client_info') from dual" + await _verify_attributes(conn, "clientinfo", "oracledb_cinfo", sql) + await _verify_attributes(conn, "clientinfo", None, sql) + sql = "select sys_context('userenv', 'client_identifier') from dual" + await _verify_attributes( + conn, "client_identifier", "oracledb_cid", sql + ) + await _verify_attributes(conn, "client_identifier", None, sql) + + +async def test_5304(test_env): + "5304 - test use of autocommit" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + other_conn = await test_env.get_connection_async() + other_cursor = other_conn.cursor() + await cursor.execute("truncate table TestTempTable") + await cursor.execute("insert into TestTempTable (IntCol) values (1)") + await other_cursor.execute("select IntCol from TestTempTable") + assert await other_cursor.fetchall() == [] + conn.autocommit = True + await cursor.execute("insert into TestTempTable (IntCol) values (2)") + await other_cursor.execute( + "select IntCol from TestTempTable order by IntCol" + ) + assert await other_cursor.fetchall() == [(1,), (2,)] + + +async def test_5305(test_env): + "5305 - connection to database with bad connect string" + with test_env.assert_raises_full_code( + "DPY-4000", "DPY-4026", "DPY-4027", "ORA-12154" + ): + await oracledb.connect_async("not a valid connect string!!") + with test_env.assert_raises_full_code("DPY-4000", "DPY-4001"): + await oracledb.connect( + test_env.main_user + "@" + test_env.connect_string + ) + + +async def test_5306(test_env): + "5306 - connection to database with bad password" + with test_env.assert_raises_full_code("ORA-01017"): + await test_env.get_connection_async( + password=test_env.main_password + "X", + ) + + +async def test_5307(skip_if_drcp, test_env): + "5307 - test changing password" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + async with test_env.get_connection_async() as conn: + new_password = test_env.get_random_string(20) + await conn.changepassword(test_env.main_password, new_password) + conn = await test_env.get_connection_async(password=new_password) + await conn.changepassword(new_password, test_env.main_password) + + +async def test_5308(skip_if_drcp, test_env): + "5308 - test changing password to an invalid value" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle 
Cloud are strictly controlled") + async with test_env.get_connection_async() as conn: + new_password = "1" * 1500 + with test_env.assert_raises_full_code("ORA-01017", "ORA-00988"): + await conn.changepassword(test_env.main_password, new_password) + with test_env.assert_raises_full_code( + "ORA-01017", "ORA-28008", "ORA-00988" ): - await oracledb.connect_async("not a valid connect string!!") - with self.assertRaisesFullCode("DPY-4000", "DPY-4001"): - await oracledb.connect( - test_env.get_main_user() + "@" + test_env.get_connect_string() - ) - - async def test_5306(self): - "5306 - connection to database with bad password" - with self.assertRaisesFullCode("ORA-01017"): - await test_env.get_connection_async( - password=test_env.get_main_password() + "X", - ) - - @test_env.skip_if_drcp() - async def test_5307(self): - "5307 - test changing password" - async with test_env.get_connection_async() as conn: - if await self.is_on_oracle_cloud(conn): - self.skipTest( - "passwords on Oracle Cloud are strictly controlled" - ) - sys_random = random.SystemRandom() - new_password = "".join( - sys_random.choice(string.ascii_letters) for i in range(20) - ) - await conn.changepassword( - test_env.get_main_password(), new_password - ) - conn = await test_env.get_connection_async(password=new_password) - await conn.changepassword( - new_password, test_env.get_main_password() - ) - - @test_env.skip_if_drcp() - async def test_5308(self): - "5308 - test changing password to an invalid value" - async with test_env.get_connection_async() as conn: - if await self.is_on_oracle_cloud(conn): - self.skipTest( - "passwords on Oracle Cloud are strictly controlled" - ) - new_password = "1" * 1500 - with self.assertRaisesFullCode("ORA-01017", "ORA-00988"): - await conn.changepassword( - test_env.get_main_password(), new_password - ) - with self.assertRaisesFullCode( - "ORA-01017", "ORA-28008", "ORA-00988" - ): - await conn.changepassword( - "incorrect old password", new_password - ) - - @test_env.skip_if_drcp() - async def test_5309(self): - "5309 - test connecting with password containing / and @ symbols" - async with test_env.get_connection_async() as conn: - if await self.is_on_oracle_cloud(conn): - self.skipTest( - "passwords on Oracle Cloud are strictly controlled" - ) - sys_random = random.SystemRandom() - chars = list( - sys_random.choice(string.ascii_letters) for i in range(20) - ) - chars[4] = "/" - chars[8] = "@" - new_password = "".join(chars) - await conn.changepassword( - test_env.get_main_password(), new_password - ) - try: - async with test_env.get_connection_async( - password=new_password - ): - pass - finally: - await conn.changepassword( - new_password, test_env.get_main_password() - ) - - async def test_5310(self): - "5310 - confirm an exception is raised after closing a connection" - async with await test_env.get_connection_async() as conn: - await conn.close() - with self.assertRaisesFullCode("DPY-1001"): - await conn.rollback() - - async def test_5312(self): - "5312 - connection version is a string" - async with test_env.get_connection_async() as conn: - self.assertIsInstance(conn.version, str) - - async def test_5313(self): - "5313 - connection rolls back before close" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - other_conn = await test_env.get_connection_async() - other_cursor = other_conn.cursor() - await other_cursor.execute( - "insert into TestTempTable (IntCol) values (1)" - ) - other_cursor.close() - 
await other_conn.close() - await cursor.execute("select count(*) from TestTempTable") - (count,) = await cursor.fetchone() - self.assertEqual(count, 0) - - async def test_5315(self): - "5315 - multiple connections to database with multiple threads" - coroutines = [self.__connect_and_drop() for i in range(20)] - await asyncio.gather(*coroutines) - - async def test_5316(self): - "5316 - test string format of connection" - async with test_env.get_connection_async() as conn: - expected_value = "" % ( - test_env.get_main_user(), - test_env.get_connect_string(), - ) - self.assertEqual(str(conn), expected_value) - - async def test_5317(self): - "5317 - test context manager - close" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - await cursor.execute( - "insert into TestTempTable (IntCol) values (1)" - ) - await conn.commit() - await cursor.execute( - "insert into TestTempTable (IntCol) values (2)" - ) - with self.assertRaisesFullCode("DPY-1001"): - await conn.ping() - conn = await test_env.get_connection_async() + await conn.changepassword("incorrect old password", new_password) + + +async def test_5309(skip_if_drcp, test_env): + "5309 - test connecting with password containing / and @ symbols" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + async with test_env.get_connection_async() as conn: + chars = list(test_env.get_random_string(20)) + chars[4] = "/" + chars[8] = "@" + new_password = "".join(chars) + await conn.changepassword(test_env.main_password, new_password) + try: + async with test_env.get_connection_async(password=new_password): + pass + finally: + await conn.changepassword(new_password, test_env.main_password) + + +async def test_5310(test_env): + "5310 - confirm an exception is raised after closing a connection" + async with await test_env.get_connection_async() as conn: + await conn.close() + with test_env.assert_raises_full_code("DPY-1001"): + await conn.rollback() + + +async def test_5312(test_env): + "5312 - connection version is a string" + async with test_env.get_connection_async() as conn: + assert isinstance(conn.version, str) + + +async def test_5313(test_env): + "5313 - connection rolls back before close" + async with test_env.get_connection_async() as conn: cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + other_conn = await test_env.get_connection_async() + other_cursor = other_conn.cursor() + await other_cursor.execute( + "insert into TestTempTable (IntCol) values (1)" + ) + other_cursor.close() + await other_conn.close() await cursor.execute("select count(*) from TestTempTable") (count,) = await cursor.fetchone() - self.assertEqual(count, 1) - - async def test_5318(self): - "5318 - test connection attribute values" - async with test_env.get_connection_async() as conn: - self.assertEqual(conn.ltxid, b"") - self.assertIsNone(conn.current_schema) - conn.current_schema = "test_schema" - self.assertEqual(conn.current_schema, "test_schema") - self.assertIsNone(conn.edition) - conn.external_name = "test_external" - self.assertEqual(conn.external_name, "test_external") - conn.internal_name = "test_internal" - self.assertEqual(conn.internal_name, "test_internal") - conn.stmtcachesize = 30 - self.assertEqual(conn.stmtcachesize, 30) - self.assertRaises(TypeError, conn.stmtcachesize, 20.5) - self.assertRaises(TypeError, conn.stmtcachesize, "value") - self.assertIsNone(conn.warning) - - async def 
test_5319(self): - "5319 - test closed connection attribute values" - conn = await test_env.get_connection_async() - await conn.close() - attr_names = [ - "current_schema", - "edition", - "external_name", - "internal_name", - "ltxid", - "stmtcachesize", - "warning", - ] - for name in attr_names: - with self.assertRaisesFullCode("DPY-1001"): - getattr(conn, name) - - async def test_5320(self): - "5320 - test connection ping makes a round trip" - self.conn = test_env.get_connection_async() - async with self.conn: - await self.setup_round_trip_checker() - await self.conn.ping() - await self.assertRoundTrips(1) - - async def test_5325(self): - "5325 - single connection to database with multiple threads" - async with test_env.get_connection_async() as conn: - coroutines = [self.__verify_fetched_data(conn) for i in range(3)] - await asyncio.gather(*coroutines) - - @test_env.skip_if_implicit_pooling() - async def test_5326(self): - "5326 - test connection cancel" - async with test_env.get_connection_async() as conn: - sleep_proc_name = test_env.get_sleep_proc_name() - - async def perform_cancel(): - await asyncio.sleep(0.1) - conn.cancel() - - async def perform_work(): - with self.assertRaises(oracledb.OperationalError): - with conn.cursor() as cursor: - await cursor.callproc(sleep_proc_name, [2]) - - await asyncio.gather(perform_work(), perform_cancel()) + assert count == 0 - with conn.cursor() as cursor: - await cursor.execute("select user from dual") - (user,) = await cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) - - @test_env.skip_if_drcp() - async def test_5327(self): - "5327 - test changing password during connect" - async with test_env.get_connection_async() as conn: - if await self.is_on_oracle_cloud(conn): - self.skipTest( - "passwords on Oracle Cloud are strictly controlled" - ) - sys_random = random.SystemRandom() - new_password = "".join( - sys_random.choice(string.ascii_letters) for i in range(20) - ) - conn = await test_env.get_connection_async( - newpassword=new_password - ) - conn = await test_env.get_connection_async(password=new_password) - await conn.changepassword( - new_password, test_env.get_main_password() - ) - - async def test_5328(self): - "5328 - test use of autocommit during reexecute" - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - data_to_insert = [(1, "Test String #1"), (2, "Test String #2")] - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - other_conn = await test_env.get_connection_async() - other_cursor = other_conn.cursor() - await cursor.execute("truncate table TestTempTable") - await cursor.execute(sql, data_to_insert[0]) - await other_cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await other_cursor.fetchall(), []) - conn.autocommit = True - await cursor.execute(sql, data_to_insert[1]) - await other_cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await other_cursor.fetchall(), data_to_insert) - - async def test_5329(self): - "5329 - test current_schema is set properly" - async with test_env.get_connection_async() as conn: - self.assertIsNone(conn.current_schema) - - user = test_env.get_main_user().upper() - proxy_user = test_env.get_proxy_user().upper() - cursor = conn.cursor() - await cursor.execute( - f"alter session set current_schema={proxy_user}" - ) - self.assertEqual(conn.current_schema, proxy_user) - - conn.current_schema = user - self.assertEqual(conn.current_schema, user) - - await 
cursor.execute( - "select sys_context('userenv', 'current_schema') from dual" - ) - (result,) = await cursor.fetchone() - self.assertEqual(result, user) - - async def test_5330(self): - "5330 - test dbms_output package" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - test_string = "Testing DBMS_OUTPUT package" - await cursor.callproc("dbms_output.enable") - await cursor.callproc("dbms_output.put_line", [test_string]) - string_var = cursor.var(str) - number_var = cursor.var(int) - await cursor.callproc( - "dbms_output.get_line", (string_var, number_var) - ) - self.assertEqual(string_var.getvalue(), test_string) - - async def test_5331(self): - "5331 - test connection call_timeout" - async with test_env.get_connection_async() as conn: - conn.call_timeout = 500 # milliseconds - self.assertEqual(conn.call_timeout, 500) - with self.assertRaisesFullCode("DPY-4011", "DPY-4024"): - with conn.cursor() as cursor: - await cursor.callproc(test_env.get_sleep_proc_name(), [2]) - - async def test_5332(self): - "5332 - test Connection repr()" - - class MyConnection(oracledb.AsyncConnection): - pass - - conn = await test_env.get_connection_async(conn_class=MyConnection) - qual_name = conn.__class__.__qualname__ - expected_value = ( - f"<{__name__}.{qual_name} to {conn.username}@{conn.dsn}>" - ) - self.assertEqual(repr(conn), expected_value) - await conn.close() - expected_value = f"<{__name__}.{qual_name} disconnected>" - self.assertEqual(repr(conn), expected_value) - - async def test_5333(self): - "5333 - test getting write-only attributes" - async with test_env.get_connection_async() as conn: - with self.assertRaises(AttributeError): - conn.action - with self.assertRaises(AttributeError): - conn.dbop - with self.assertRaises(AttributeError): - conn.clientinfo - with self.assertRaises(AttributeError): - conn.econtext_id - with self.assertRaises(AttributeError): - conn.module - with self.assertRaises(AttributeError): - conn.client_identifier - - async def test_5334(self): - "5334 - test error for invalid type for params and pool" - pool = test_env.get_pool_async() - await pool.close() - with self.assertRaisesFullCode("DPY-1002"): - await test_env.get_connection_async(pool=pool) - with self.assertRaises(TypeError): - await test_env.get_connection_async( - pool="This isn't an instance of a pool" - ) - with self.assertRaisesFullCode("DPY-2025"): - await oracledb.connect_async(params={"number": 7}) - - async def test_5335(self): - "5335 - test connection instance name" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - """ - select upper(sys_context('userenv', 'instance_name')) - from dual - """ - ) - (instance_name,) = await cursor.fetchone() - self.assertEqual(conn.instance_name.upper(), instance_name) - - @test_env.skip_if_drcp() - @test_env.skip_unless_long_passwords_supported() - async def test_5337(self): - "5337 - test maximum allowed length for password" - async with test_env.get_connection_async() as conn: - if await self.is_on_oracle_cloud(conn): - self.skipTest( - "passwords on Oracle Cloud are strictly controlled" - ) - - original_password = test_env.get_main_password() - new_password_32 = "a" * 32 - await conn.changepassword(original_password, new_password_32) - conn = await test_env.get_connection_async( - password=new_password_32 - ) - - new_password_1024 = "a" * 1024 - await conn.changepassword(new_password_32, new_password_1024) - conn = await test_env.get_connection_async( - password=new_password_1024 - ) - 
await conn.changepassword(new_password_1024, original_password) - - new_password_1025 = "a" * 1025 - with self.assertRaisesFullCode("ORA-28218", "ORA-00972"): - await conn.changepassword(original_password, new_password_1025) - - async def test_5338(self): - "5338 - test getting db_name" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute("select name from V$DATABASE") - (db_name,) = await cursor.fetchone() - self.assertEqual(conn.db_name.upper(), db_name.upper()) - - async def test_5339(self): - "5339 - test getting max_open_cursors" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - "select value from V$PARAMETER where name='open_cursors'" - ) - (max_open_cursors,) = await cursor.fetchone() - self.assertEqual(conn.max_open_cursors, int(max_open_cursors)) - - async def test_5340(self): - "5340 - test getting service_name" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - "select sys_context('userenv', 'service_name') from dual" - ) - (service_name,) = await cursor.fetchone() - self.assertEqual(conn.service_name, service_name) - - async def test_5341(self): - "5341 - test transaction_in_progress" - async with test_env.get_connection_async() as conn: - self.assertFalse(conn.transaction_in_progress) - - cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - self.assertFalse(conn.transaction_in_progress) - - await cursor.execute( - "insert into TestTempTable (IntCol) values (1)" - ) - self.assertTrue(conn.transaction_in_progress) - - await conn.commit() - self.assertFalse(conn.transaction_in_progress) - - async def test_5342(self): - "5342 - test getting db_domain" - async with test_env.get_connection_async() as conn: - (db_domain,) = await conn.fetchone( - "select value from V$PARAMETER where name='db_domain'" - ) - self.assertEqual(conn.db_domain, db_domain) - - async def test_5343(self): - "5343 - test connection with invalid conn_class" - with self.assertRaisesFullCode("DPY-2023"): - await test_env.get_connection_async( - conn_class=oracledb.ConnectionPool - ) - - async def test_5344(self): - "5344 - test connection with an invalid pool" - with self.assertRaises(TypeError): - await oracledb.connect_async(pool="not a pool object") - - async def test_5346(self): - "5346 - test passing program when creating a connection" - sql = ( - "select program from v$session " - "where sid = sys_context('userenv', 'sid')" +async def test_5315(test_env): + "5315 - multiple connections to database with multiple threads" + coroutines = [_connect_and_drop(test_env) for i in range(20)] + await asyncio.gather(*coroutines) + + +async def test_5316(test_env): + "5316 - test string format of connection" + async with test_env.get_connection_async() as conn: + expected_value = "" % ( + test_env.main_user, + test_env.connect_string, ) - await self.__verify_connect_arg("program", "newprogram", sql) + assert str(conn) == expected_value + + +async def test_5317(test_env): + "5317 - test context manager - close" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + await cursor.execute("insert into TestTempTable (IntCol) values (1)") + await conn.commit() + await cursor.execute("insert into TestTempTable (IntCol) values (2)") + with test_env.assert_raises_full_code("DPY-1001"): + await conn.ping() + conn = await test_env.get_connection_async() + cursor = 
conn.cursor() + await cursor.execute("select count(*) from TestTempTable") + (count,) = await cursor.fetchone() + assert count == 1 + + +async def test_5318(test_env): + "5318 - test connection attribute values" + async with test_env.get_connection_async() as conn: + assert conn.ltxid == b"" + assert conn.current_schema is None + conn.current_schema = "test_schema" + assert conn.current_schema == "test_schema" + assert conn.edition is None + conn.external_name = "test_external" + assert conn.external_name == "test_external" + conn.internal_name = "test_internal" + assert conn.internal_name == "test_internal" + conn.stmtcachesize = 30 + assert conn.stmtcachesize == 30 + with pytest.raises(TypeError): + conn.stmtcachesize = "value" + assert conn.warning is None + + +async def test_5319(test_env): + "5319 - test closed connection attribute values" + conn = await test_env.get_connection_async() + await conn.close() + attr_names = [ + "current_schema", + "edition", + "external_name", + "internal_name", + "ltxid", + "stmtcachesize", + "warning", + ] + for name in attr_names: + with test_env.assert_raises_full_code("DPY-1001"): + getattr(conn, name) + + +async def test_5320(test_env, async_conn, round_trip_checker_async): + "5320 - test connection ping makes a round trip" + await async_conn.ping() + assert (await round_trip_checker_async.get_value_async()) == 1 + + +async def test_5325(async_conn): + "5325 - single connection to database with multiple threads" + coroutines = [_verify_fetched_data(async_conn) for i in range(3)] + await asyncio.gather(*coroutines) + + +async def test_5326(skip_if_implicit_pooling, async_conn, test_env): + "5326 - test connection cancel" + + async def perform_cancel(): + await asyncio.sleep(0.1) + async_conn.cancel() + + async def perform_work(): + with pytest.raises(oracledb.OperationalError): + with async_conn.cursor() as cursor: + await cursor.callproc(test_env.sleep_proc_name, [2]) + + await asyncio.gather(perform_work(), perform_cancel()) + + with async_conn.cursor() as cursor: + await cursor.execute("select user from dual") + (user,) = await cursor.fetchone() + assert user == test_env.main_user.upper() - async def test_5347(self): - "5347 - test passing machine when creating a connection" - sql = ( - "select machine from v$session " - "where sid = sys_context('userenv', 'sid')" + +async def test_5327(skip_if_drcp, test_env): + "5327 - test changing password during connect" + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + async with test_env.get_connection_async() as conn: + new_password = test_env.get_random_string(20) + conn = await test_env.get_connection_async(newpassword=new_password) + conn = await test_env.get_connection_async(password=new_password) + await conn.changepassword(new_password, test_env.main_password) + + +async def test_5328(test_env): + "5328 - test use of autocommit during reexecute" + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + data_to_insert = [(1, "Test String #1"), (2, "Test String #2")] + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + other_conn = await test_env.get_connection_async() + other_cursor = other_conn.cursor() + await cursor.execute("truncate table TestTempTable") + await cursor.execute(sql, data_to_insert[0]) + await other_cursor.execute( + "select IntCol, StringCol1 from TestTempTable" + ) + assert await other_cursor.fetchall() == [] + conn.autocommit = True + await cursor.execute(sql, data_to_insert[1]) + 
await other_cursor.execute( + "select IntCol, StringCol1 from TestTempTable" ) - await self.__verify_connect_arg("machine", "newmachine", sql) + assert await other_cursor.fetchall() == data_to_insert + - async def test_5348(self): - "5348 - test passing terminal when creating a connection" - sql = ( - "select terminal from v$session " - "where sid = sys_context('userenv', 'sid')" +async def test_5329(test_env): + "5329 - test current_schema is set properly" + async with test_env.get_connection_async() as conn: + assert conn.current_schema is None + + user = test_env.main_user.upper() + proxy_user = test_env.proxy_user.upper() + cursor = conn.cursor() + await cursor.execute(f"alter session set current_schema={proxy_user}") + assert conn.current_schema == proxy_user + + conn.current_schema = user + assert conn.current_schema == user + + await cursor.execute( + "select sys_context('userenv', 'current_schema') from dual" ) - await self.__verify_connect_arg("terminal", "newterminal", sql) + (result,) = await cursor.fetchone() + assert result == user + - async def test_5349(self): - "5349 - test passing osuser when creating a connection" - sql = ( - "select osuser from v$session " - "where sid = sys_context('userenv', 'sid')" +async def test_5330(test_env): + "5330 - test dbms_output package" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + test_string = "Testing DBMS_OUTPUT package" + await cursor.callproc("dbms_output.enable") + await cursor.callproc("dbms_output.put_line", [test_string]) + string_var = cursor.var(str) + number_var = cursor.var(int) + await cursor.callproc("dbms_output.get_line", (string_var, number_var)) + assert string_var.getvalue() == test_string + + +async def test_5331(test_env): + "5331 - test connection call_timeout" + async with test_env.get_connection_async() as conn: + conn.call_timeout = 500 # milliseconds + assert conn.call_timeout == 500 + with test_env.assert_raises_full_code("DPY-4011", "DPY-4024"): + with conn.cursor() as cursor: + await cursor.callproc(test_env.sleep_proc_name, [2]) + + +async def test_5332(test_env): + "5332 - test Connection repr()" + + class MyConnection(oracledb.AsyncConnection): + pass + + conn = await test_env.get_connection_async(conn_class=MyConnection) + qual_name = conn.__class__.__qualname__ + expected_value = f"<{__name__}.{qual_name} to {conn.username}@{conn.dsn}>" + assert repr(conn) == expected_value + + await conn.close() + expected_value = f"<{__name__}.{qual_name} disconnected>" + assert repr(conn) == expected_value + + +async def test_5333(test_env): + "5333 - test getting write-only attributes" + async with test_env.get_connection_async() as conn: + with pytest.raises(AttributeError): + conn.action + with pytest.raises(AttributeError): + conn.dbop + with pytest.raises(AttributeError): + conn.clientinfo + with pytest.raises(AttributeError): + conn.econtext_id + with pytest.raises(AttributeError): + conn.module + with pytest.raises(AttributeError): + conn.client_identifier + + +async def test_5334(test_env): + "5334 - test error for invalid type for params and pool" + pool = test_env.get_pool_async() + await pool.close() + with test_env.assert_raises_full_code("DPY-1002"): + await test_env.get_connection_async(pool=pool) + with pytest.raises(TypeError): + await test_env.get_connection_async( + pool="This isn't an instance of a pool" ) - await self.__verify_connect_arg("osuser", "newosuser", sql) + with test_env.assert_raises_full_code("DPY-2025"): + await 
oracledb.connect_async(params={"number": 7}) - async def test_5350(self): - "5350 - test passing driver_name when creating a connection" - sql = ( - "select distinct client_driver from v$session_connect_info " - "where sid = sys_context('userenv', 'sid')" + +async def test_5335(test_env): + "5335 - test connection instance name" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + """ + select upper(sys_context('userenv', 'instance_name')) + from dual + """ ) - await self.__verify_connect_arg("driver_name", "newdriver", sql) - - async def test_5351(self): - "5351 - test getting session id" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - "select dbms_debug_jdwp.current_session_id from dual" - ) - (fetched_value,) = await cursor.fetchone() - self.assertEqual(conn.session_id, fetched_value) - - async def test_5352(self): - "5352 - test getting session serial number" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - "select dbms_debug_jdwp.current_session_serial from dual" - ) - (fetched_value,) = await cursor.fetchone() - self.assertEqual(conn.serial_num, fetched_value) - - async def test_5353(self): - "5353 - test passed params in hook with standalone connection" - sdu = 4096 - params = test_env.get_connect_params() - protocol = "proto-test" - orig_connect_string = test_env.get_connect_string() - connect_string = f"{protocol}://{orig_connect_string}" - - def hook(passed_protocol, passed_protocol_arg, passed_params): - self.assertEqual(passed_protocol, protocol) - self.assertEqual(passed_protocol_arg, orig_connect_string) - passed_params.parse_connect_string(passed_protocol_arg) - passed_params.set(sdu=sdu) + (instance_name,) = await cursor.fetchone() + assert conn.instance_name.upper() == instance_name - try: - oracledb.register_protocol(protocol, hook) - await oracledb.connect_async(dsn=connect_string, params=params) - self.assertEqual(params.sdu, sdu) - finally: - oracledb.register_protocol(protocol, None) - async def test_5354(self): - "5354 - test altering connection edition" - conn = await test_env.get_admin_connection_async() - self.assertIsNone(conn.edition) +async def test_5337( + skip_if_drcp, skip_unless_long_passwords_supported, test_env +): + if test_env.is_on_oracle_cloud: + pytest.skip("passwords on Oracle Cloud are strictly controlled") + "5337 - test maximum allowed length for password" + async with test_env.get_connection_async() as conn: + + original_password = test_env.main_password + new_password_32 = "a" * 32 + await conn.changepassword(original_password, new_password_32) + conn = await test_env.get_connection_async(password=new_password_32) + + new_password_1024 = "a" * 1024 + await conn.changepassword(new_password_32, new_password_1024) + conn = await test_env.get_connection_async(password=new_password_1024) + await conn.changepassword(new_password_1024, original_password) + + new_password_1025 = "a" * 1025 + with test_env.assert_raises_full_code("ORA-28218", "ORA-00972"): + await conn.changepassword(original_password, new_password_1025) + + +async def test_5338(test_env): + "5338 - test getting db_name" + async with test_env.get_connection_async() as conn: cursor = conn.cursor() - sql = "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" - default_edition = "ORA$BASE" - test_edition = test_env.get_edition_name() - for edition in [test_edition, default_edition]: - with 
self.subTest(edition=edition): - await cursor.execute(f"alter session set edition = {edition}") - await cursor.execute(sql) - (fetched_edition,) = await cursor.fetchone() - self.assertEqual(fetched_edition, edition.upper()) - self.assertEqual(conn.edition, edition.upper()) - - async def test_5355(self): - "5355 - test connect() with edition" - edition = test_env.get_edition_name() - conn = await test_env.get_connection_async(edition=edition) + await cursor.execute("select name from V$DATABASE") + (db_name,) = await cursor.fetchone() + assert conn.db_name.upper() == db_name.upper() + + +async def test_5339(test_env): + "5339 - test getting max_open_cursors" + async with test_env.get_connection_async() as conn: cursor = conn.cursor() await cursor.execute( - "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" + "select value from V$PARAMETER where name='open_cursors'" ) - (fetched_edition,) = await cursor.fetchone() - self.assertEqual(fetched_edition, edition.upper()) - self.assertEqual(conn.edition, edition) + (max_open_cursors,) = await cursor.fetchone() + assert conn.max_open_cursors == int(max_open_cursors) - async def test_5356(self): - "5356 - test error in the middle of a database response" - conn = await test_env.get_connection_async() + +async def test_5340(test_env): + "5340 - test getting service_name" + async with test_env.get_connection_async() as conn: cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - data = [(i + 1, 2 if i < 1499 else 0) for i in range(1500)] - await cursor.executemany( - "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", - data, + await cursor.execute( + "select sys_context('userenv', 'service_name') from dual" ) + (service_name,) = await cursor.fetchone() + assert conn.service_name == service_name + + +async def test_5341(test_env): + "5341 - test transaction_in_progress" + async with test_env.get_connection_async() as conn: + assert not conn.transaction_in_progress + + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + assert not conn.transaction_in_progress + + await cursor.execute("insert into TestTempTable (IntCol) values (1)") + assert conn.transaction_in_progress + await conn.commit() - cursor.arraysize = 1500 - with self.assertRaisesFullCode("ORA-01476"): - await cursor.execute( - """ - select IntCol, 1 / NumberCol - from TestTempTable - where IntCol < 1500 - union all - select IntCol, 1 / NumberCol - from TestTempTable - where IntCol = 1500 - """ - ) - await cursor.fetchall() - - -if __name__ == "__main__": - test_env.run_test_cases() + assert not conn.transaction_in_progress + + +async def test_5342(test_env): + "5342 - test getting db_domain" + async with test_env.get_connection_async() as conn: + (db_domain,) = await conn.fetchone( + "select value from V$PARAMETER where name='db_domain'" + ) + assert conn.db_domain == db_domain + + +async def test_5343(test_env): + "5343 - test connection with invalid conn_class" + with test_env.assert_raises_full_code("DPY-2023"): + await test_env.get_connection_async(conn_class=oracledb.ConnectionPool) + + +async def test_5344(test_env): + "5344 - test connection with an invalid pool" + with pytest.raises(TypeError): + await oracledb.connect_async(pool="not a pool object") + + +async def test_5346(test_env): + "5346 - test passing program when creating a connection" + sql = ( + "select program from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_connect_arg(test_env, "program", "newprogram", sql) + 
+ +async def test_5347(test_env): + "5347 - test passing machine when creating a connection" + sql = ( + "select machine from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_connect_arg(test_env, "machine", "newmachine", sql) + + +async def test_5348(test_env): + "5348 - test passing terminal when creating a connection" + sql = ( + "select terminal from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_connect_arg(test_env, "terminal", "newterminal", sql) + + +async def test_5349(test_env): + "5349 - test passing osuser when creating a connection" + sql = ( + "select osuser from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_connect_arg(test_env, "osuser", "newosuser", sql) + + +async def test_5350(test_env): + "5350 - test passing driver_name when creating a connection" + sql = ( + "select distinct client_driver from v$session_connect_info " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_connect_arg(test_env, "driver_name", "newdriver", sql) + + +async def test_5351(test_env): + "5351 - test getting session id" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + "select dbms_debug_jdwp.current_session_id from dual" + ) + (fetched_value,) = await cursor.fetchone() + assert conn.session_id == fetched_value + + +async def test_5352(test_env): + "5352 - test getting session serial number" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + "select dbms_debug_jdwp.current_session_serial from dual" + ) + (fetched_value,) = await cursor.fetchone() + assert conn.serial_num == fetched_value + + +async def test_5353(test_env): + "5353 - test passed params in hook with standalone connection" + sdu = 4096 + params = test_env.get_connect_params() + protocol = "proto-test" + orig_connect_string = test_env.connect_string + connect_string = f"{protocol}://{orig_connect_string}" + + def hook(passed_protocol, passed_protocol_arg, passed_params): + assert passed_protocol == protocol + assert passed_protocol_arg == orig_connect_string + passed_params.parse_connect_string(passed_protocol_arg) + passed_params.set(sdu=sdu) + + try: + oracledb.register_protocol(protocol, hook) + await oracledb.connect_async(dsn=connect_string, params=params) + assert params.sdu == sdu + finally: + oracledb.register_protocol(protocol, None) + + +async def test_5354(test_env): + "5354 - test altering connection edition" + conn = await test_env.get_admin_connection_async() + assert conn.edition is None + cursor = conn.cursor() + sql = "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" + default_edition = "ORA$BASE" + test_edition = test_env.edition_name + for edition in [test_edition, default_edition]: + await cursor.execute(f"alter session set edition = {edition}") + await cursor.execute(sql) + (fetched_edition,) = await cursor.fetchone() + assert fetched_edition == edition.upper() + assert conn.edition == edition.upper() + + +async def test_5355(test_env): + "5355 - test connect() with edition" + edition = test_env.edition_name + conn = await test_env.get_connection_async(edition=edition) + cursor = conn.cursor() + await cursor.execute( + "select sys_context('USERENV', 'CURRENT_EDITION_NAME') from dual" + ) + (fetched_edition,) = await cursor.fetchone() + assert fetched_edition == edition.upper() + assert conn.edition == edition + + +async def test_5356(test_env): + "5356 - test error in the middle of a 
database response" + conn = await test_env.get_connection_async() + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + data = [(i + 1, 2 if i < 1499 else 0) for i in range(1500)] + await cursor.executemany( + "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", + data, + ) + await conn.commit() + cursor.arraysize = 1500 + with test_env.assert_raises_full_code("ORA-01476"): + await cursor.execute( + """ + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol < 1500 + union all + select IntCol, 1 / NumberCol + from TestTempTable + where IntCol = 1500 + """ + ) + await cursor.fetchall() diff --git a/tests/test_5400_cursor_execute_async.py b/tests/test_5400_cursor_execute_async.py index 68dd18ac..7f52a4a4 100644 --- a/tests/test_5400_cursor_execute_async.py +++ b/tests/test_5400_cursor_execute_async.py @@ -29,577 +29,584 @@ import collections import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_5400(self): - "5400 - test executing a statement without any arguments" - result = await self.cursor.execute("begin null; end;") - self.assertIsNone(result) - - async def test_5401(self): - "5401 - test executing a None statement with bind variables" - cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-2001"): - await cursor.execute(None, x=5) - - async def test_5402(self): - "5402 - test executing a statement with args and empty keyword args" - simple_var = self.cursor.var(oracledb.NUMBER) - args = [simple_var] - kwargs = {} - result = await self.cursor.execute( - "begin :1 := 25; end;", args, **kwargs - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 25) - - async def test_5403(self): - "5403 - test executing a statement with keyword arguments" - simple_var = self.cursor.var(oracledb.NUMBER) - result = await self.cursor.execute( - "begin :value := 5; end;", value=simple_var - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 5) - - async def test_5404(self): - "5404 - test executing a statement with a dictionary argument" - simple_var = self.cursor.var(oracledb.NUMBER) - dict_arg = dict(value=simple_var) - result = await self.cursor.execute( - "begin :value := 10; end;", dict_arg - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 10) - - async def test_5405(self): - "5405 - test executing a statement with both a dict and keyword args" - simple_var = self.cursor.var(oracledb.NUMBER) - dict_arg = dict(value=simple_var) - with self.assertRaisesFullCode("DPY-2005"): - await self.cursor.execute( - "begin :value := 15; end;", dict_arg, value=simple_var - ) +import pytest - async def test_5406(self): - "5406 - test executing a statement and then changing the array size" - await self.cursor.execute("select IntCol from TestNumbers") - self.cursor.arraysize = 5 - self.assertEqual(len(await self.cursor.fetchall()), 10) - - async def test_5407(self): - "5407 - test that subsequent executes succeed after bad execute" - sql = "begin raise_application_error(-20000, 'this); end;" - with self.assertRaisesFullCode("DPY-2041"): - await self.cursor.execute(sql) - await self.cursor.execute("begin null; end;") - - async def test_5408(self): - "5408 - test that subsequent fetches fail after bad execute" - with self.assertRaisesFullCode("ORA-00904"): - await self.cursor.execute("select y from dual") - with self.assertRaisesFullCode("DPY-1003"): - await self.cursor.fetchall() - - async def test_5409(self): - 
"5409 - test executing a statement with an incorrect named bind" - sql = "select * from TestStrings where IntCol = :value" - with self.assertRaisesFullCode("DPY-4008", "ORA-01036"): - await self.cursor.execute(sql, value2=3) - - async def test_5410(self): - "5410 - test executing a statement with named binds" - await self.cursor.execute( - """ - select * - from TestNumbers - where IntCol = :value1 and LongIntCol = :value2 - """, - value1=1, - value2=38, + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_5400(async_cursor): + "5400 - test executing a statement without any arguments" + result = await async_cursor.execute("begin null; end;") + assert result is None + + +async def test_5401(async_cursor, test_env): + "5401 - test executing a None statement with bind variables" + with test_env.assert_raises_full_code("DPY-2001"): + await async_cursor.execute(None, x=5) + + +async def test_5402(async_cursor): + "5402 - test executing a statement with args and empty keyword args" + simple_var = async_cursor.var(oracledb.NUMBER) + args = [simple_var] + kwargs = {} + result = await async_cursor.execute("begin :1 := 25; end;", args, **kwargs) + assert result is None + assert simple_var.getvalue() == 25 + + +async def test_5403(async_cursor): + "5403 - test executing a statement with keyword arguments" + simple_var = async_cursor.var(oracledb.NUMBER) + result = await async_cursor.execute( + "begin :value := 5; end;", value=simple_var + ) + assert result is None + assert simple_var.getvalue() == 5 + + +async def test_5404(async_cursor): + "5404 - test executing a statement with a dictionary argument" + simple_var = async_cursor.var(oracledb.NUMBER) + dict_arg = dict(value=simple_var) + result = await async_cursor.execute("begin :value := 10; end;", dict_arg) + assert result is None + assert simple_var.getvalue() == 10 + + +async def test_5405(async_cursor, test_env): + "5405 - test executing a statement with both a dict and keyword args" + simple_var = async_cursor.var(oracledb.NUMBER) + dict_arg = dict(value=simple_var) + with test_env.assert_raises_full_code("DPY-2005"): + await async_cursor.execute( + "begin :value := 15; end;", dict_arg, value=simple_var ) - self.assertEqual(len(await self.cursor.fetchall()), 1) - - async def test_5411(self): - "5411 - test executing a statement with an incorrect positional bind" - sql = """ - select * - from TestNumbers - where IntCol = :value and LongIntCol = :value2""" - with self.assertRaisesFullCode("DPY-4009", "ORA-01008"): - await self.cursor.execute(sql, [3]) - - async def test_5412(self): - "5412 - test executing a statement with positional binds" - await self.cursor.execute( - """ + + +async def test_5406(async_cursor): + "5406 - test executing a statement and then changing the array size" + await async_cursor.execute("select IntCol from TestNumbers") + async_cursor.arraysize = 5 + assert len(await async_cursor.fetchall()) == 10 + + +async def test_5407(async_cursor, test_env): + "5407 - test that subsequent executes succeed after bad execute" + sql = "begin raise_application_error(-20000, 'this); end;" + with test_env.assert_raises_full_code("DPY-2041"): + await async_cursor.execute(sql) + await async_cursor.execute("begin null; end;") + + +async def test_5408(async_cursor, test_env): + "5408 - test that subsequent fetches fail after bad execute" + with test_env.assert_raises_full_code("ORA-00904"): + await async_cursor.execute("select y from dual") + with 
test_env.assert_raises_full_code("DPY-1003"): + await async_cursor.fetchall() + + +async def test_5409(async_cursor, test_env): + "5409 - test executing a statement with an incorrect named bind" + sql = "select * from TestStrings where IntCol = :value" + with test_env.assert_raises_full_code("DPY-4008", "ORA-01036"): + await async_cursor.execute(sql, value2=3) + + +async def test_5410(async_cursor): + "5410 - test executing a statement with named binds" + await async_cursor.execute( + """ + select * + from TestNumbers + where IntCol = :value1 and LongIntCol = :value2 + """, + value1=1, + value2=38, + ) + assert len(await async_cursor.fetchall()) == 1 + + +async def test_5411(async_cursor, test_env): + "5411 - test executing a statement with an incorrect positional bind" + sql = """ select * from TestNumbers - where IntCol = :value and LongIntCol = :value2 - """, - [1, 38], + where IntCol = :value and LongIntCol = :value2""" + with test_env.assert_raises_full_code("DPY-4009", "ORA-01008"): + await async_cursor.execute(sql, [3]) + + +async def test_5412(async_cursor): + "5412 - test executing a statement with positional binds" + await async_cursor.execute( + """ + select * + from TestNumbers + where IntCol = :value and LongIntCol = :value2 + """, + [1, 38], + ) + assert len(await async_cursor.fetchall()) == 1 + + +async def test_5413(async_cursor): + "5413 - test executing a statement after rebinding a named bind" + statement = "begin :value := :value2 + 5; end;" + simple_var = async_cursor.var(oracledb.NUMBER) + simple_var2 = async_cursor.var(oracledb.NUMBER) + simple_var2.setvalue(0, 5) + result = await async_cursor.execute( + statement, value=simple_var, value2=simple_var2 + ) + assert result is None + assert simple_var.getvalue() == 10 + + simple_var = async_cursor.var(oracledb.NATIVE_FLOAT) + simple_var2 = async_cursor.var(oracledb.NATIVE_FLOAT) + simple_var2.setvalue(0, 10) + result = await async_cursor.execute( + statement, value=simple_var, value2=simple_var2 + ) + assert result is None + assert simple_var.getvalue() == 15 + + +async def test_5414(async_cursor): + "5414 - test executing a PL/SQL statement with duplicate binds" + simple_var = async_cursor.var(oracledb.NUMBER) + simple_var.setvalue(0, 5) + result = await async_cursor.execute( + """ + begin + :value := :value + 5; + end; + """, + value=simple_var, + ) + assert result is None + assert simple_var.getvalue() == 10 + + +async def test_5415(async_cursor): + "5415 - test executing a PL/SQL statement with duplicate binds" + simple_var = async_cursor.var(oracledb.NUMBER) + simple_var.setvalue(0, 5) + await async_cursor.execute( + "begin :value := :value + 5; end;", [simple_var] + ) + assert simple_var.getvalue() == 10 + + +async def test_5416(async_cursor, test_env): + "5416 - test executing a statement with an incorrect number of binds" + statement = "begin :value := :value2 + 5; end;" + var = async_cursor.var(oracledb.NUMBER) + var.setvalue(0, 5) + with test_env.assert_raises_full_code("DPY-4010", "ORA-01008"): + await async_cursor.execute(statement) + with test_env.assert_raises_full_code("DPY-4010", "ORA-01008"): + await async_cursor.execute(statement, value=var) + with test_env.assert_raises_full_code("DPY-4008", "ORA-01036"): + await async_cursor.execute( + statement, value=var, value2=var, value3=var ) - self.assertEqual(len(await self.cursor.fetchall()), 1) - - async def test_5413(self): - "5413 - test executing a statement after rebinding a named bind" - statement = "begin :value := :value2 + 5; end;" - simple_var = 
self.cursor.var(oracledb.NUMBER) - simple_var2 = self.cursor.var(oracledb.NUMBER) - simple_var2.setvalue(0, 5) - result = await self.cursor.execute( - statement, value=simple_var, value2=simple_var2 - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 10) - - simple_var = self.cursor.var(oracledb.NATIVE_FLOAT) - simple_var2 = self.cursor.var(oracledb.NATIVE_FLOAT) - simple_var2.setvalue(0, 10) - result = await self.cursor.execute( - statement, value=simple_var, value2=simple_var2 - ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 15) - - async def test_5414(self): - "5414 - test executing a PL/SQL statement with duplicate binds" - simple_var = self.cursor.var(oracledb.NUMBER) - simple_var.setvalue(0, 5) - result = await self.cursor.execute( + + +async def test_5417(async_conn, async_cursor): + "5417 - change in size on subsequent binds does not use optimised path" + await async_cursor.execute("truncate table TestTempTable") + data = [(1, "Test String #1"), (2, "ABC" * 100)] + for row in data: + await async_cursor.execute( """ - begin - :value := :value + 5; - end; + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) """, - value=simple_var, + row, ) - self.assertIsNone(result) - self.assertEqual(simple_var.getvalue(), 10) - - async def test_5415(self): - "5415 - test executing a PL/SQL statement with duplicate binds" - simple_var = self.cursor.var(oracledb.NUMBER) - simple_var.setvalue(0, 5) - await self.cursor.execute( - "begin :value := :value + 5; end;", [simple_var] - ) - self.assertEqual(simple_var.getvalue(), 10) - - async def test_5416(self): - "5416 - test executing a statement with an incorrect number of binds" - statement = "begin :value := :value2 + 5; end;" - var = self.cursor.var(oracledb.NUMBER) - var.setvalue(0, 5) - with self.assertRaisesFullCode("DPY-4010", "ORA-01008"): - await self.cursor.execute(statement) - with self.assertRaisesFullCode("DPY-4010", "ORA-01008"): - await self.cursor.execute(statement, value=var) - with self.assertRaisesFullCode("DPY-4008", "ORA-01036"): - await self.cursor.execute( - statement, value=var, value2=var, value3=var - ) - - async def test_5417(self): - "5417 - change in size on subsequent binds does not use optimised path" - await self.cursor.execute("truncate table TestTempTable") - data = [(1, "Test String #1"), (2, "ABC" * 100)] - for row in data: - await self.cursor.execute( + await async_conn.commit() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await async_cursor.fetchall() == data + + +async def test_5418(async_conn, async_cursor): + "5418 - test that dml can use optimised path" + data_to_insert = [(i + 1, f"Test String #{i + 1}") for i in range(3)] + await async_cursor.execute("truncate table TestTempTable") + for row in data_to_insert: + with async_conn.cursor() as cursor: + await cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, row, ) - await self.conn.commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await self.cursor.fetchall(), data) - - async def test_5418(self): - "5418 - test that dml can use optimised path" - data_to_insert = [(i + 1, f"Test String #{i + 1}") for i in range(3)] - await self.cursor.execute("truncate table TestTempTable") - for row in data_to_insert: - with self.conn.cursor() as cursor: - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - row, - ) - await 
self.conn.commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - self.assertEqual(await self.cursor.fetchall(), data_to_insert) - - async def test_5419(self): - "5419 - test calling execute() with invalid parameters" - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - with self.assertRaisesFullCode("DPY-2003"): - await self.cursor.execute(sql, "These are not valid parameters") - - async def test_5420(self): - "5420 - test calling execute() with mixed binds" - await self.cursor.execute("truncate table TestTempTable") - self.cursor.setinputsizes(None, None, str) - data = dict(val1=1, val2="Test String 1") - with self.assertRaisesFullCode("DPY-2006"): - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - returning StringCol1 into :out_var - """, - data, - ) - - async def test_5421(self): - "5421 - test binding by name with double quotes" - data = {'"_value1"': 1, '"VaLue_2"': 2, '"3VALUE"': 3} - await self.cursor.execute( - 'select :"_value1" + :"VaLue_2" + :"3VALUE" from dual', - data, - ) - (result,) = await self.cursor.fetchone() - self.assertEqual(result, 6) - - async def test_5422(self): - "5422 - test executing a statement with different input buffer sizes" - sql = """ - insert into TestTempTable (IntCol, StringCol1, StringCol2) - values (:int_col, :str_val1, :str_val2) returning IntCol - into :ret_data""" - values1 = {"int_col": 1, "str_val1": '{"a", "b"}', "str_val2": None} - values2 = {"int_col": 2, "str_val1": None, "str_val2": '{"a", "b"}'} - values3 = {"int_col": 3, "str_val1": '{"a"}', "str_val2": None} - - await self.cursor.execute("truncate table TestTempTable") - ret_bind = self.cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) - self.cursor.setinputsizes(ret_data=ret_bind) - await self.cursor.execute(sql, values1) - self.assertEqual(ret_bind.values, [["1"]]) - - ret_bind = self.cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) - self.cursor.setinputsizes(ret_data=ret_bind) - await self.cursor.execute(sql, values2) - self.assertEqual(ret_bind.values, [["2"]]) - - ret_bind = self.cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) - self.cursor.setinputsizes(ret_data=ret_bind) - await self.cursor.execute(sql, values3) - self.assertEqual(ret_bind.values, [["3"]]) - - async def test_5423(self): - "5423 - test using rowfactory" - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'Test 1') - """ - ) - await self.conn.commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - column_names = [col[0] for col in self.cursor.description] - - def rowfactory(*row): - return dict(zip(column_names, row)) - - self.cursor.rowfactory = rowfactory - self.assertEqual(self.cursor.rowfactory, rowfactory) - self.assertEqual( - await self.cursor.fetchall(), - [{"INTCOL": 1, "STRINGCOL1": "Test 1"}], - ) - - async def test_5424(self): - "5424 - test executing same query after setting rowfactory" - await self.cursor.execute("truncate table TestTempTable") - data = [(1, "Test 1"), (2, "Test 2")] - await self.cursor.executemany( + await async_conn.commit() + await async_cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert await async_cursor.fetchall() == data_to_insert + + +async def test_5419(async_cursor, test_env): + "5419 - test calling execute() with invalid parameters" + sql = "insert into 
TestTempTable (IntCol, StringCol1) values (:1, :2)" + with test_env.assert_raises_full_code("DPY-2003"): + await async_cursor.execute(sql, "These are not valid parameters") + + +async def test_5420(async_cursor, test_env): + "5420 - test calling execute() with mixed binds" + await async_cursor.execute("truncate table TestTempTable") + async_cursor.setinputsizes(None, None, str) + data = dict(val1=1, val2="Test String 1") + with test_env.assert_raises_full_code("DPY-2006"): + await async_cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) + returning StringCol1 into :out_var """, data, ) - await self.conn.commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - column_names = [col[0] for col in self.cursor.description] - self.cursor.rowfactory = lambda *row: dict(zip(column_names, row)) - results1 = await self.cursor.fetchall() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - results2 = await self.cursor.fetchall() - self.assertEqual(results1, results2) - - async def test_5425(self): - "5425 - test executing different query after setting rowfactory" - await self.cursor.execute("truncate table TestTempTable") - data = [(1, "Test 1"), (2, "Test 2")] - await self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data, - ) - await self.conn.commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - column_names = [col[0] for col in self.cursor.description] - self.cursor.rowfactory = lambda *row: dict(zip(column_names, row)) - await self.cursor.execute( - """ - select IntCol, StringCol - from TestSTrings - where IntCol between 1 and 3 order by IntCol - """ - ) - expected_data = [(1, "String 1"), (2, "String 2"), (3, "String 3")] - self.assertEqual(await self.cursor.fetchall(), expected_data) - - async def test_5426(self): - "5426 - test setting rowfactory on a REF cursor" - with self.conn.cursor() as cursor: - sql_function = "pkg_TestRefCursors.TestReturnCursor" - ref_cursor = await cursor.callfunc( - sql_function, oracledb.DB_TYPE_CURSOR, [2] - ) - column_names = [col[0] for col in ref_cursor.description] - ref_cursor.rowfactory = lambda *row: dict(zip(column_names, row)) - expected_value = [ - {"INTCOL": 1, "STRINGCOL": "String 1"}, - {"INTCOL": 2, "STRINGCOL": "String 2"}, - ] - self.assertEqual(await ref_cursor.fetchall(), expected_value) - - async def test_5427(self): - "5427 - test using a subclassed string as bind parameter keys" - - class my_str(str): - pass - - await self.cursor.execute("truncate table TestTempTable") - keys = {my_str("str_val"): oracledb.DB_TYPE_VARCHAR} - self.cursor.setinputsizes(**keys) - values = { - my_str("int_val"): 5427, - my_str("str_val"): "5427 - String Value", - } - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - """, - values, - ) - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual( - await self.cursor.fetchall(), [(5427, "5427 - String Value")] - ) - async def test_5428(self): - "5428 - test using a sequence of parameters other than a list or tuple" - class MySeq(collections.abc.Sequence): - def __init__(self, *data): - self.data = data - - def __len__(self): - return len(self.data) - - def __getitem__(self, index): - return self.data[index] - - values_to_insert = [MySeq(1, "String 1"), MySeq(2, "String 2")] - expected_data = [tuple(value) for value in 
values_to_insert] - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - """, - values_to_insert, +async def test_5421(async_cursor): + "5421 - test binding by name with double quotes" + data = {'"_value1"': 1, '"VaLue_2"': 2, '"3VALUE"': 3} + await async_cursor.execute( + 'select :"_value1" + :"VaLue_2" + :"3VALUE" from dual', + data, + ) + (result,) = await async_cursor.fetchone() + assert result == 6 + + +async def test_5422(async_cursor): + "5422 - test executing a statement with different input buffer sizes" + sql = """ + insert into TestTempTable (IntCol, StringCol1, StringCol2) + values (:int_col, :str_val1, :str_val2) returning IntCol + into :ret_data""" + values1 = {"int_col": 1, "str_val1": '{"a", "b"}', "str_val2": None} + values2 = {"int_col": 2, "str_val1": None, "str_val2": '{"a", "b"}'} + values3 = {"int_col": 3, "str_val1": '{"a"}', "str_val2": None} + + await async_cursor.execute("truncate table TestTempTable") + ret_bind = async_cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) + async_cursor.setinputsizes(ret_data=ret_bind) + await async_cursor.execute(sql, values1) + assert ret_bind.values == [["1"]] + + ret_bind = async_cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) + async_cursor.setinputsizes(ret_data=ret_bind) + await async_cursor.execute(sql, values2) + assert ret_bind.values == [["2"]] + + ret_bind = async_cursor.var(oracledb.DB_TYPE_VARCHAR, arraysize=1) + async_cursor.setinputsizes(ret_data=ret_bind) + await async_cursor.execute(sql, values3) + assert ret_bind.values == [["3"]] + + +async def test_5423(async_conn, async_cursor): + "5423 - test using rowfactory" + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'Test 1') + """ + ) + await async_conn.commit() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + column_names = [col[0] for col in async_cursor.description] + + def rowfactory(*row): + return dict(zip(column_names, row)) + + async_cursor.rowfactory = rowfactory + assert async_cursor.rowfactory == rowfactory + assert await async_cursor.fetchall() == [ + {"INTCOL": 1, "STRINGCOL1": "Test 1"} + ] + + +async def test_5424(async_conn, async_cursor): + "5424 - test executing same query after setting rowfactory" + await async_cursor.execute("truncate table TestTempTable") + data = [(1, "Test 1"), (2, "Test 2")] + await async_cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + await async_conn.commit() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + column_names = [col[0] for col in async_cursor.description] + async_cursor.rowfactory = lambda *row: dict(zip(column_names, row)) + results1 = await async_cursor.fetchall() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + results2 = await async_cursor.fetchall() + assert results1 == results2 + + +async def test_5425(async_conn, async_cursor): + "5425 - test executing different query after setting rowfactory" + await async_cursor.execute("truncate table TestTempTable") + data = [(1, "Test 1"), (2, "Test 2")] + await async_cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + await async_conn.commit() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + 
column_names = [col[0] for col in async_cursor.description] + async_cursor.rowfactory = lambda *row: dict(zip(column_names, row)) + await async_cursor.execute( + """ + select IntCol, StringCol + from TestSTrings + where IntCol between 1 and 3 order by IntCol + """ + ) + expected_data = [(1, "String 1"), (2, "String 2"), (3, "String 3")] + assert await async_cursor.fetchall() == expected_data + + +async def test_5426(async_conn): + "5426 - test setting rowfactory on a REF cursor" + with async_conn.cursor() as cursor: + sql_function = "pkg_TestRefCursors.TestReturnCursor" + ref_cursor = await cursor.callfunc( + sql_function, oracledb.DB_TYPE_CURSOR, [2] ) - await self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - self.assertEqual(await self.cursor.fetchall(), expected_data) - - async def test_5429(self): - "5429 - test an output type handler with prefetch > arraysize" - - def type_handler(cursor, metadata): - return cursor.var(metadata.type_code, arraysize=cursor.arraysize) - - self.cursor.arraysize = 2 - self.cursor.prefetchrows = 3 - self.cursor.outputtypehandler = type_handler - await self.cursor.execute( - "select level from dual connect by level <= 5" - ) - self.assertEqual( - await self.cursor.fetchall(), [(1,), (2,), (3,), (4,), (5,)] - ) - - async def test_5430(self): - "5430 - test setinputsizes() but without binding" - self.cursor.setinputsizes(None, int) - sql = "select :1, : 2 from dual" - - with self.assertRaisesFullCode("ORA-01008", "DPY-4010"): - await self.cursor.execute(sql, []) - - async def test_5431(self): - "5431 - test getting FetchInfo attributes" - type_obj = await self.conn.gettype("UDT_OBJECT") - varchar_ratio, _ = await test_env.get_charset_ratios_async() - test_values = [ - ( - "select IntCol from TestObjects", - 10, - None, - False, - "INTCOL", - False, - 9, - 0, - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_NUMBER, - ), - ( - "select ObjectCol from TestObjects", - None, - None, - False, - "OBJECTCOL", - True, - None, - None, - type_obj, - oracledb.DB_TYPE_OBJECT, - ), - ( - "select JsonVarchar from TestJsonCols", - 4000, - 4000 * varchar_ratio, - True, - "JSONVARCHAR", - False, - None, - None, - oracledb.DB_TYPE_VARCHAR, - oracledb.DB_TYPE_VARCHAR, - ), - ( - "select FLOATCOL from TestNumbers", - 127, - None, - False, - "FLOATCOL", - False, - 126, - -127, - oracledb.DB_TYPE_NUMBER, - oracledb.DB_TYPE_NUMBER, - ), + column_names = [col[0] for col in ref_cursor.description] + ref_cursor.rowfactory = lambda *row: dict(zip(column_names, row)) + expected_value = [ + {"INTCOL": 1, "STRINGCOL": "String 1"}, + {"INTCOL": 2, "STRINGCOL": "String 2"}, ] - for ( - sql, - display_size, - internal_size, - is_json, - name, - null_ok, - precision, - scale, - typ, - type_code, - ) in test_values: - await self.cursor.execute(sql) - (fetch_info,) = self.cursor.description - self.assertIsInstance(fetch_info, oracledb.FetchInfo) - self.assertEqual(fetch_info.display_size, display_size) - self.assertEqual(fetch_info.internal_size, internal_size) - self.assertEqual(fetch_info.is_json, is_json) - self.assertEqual(fetch_info.name, name) - self.assertEqual(fetch_info.null_ok, null_ok) - self.assertEqual(fetch_info.precision, precision) - self.assertEqual(fetch_info.scale, scale) - self.assertEqual(fetch_info.type, typ) - self.assertEqual(fetch_info.type_code, type_code) - - async def test_5432(self): - "5432 - test FetchInfo repr() and str()" - await self.cursor.execute("select IntCol from TestObjects") - (fetch_info,) = 
self.cursor.description - self.assertEqual( - str(fetch_info), - "('INTCOL', , 10, None, 9, 0, False)", - ) - self.assertEqual( - repr(fetch_info), - "('INTCOL', , 10, None, 9, 0, False)", - ) - - async def test_5433(self): - "5433 - test slicing FetchInfo" - await self.cursor.execute("select IntCol from TestObjects") - (fetch_info,) = self.cursor.description - self.assertEqual(fetch_info[1:3], (oracledb.DB_TYPE_NUMBER, 10)) - - async def test_5434(self): - "5434 - test async context manager" - expected_value = test_env.get_main_user().upper() - with self.conn.cursor() as cursor: - await cursor.execute("select user from dual") - self.assertEqual(await cursor.fetchone(), (expected_value,)) - async with self.conn.cursor() as cursor: - await cursor.execute("select user from dual") - self.assertEqual(await cursor.fetchone(), (expected_value,)) - - async def test_5435(self): - "5435 - test metadata requiring multiple packets" - values = [f"Test value 5435 - {i}" for i in range(1, 301)] - columns = ", ".join(f"'{v}'" for v in values) - query = f"select {columns} from dual" - await self.cursor.execute(query) - row = await self.cursor.fetchone() - self.assertEqual(row, tuple(values)) - - async def test_5436(self): - "5436 - test raising no_data_found in PL/SQL" - with self.assertRaisesFullCode("ORA-01403"): - await self.cursor.execute("begin raise no_data_found; end;") - - async def test_5437(self): - "5437 - test executing an empty statement" - with self.assertRaisesFullCode("DPY-2066"): - await self.cursor.execute("") - with self.assertRaisesFullCode("DPY-2066"): - await self.cursor.execute(" ") - - -if __name__ == "__main__": - test_env.run_test_cases() + assert await ref_cursor.fetchall() == expected_value + + +async def test_5427(async_cursor): + "5427 - test using a subclassed string as bind parameter keys" + + class my_str(str): + pass + + await async_cursor.execute("truncate table TestTempTable") + keys = {my_str("str_val"): oracledb.DB_TYPE_VARCHAR} + async_cursor.setinputsizes(**keys) + values = { + my_str("int_val"): 5427, + my_str("str_val"): "5427 - String Value", + } + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + """, + values, + ) + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await async_cursor.fetchall() == [(5427, "5427 - String Value")] + + +async def test_5428(async_cursor): + "5428 - test using a sequence of parameters other than a list or tuple" + + class MySeq(collections.abc.Sequence): + def __init__(self, *data): + self.data = data + + def __len__(self): + return len(self.data) + + def __getitem__(self, index): + return self.data[index] + + values_to_insert = [MySeq(1, "String 1"), MySeq(2, "String 2")] + expected_data = [tuple(value) for value in values_to_insert] + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + """, + values_to_insert, + ) + await async_cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + assert await async_cursor.fetchall() == expected_data + + +async def test_5429(async_cursor): + "5429 - test an output type handler with prefetch > arraysize" + + def type_handler(cursor, metadata): + return cursor.var(metadata.type_code, arraysize=cursor.arraysize) + + async_cursor.arraysize = 2 + async_cursor.prefetchrows = 3 + async_cursor.outputtypehandler = type_handler + await 
async_cursor.execute("select level from dual connect by level <= 5") + assert await async_cursor.fetchall() == [(1,), (2,), (3,), (4,), (5,)] + + +async def test_5430(async_cursor, test_env): + "5430 - test setinputsizes() but without binding" + async_cursor.setinputsizes(None, int) + sql = "select :1, : 2 from dual" + + with test_env.assert_raises_full_code("ORA-01008", "DPY-4010"): + await async_cursor.execute(sql, []) + + +async def test_5431(async_conn, async_cursor, test_env): + "5431 - test getting FetchInfo attributes" + type_obj = await async_conn.gettype("UDT_OBJECT") + varchar_ratio, _ = test_env.charset_ratios + test_values = [ + ( + "select IntCol from TestObjects", + 10, + None, + False, + "INTCOL", + False, + 9, + 0, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_NUMBER, + ), + ( + "select ObjectCol from TestObjects", + None, + None, + False, + "OBJECTCOL", + True, + None, + None, + type_obj, + oracledb.DB_TYPE_OBJECT, + ), + ( + "select JsonVarchar from TestJsonCols", + 4000, + 4000 * varchar_ratio, + True, + "JSONVARCHAR", + False, + None, + None, + oracledb.DB_TYPE_VARCHAR, + oracledb.DB_TYPE_VARCHAR, + ), + ( + "select FLOATCOL from TestNumbers", + 127, + None, + False, + "FLOATCOL", + False, + 126, + -127, + oracledb.DB_TYPE_NUMBER, + oracledb.DB_TYPE_NUMBER, + ), + ] + for ( + sql, + display_size, + internal_size, + is_json, + name, + null_ok, + precision, + scale, + typ, + type_code, + ) in test_values: + await async_cursor.execute(sql) + (fetch_info,) = async_cursor.description + assert isinstance(fetch_info, oracledb.FetchInfo) + assert fetch_info.display_size == display_size + assert fetch_info.internal_size == internal_size + assert fetch_info.is_json == is_json + assert fetch_info.name == name + assert fetch_info.null_ok == null_ok + assert fetch_info.precision == precision + assert fetch_info.scale == scale + assert fetch_info.type == typ + assert fetch_info.type_code == type_code + + +async def test_5432(async_cursor): + "5432 - test FetchInfo repr() and str()" + await async_cursor.execute("select IntCol from TestObjects") + (fetch_info,) = async_cursor.description + expected = "('INTCOL', , 10, None, 9, 0, False)" + assert str(fetch_info) == expected + assert repr(fetch_info) == expected + + +async def test_5433(async_cursor): + "5433 - test slicing FetchInfo" + await async_cursor.execute("select IntCol from TestObjects") + (fetch_info,) = async_cursor.description + assert fetch_info[1:3] == (oracledb.DB_TYPE_NUMBER, 10) + + +async def test_5434(async_conn, test_env): + "5434 - test async context manager" + expected_value = test_env.main_user.upper() + with async_conn.cursor() as cursor: + await cursor.execute("select user from dual") + assert await cursor.fetchone() == (expected_value,) + async with async_conn.cursor() as cursor: + await cursor.execute("select user from dual") + assert await cursor.fetchone() == (expected_value,) + + +async def test_5435(async_cursor): + "5435 - test metadata requiring multiple packets" + values = [f"Test value 5435 - {i}" for i in range(1, 301)] + columns = ", ".join(f"'{v}'" for v in values) + query = f"select {columns} from dual" + await async_cursor.execute(query) + row = await async_cursor.fetchone() + assert row == tuple(values) + + +async def test_5436(async_cursor, test_env): + "5436 - test raising no_data_found in PL/SQL" + with test_env.assert_raises_full_code("ORA-01403"): + await async_cursor.execute("begin raise no_data_found; end;") + + +async def test_5437(async_cursor, test_env): + "5437 - test executing an 
empty statement" + with test_env.assert_raises_full_code("DPY-2066"): + await async_cursor.execute("") + with test_env.assert_raises_full_code("DPY-2066"): + await async_cursor.execute(" ") diff --git a/tests/test_5500_pool_async.py b/tests/test_5500_pool_async.py index 4e8aa82a..e60bc68e 100644 --- a/tests/test_5500_pool_async.py +++ b/tests/test_5500_pool_async.py @@ -29,626 +29,651 @@ import asyncio import oracledb -import test_env +import pytest -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - require_connection = False +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass - async def __connect_and_drop(self, pool): - async with pool.acquire() as conn: - cursor = conn.cursor() - await cursor.execute("select count(*) from TestNumbers") - (count,) = await cursor.fetchone() - self.assertEqual(count, 10) - - async def __connect_and_generate_error(self, pool): - async with pool.acquire() as conn: - cursor = conn.cursor() - with self.assertRaisesFullCode("ORA-01476"): - await cursor.execute("select 1 / 0 from dual") - - async def __verify_connection( - self, connection, expected_user, expected_proxy_user=None - ): - cursor = connection.cursor() - await cursor.execute( - """ - select - sys_context('userenv', 'session_user'), - sys_context('userenv', 'proxy_user') - from dual - """ - ) - actual_user, actual_proxy_user = await cursor.fetchone() - self.assertEqual(actual_user, expected_user.upper()) - self.assertEqual( - actual_proxy_user, - expected_proxy_user and expected_proxy_user.upper(), - ) - await connection.close() - - async def __verify_create_arg(self, arg_name, arg_value, sql): - args = {} - args[arg_name] = arg_value - pool = test_env.get_pool_async(**args) - async with pool.acquire() as conn: - cursor = conn.cursor() - await cursor.execute(sql) - (fetched_value,) = await cursor.fetchone() - self.assertEqual(fetched_value, arg_value) - await pool.close() - async def test_5500(self): - "5500 - test getting default pool parameters" - pool = test_env.get_pool_async() - try: - self.assertEqual(pool.busy, 0) - self.assertEqual(pool.dsn, test_env.get_connect_string()) - self.assertEqual(pool.getmode, oracledb.POOL_GETMODE_WAIT) - self.assertTrue(pool.homogeneous) - self.assertEqual(pool.increment, 1) - self.assertEqual(pool.max, 2) - self.assertEqual(pool.max_lifetime_session, 0) - self.assertEqual(pool.min, 1) - self.assertEqual(pool.ping_interval, 60) - self.assertEqual( - pool.stmtcachesize, oracledb.defaults.stmtcachesize - ) - self.assertEqual(pool.thin, True) - self.assertEqual(pool.timeout, 0) - self.assertEqual(pool.username, test_env.get_main_user()) - finally: - await pool.close(force=True) +async def _connect_and_drop(pool): + async with pool.acquire() as conn: + cursor = conn.cursor() + await cursor.execute("select count(*) from TestNumbers") + (count,) = await cursor.fetchone() + assert count == 10 - async def test_5501(self): - "5501 - test setting pool attributes" - pool = test_env.get_pool_async() - test_values = [ - ((11, 2), "ping_interval", 30), - ((11, 2), "stmtcachesize", 100), - ((11, 2), "timeout", 10), - ((12, 2), "getmode", oracledb.POOL_GETMODE_TIMEDWAIT), - ((12, 1), "max_lifetime_session", 3), - ] - try: - for version, attr_name, value in test_values: - setattr(pool, attr_name, value) - self.assertEqual(getattr(pool, attr_name), value) - self.assertRaises( - TypeError, setattr, pool, attr_name, "invalid value" - ) - finally: - await pool.close(force=True) - async def test_5502(self): - 
"5502 - connection rolls back before released back to the pool" - pool = test_env.get_pool_async() - conn = await pool.acquire() +async def _connect_and_generate_error(test_env, pool): + async with pool.acquire() as conn: cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - await cursor.execute("insert into TestTempTable (IntCol) values (1)") - cursor.close() - await pool.release(conn) - pool = test_env.get_pool_async() - conn = await pool.acquire() + with test_env.assert_raises_full_code("ORA-01476"): + await cursor.execute("select 1 / 0 from dual") + + +async def _verify_connection( + connection, expected_user, expected_proxy_user=None +): + cursor = connection.cursor() + await cursor.execute( + """ + select + sys_context('userenv', 'session_user'), + sys_context('userenv', 'proxy_user') + from dual + """ + ) + actual_user, actual_proxy_user = await cursor.fetchone() + assert actual_user == expected_user.upper() + if expected_proxy_user is not None: + expected_proxy_user = expected_proxy_user.upper() + assert actual_proxy_user == expected_proxy_user + await connection.close() + + +async def _verify_create_arg(test_env, arg_name, arg_value, sql): + args = {} + args[arg_name] = arg_value + pool = test_env.get_pool_async(**args) + async with pool.acquire() as conn: cursor = conn.cursor() - await cursor.execute("select count(*) from TestTempTable") - (count,) = await cursor.fetchone() - self.assertEqual(count, 0) - await conn.close() - - async def test_5503(self): - "5503 - test session pool with multiple coroutines" - pool = test_env.get_pool_async(min=5, max=20, increment=2) - try: - coroutines = [self.__connect_and_drop(pool) for i in range(20)] - await asyncio.gather(*coroutines) - finally: + await cursor.execute(sql) + (fetched_value,) = await cursor.fetchone() + assert fetched_value == arg_value + await pool.close() + + +async def test_5500(test_env): + "5500 - test getting default pool parameters" + pool = test_env.get_pool_async() + try: + assert pool.busy == 0 + assert pool.dsn == test_env.connect_string + assert pool.getmode == oracledb.POOL_GETMODE_WAIT + assert pool.homogeneous + assert pool.increment == 1 + assert pool.max == 2 + assert pool.max_lifetime_session == 0 + assert pool.min == 1 + assert pool.ping_interval == 60 + assert pool.stmtcachesize == oracledb.defaults.stmtcachesize + assert pool.thin + assert pool.timeout == 0 + assert pool.username == test_env.main_user + finally: + await pool.close(force=True) + + +async def test_5501(test_env): + "5501 - test setting pool attributes" + pool = test_env.get_pool_async() + test_values = [ + ((11, 2), "ping_interval", 30), + ((11, 2), "stmtcachesize", 100), + ((11, 2), "timeout", 10), + ((12, 2), "getmode", oracledb.POOL_GETMODE_TIMEDWAIT), + ((12, 1), "max_lifetime_session", 3), + ] + try: + for version, attr_name, value in test_values: + setattr(pool, attr_name, value) + assert getattr(pool, attr_name) == value + pytest.raises(TypeError, setattr, pool, attr_name, "invalid value") + finally: + await pool.close(force=True) + + +async def test_5502(test_env): + "5502 - connection rolls back before released back to the pool" + pool = test_env.get_pool_async() + conn = await pool.acquire() + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + await cursor.execute("insert into TestTempTable (IntCol) values (1)") + cursor.close() + await pool.release(conn) + pool = test_env.get_pool_async() + conn = await pool.acquire() + cursor = conn.cursor() + await cursor.execute("select 
count(*) from TestTempTable") + (count,) = await cursor.fetchone() + assert count == 0 + await conn.close() + + +async def test_5503(test_env): + "5503 - test session pool with multiple coroutines" + pool = test_env.get_pool_async(min=5, max=20, increment=2) + try: + coroutines = [_connect_and_drop(pool) for i in range(20)] + await asyncio.gather(*coroutines) + finally: + await pool.close(force=True) + + +async def test_5504(test_env): + "5504 - test session pool with multiple coroutines (with errors)" + pool = test_env.get_pool_async(min=5, max=20, increment=2) + try: + coroutines = [ + _connect_and_generate_error(test_env, pool) for i in range(20) + ] + await asyncio.gather(*coroutines) + finally: + await pool.close(force=True) + + +async def test_5505(skip_if_drcp, test_env): + "5505 - test session pool with various types of purity" + pool = test_env.get_pool_async(min=1, max=8, increment=1) + + # get connection and set the action + action = "TEST_ACTION" + conn = await pool.acquire() + conn.action = action + cursor = conn.cursor() + await cursor.execute("select 1 from dual") + cursor.close() + await pool.release(conn) + assert pool.opened == 1, "opened (1)" + + # verify that the connection still has the action set on it + conn = await pool.acquire() + cursor = conn.cursor() + await cursor.execute("select sys_context('userenv', 'action') from dual") + (result,) = await cursor.fetchone() + assert result == action + cursor.close() + await pool.release(conn) + assert pool.opened == 1, "opened (2)" + + # get a new connection with new purity (should not have state) + conn = await pool.acquire(purity=oracledb.PURITY_NEW) + cursor = conn.cursor() + await cursor.execute("select sys_context('userenv', 'action') from dual") + (result,) = await cursor.fetchone() + assert result is None + cursor.close() + await pool.release(conn) + + +async def test_5506(test_env): + "5506 - test dropping/closing a connection from the pool" + pool = test_env.get_pool_async(min=1, max=5, increment=2) + try: + conns1 = [await pool.acquire() for _ in range(2)] + conns2 = [await oracledb.connect_async(pool=pool) for _ in range(3)] + assert pool.busy == 5 + assert pool.opened == 5 + + for conn in conns1: + await pool.drop(conn) + assert pool.busy == 3 + assert pool.opened == 3 + + for conn in conns2: + await conn.close() + assert pool.busy == 0 + assert pool.opened == 3 + finally: + await pool.close(force=True) + + +async def test_5507(test_env): + "5507 - test to ensure pure connections are being created correctly" + pool = test_env.get_pool_async(min=1, max=2, increment=1) + try: + conn1 = await pool.acquire() + conn2 = await pool.acquire() + assert pool.opened == 2, "opened (1)" + await pool.release(conn1) + await pool.release(conn2) + conn3 = await pool.acquire(purity=oracledb.PURITY_NEW) + assert pool.opened == 2, "opened (2)" + await pool.release(conn3) + finally: + await pool.close(force=True) + + +async def test_5508(test_env): + "5508 - test closing a pool normally with no connections checked out" + pool = test_env.get_pool_async(min=1, max=8, increment=1) + await pool.close() + + +async def test_5509(test_env): + "5509 - test closing a pool normally with connections checked out" + pool = test_env.get_pool_async(min=1, max=8, increment=1) + closed = False + try: + async with pool.acquire(): + with test_env.assert_raises_full_code("DPY-1005"): + await pool.close() + closed = True + finally: + if not closed: await pool.close(force=True) - async def test_5504(self): - "5504 - test session pool with multiple 
coroutines (with errors)" - pool = test_env.get_pool_async(min=5, max=20, increment=2) - try: - coroutines = [ - self.__connect_and_generate_error(pool) for i in range(20) - ] - await asyncio.gather(*coroutines) - finally: - await pool.close(force=True) - @test_env.skip_if_drcp() - async def test_5505(self): - "5505 - test session pool with various types of purity" - pool = test_env.get_pool_async(min=1, max=8, increment=1) +async def test_5510(test_env): + "5510 - test closing a pool forcibly" + pool = test_env.get_pool_async(min=1, max=8, increment=1) + async with pool.acquire(): + await pool.close(force=True) - # get connection and set the action - action = "TEST_ACTION" - conn = await pool.acquire() - conn.action = action - cursor = conn.cursor() - await cursor.execute("select 1 from dual") - cursor.close() - await pool.release(conn) - self.assertEqual(pool.opened, 1, "opened (1)") - # verify that the connection still has the action set on it - conn = await pool.acquire() - cursor = conn.cursor() - await cursor.execute( - "select sys_context('userenv', 'action') from dual" - ) - (result,) = await cursor.fetchone() - self.assertEqual(result, action) - cursor.close() - await pool.release(conn) - self.assertEqual(pool.opened, 1, "opened (2)") - - # get a new connection with new purity (should not have state) - conn = await pool.acquire(purity=oracledb.PURITY_NEW) - cursor = conn.cursor() - await cursor.execute( - "select sys_context('userenv', 'action') from dual" - ) - (result,) = await cursor.fetchone() - self.assertIsNone(result) - cursor.close() - await pool.release(conn) - - async def test_5506(self): - "5506 - test dropping/closing a connection from the pool" - pool = test_env.get_pool_async(min=1, max=5, increment=2) - try: - conns1 = [await pool.acquire() for _ in range(2)] - conns2 = [ - await oracledb.connect_async(pool=pool) for _ in range(3) - ] - self.assertEqual(pool.busy, 5) - self.assertEqual(pool.opened, 5) - - for conn in conns1: - await pool.drop(conn) - self.assertEqual(pool.busy, 3) - self.assertEqual(pool.opened, 3) - - for conn in conns2: - await conn.close() - self.assertEqual(pool.busy, 0) - self.assertEqual(pool.opened, 3) - finally: - await pool.close(force=True) +async def test_5511(test_env): + "5511 - using the pool after it is closed raises an exception" + pool = test_env.get_pool_async(min=1, max=8, increment=1) + await pool.close() + with test_env.assert_raises_full_code("DPY-1002"): + await pool.acquire() - async def test_5507(self): - "5507 - test to ensure pure connections are being created correctly" - pool = test_env.get_pool_async(min=1, max=2, increment=1) - try: - conn1 = await pool.acquire() - conn2 = await pool.acquire() - self.assertEqual(pool.opened, 2, "opened (1)") - await pool.release(conn1) - await pool.release(conn2) - conn3 = await pool.acquire(purity=oracledb.PURITY_NEW) - self.assertEqual(pool.opened, 2, "opened (2)") - await pool.release(conn3) - finally: - await pool.close(force=True) - async def test_5508(self): - "5508 - test closing a pool normally with no connections checked out" - pool = test_env.get_pool_async(min=1, max=8, increment=1) - await pool.close() +async def test_5512(test_env): + "5512 - using the pool beyond max limit raises an error" + pool = test_env.get_pool_async(min=1, max=2, increment=1) + try: + async with pool.acquire(), pool.acquire(): + pool.getmode = oracledb.POOL_GETMODE_NOWAIT + with test_env.assert_raises_full_code("DPY-4005"): + await pool.acquire() + finally: + await pool.close(force=True) - async def 
test_5509(self): - "5509 - test closing a pool normally with connections checked out" - pool = test_env.get_pool_async(min=1, max=8, increment=1) - closed = False - try: - async with pool.acquire(): - with self.assertRaisesFullCode("DPY-1005"): - await pool.close() - closed = True - finally: - if not closed: - await pool.close(force=True) - - async def test_5510(self): - "5510 - test closing a pool forcibly" - pool = test_env.get_pool_async(min=1, max=8, increment=1) - async with pool.acquire(): - await pool.close(force=True) - async def test_5511(self): - "5511 - using the pool after it is closed raises an exception" - pool = test_env.get_pool_async(min=1, max=8, increment=1) - await pool.close() - with self.assertRaisesFullCode("DPY-1002"): - await pool.acquire() - - async def test_5512(self): - "5512 - using the pool beyond max limit raises an error" - pool = test_env.get_pool_async(min=1, max=2, increment=1) - try: - async with pool.acquire(), pool.acquire(): - pool.getmode = oracledb.POOL_GETMODE_NOWAIT - with self.assertRaisesFullCode("DPY-4005"): - await pool.acquire() - finally: - await pool.close(force=True) +async def test_5513(test_env): + "5513 - callable session callback is executed for new connections" - async def test_5513(self): - "5513 - callable session callback is executed for new connections" + class Counter: + num_calls = 0 - class Counter: - num_calls = 0 + @classmethod + async def session_callback(cls, conn, requested_tag): + cls.num_calls += 1 - @classmethod - async def session_callback(cls, conn, requested_tag): - cls.num_calls += 1 + pool = test_env.get_pool_async( + min=1, + max=2, + increment=1, + session_callback=Counter.session_callback, + ) + try: + async with pool.acquire(), pool.acquire(): + pass + async with pool.acquire(), pool.acquire(): + pass + assert Counter.num_calls == 2 + finally: + await pool.close(force=True) - pool = test_env.get_pool_async( - min=1, - max=2, - increment=1, - session_callback=Counter.session_callback, - ) - try: - async with pool.acquire(), pool.acquire(): - pass - async with pool.acquire(), pool.acquire(): - pass - self.assertEqual(Counter.num_calls, 2) - finally: - await pool.close(force=True) - @test_env.skip_if_drcp() - async def test_5514(self): - "5514 - drop the pooled connection on receiving dead connection error" - admin_conn = await test_env.get_admin_connection_async() - pool = test_env.get_pool_async(min=2, max=2, increment=2) - try: - - # acquire connections from the pool and kill all the sessions - with admin_conn.cursor() as admin_cursor: - for conn in [await pool.acquire() for i in range(2)]: - sid, serial = await self.get_sid_serial(conn) - sql = f"alter system kill session '{sid},{serial}'" - await admin_cursor.execute(sql) - await conn.close() - self.assertEqual(pool.opened, 2) - - # when try to re-use the killed sessions error will be raised; - # release all such connections - for conn in [await pool.acquire() for i in range(2)]: - with conn.cursor() as cursor: - with self.assertRaisesFullCode("DPY-4011"): - await cursor.execute("select user from dual") - await conn.close() +async def test_5514(skip_if_drcp, test_env): + "5514 - drop the pooled connection on receiving dead connection error" + admin_conn = await test_env.get_admin_connection_async() + pool = test_env.get_pool_async(min=2, max=2, increment=2) + try: - # if a free connection is available, it can be used; otherwise a - # new connection will be created + # acquire connections from the pool and kill all the sessions + with admin_conn.cursor() as 
admin_cursor: for conn in [await pool.acquire() for i in range(2)]: - with conn.cursor() as cursor: - await cursor.execute("select user from dual") - (user,) = await cursor.fetchone() - self.assertEqual(user, test_env.get_main_user().upper()) + sid, serial = conn.session_id, conn.serial_num + sql = f"alter system kill session '{sid},{serial}'" + await admin_cursor.execute(sql) await conn.close() - self.assertEqual(pool.opened, 2) - finally: - await pool.close(force=True) + assert pool.opened == 2 - async def test_5515(self): - "5515 - acquire a connection from an empty pool (min=0)" - pool = test_env.get_pool_async(min=0, max=2, increment=2) - try: - async with pool.acquire() as conn: - with conn.cursor() as cursor: + # when try to re-use the killed sessions error will be raised; + # release all such connections + for conn in [await pool.acquire() for i in range(2)]: + with conn.cursor() as cursor: + with test_env.assert_raises_full_code("DPY-4011"): await cursor.execute("select user from dual") - (result,) = await cursor.fetchone() - self.assertEqual(result, test_env.get_main_user().upper()) - finally: - await pool.close(force=True) - - async def test_5516(self): - "5516 - get different object types from different connections" - pool = test_env.get_pool_async(min=1, max=2, increment=1) - try: - async with pool.acquire() as conn: - typ = await conn.gettype("UDT_SUBOBJECT") - self.assertEqual(typ.name, "UDT_SUBOBJECT") - async with pool.acquire() as conn: - typ = await conn.gettype("UDT_OBJECTARRAY") - self.assertEqual(typ.name, "UDT_OBJECTARRAY") - finally: - await pool.close(force=True) + await conn.close() - async def test_5517(self): - "5517 - test creating a pool using a proxy user" - user_str = f"{test_env.get_main_user()}[{test_env.get_proxy_user()}]" - pool = test_env.get_pool_async(user=user_str) - try: - await self.__verify_connection( - await pool.acquire(), - test_env.get_proxy_user(), - test_env.get_main_user(), - ) - finally: - await pool.close(force=True) - - @test_env.skip_if_drcp() - async def test_5518(self): - "5518 - test acquiring conn from pool in LIFO order" - pool = test_env.get_pool_async(min=5, max=10, increment=1) - try: - sql = "select sys_context('userenv', 'sid') from dual" - conns = [await pool.acquire() for i in range(3)] - sids = [] - for conn in conns: - with conn.cursor() as cursor: - await cursor.execute(sql) - (sid,) = await cursor.fetchone() - sids.append(sid) - await conns[1].close() - await conns[2].close() - await conns[0].close() - - async with pool.acquire() as conn: - with conn.cursor() as cursor: - await cursor.execute(sql) - (sid,) = await cursor.fetchone() - self.assertEqual(sid, sids[0], "not LIFO") - finally: - await pool.close(force=True) + # if a free connection is available, it can be used; otherwise a + # new connection will be created + for conn in [await pool.acquire() for i in range(2)]: + with conn.cursor() as cursor: + await cursor.execute("select user from dual") + (user,) = await cursor.fetchone() + assert user == test_env.main_user.upper() + await conn.close() + assert pool.opened == 2 + finally: + await pool.close(force=True) - async def test_5519(self): - "5519 - verify that dynamic pool cannot have an increment of zero" - pool = test_env.get_pool_async(min=1, max=3, increment=0) - try: - self.assertEqual(pool.increment, 1) - async with pool.acquire(), pool.acquire(): - pass - finally: - await pool.close(force=True) - async def test_5520(self): - "5520 - verify that static pool can have an increment of zero" - pool = 
test_env.get_pool_async(min=1, max=1, increment=0) - try: - self.assertEqual(pool.increment, 0) - async with pool.acquire(): - pass - finally: - await pool.close(force=True) +async def test_5515(test_env): + "5515 - acquire a connection from an empty pool (min=0)" + pool = test_env.get_pool_async(min=0, max=2, increment=2) + try: + async with pool.acquire() as conn: + with conn.cursor() as cursor: + await cursor.execute("select user from dual") + (result,) = await cursor.fetchone() + assert result == test_env.main_user.upper() + finally: + await pool.close(force=True) - async def test_5521(self): - "5521 - verify that connection with different cclass is reused" - cclass = "cclass2431" - pool = test_env.get_pool_async(min=1, max=1) - # ignore the first acquire which, depending on the speed with which the - # minimum connections are created, may create a connection that is - # discarded; instead, use the second acquire which should remain in the - # pool - try: - async with pool.acquire(cclass=cclass) as conn: - pass - async with pool.acquire(cclass=cclass) as conn: - sid_serial = await self.get_sid_serial(conn) - async with pool.acquire(cclass=cclass) as conn: - next_sid_serial = await self.get_sid_serial(conn) - self.assertEqual(next_sid_serial, sid_serial) - self.assertEqual(pool.opened, 1) - finally: - await pool.close(force=True) - async def test_5522(self): - "5522 - test creating a pool invalid params" - with self.assertRaisesFullCode("DPY-2027"): - oracledb.create_pool_async(params="bad params") - - async def test_5523(self): - "5523 - test releasing and dropping an invalid connection" - pool = test_env.get_pool_async() - try: - with self.assertRaises(TypeError): - await pool.release("invalid connection") - with self.assertRaises(TypeError): - await pool.drop("invalid connection") - finally: - await pool.close(force=True) +async def test_5516(test_env): + "5516 - get different object types from different connections" + pool = test_env.get_pool_async(min=1, max=2, increment=1) + try: + async with pool.acquire() as conn: + typ = await conn.gettype("UDT_SUBOBJECT") + assert typ.name == "UDT_SUBOBJECT" + async with pool.acquire() as conn: + typ = await conn.gettype("UDT_OBJECTARRAY") + assert typ.name == "UDT_OBJECTARRAY" + finally: + await pool.close(force=True) + + +async def test_5517(test_env): + "5517 - test creating a pool using a proxy user" + user_str = f"{test_env.main_user}[{test_env.proxy_user}]" + pool = test_env.get_pool_async(user=user_str) + try: + await _verify_connection( + await pool.acquire(), + test_env.proxy_user, + test_env.main_user, + ) + finally: + await pool.close(force=True) + + +async def test_5518(skip_if_drcp, test_env): + "5518 - test acquiring conn from pool in LIFO order" + pool = test_env.get_pool_async(min=5, max=10, increment=1) + try: + sql = "select sys_context('userenv', 'sid') from dual" + conns = [await pool.acquire() for i in range(3)] + sids = [] + for conn in conns: + with conn.cursor() as cursor: + await cursor.execute(sql) + (sid,) = await cursor.fetchone() + sids.append(sid) + await conns[1].close() + await conns[2].close() + await conns[0].close() - async def test_5524(self): - "5524 - test creating a pool with invalid pool_class" - with self.assertRaisesFullCode("DPY-2026"): - oracledb.create_pool_async(pool_class=int) + async with pool.acquire() as conn: + with conn.cursor() as cursor: + await cursor.execute(sql) + (sid,) = await cursor.fetchone() + assert sid == sids[0], "not LIFO" + finally: + await pool.close(force=True) + + +async def 
test_5519(test_env): + "5519 - verify that dynamic pool cannot have an increment of zero" + pool = test_env.get_pool_async(min=1, max=3, increment=0) + try: + assert pool.increment == 1 + async with pool.acquire(), pool.acquire(): + pass + finally: + await pool.close(force=True) - async def test_5525(self): - "5525 - test creating a pool with a subclassed connection type" - class MyConnection(oracledb.AsyncConnection): +async def test_5520(test_env): + "5520 - verify that static pool can have an increment of zero" + pool = test_env.get_pool_async(min=1, max=1, increment=0) + try: + assert pool.increment == 0 + async with pool.acquire(): pass + finally: + await pool.close(force=True) + + +async def test_5521(test_env): + "5521 - verify that connection with different cclass is reused" + cclass = "cclass2431" + pool = test_env.get_pool_async(min=1, max=1) + # ignore the first acquire which, depending on the speed with which the + # minimum connections are created, may create a connection that is + # discarded; instead, use the second acquire which should remain in the + # pool + try: + async with pool.acquire(cclass=cclass) as conn: + pass + async with pool.acquire(cclass=cclass) as conn: + sid_serial = (conn.session_id, conn.serial_num) + async with pool.acquire(cclass=cclass) as conn: + next_sid_serial = (conn.session_id, conn.serial_num) + assert next_sid_serial == sid_serial + assert pool.opened == 1 + finally: + await pool.close(force=True) + - pool = test_env.get_pool_async(connectiontype=MyConnection) - async with pool.acquire() as conn: - self.assertIsInstance(conn, MyConnection) - - async def test_5526(self): - "5526 - test creating a pool with a subclassed pool type" +async def test_5522(test_env): + "5522 - test creating a pool invalid params" + with test_env.assert_raises_full_code("DPY-2027"): + oracledb.create_pool_async(params="bad params") - class MyPool(oracledb.AsyncConnectionPool): + +async def test_5523(test_env): + "5523 - test releasing and dropping an invalid connection" + pool = test_env.get_pool_async() + try: + with pytest.raises(TypeError): + await pool.release("invalid connection") + with pytest.raises(TypeError): + await pool.drop("invalid connection") + finally: + await pool.close(force=True) + + +async def test_5524(test_env): + "5524 - test creating a pool with invalid pool_class" + with test_env.assert_raises_full_code("DPY-2026"): + oracledb.create_pool_async(pool_class=int) + + +async def test_5525(test_env): + "5525 - test creating a pool with a subclassed connection type" + + class MyConnection(oracledb.AsyncConnection): + pass + + pool = test_env.get_pool_async(connectiontype=MyConnection) + async with pool.acquire() as conn: + assert isinstance(conn, MyConnection) + + +async def test_5526(test_env): + "5526 - test creating a pool with a subclassed pool type" + + class MyPool(oracledb.AsyncConnectionPool): + pass + + pool = test_env.get_pool_async(pool_class=MyPool) + try: + assert isinstance(pool, MyPool) + finally: + await pool.close(force=True) + + +async def test_5527(test_env): + "5527 - test connectiontype with an invalid connection class" + with test_env.assert_raises_full_code("DPY-2023"): + test_env.get_pool_async(connectiontype=oracledb.Connection) + with test_env.assert_raises_full_code("DPY-2023"): + test_env.get_pool_async(connectiontype=int) + + +async def test_5528(skip_unless_pool_timed_wait_supported, test_env): + "5528 - ensure that timed wait times out with appropriate exception" + pool = test_env.get_pool_async( + 
getmode=oracledb.POOL_GETMODE_TIMEDWAIT, min=0, wait_timeout=1 + ) + with test_env.assert_raises_full_code("DPY-4005"): + await pool.acquire() + + +async def test_5529(test_env): + "5529 - ensure call timeout is reset on connections returned by pool" + pool = test_env.get_pool_async(ping_timeout=1000, ping_interval=0) + async with pool.acquire() as conn: + assert conn.call_timeout == 0 + async with pool.acquire() as conn: + assert conn.call_timeout == 0 + + +async def test_5530(test_env): + "5530 - test passing program when creating a pool" + sql = ( + "select program from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_create_arg(test_env, "program", "newprogram", sql) + + +async def test_5531(test_env): + "5531 - test passing machine when creating a pool" + sql = ( + "select machine from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_create_arg(test_env, "machine", "newmachine", sql) + + +async def test_5532(test_env): + "5532 - test passing terminal when creating a pool" + sql = ( + "select terminal from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_create_arg(test_env, "terminal", "newterminal", sql) + + +async def test_5533(test_env): + "5533 - test passing osuser when creating a pool" + sql = ( + "select osuser from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_create_arg(test_env, "osuser", "newosuser", sql) + + +async def test_5534(test_env): + "5534 - test passing driver_name when creating a pool" + sql = ( + "select distinct client_driver from v$session_connect_info " + "where sid = sys_context('userenv', 'sid')" + ) + await _verify_create_arg(test_env, "driver_name", "newdriver", sql) + + +async def test_5535(test_env): + "5535 - test register_parameter with pooled connection" + sdu = 4096 + params = test_env.get_pool_params() + protocol = "proto-test" + orig_connect_string = test_env.connect_string + connect_string = f"{protocol}://{orig_connect_string}" + + def hook(passed_protocol, passed_protocol_arg, passed_params): + assert passed_protocol == protocol + assert passed_protocol_arg == orig_connect_string + passed_params.parse_connect_string(passed_protocol_arg) + passed_params.set(sdu=sdu) + + try: + oracledb.register_protocol(protocol, hook) + pool = oracledb.create_pool_async(dsn=connect_string, params=params) + assert params.sdu == sdu + async with pool.acquire(): pass + await pool.close() + finally: + oracledb.register_protocol(protocol, None) - pool = test_env.get_pool_async(pool_class=MyPool) - try: - self.assertIsInstance(pool, MyPool) - finally: - await pool.close(force=True) - async def test_5527(self): - "5527 - test connectiontype with an invalid connection class" - with self.assertRaisesFullCode("DPY-2023"): - test_env.get_pool_async(connectiontype=oracledb.Connection) - with self.assertRaisesFullCode("DPY-2023"): - test_env.get_pool_async(connectiontype=int) - - @test_env.skip_unless_pool_timed_wait_supported() - async def test_5528(self): - "5528 - ensure that timed wait times out with appropriate exception" - pool = test_env.get_pool_async( - getmode=oracledb.POOL_GETMODE_TIMEDWAIT, min=0, wait_timeout=1 - ) - with self.assertRaisesFullCode("DPY-4005"): - await pool.acquire() +async def test_5536(test_env): + "5536 - test create_pool() with edition" + edition = test_env.edition_name + pool = test_env.get_pool_async(edition=edition) + async with pool.acquire() as conn: + assert conn.edition == edition + await pool.close() - async def 
test_5529(self): - "5529 - ensure call timeout is reset on connections returned by pool" - pool = test_env.get_pool_async(ping_timeout=1000, ping_interval=0) - async with pool.acquire() as conn: - self.assertEqual(conn.call_timeout, 0) - async with pool.acquire() as conn: - self.assertEqual(conn.call_timeout, 0) - async def test_5530(self): - "5530 - test passing program when creating a pool" - sql = ( - "select program from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - await self.__verify_create_arg("program", "newprogram", sql) +async def test_5537(test_env): + "5537 - test create_pool() and get_pool() with alias" + alias = "pool_alias_5537" + pool = test_env.get_pool_async(pool_alias=alias) + assert pool is oracledb.get_pool(alias) + await pool.close() - async def test_5531(self): - "5531 - test passing machine when creating a pool" - sql = ( - "select machine from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - await self.__verify_create_arg("machine", "newmachine", sql) - async def test_5532(self): - "5532 - test passing terminal when creating a pool" - sql = ( - "select terminal from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - await self.__verify_create_arg("terminal", "newterminal", sql) +async def test_5538(test_env): + "5538 - test create_pool() twice with the same alias" + alias = "pool_alias_5538" + pool = test_env.get_pool_async(pool_alias=alias) + with test_env.assert_raises_full_code("DPY-2055"): + test_env.get_pool_async(pool_alias=alias) + await pool.close() + assert oracledb.get_pool(alias) is None + + +async def test_5539(test_env): + "5539 - test acquire() with pool alias and stmtcachesize" + alias = "pool_5539" + stmtcachesize = 35 + test_env.get_pool_async(pool_alias=alias, stmtcachesize=stmtcachesize) + async with oracledb.connect_async(pool_alias=alias) as conn: + assert conn.stmtcachesize == stmtcachesize + await oracledb.get_pool(alias).close() + + +async def test_5540(test_env): + "5540 - test pool alias is case sensitive" + alias = "pool_5540" + test_env.get_pool_async(pool_alias=alias) + assert oracledb.get_pool(alias.upper()) is None + with test_env.assert_raises_full_code("DPY-2054"): + await test_env.get_connection_async(pool_alias=alias.upper()) + await oracledb.get_pool(alias).close() + + +async def test_5541(test_env): + "5541 - test pool alias with invalid types" + aliases = [5, set(), dict(), bytearray(1)] + for alias in aliases: + with pytest.raises(TypeError): + test_env.get_pool_async(pool_alias=alias) - async def test_5533(self): - "5533 - test passing osuser when creating a pool" - sql = ( - "select osuser from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - await self.__verify_create_arg("osuser", "newosuser", sql) - async def test_5534(self): - "5534 - test passing driver_name when creating a pool" - sql = ( - "select distinct client_driver from v$session_connect_info " - "where sid = sys_context('userenv', 'sid')" - ) - await self.__verify_create_arg("driver_name", "newdriver", sql) - - async def test_5535(self): - "5535 - test register_parameter with pooled connection" - sdu = 4096 - params = test_env.get_pool_params() - protocol = "proto-test" - orig_connect_string = test_env.get_connect_string() - connect_string = f"{protocol}://{orig_connect_string}" - - def hook(passed_protocol, passed_protocol_arg, passed_params): - self.assertEqual(passed_protocol, protocol) - self.assertEqual(passed_protocol_arg, orig_connect_string) - 
passed_params.parse_connect_string(passed_protocol_arg) - passed_params.set(sdu=sdu) - - try: - oracledb.register_protocol(protocol, hook) - pool = oracledb.create_pool_async( - dsn=connect_string, params=params - ) - self.assertEqual(params.sdu, sdu) - async with pool.acquire(): - pass - await pool.close() - finally: - oracledb.register_protocol(protocol, None) - - async def test_5536(self): - "5536 - test create_pool() with edition" - edition = test_env.get_edition_name() - pool = test_env.get_pool_async(edition=edition) - async with pool.acquire() as conn: - self.assertEqual(conn.edition, edition) - await pool.close() +async def test_5542(test_env): + "5542 - test creation of pool with min > max" + with test_env.assert_raises_full_code("DPY-2064"): + test_env.get_pool_async(min=3, max=2) - async def test_5537(self): - "5537 - test create_pool() and get_pool() with alias" - alias = "pool_alias_5537" - pool = test_env.get_pool_async(pool_alias=alias) - self.assertIs(pool, oracledb.get_pool(alias)) - await pool.close() - async def test_5538(self): - "5538 - test create_pool() twice with the same alias" - alias = "pool_alias_5538" - pool = test_env.get_pool_async(pool_alias=alias) - with self.assertRaisesFullCode("DPY-2055"): - test_env.get_pool_async(pool_alias=alias) - await pool.close() - self.assertIsNone(oracledb.get_pool(alias)) - - async def test_5539(self): - "5539 - test acquire() with pool alias and stmtcachesize" - alias = "pool_5539" - stmtcachesize = 35 - test_env.get_pool_async(pool_alias=alias, stmtcachesize=stmtcachesize) - async with oracledb.connect_async(pool_alias=alias) as conn: - self.assertEqual(conn.stmtcachesize, stmtcachesize) - await oracledb.get_pool(alias).close() - - async def test_5540(self): - "5540 - test pool alias is case sensitive" - alias = "pool_5540" - test_env.get_pool_async(pool_alias=alias) - self.assertIsNone(oracledb.get_pool(alias.upper())) - with self.assertRaisesFullCode("DPY-2054"): - await test_env.get_connection_async(pool_alias=alias.upper()) - await oracledb.get_pool(alias).close() - - async def test_5541(self): - "5541 - test pool alias with invalid types" - aliases = [5, set(), dict(), bytearray(1)] - for alias in aliases: - with self.subTest(alias=alias): - with self.assertRaises(TypeError): - test_env.get_pool_async(pool_alias=alias) - - async def test_5542(self): - "5542 - test creation of pool with min > max" - with self.assertRaisesFullCode("DPY-2064"): - test_env.get_pool_async(min=3, max=2) - - @test_env.skip_if_drcp() - async def test_5543(self): - "5543 - ping pooled connection on receiving dead connection error" - admin_conn = await test_env.get_admin_connection_async() - pool = test_env.get_pool_async(min=1, max=1, ping_interval=0) - - # kill connection in pool - with admin_conn.cursor() as admin_cursor: - async with pool.acquire() as conn: - sid, serial = await self.get_sid_serial(conn) - sql = f"alter system kill session '{sid},{serial}'" - await admin_cursor.execute(sql) +async def test_5543(skip_if_drcp, test_env): + "5543 - ping pooled connection on receiving dead connection error" + admin_conn = await test_env.get_admin_connection_async() + pool = test_env.get_pool_async(min=1, max=1, ping_interval=0) - # acquire connection which should succeed without failure + # kill connection in pool + with admin_conn.cursor() as admin_cursor: async with pool.acquire() as conn: - with conn.cursor() as cursor: - await cursor.execute("select user from dual") - (user,) = await cursor.fetchone() - self.assertEqual(user, 
test_env.get_main_user().upper()) - - -if __name__ == "__main__": - test_env.run_test_cases() + sid, serial = (conn.session_id, conn.serial_num) + sql = f"alter system kill session '{sid},{serial}'" + await admin_cursor.execute(sql) + + # acquire connection which should succeed without failure + async with pool.acquire() as conn: + with conn.cursor() as cursor: + await cursor.execute("select user from dual") + (user,) = await cursor.fetchone() + assert user == test_env.main_user.upper() diff --git a/tests/test_5600_dbobject_async.py b/tests/test_5600_dbobject_async.py index 0222abe3..1745eb1f 100644 --- a/tests/test_5600_dbobject_async.py +++ b/tests/test_5600_dbobject_async.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2023, 2024, Oracle and/or its affiliates. +# Copyright (c) 2023, 2025, Oracle and/or its affiliates. # # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -30,620 +30,640 @@ import decimal import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - maxDiff = None - - async def __test_data( - self, expected_int_value, expected_obj_value, expected_array_value - ): - int_value, object_value, array_value = await self.cursor.fetchone() - if object_value is not None: - object_value = await self.get_db_object_as_plain_object( - object_value - ) - if array_value is not None: - array_value = array_value.aslist() - self.assertEqual(int_value, expected_int_value) - self.assertEqual(object_value, expected_obj_value) - self.assertEqual(array_value, expected_array_value) - - async def test_5600(self): - "5600 - test binding an object (IN)" - type_obj = await self.conn.gettype("UDT_OBJECT") - obj = type_obj.newobject() - obj.NUMBERVALUE = 13 - obj.STRINGVALUE = "Test String" - result = await self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, [obj] - ) - exp = "udt_Object(13, 'Test String', null, null, null, null, null)" - self.assertEqual(result, exp) - obj.NUMBERVALUE = None - obj.STRINGVALUE = "Test With Dates" - obj.DATEVALUE = datetime.datetime(2016, 2, 10) - obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 10, 14, 13, 50) - result = await self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, [obj] - ) - expected_value = ( - "udt_Object(null, 'Test With Dates', null, " - "to_date('2016-02-10', 'YYYY-MM-DD'), " - "to_timestamp('2016-02-10 14:13:50', " - "'YYYY-MM-DD HH24:MI:SS'), " - "null, null)" - ) - self.assertEqual(result, expected_value) - obj.DATEVALUE = None - obj.TIMESTAMPVALUE = None - sub_type_obj = await self.conn.gettype("UDT_SUBOBJECT") - sub_obj = sub_type_obj.newobject() - sub_obj.SUBNUMBERVALUE = decimal.Decimal("18.25") - sub_obj.SUBSTRINGVALUE = "Sub String" - obj.SUBOBJECTVALUE = sub_obj - result = await self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, [obj] - ) - expected_value = ( - "udt_Object(null, 'Test With Dates', null, null, " - "null, udt_SubObject(18.25, 'Sub String'), null)" - ) - self.assertEqual(result, expected_value) - - async def test_5601(self): - "5601 - test copying an object" - type_obj = await self.conn.gettype("UDT_OBJECT") - obj = type_obj() - obj.NUMBERVALUE = 5124 - obj.STRINGVALUE = "A test string" - obj.DATEVALUE = datetime.datetime(2016, 2, 24) - obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 24, 13, 39, 10) - copied_obj = obj.copy() - 
self.assertEqual(obj.NUMBERVALUE, copied_obj.NUMBERVALUE) - self.assertEqual(obj.STRINGVALUE, copied_obj.STRINGVALUE) - self.assertEqual(obj.DATEVALUE, copied_obj.DATEVALUE) - self.assertEqual(obj.TIMESTAMPVALUE, copied_obj.TIMESTAMPVALUE) - - async def test_5602(self): - "5602 - test fetching objects" - await self.cursor.execute( - """ - select IntCol, ObjectCol, ArrayCol - from TestObjects - order by IntCol - """ - ) - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "OBJECTCOL", - oracledb.DB_TYPE_OBJECT, - None, - None, - None, - None, - True, - ), - ( - "ARRAYCOL", - oracledb.DB_TYPE_OBJECT, - None, - None, - None, - None, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - expected_value = ( - 1, - "First row", - "First ", - "N First Row", - "N First ", - b"Raw Data 1", - 2, - 5, - 12.125, - 0.5, - 12.5, - 25.25, - 50.125, - datetime.datetime(2007, 3, 6, 0, 0, 0), - datetime.datetime(2008, 9, 12, 16, 40), - datetime.datetime(2009, 10, 13, 17, 50), - oracledb.Timestamp(2010, 11, 14, 18, 55), - "Short CLOB value", - "Short NCLOB Value", - b"Short BLOB value", - (11, "Sub object 1"), - [(5, "first element"), (6, "second element")], - ) - await self.__test_data(1, expected_value, [5, 10, None, 20]) - await self.__test_data(2, None, [3, None, 9, 12, 15]) - expected_value = ( - 3, - "Third row", - "Third ", - "N Third Row", - "N Third ", - b"Raw Data 3", - 4, - 10, - 6.5, - 0.75, - 43.25, - 86.5, - 192.125, - datetime.datetime(2007, 6, 21, 0, 0, 0), - datetime.datetime(2007, 12, 13, 7, 30, 45), - datetime.datetime(2017, 6, 21, 23, 18, 45), - oracledb.Timestamp(2017, 7, 21, 8, 27, 13), - "Another short CLOB value", - "Another short NCLOB Value", - b"Yet another short BLOB value", - (13, "Sub object 3"), - [ - (10, "element #1"), - (20, "element #2"), - (30, "element #3"), - (40, "element #4"), - ], - ) - await self.__test_data(3, expected_value, None) - - async def test_5603(self): - "5603 - test getting object type" - type_obj = await self.conn.gettype("UDT_OBJECT") - self.assertFalse(type_obj.iscollection) - self.assertEqual(type_obj.schema, self.conn.username.upper()) - self.assertEqual(type_obj.name, "UDT_OBJECT") - sub_object_value_type = await self.conn.gettype("UDT_SUBOBJECT") - sub_object_array_type = await self.conn.gettype("UDT_OBJECTARRAY") - expected_metadata = [ - ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), - ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), - ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), - ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), - ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), - ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), - ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), - ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ( - "BINARYFLOATVALUE", - oracledb.DB_TYPE_BINARY_FLOAT, - None, - None, - None, - ), - ( - "BINARYDOUBLEVALUE", - oracledb.DB_TYPE_BINARY_DOUBLE, - None, - None, - None, - ), - ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), - ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), - ( - "TIMESTAMPTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_TZ, - None, - None, - None, - ), - ( - "TIMESTAMPLTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_LTZ, - None, - None, - None, - ), - ("CLOBVALUE", 
oracledb.DB_TYPE_CLOB, None, None, None), - ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), - ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), - ("SUBOBJECTVALUE", sub_object_value_type, None, None, None), - ("SUBOBJECTARRAY", sub_object_array_type, None, None, None), - ] - actual_metadata = [ - (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) - for attr in type_obj.attributes - ] - self.assertEqual(actual_metadata, expected_metadata) - self.assertEqual(sub_object_array_type.iscollection, True) - self.assertEqual(sub_object_array_type.attributes, []) - - async def test_5604(self): - "5604 - test object type data" - await self.cursor.execute( - """ - select ObjectCol - from TestObjects - where ObjectCol is not null - and rownum <= 1 - """ - ) - (obj,) = await self.cursor.fetchone() - self.assertEqual(obj.type.schema, self.conn.username.upper()) - self.assertEqual(obj.type.name, "UDT_OBJECT") - self.assertEqual(obj.type.attributes[0].name, "NUMBERVALUE") - - async def test_5605(self): - "5605 - test inserting and then querying object with all data types" - await self.cursor.execute("delete from TestClobs") - await self.cursor.execute("delete from TestNClobs") - await self.cursor.execute("delete from TestBlobs") - await self.cursor.execute("delete from TestObjects where IntCol > 3") - await self.cursor.execute( - """ - insert into TestClobs (IntCol, ClobCol) - values (1, 'A short CLOB') - """ - ) - await self.cursor.execute( - """ - insert into TestNClobs (IntCol, NClobCol) - values (1, 'A short NCLOB') - """ - ) - await self.cursor.execute( - """ - insert into TestBlobs (IntCol, BlobCol) - values (1, utl_raw.cast_to_raw('A short BLOB')) - """ - ) - await self.conn.commit() - await self.cursor.execute("select CLOBCol from TestClobs") - (clob,) = await self.cursor.fetchone() - await self.cursor.execute("select NCLOBCol from TestNClobs") - (nclob,) = await self.cursor.fetchone() - await self.cursor.execute("select BLOBCol from TestBlobs") - (blob,) = await self.cursor.fetchone() - type_obj = await self.conn.gettype("UDT_OBJECT") - obj = type_obj.newobject() - obj.NUMBERVALUE = 5 - obj.STRINGVALUE = "A string" - obj.FIXEDCHARVALUE = "Fixed str" - obj.NSTRINGVALUE = "A NCHAR string" - obj.NFIXEDCHARVALUE = "Fixed N" - obj.RAWVALUE = b"Raw Value" - obj.INTVALUE = 27 - obj.SMALLINTVALUE = 13 - obj.REALVALUE = 184.875 - obj.DOUBLEPRECISIONVALUE = 1.375 - obj.FLOATVALUE = 23.75 - obj.DATEVALUE = datetime.date(2017, 5, 9) - obj.TIMESTAMPVALUE = datetime.datetime(2017, 5, 9, 9, 41, 13) - obj.TIMESTAMPTZVALUE = datetime.datetime(1986, 8, 2, 15, 27, 38) - obj.TIMESTAMPLTZVALUE = datetime.datetime(1999, 11, 12, 23, 5, 2) - obj.BINARYFLOATVALUE = 14.25 - obj.BINARYDOUBLEVALUE = 29.1625 - obj.CLOBVALUE = clob - obj.NCLOBVALUE = nclob - obj.BLOBVALUE = blob - sub_type_obj = await self.conn.gettype("UDT_SUBOBJECT") - sub_obj = sub_type_obj.newobject() - sub_obj.SUBNUMBERVALUE = 23 - sub_obj.SUBSTRINGVALUE = "Substring value" - obj.SUBOBJECTVALUE = sub_obj - await self.cursor.execute( - """ - insert into TestObjects (IntCol, ObjectCol) - values (4, :obj) - """, - obj=obj, - ) - await self.cursor.execute( - """ - select IntCol, ObjectCol, ArrayCol - from TestObjects - where IntCol = 4 - """ +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def _test_data( + cursor, + test_env, + expected_int_value, + expected_obj_value, + expected_array_value, +): + int_value, object_value, array_value = await 
cursor.fetchone() + if object_value is not None: + object_value = await test_env.get_db_object_as_plain_object_async( + object_value ) - expected_value = ( - 5, - "A string", - "Fixed str ", - "A NCHAR string", - "Fixed N ", - b"Raw Value", - 27, - 13, - 184.875, - 1.375, - 23.75, - 14.25, - 29.1625, - datetime.datetime(2017, 5, 9, 0, 0, 0), - datetime.datetime(2017, 5, 9, 9, 41, 13), - datetime.datetime(1986, 8, 2, 15, 27, 38), - oracledb.Timestamp(1999, 11, 12, 23, 5, 2), - "A short CLOB", - "A short NCLOB", - b"A short BLOB", - (23, "Substring value"), + if array_value is not None: + array_value = array_value.aslist() + assert int_value == expected_int_value + assert object_value == expected_obj_value + assert array_value == expected_array_value + + +async def test_5600(async_conn, async_cursor): + "5600 - test binding an object (IN)" + type_obj = await async_conn.gettype("UDT_OBJECT") + obj = type_obj.newobject() + obj.NUMBERVALUE = 13 + obj.STRINGVALUE = "Test String" + result = await async_cursor.callfunc( + "pkg_TestBindObject.GetStringRep", str, [obj] + ) + exp = "udt_Object(13, 'Test String', null, null, null, null, null)" + assert result == exp + obj.NUMBERVALUE = None + obj.STRINGVALUE = "Test With Dates" + obj.DATEVALUE = datetime.datetime(2016, 2, 10) + obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 10, 14, 13, 50) + result = await async_cursor.callfunc( + "pkg_TestBindObject.GetStringRep", str, [obj] + ) + expected_value = ( + "udt_Object(null, 'Test With Dates', null, " + "to_date('2016-02-10', 'YYYY-MM-DD'), " + "to_timestamp('2016-02-10 14:13:50', " + "'YYYY-MM-DD HH24:MI:SS'), " + "null, null)" + ) + assert result == expected_value + obj.DATEVALUE = None + obj.TIMESTAMPVALUE = None + sub_type_obj = await async_conn.gettype("UDT_SUBOBJECT") + sub_obj = sub_type_obj.newobject() + sub_obj.SUBNUMBERVALUE = decimal.Decimal("18.25") + sub_obj.SUBSTRINGVALUE = "Sub String" + obj.SUBOBJECTVALUE = sub_obj + result = await async_cursor.callfunc( + "pkg_TestBindObject.GetStringRep", str, [obj] + ) + expected_value = ( + "udt_Object(null, 'Test With Dates', null, null, " + "null, udt_SubObject(18.25, 'Sub String'), null)" + ) + assert result == expected_value + + +async def test_5601(async_conn): + "5601 - test copying an object" + type_obj = await async_conn.gettype("UDT_OBJECT") + obj = type_obj() + obj.NUMBERVALUE = 5124 + obj.STRINGVALUE = "A test string" + obj.DATEVALUE = datetime.datetime(2016, 2, 24) + obj.TIMESTAMPVALUE = datetime.datetime(2016, 2, 24, 13, 39, 10) + copied_obj = obj.copy() + assert obj.NUMBERVALUE == copied_obj.NUMBERVALUE + assert obj.STRINGVALUE == copied_obj.STRINGVALUE + assert obj.DATEVALUE == copied_obj.DATEVALUE + assert obj.TIMESTAMPVALUE == copied_obj.TIMESTAMPVALUE + + +async def test_5602(async_cursor, test_env): + "5602 - test fetching objects" + await async_cursor.execute( + """ + select IntCol, ObjectCol, ArrayCol + from TestObjects + order by IntCol + """ + ) + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "OBJECTCOL", + oracledb.DB_TYPE_OBJECT, None, - ) - await self.__test_data(4, expected_value, None) - - obj.CLOBVALUE = await self.conn.createlob(oracledb.DB_TYPE_CLOB) - obj.NCLOBVALUE = await self.conn.createlob(oracledb.DB_TYPE_NCLOB) - obj.BLOBVALUE = await self.conn.createlob(oracledb.DB_TYPE_BLOB) - await obj.CLOBVALUE.write("A short CLOB (modified)") - await obj.NCLOBVALUE.write("A short NCLOB (modified)") - await obj.BLOBVALUE.write(b"A short BLOB (modified)") - await self.cursor.execute( - 
""" - insert into TestObjects (IntCol, ObjectCol) - values (5, :obj) - """, - obj=obj, - ) - await self.cursor.execute( - """ - select IntCol, ObjectCol, ArrayCol - from TestObjects - where IntCol = 5 - """ - ) - expected_value = ( - 5, - "A string", - "Fixed str ", - "A NCHAR string", - "Fixed N ", - b"Raw Value", - 27, - 13, - 184.875, - 1.375, - 23.75, - 14.25, - 29.1625, - datetime.datetime(2017, 5, 9, 0, 0, 0), - datetime.datetime(2017, 5, 9, 9, 41, 13), - datetime.datetime(1986, 8, 2, 15, 27, 38), - oracledb.Timestamp(1999, 11, 12, 23, 5, 2), - "A short CLOB (modified)", - "A short NCLOB (modified)", - b"A short BLOB (modified)", - (23, "Substring value"), None, - ) - await self.__test_data(5, expected_value, None) - await self.conn.rollback() - - async def test_5606(self): - "5606 - test trying to find an object type that does not exist" - with self.assertRaises(TypeError): - await self.conn.gettype(2) - with self.assertRaisesFullCode("DPY-2035"): - await self.conn.gettype("A TYPE THAT DOES NOT EXIST") - - async def test_5607(self): - "5607 - test appending an object of the wrong type to a collection" - collection_obj_type = await self.conn.gettype("UDT_OBJECTARRAY") - collection_obj = collection_obj_type.newobject() - array_obj_type = await self.conn.gettype("UDT_ARRAY") - array_obj = array_obj_type.newobject() - with self.assertRaisesFullCode("DPY-2008"): - collection_obj.append(array_obj) - - async def test_5608(self): - "5608 - test that referencing a sub object affects the parent object" - obj_type = await self.conn.gettype("UDT_OBJECT") - sub_obj_type = await self.conn.gettype("UDT_SUBOBJECT") - obj = obj_type.newobject() - obj.SUBOBJECTVALUE = sub_obj_type.newobject() - obj.SUBOBJECTVALUE.SUBNUMBERVALUE = 5 - obj.SUBOBJECTVALUE.SUBSTRINGVALUE = "Substring" - self.assertEqual(obj.SUBOBJECTVALUE.SUBNUMBERVALUE, 5) - self.assertEqual(obj.SUBOBJECTVALUE.SUBSTRINGVALUE, "Substring") - - async def test_5609(self): - "5609 - test accessing sub object after parent object destroyed" - obj_type = await self.conn.gettype("UDT_OBJECT") - sub_obj_type = await self.conn.gettype("UDT_SUBOBJECT") - array_type = await self.conn.gettype("UDT_OBJECTARRAY") - sub_obj1 = sub_obj_type.newobject() - sub_obj1.SUBNUMBERVALUE = 2 - sub_obj1.SUBSTRINGVALUE = "AB" - sub_obj2 = sub_obj_type.newobject() - sub_obj2.SUBNUMBERVALUE = 3 - sub_obj2.SUBSTRINGVALUE = "CDE" - obj = obj_type.newobject() - obj.SUBOBJECTARRAY = array_type.newobject([sub_obj1, sub_obj2]) - sub_obj_array = obj.SUBOBJECTARRAY - del obj - self.assertEqual( - await self.get_db_object_as_plain_object(sub_obj_array), - [(2, "AB"), (3, "CDE")], - ) + None, + None, + True, + ), + ( + "ARRAYCOL", + oracledb.DB_TYPE_OBJECT, + None, + None, + None, + None, + True, + ), + ] + assert async_cursor.description == expected_value + expected_value = ( + 1, + "First row", + "First ", + "N First Row", + "N First ", + b"Raw Data 1", + 2, + 5, + 12.125, + 0.5, + 12.5, + 25.25, + 50.125, + datetime.datetime(2007, 3, 6, 0, 0, 0), + datetime.datetime(2008, 9, 12, 16, 40), + datetime.datetime(2009, 10, 13, 17, 50), + oracledb.Timestamp(2010, 11, 14, 18, 55), + "Short CLOB value", + "Short NCLOB Value", + b"Short BLOB value", + (11, "Sub object 1"), + [(5, "first element"), (6, "second element")], + ) + await _test_data( + async_cursor, test_env, 1, expected_value, [5, 10, None, 20] + ) + await _test_data(async_cursor, test_env, 2, None, [3, None, 9, 12, 15]) + expected_value = ( + 3, + "Third row", + "Third ", + "N Third Row", + "N Third ", + b"Raw Data 3", + 
4, + 10, + 6.5, + 0.75, + 43.25, + 86.5, + 192.125, + datetime.datetime(2007, 6, 21, 0, 0, 0), + datetime.datetime(2007, 12, 13, 7, 30, 45), + datetime.datetime(2017, 6, 21, 23, 18, 45), + oracledb.Timestamp(2017, 7, 21, 8, 27, 13), + "Another short CLOB value", + "Another short NCLOB Value", + b"Yet another short BLOB value", + (13, "Sub object 3"), + [ + (10, "element #1"), + (20, "element #2"), + (30, "element #3"), + (40, "element #4"), + ], + ) + await _test_data(async_cursor, test_env, 3, expected_value, None) + + +async def test_5603(async_conn): + "5603 - test getting object type" + type_obj = await async_conn.gettype("UDT_OBJECT") + assert not type_obj.iscollection + assert type_obj.schema == async_conn.username.upper() + assert type_obj.name == "UDT_OBJECT" + sub_object_value_type = await async_conn.gettype("UDT_SUBOBJECT") + sub_object_array_type = await async_conn.gettype("UDT_OBJECTARRAY") + expected_metadata = [ + ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), + ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), + ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), + ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), + ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), + ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), + ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), + ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ( + "BINARYFLOATVALUE", + oracledb.DB_TYPE_BINARY_FLOAT, + None, + None, + None, + ), + ( + "BINARYDOUBLEVALUE", + oracledb.DB_TYPE_BINARY_DOUBLE, + None, + None, + None, + ), + ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), + ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), + ( + "TIMESTAMPTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_TZ, + None, + None, + None, + ), + ( + "TIMESTAMPLTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_LTZ, + None, + None, + None, + ), + ("CLOBVALUE", oracledb.DB_TYPE_CLOB, None, None, None), + ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), + ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), + ("SUBOBJECTVALUE", sub_object_value_type, None, None, None), + ("SUBOBJECTARRAY", sub_object_array_type, None, None, None), + ] + actual_metadata = [ + (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) + for attr in type_obj.attributes + ] + assert actual_metadata == expected_metadata + assert sub_object_array_type.iscollection + assert sub_object_array_type.attributes == [] + + +async def test_5604(async_conn, async_cursor): + "5604 - test object type data" + await async_cursor.execute( + """ + select ObjectCol + from TestObjects + where ObjectCol is not null + and rownum <= 1 + """ + ) + (obj,) = await async_cursor.fetchone() + assert obj.type.schema == async_conn.username.upper() + assert obj.type.name == "UDT_OBJECT" + assert obj.type.attributes[0].name == "NUMBERVALUE" + + +async def test_5605(async_conn, async_cursor, test_env): + "5605 - test inserting and then querying object with all data types" + await async_cursor.execute("delete from TestClobs") + await async_cursor.execute("delete from TestNClobs") + await async_cursor.execute("delete from TestBlobs") + await async_cursor.execute("delete from TestObjects where IntCol > 3") + await async_cursor.execute( + """ + insert into TestClobs (IntCol, ClobCol) + values (1, 'A short CLOB') + """ + ) + 
await async_cursor.execute( + """ + insert into TestNClobs (IntCol, NClobCol) + values (1, 'A short NCLOB') + """ + ) + await async_cursor.execute( + """ + insert into TestBlobs (IntCol, BlobCol) + values (1, utl_raw.cast_to_raw('A short BLOB')) + """ + ) + await async_conn.commit() + await async_cursor.execute("select CLOBCol from TestClobs") + (clob,) = await async_cursor.fetchone() + await async_cursor.execute("select NCLOBCol from TestNClobs") + (nclob,) = await async_cursor.fetchone() + await async_cursor.execute("select BLOBCol from TestBlobs") + (blob,) = await async_cursor.fetchone() + type_obj = await async_conn.gettype("UDT_OBJECT") + obj = type_obj.newobject() + obj.NUMBERVALUE = 5 + obj.STRINGVALUE = "A string" + obj.FIXEDCHARVALUE = "Fixed str" + obj.NSTRINGVALUE = "A NCHAR string" + obj.NFIXEDCHARVALUE = "Fixed N" + obj.RAWVALUE = b"Raw Value" + obj.INTVALUE = 27 + obj.SMALLINTVALUE = 13 + obj.REALVALUE = 184.875 + obj.DOUBLEPRECISIONVALUE = 1.375 + obj.FLOATVALUE = 23.75 + obj.DATEVALUE = datetime.date(2017, 5, 9) + obj.TIMESTAMPVALUE = datetime.datetime(2017, 5, 9, 9, 41, 13) + obj.TIMESTAMPTZVALUE = datetime.datetime(1986, 8, 2, 15, 27, 38) + obj.TIMESTAMPLTZVALUE = datetime.datetime(1999, 11, 12, 23, 5, 2) + obj.BINARYFLOATVALUE = 14.25 + obj.BINARYDOUBLEVALUE = 29.1625 + obj.CLOBVALUE = clob + obj.NCLOBVALUE = nclob + obj.BLOBVALUE = blob + sub_type_obj = await async_conn.gettype("UDT_SUBOBJECT") + sub_obj = sub_type_obj.newobject() + sub_obj.SUBNUMBERVALUE = 23 + sub_obj.SUBSTRINGVALUE = "Substring value" + obj.SUBOBJECTVALUE = sub_obj + await async_cursor.execute( + """ + insert into TestObjects (IntCol, ObjectCol) + values (4, :obj) + """, + obj=obj, + ) + await async_cursor.execute( + """ + select IntCol, ObjectCol, ArrayCol + from TestObjects + where IntCol = 4 + """ + ) + expected_value = ( + 5, + "A string", + "Fixed str ", + "A NCHAR string", + "Fixed N ", + b"Raw Value", + 27, + 13, + 184.875, + 1.375, + 23.75, + 14.25, + 29.1625, + datetime.datetime(2017, 5, 9, 0, 0, 0), + datetime.datetime(2017, 5, 9, 9, 41, 13), + datetime.datetime(1986, 8, 2, 15, 27, 38), + oracledb.Timestamp(1999, 11, 12, 23, 5, 2), + "A short CLOB", + "A short NCLOB", + b"A short BLOB", + (23, "Substring value"), + None, + ) + await _test_data(async_cursor, test_env, 4, expected_value, None) + + obj.CLOBVALUE = await async_conn.createlob(oracledb.DB_TYPE_CLOB) + obj.NCLOBVALUE = await async_conn.createlob(oracledb.DB_TYPE_NCLOB) + obj.BLOBVALUE = await async_conn.createlob(oracledb.DB_TYPE_BLOB) + await obj.CLOBVALUE.write("A short CLOB (modified)") + await obj.NCLOBVALUE.write("A short NCLOB (modified)") + await obj.BLOBVALUE.write(b"A short BLOB (modified)") + await async_cursor.execute( + """ + insert into TestObjects (IntCol, ObjectCol) + values (5, :obj) + """, + obj=obj, + ) + await async_cursor.execute( + """ + select IntCol, ObjectCol, ArrayCol + from TestObjects + where IntCol = 5 + """ + ) + expected_value = ( + 5, + "A string", + "Fixed str ", + "A NCHAR string", + "Fixed N ", + b"Raw Value", + 27, + 13, + 184.875, + 1.375, + 23.75, + 14.25, + 29.1625, + datetime.datetime(2017, 5, 9, 0, 0, 0), + datetime.datetime(2017, 5, 9, 9, 41, 13), + datetime.datetime(1986, 8, 2, 15, 27, 38), + oracledb.Timestamp(1999, 11, 12, 23, 5, 2), + "A short CLOB (modified)", + "A short NCLOB (modified)", + b"A short BLOB (modified)", + (23, "Substring value"), + None, + ) + await _test_data(async_cursor, test_env, 5, expected_value, None) + await async_conn.rollback() + + +async def 
test_5606(async_conn, test_env): + "5606 - test trying to find an object type that does not exist" + with pytest.raises(TypeError): + await async_conn.gettype(2) + with test_env.assert_raises_full_code("DPY-2035"): + await async_conn.gettype("A TYPE THAT DOES NOT EXIST") + + +async def test_5607(async_conn, test_env): + "5607 - test appending an object of the wrong type to a collection" + collection_obj_type = await async_conn.gettype("UDT_OBJECTARRAY") + collection_obj = collection_obj_type.newobject() + array_obj_type = await async_conn.gettype("UDT_ARRAY") + array_obj = array_obj_type.newobject() + with test_env.assert_raises_full_code("DPY-2008"): + collection_obj.append(array_obj) + + +async def test_5608(async_conn): + "5608 - test that referencing a sub object affects the parent object" + obj_type = await async_conn.gettype("UDT_OBJECT") + sub_obj_type = await async_conn.gettype("UDT_SUBOBJECT") + obj = obj_type.newobject() + obj.SUBOBJECTVALUE = sub_obj_type.newobject() + obj.SUBOBJECTVALUE.SUBNUMBERVALUE = 5 + obj.SUBOBJECTVALUE.SUBSTRINGVALUE = "Substring" + assert obj.SUBOBJECTVALUE.SUBNUMBERVALUE == 5 + assert obj.SUBOBJECTVALUE.SUBSTRINGVALUE == "Substring" + + +async def test_5609(async_conn, test_env): + "5609 - test accessing sub object after parent object destroyed" + obj_type = await async_conn.gettype("UDT_OBJECT") + sub_obj_type = await async_conn.gettype("UDT_SUBOBJECT") + array_type = await async_conn.gettype("UDT_OBJECTARRAY") + sub_obj1 = sub_obj_type.newobject() + sub_obj1.SUBNUMBERVALUE = 2 + sub_obj1.SUBSTRINGVALUE = "AB" + sub_obj2 = sub_obj_type.newobject() + sub_obj2.SUBNUMBERVALUE = 3 + sub_obj2.SUBSTRINGVALUE = "CDE" + obj = obj_type.newobject() + obj.SUBOBJECTARRAY = array_type.newobject([sub_obj1, sub_obj2]) + sub_obj_array = obj.SUBOBJECTARRAY + del obj + val = await test_env.get_db_object_as_plain_object_async(sub_obj_array) + assert val == [(2, "AB"), (3, "CDE")] + + +async def test_5610(async_conn, test_env): + "5610 - test assigning an object of wrong type to an object attribute" + obj_type = await async_conn.gettype("UDT_OBJECT") + obj = obj_type.newobject() + wrong_obj_type = await async_conn.gettype("UDT_OBJECTARRAY") + wrong_obj = wrong_obj_type.newobject() + with test_env.assert_raises_full_code("DPY-2008"): + setattr(obj, "SUBOBJECTVALUE", wrong_obj) + + +async def test_5611(async_conn, async_cursor, test_env): + "5611 - test setting value of object variable to wrong object type" + obj_type = await async_conn.gettype("UDT_OBJECT") + wrong_obj_type = await async_conn.gettype("UDT_OBJECTARRAY") + wrong_obj = wrong_obj_type.newobject() + var = async_cursor.var(obj_type) + with test_env.assert_raises_full_code("DPY-2008"): + var.setvalue(0, wrong_obj) + + +async def test_5612(async_conn, test_env): + "5612 - test trimming a number of elements from a collection" + sub_obj_type = await async_conn.gettype("UDT_SUBOBJECT") + array_type = await async_conn.gettype("UDT_OBJECTARRAY") + data = [(1, "AB"), (2, "CDE"), (3, "FGH"), (4, "IJK"), (5, "LMN")] + array_obj = array_type() + for num_val, str_val in data: + subObj = sub_obj_type() + subObj.SUBNUMBERVALUE = num_val + subObj.SUBSTRINGVALUE = str_val + array_obj.append(subObj) + assert ( + await test_env.get_db_object_as_plain_object_async(array_obj) == data + ) + array_obj.trim(2) + assert ( + await test_env.get_db_object_as_plain_object_async(array_obj) + == data[:3] + ) + array_obj.trim(1) + assert ( + await test_env.get_db_object_as_plain_object_async(array_obj) + == data[:2] + ) + array_obj.trim(0) + 
assert ( + await test_env.get_db_object_as_plain_object_async(array_obj) + == data[:2] + ) + array_obj.trim(2) + assert await test_env.get_db_object_as_plain_object_async(array_obj) == [] + + +async def test_5613(async_conn, test_env): + "5613 - test the metadata of a SQL type" + user = test_env.main_user.upper() + typ = await async_conn.gettype("UDT_OBJECTARRAY") + assert typ.schema == user + assert typ.name == "UDT_OBJECTARRAY" + assert typ.package_name is None + assert typ.element_type.schema == user + assert typ.element_type.name == "UDT_SUBOBJECT" + assert typ.element_type.package_name is None + + +async def test_5614(async_conn, test_env): + "5614 - test the metadata of a PL/SQL type" + typ = await async_conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") + assert typ.schema == test_env.main_user.upper() + assert typ.name == "UDT_STRINGLIST" + assert typ.package_name == "PKG_TESTSTRINGARRAYS" + assert typ.element_type == oracledb.DB_TYPE_VARCHAR + + +async def test_5615(async_conn, async_cursor): + "5615 - test collection with thousands of entries" + typ = await async_conn.gettype("PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST") + obj = typ.newobject() + obj.setelement(1, 1) + running_total = 1 + for i in range(1, 35000): + running_total += i + 1 + obj.append(running_total) + result = await async_cursor.callfunc( + "pkg_TestNumberArrays.TestInArrays", int, (2327, obj) + ) + assert result == 7146445847327 + + +async def test_5616(async_conn): + "5616 - test %ROWTYPE with all types" + sub_obj_type = await async_conn.gettype("UDT_SUBOBJECT") + sub_arr_type = await async_conn.gettype("UDT_OBJECTARRAY") + expected_metadata = [ + ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), + ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), + ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), + ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), + ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), + ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), + ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), + ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), + ("DECIMALVALUE", oracledb.DB_TYPE_NUMBER, 20, 6, None), + ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), + ( + "BINARYFLOATVALUE", + oracledb.DB_TYPE_BINARY_FLOAT, + None, + None, + None, + ), + ( + "BINARYDOUBLEVALUE", + oracledb.DB_TYPE_BINARY_DOUBLE, + None, + None, + None, + ), + ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), + ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), + ( + "TIMESTAMPTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_TZ, + None, + None, + None, + ), + ( + "TIMESTAMPLTZVALUE", + oracledb.DB_TYPE_TIMESTAMP_LTZ, + None, + None, + None, + ), + ("CLOBVALUE", oracledb.DB_TYPE_CLOB, None, None, None), + ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), + ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), + ("SUBOBJECTVALUE", sub_obj_type, None, None, None), + ("SUBOBJECTARRAY", sub_arr_type, None, None, None), + ] + obj_type = await async_conn.gettype("TESTALLTYPES%ROWTYPE") + actual_metadata = [ + (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) + for attr in obj_type.attributes + ] + assert actual_metadata == expected_metadata + + +async def test_5617(async_cursor): + "5617 - test collection iteration" + await async_cursor.execute("select udt_array(5, 10, 15) from dual") + (obj,) = await 
async_cursor.fetchone() + result = [i for i in obj] + assert result == [5, 10, 15] + + +async def test_5618(test_env): + "5618 - test insufficient privileges for gettype()" + user = test_env.proxy_user + password = test_env.proxy_password + main_user = test_env.main_user.upper() + async with await test_env.get_connection_async( + user=user, password=password + ) as conn: + with test_env.assert_raises_full_code("DPY-2035"): + await conn.gettype(f"{main_user}.UDT_OBJECTARRAY") - async def test_5610(self): - "5610 - test assigning an object of wrong type to an object attribute" - obj_type = await self.conn.gettype("UDT_OBJECT") - obj = obj_type.newobject() - wrong_obj_type = await self.conn.gettype("UDT_OBJECTARRAY") - wrong_obj = wrong_obj_type.newobject() - with self.assertRaisesFullCode("DPY-2008"): - setattr(obj, "SUBOBJECTVALUE", wrong_obj) - - async def test_5611(self): - "5611 - test setting value of object variable to wrong object type" - obj_type = await self.conn.gettype("UDT_OBJECT") - wrong_obj_type = await self.conn.gettype("UDT_OBJECTARRAY") - wrong_obj = wrong_obj_type.newobject() - var = self.cursor.var(obj_type) - with self.assertRaisesFullCode("DPY-2008"): - var.setvalue(0, wrong_obj) - - async def test_5612(self): - "5612 - test trimming a number of elements from a collection" - sub_obj_type = await self.conn.gettype("UDT_SUBOBJECT") - array_type = await self.conn.gettype("UDT_OBJECTARRAY") - data = [(1, "AB"), (2, "CDE"), (3, "FGH"), (4, "IJK"), (5, "LMN")] - array_obj = array_type() - for num_val, str_val in data: - subObj = sub_obj_type() - subObj.SUBNUMBERVALUE = num_val - subObj.SUBSTRINGVALUE = str_val - array_obj.append(subObj) - self.assertEqual( - await self.get_db_object_as_plain_object(array_obj), data - ) - array_obj.trim(2) - self.assertEqual( - await self.get_db_object_as_plain_object(array_obj), data[:3] - ) - array_obj.trim(1) - self.assertEqual( - await self.get_db_object_as_plain_object(array_obj), data[:2] - ) - array_obj.trim(0) - self.assertEqual( - await self.get_db_object_as_plain_object(array_obj), data[:2] - ) - array_obj.trim(2) - self.assertEqual( - await self.get_db_object_as_plain_object(array_obj), [] - ) - async def test_5613(self): - "5613 - test the metadata of a SQL type" - user = test_env.get_main_user() - typ = await self.conn.gettype("UDT_OBJECTARRAY") - self.assertEqual(typ.schema, user.upper()) - self.assertEqual(typ.name, "UDT_OBJECTARRAY") - self.assertIsNone(typ.package_name) - self.assertEqual(typ.element_type.schema, user.upper()) - self.assertEqual(typ.element_type.name, "UDT_SUBOBJECT") - self.assertIsNone(typ.element_type.package_name) - - async def test_5614(self): - "5614 - test the metadata of a PL/SQL type" - user = test_env.get_main_user() - typ = await self.conn.gettype("PKG_TESTSTRINGARRAYS.UDT_STRINGLIST") - self.assertEqual(typ.schema, user.upper()) - self.assertEqual(typ.name, "UDT_STRINGLIST") - self.assertEqual(typ.package_name, "PKG_TESTSTRINGARRAYS") - self.assertEqual(typ.element_type, oracledb.DB_TYPE_VARCHAR) - - async def test_5615(self): - "5615 - test collection with thousands of entries" - typ = await self.conn.gettype("PKG_TESTNUMBERARRAYS.UDT_NUMBERLIST") - obj = typ.newobject() - obj.setelement(1, 1) - running_total = 1 - for i in range(1, 35000): - running_total += i + 1 - obj.append(running_total) - result = await self.cursor.callfunc( - "pkg_TestNumberArrays.TestInArrays", int, (2327, obj) +async def test_5619(async_conn, async_cursor): + "5619 - test nested records" + options = [(None, None), (1, 
None), (None, 2), (1, 2)] + typ = await async_conn.gettype("PKG_TESTNESTEDRECORDS.UDT_OUTER") + for option in options: + value1, value2 = option + obj = await async_cursor.callfunc( + "pkg_TestNestedRecords.GetOuter", typ, (value1, value2) ) - self.assertEqual(result, 7146445847327) - - async def test_5616(self): - "5616 - test %ROWTYPE with all types" - sub_obj_type = await self.conn.gettype("UDT_SUBOBJECT") - sub_arr_type = await self.conn.gettype("UDT_OBJECTARRAY") - expected_metadata = [ - ("NUMBERVALUE", oracledb.DB_TYPE_NUMBER, 0, -127, None), - ("STRINGVALUE", oracledb.DB_TYPE_VARCHAR, None, None, 60), - ("FIXEDCHARVALUE", oracledb.DB_TYPE_CHAR, None, None, 10), - ("NSTRINGVALUE", oracledb.DB_TYPE_NVARCHAR, None, None, 120), - ("NFIXEDCHARVALUE", oracledb.DB_TYPE_NCHAR, None, None, 20), - ("RAWVALUE", oracledb.DB_TYPE_RAW, None, None, 16), - ("INTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("SMALLINTVALUE", oracledb.DB_TYPE_NUMBER, 38, 0, None), - ("REALVALUE", oracledb.DB_TYPE_NUMBER, 63, -127, None), - ("DECIMALVALUE", oracledb.DB_TYPE_NUMBER, 20, 6, None), - ("DOUBLEPRECISIONVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ("FLOATVALUE", oracledb.DB_TYPE_NUMBER, 126, -127, None), - ( - "BINARYFLOATVALUE", - oracledb.DB_TYPE_BINARY_FLOAT, - None, - None, - None, - ), - ( - "BINARYDOUBLEVALUE", - oracledb.DB_TYPE_BINARY_DOUBLE, - None, - None, - None, - ), - ("DATEVALUE", oracledb.DB_TYPE_DATE, None, None, None), - ("TIMESTAMPVALUE", oracledb.DB_TYPE_TIMESTAMP, None, None, None), - ( - "TIMESTAMPTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_TZ, - None, - None, - None, - ), - ( - "TIMESTAMPLTZVALUE", - oracledb.DB_TYPE_TIMESTAMP_LTZ, - None, - None, - None, - ), - ("CLOBVALUE", oracledb.DB_TYPE_CLOB, None, None, None), - ("NCLOBVALUE", oracledb.DB_TYPE_NCLOB, None, None, None), - ("BLOBVALUE", oracledb.DB_TYPE_BLOB, None, None, None), - ("SUBOBJECTVALUE", sub_obj_type, None, None, None), - ("SUBOBJECTARRAY", sub_arr_type, None, None, None), - ] - obj_type = await self.conn.gettype("TESTALLTYPES%ROWTYPE") - actual_metadata = [ - (attr.name, attr.type, attr.precision, attr.scale, attr.max_size) - for attr in obj_type.attributes - ] - self.assertEqual(actual_metadata, expected_metadata) - - async def test_5617(self): - "5617 - test collection iteration" - await self.cursor.execute("select udt_array(5, 10, 15) from dual") - (obj,) = await self.cursor.fetchone() - result = [i for i in obj] - self.assertEqual(result, [5, 10, 15]) - - async def test_5618(self): - "5618 - test insufficient privileges for gettype()" - user = test_env.get_proxy_user() - password = test_env.get_proxy_password() - main_user = test_env.get_main_user().upper() - async with await test_env.get_connection_async( - user=user, password=password - ) as conn: - with self.assertRaisesFullCode("DPY-2035"): - await conn.gettype(f"{main_user}.UDT_OBJECTARRAY") - - async def test_5619(self): - "5619 - test nested records" - options = [(None, None), (1, None), (None, 2), (1, 2)] - typ = await self.conn.gettype("PKG_TESTNESTEDRECORDS.UDT_OUTER") - for option in options: - with self.subTest(option=option): - value1, value2 = option - obj = await self.cursor.callfunc( - "pkg_TestNestedRecords.GetOuter", typ, (value1, value2) - ) - self.assertIsNotNone(obj.INNER1) - self.assertIsNone(obj.INNER1.ATTR1) - self.assertEqual(obj.INNER1.ATTR2, value1) - self.assertIsNotNone(obj.INNER2) - self.assertIsNone(obj.INNER2.ATTR1) - self.assertEqual(obj.INNER2.ATTR2, value2) - - -if __name__ == "__main__": - test_env.run_test_cases() + 
assert obj.INNER1 is not None + assert obj.INNER1.ATTR1 is None + assert obj.INNER1.ATTR2 == value1 + assert obj.INNER2 is not None + assert obj.INNER2.ATTR1 is None + assert obj.INNER2.ATTR2 == value2 diff --git a/tests/test_5700_lob_var_async.py b/tests/test_5700_lob_var_async.py index 6757d728..0e385f98 100644 --- a/tests/test_5700_lob_var_async.py +++ b/tests/test_5700_lob_var_async.py @@ -27,478 +27,508 @@ """ import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def __get_temp_lobs(self, sid): - cursor = self.conn.cursor() - await cursor.execute( - """ - select cache_lobs + nocache_lobs + abstract_lobs - from v$temporary_lobs - where sid = :sid - """, - sid=sid, - ) - row = await cursor.fetchone() - if row is None: - return 0 - return int(row[0]) - - async def __perform_test(self, lob_type, input_type): - long_string = "" - db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") - await self.cursor.execute(f"delete from Test{lob_type}s") - for i in range(11): - if i > 0: - char = chr(ord("A") + i - 1) - long_string += char * 25000 - elif input_type is not db_type: - continue - self.cursor.setinputsizes(long_string=input_type) - if lob_type == "BLOB": - bind_value = long_string.encode() - else: - bind_value = long_string - await self.cursor.execute( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col) - values (:integer_value, :long_string) - """, - integer_value=i, - long_string=bind_value, - ) - await self.conn.commit() - await self.cursor.execute( - f""" - select IntCol, {lob_type}Col - from Test{lob_type}s - order by IntCol - """ - ) - await self.__validate_query(await self.cursor.fetchall(), lob_type) - - async def __test_bind_ordering(self, lob_type): - main_col = "A" * 32768 - extra_col_1 = "B" * 65536 - extra_col_2 = "C" * 131072 +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def _get_temp_lobs(conn, sid): + cursor = conn.cursor() + await cursor.execute( + """ + select cache_lobs + nocache_lobs + abstract_lobs + from v$temporary_lobs + where sid = :sid + """, + sid=sid, + ) + row = await cursor.fetchone() + if row is None: + return 0 + return int(row[0]) + + +async def _perform_test(conn, lob_type, input_type, arraysize=None): + long_string = "" + cursor = conn.cursor() + if arraysize is not None: + cursor.arraysize = arraysize + db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") + await cursor.execute(f"delete from Test{lob_type}s") + for i in range(11): + if i > 0: + char = chr(ord("A") + i - 1) + long_string += char * 25000 + elif input_type is not db_type: + continue + cursor.setinputsizes(long_string=input_type) if lob_type == "BLOB": - main_col = main_col.encode() - extra_col_1 = extra_col_1.encode() - extra_col_2 = extra_col_2.encode() - self.conn.stmtcachesize = 0 - await self.cursor.execute(f"delete from Test{lob_type}s") - await self.conn.commit() - data = (1, main_col, 8, extra_col_1, 15, extra_col_2) - await self.cursor.execute( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col, - ExtraNumCol1, Extra{lob_type}Col1, ExtraNumCol2, - Extra{lob_type}Col2) - values (:1, :2, :3, :4, :5, :6) - """, - data, - ) - with test_env.DefaultsContextManager("fetch_lobs", False): - await self.cursor.execute(f"select * from Test{lob_type}s") - self.assertEqual(await self.cursor.fetchone(), data) - - async def __test_fetch_lobs_direct(self, lob_type): - await self.cursor.execute(f"delete from Test{lob_type}s") - await 
self.conn.commit() - data = [] - long_string = "" - for i in range(1, 11): - if i > 0: - char = chr(ord("A") + i - 1) - long_string += char * 25000 - if lob_type == "BLOB": - data.append((i, long_string.encode())) - else: - data.append((i, long_string)) - await self.cursor.executemany( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col) - values (:1, :2) - """, - data, - ) - with test_env.DefaultsContextManager("fetch_lobs", False): - await self.cursor.execute( - f""" - select IntCol, {lob_type}Col - from Test{lob_type}s - order by IntCol - """ - ) - self.assertEqual(await self.cursor.fetchall(), data) - - async def __test_lob_operations(self, lob_type): - await self.cursor.execute(f"delete from Test{lob_type}s") - await self.conn.commit() - self.cursor.setinputsizes(long_string=getattr(oracledb, lob_type)) - long_string = "X" * 75000 - write_value = "TEST" - if lob_type == "BLOB": - long_string = long_string.encode("ascii") - write_value = write_value.encode("ascii") - await self.cursor.execute( + bind_value = long_string.encode() + else: + bind_value = long_string + await cursor.execute( f""" insert into Test{lob_type}s (IntCol, {lob_type}Col) values (:integer_value, :long_string) """, - integer_value=1, - long_string=long_string, - ) - await self.cursor.execute( - f""" - select {lob_type}Col - from Test{lob_type}s - where IntCol = 1 - """ + integer_value=i, + long_string=bind_value, ) - (lob,) = await self.cursor.fetchone() - self.assertFalse(await lob.isopen()) + await conn.commit() + await cursor.execute( + f""" + select IntCol, {lob_type}Col + from Test{lob_type}s + order by IntCol + """ + ) + await _validate_query(await cursor.fetchall(), lob_type) + + +async def _test_bind_ordering(conn, lob_type): + cursor = conn.cursor() + main_col = "A" * 32768 + extra_col_1 = "B" * 65536 + extra_col_2 = "C" * 131072 + if lob_type == "BLOB": + main_col = main_col.encode() + extra_col_1 = extra_col_1.encode() + extra_col_2 = extra_col_2.encode() + conn.stmtcachesize = 0 + await cursor.execute(f"delete from Test{lob_type}s") + await conn.commit() + data = (1, main_col, 8, extra_col_1, 15, extra_col_2) + await cursor.execute( + f""" + insert into Test{lob_type}s (IntCol, {lob_type}Col, + ExtraNumCol1, Extra{lob_type}Col1, ExtraNumCol2, + Extra{lob_type}Col2) + values (:1, :2, :3, :4, :5, :6) + """, + data, + ) + await cursor.execute(f"select * from Test{lob_type}s") + assert await cursor.fetchone() == data + + +async def _test_fetch_lobs_direct(conn, lob_type): + cursor = conn.cursor() + await cursor.execute(f"delete from Test{lob_type}s") + await conn.commit() + data = [] + long_string = "" + for i in range(1, 11): + if i > 0: + char = chr(ord("A") + i - 1) + long_string += char * 25000 + if lob_type == "BLOB": + data.append((i, long_string.encode())) + else: + data.append((i, long_string)) + await cursor.executemany( + f""" + insert into Test{lob_type}s (IntCol, {lob_type}Col) + values (:1, :2) + """, + data, + ) + await cursor.execute( + f""" + select IntCol, {lob_type}Col + from Test{lob_type}s + order by IntCol + """ + ) + assert await cursor.fetchall() == data + + +async def _test_lob_operations(conn, test_env, lob_type): + cursor = conn.cursor() + await cursor.execute(f"delete from Test{lob_type}s") + await conn.commit() + cursor.setinputsizes(long_string=getattr(oracledb, lob_type)) + long_string = "X" * 75000 + write_value = "TEST" + if lob_type == "BLOB": + long_string = long_string.encode("ascii") + write_value = write_value.encode("ascii") + await cursor.execute( + f""" + 
insert into Test{lob_type}s (IntCol, {lob_type}Col) + values (:integer_value, :long_string) + """, + integer_value=1, + long_string=long_string, + ) + await cursor.execute( + f""" + select {lob_type}Col + from Test{lob_type}s + where IntCol = 1 + """ + ) + (lob,) = await cursor.fetchone() + assert not await lob.isopen() + await lob.open() + with test_env.assert_raises_full_code("ORA-22293"): await lob.open() - with self.assertRaisesFullCode("ORA-22293"): - await lob.open() - self.assertTrue(await lob.isopen()) + assert await lob.isopen() + await lob.close() + with test_env.assert_raises_full_code("ORA-22289"): await lob.close() - with self.assertRaisesFullCode("ORA-22289"): - await lob.close() - self.assertFalse(await lob.isopen()) - self.assertEqual(await lob.size(), 75000) - await lob.write(write_value, 75001) - self.assertEqual(await lob.size(), 75000 + len(write_value)) - with self.assertRaisesFullCode("DPY-2030"): - await lob.read(0) - with self.assertRaisesFullCode("DPY-2030"): - await lob.read(-25) - self.assertEqual(await lob.read(), long_string + write_value) - await lob.write(write_value, 1) - self.assertEqual( - await lob.read(), write_value + long_string[4:] + write_value - ) - await lob.trim(25000) - self.assertEqual(await lob.size(), 25000) - await lob.trim(newSize=10000) - self.assertEqual(await lob.size(), 10000) - with self.assertRaisesFullCode("DPY-2014"): - await lob.trim(new_size=50, newSize=60) - with self.assertRaises(TypeError): - await lob.trim(new_size="10000") - await lob.trim(new_size=40) - self.assertEqual(await lob.size(), 40) - await lob.trim() - self.assertEqual(await lob.size(), 0) - self.assertIsInstance(await lob.getchunksize(), int) - - async def __test_temporary_lob(self, lob_type): - await self.cursor.execute(f"delete from Test{lob_type}s") - value = "A test string value" - if lob_type == "BLOB": - value = value.encode("ascii") - db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") - lob = await self.conn.createlob(db_type, value) - await self.cursor.execute( - f""" - insert into Test{lob_type}s (IntCol, {lob_type}Col) - values (:int_val, :lob_val) - """, - int_val=1, - lob_val=lob, - ) - await self.conn.commit() - await self.cursor.execute(f"select {lob_type}Col from Test{lob_type}s") - (lob,) = await self.cursor.fetchone() - self.assertEqual(await lob.read(), value) - - async def __validate_query(self, rows, lob_type): - long_string = "" - db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") - for row in rows: - integer_value, lob = row - self.assertEqual(lob.type, db_type) - if integer_value == 0: - self.assertEqual(await lob.size(), 0) - expected_value = "" - if lob_type == "BLOB": - expected_value = expected_value.encode() - self.assertEqual(await lob.read(), expected_value) + assert not await lob.isopen() + assert await lob.size() == 75000 + await lob.write(write_value, 75001) + assert await lob.size() == 75000 + len(write_value) + with test_env.assert_raises_full_code("DPY-2030"): + await lob.read(0) + with test_env.assert_raises_full_code("DPY-2030"): + await lob.read(-25) + assert await lob.read() == long_string + write_value + await lob.write(write_value, 1) + assert await lob.read() == write_value + long_string[4:] + write_value + await lob.trim(25000) + assert await lob.size() == 25000 + await lob.trim(newSize=10000) + assert await lob.size() == 10000 + with test_env.assert_raises_full_code("DPY-2014"): + await lob.trim(new_size=50, newSize=60) + with pytest.raises(TypeError): + await lob.trim(new_size="10000") + await lob.trim(new_size=40) + 
assert await lob.size() == 40 + await lob.trim() + assert await lob.size() == 0 + assert isinstance(await lob.getchunksize(), int) + + +async def _test_temporary_lob(conn, lob_type): + cursor = conn.cursor() + await cursor.execute(f"delete from Test{lob_type}s") + value = "A test string value" + if lob_type == "BLOB": + value = value.encode("ascii") + db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") + lob = await conn.createlob(db_type, value) + await cursor.execute( + f""" + insert into Test{lob_type}s (IntCol, {lob_type}Col) + values (:int_val, :lob_val) + """, + int_val=1, + lob_val=lob, + ) + await conn.commit() + await cursor.execute(f"select {lob_type}Col from Test{lob_type}s") + (lob,) = await cursor.fetchone() + assert await lob.read() == value + + +async def _validate_query(rows, lob_type): + long_string = "" + db_type = getattr(oracledb, f"DB_TYPE_{lob_type}") + for row in rows: + integer_value, lob = row + assert lob.type == db_type + if integer_value == 0: + assert await lob.size() == 0 + expected_value = "" + if lob_type == "BLOB": + expected_value = expected_value.encode() + assert await lob.read() == expected_value + else: + char = chr(ord("A") + integer_value - 1) + prev_char = chr(ord("A") + integer_value - 2) + long_string += char * 25000 + if lob_type == "BLOB": + expected_value = long_string.encode("ascii") + char = char.encode("ascii") + prev_char = prev_char.encode("ascii") else: - char = chr(ord("A") + integer_value - 1) - prev_char = chr(ord("A") + integer_value - 2) - long_string += char * 25000 - if lob_type == "BLOB": - expected_value = long_string.encode("ascii") - char = char.encode("ascii") - prev_char = prev_char.encode("ascii") - else: - expected_value = long_string - self.assertEqual(await lob.size(), len(expected_value)) - self.assertEqual(await lob.read(), expected_value) - self.assertEqual(await lob.read(len(expected_value)), char) - if integer_value > 1: - offset = (integer_value - 1) * 25000 - 4 - string = prev_char * 5 + char * 5 - self.assertEqual(await lob.read(offset, 10), string) - - async def test_5700(self): - "5700 - test binding a LOB value directly" - await self.cursor.execute("delete from TestCLOBs") - await self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (1, 'Short value') - """ - ) - await self.cursor.execute("select ClobCol from TestCLOBs") - (lob,) = await self.cursor.fetchone() - await self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (2, :value) - """, - value=lob, - ) - await self.conn.commit() - - async def test_5701(self): - "5701 - test cursor description is accurate for BLOBs" - await self.cursor.execute("select IntCol, BlobCol from TestBLOBs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0), - ("BLOBCOL", oracledb.DB_TYPE_BLOB, None, None, None, None, 0), - ] - self.assertEqual(self.cursor.description, expected_value) - - async def test_5702(self): - "5703 - test binding and fetching BLOB data (indirectly)" - await self.__perform_test("BLOB", oracledb.DB_TYPE_LONG_RAW) - - async def test_5703(self): - "5703 - test operations on BLOBs" - await self.__test_lob_operations("BLOB") - - async def test_5704(self): - "5704 - test cursor description is accurate for CLOBs" - await self.cursor.execute("select IntCol, ClobCol from TestCLOBs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ("CLOBCOL", oracledb.DB_TYPE_CLOB, None, None, None, None, False), - ] - self.assertEqual(self.cursor.description, 
expected_value) - - async def test_5705(self): - "5705 - test binding and fetching CLOB data (indirectly)" - await self.__perform_test("CLOB", oracledb.DB_TYPE_LONG) - - async def test_5706(self): - "5706 - test operations on CLOBs" - await self.__test_lob_operations("CLOB") - - async def test_5707(self): - "5707 - test creating a temporary BLOB" - await self.__test_temporary_lob("BLOB") - - async def test_5708(self): - "5708 - test creating a temporary CLOB" - await self.__test_temporary_lob("CLOB") - - async def test_5709(self): - "5709 - test creating a temporary NCLOB" - await self.__test_temporary_lob("NCLOB") - - async def test_5710(self): - "5710 - test retrieving data from a CLOB after multiple fetches" - self.cursor.arraysize = 1 - await self.__perform_test("CLOB", oracledb.DB_TYPE_LONG) - - async def test_5711(self): - "5711 - test cursor description is accurate for NCLOBs" - await self.cursor.execute("select IntCol, NClobCol from TestNCLOBs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0), - ("NCLOBCOL", oracledb.DB_TYPE_NCLOB, None, None, None, None, 0), - ] - self.assertEqual(self.cursor.description, expected_value) - - async def test_5712(self): - "5712 - test binding and fetching NCLOB data (with non-ASCII chars)" - value = "\u03b4\u4e2a" - await self.cursor.execute("delete from TestNCLOBs") - self.cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR) - await self.cursor.execute( - """ - insert into TestNCLOBs (IntCol, NClobCol) - values (1, :val) - """, - val=value, - ) - await self.conn.commit() - await self.cursor.execute("select NCLOBCol from TestNCLOBs") - (nclob,) = await self.cursor.fetchone() - self.cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR) - await self.cursor.execute( - "update TestNCLOBs set NCLOBCol = :val", - val=await nclob.read() + value, - ) - await self.cursor.execute("select NCLOBCol from TestNCLOBs") - (nclob,) = await self.cursor.fetchone() - self.assertEqual(await nclob.read(), value + value) - - async def test_5713(self): - "5713 - test binding and fetching NCLOB data (indirectly)" - await self.__perform_test("NCLOB", oracledb.DB_TYPE_LONG) - - async def test_5714(self): - "5714 - test operations on NCLOBs" - await self.__test_lob_operations("NCLOB") - - @test_env.skip_if_implicit_pooling() - async def test_5715(self): - "5715 - test temporary LOBs" - await self.cursor.execute( - "select sys_context('USERENV', 'SID') from dual" - ) - (sid,) = await self.cursor.fetchone() - temp_lobs = await self.__get_temp_lobs(sid) - with self.conn.cursor() as cursor: - cursor.arraysize = 27 - self.assertEqual(temp_lobs, 0) - await cursor.execute( - "select extract(xmlcol, '/').getclobval() from TestXML" - ) - async for (lob,) in cursor: - await lob.read() - del lob - temp_lobs = await self.__get_temp_lobs(sid) - self.assertEqual(temp_lobs, 0) - - async def test_5716(self): - "5716 - test read/write temporary LOBs using supplemental characters" - charset = await test_env.get_charset_async() - if charset != "AL32UTF8": - self.skipTest("Database character set must be AL32UTF8") - supplemental_chars = ( - "𠜎 𠜱 𠝹 𠱓 𠱸 𠲖 𠳏 𠳕 𠴕 𠵼 𠵿 𠸎 𠸏 𠹷 𠺝 𠺢 𠻗 𠻹 𠻺 𠼭 𠼮 " - "𠽌 𠾴 𠾼 𠿪 𡁜 𡁯 𡁵 𡁶 𡁻 𡃁 𡃉 𡇙 𢃇 𢞵 𢫕 𢭃 𢯊 𢱑 𢱕 𢳂 𢴈 " - "𢵌 𢵧 𢺳 𣲷 𤓓 𤶸 𤷪 𥄫 𦉘 𦟌 𦧲 𦧺 𧨾 𨅝 𨈇 𨋢 𨳊 𨳍 𨳒 𩶘" - ) - await self.cursor.execute("delete from TestCLOBs") - lob = await self.conn.createlob( - oracledb.DB_TYPE_CLOB, supplemental_chars - ) - await self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (1, :val) - """, - [lob], - ) - await self.conn.commit() - await 
self.cursor.execute("select ClobCol from TestCLOBs") - (lob,) = await self.cursor.fetchone() - self.assertEqual(await lob.read(), supplemental_chars) - - async def test_5717(self): - "5717 - test fetching BLOB as bytes" - await self.__test_fetch_lobs_direct("BLOB") - - async def test_5718(self): - "5718 - test fetching CLOB as str" - await self.__test_fetch_lobs_direct("CLOB") - - async def test_5719(self): - "5719 - test fetching NCLOB as str" - await self.__test_fetch_lobs_direct("NCLOB") - - async def test_5720(self): - "5720 - test bind ordering with BLOB" - await self.__test_bind_ordering("BLOB") - - async def test_5721(self): - "5721 - test bind ordering with CLOB" - await self.__test_bind_ordering("CLOB") - - async def test_5722(self): - "5722 - test bind ordering with NCLOB" - await self.__test_bind_ordering("NCLOB") - - async def test_5723(self): - "5723 - test creating a lob with an invalid type" - with self.assertRaises(TypeError): - await self.conn.createlob(oracledb.DB_TYPE_NUMBER) - with self.assertRaises(TypeError): - await self.conn.createlob(oracledb.DB_TYPE_BFILE) - - async def test_5724(self): - "5724 - test creation of temporary LOBs with varying data" - cases = [ - (oracledb.DB_TYPE_BLOB, b"test_5724A", b"!", b"test_5724A!"), - (oracledb.DB_TYPE_BLOB, "test_5724B", "!", b"test_5724B!"), - (oracledb.DB_TYPE_CLOB, b"test_5724C", b"!", "test_5724C!"), - (oracledb.DB_TYPE_CLOB, "test_5724D", "!", "test_5724D!"), - (oracledb.DB_TYPE_NCLOB, b"test_5724E", b"!", "test_5724E!"), - (oracledb.DB_TYPE_NCLOB, "test_5724F", "!", "test_5724F!"), - ] - for typ, initial_data, additional_data, expected_result in cases: - with self.subTest(): - lob = await self.conn.createlob(typ, initial_data) - await lob.write(additional_data, len(initial_data) + 1) - self.assertEqual(await lob.read(), expected_result) - - async def test_5725(self): - "5725 - test reading and writing a LOB with a closed connection" - types = [ - oracledb.DB_TYPE_BLOB, - oracledb.DB_TYPE_CLOB, - oracledb.DB_TYPE_NCLOB, - ] - for typ in types: - conn = await test_env.get_connection(use_async=True) - lob = await conn.createlob(typ, "Temp LOB") - await conn.close() - with self.assertRaisesFullCode("DPY-1001"): - await lob.read() - with self.assertRaisesFullCode("DPY-1001"): - await lob.write("x") - - async def test_5726(self): - "5726 - test reading a non-existent directory" - directory_name = "TEST_5726_MISSING_DIR" - file_name = "test_5726_missing_file.txt" - await self.cursor.execute( - "select BFILENAME(:1, :2) from dual", [directory_name, file_name] + expected_value = long_string + assert await lob.size() == len(expected_value) + assert await lob.read() == expected_value + assert await lob.read(len(expected_value)) == char + if integer_value > 1: + offset = (integer_value - 1) * 25000 - 4 + string = prev_char * 5 + char * 5 + assert await lob.read(offset, 10) == string + + +async def test_5700(async_conn, async_cursor): + "5700 - test binding a LOB value directly" + await async_cursor.execute("delete from TestCLOBs") + await async_cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (1, 'Short value') + """ + ) + await async_cursor.execute("select ClobCol from TestCLOBs") + (lob,) = await async_cursor.fetchone() + await async_cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (2, :value) + """, + value=lob, + ) + await async_conn.commit() + + +async def test_5701(async_cursor): + "5701 - test cursor description is accurate for BLOBs" + await async_cursor.execute("select 
IntCol, BlobCol from TestBLOBs")
+    expected_value = [
+        ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0),
+        ("BLOBCOL", oracledb.DB_TYPE_BLOB, None, None, None, None, 0),
+    ]
+    assert async_cursor.description == expected_value
+
+
+async def test_5702(async_conn):
+    "5702 - test binding and fetching BLOB data (indirectly)"
+    await _perform_test(async_conn, "BLOB", oracledb.DB_TYPE_LONG_RAW)
+
+
+async def test_5703(async_conn, test_env):
+    "5703 - test operations on BLOBs"
+    await _test_lob_operations(async_conn, test_env, "BLOB")
+
+
+async def test_5704(async_cursor):
+    "5704 - test cursor description is accurate for CLOBs"
+    await async_cursor.execute("select IntCol, ClobCol from TestCLOBs")
+    expected_value = [
+        ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False),
+        ("CLOBCOL", oracledb.DB_TYPE_CLOB, None, None, None, None, False),
+    ]
+    assert async_cursor.description == expected_value
+
+
+async def test_5705(async_conn):
+    "5705 - test binding and fetching CLOB data (indirectly)"
+    await _perform_test(async_conn, "CLOB", oracledb.DB_TYPE_LONG)
+
+
+async def test_5706(async_conn, test_env):
+    "5706 - test operations on CLOBs"
+    await _test_lob_operations(async_conn, test_env, "CLOB")
+
+
+async def test_5707(async_conn):
+    "5707 - test creating a temporary BLOB"
+    await _test_temporary_lob(async_conn, "BLOB")
+
+
+async def test_5708(async_conn):
+    "5708 - test creating a temporary CLOB"
+    await _test_temporary_lob(async_conn, "CLOB")
+
+
+async def test_5709(async_conn):
+    "5709 - test creating a temporary NCLOB"
+    await _test_temporary_lob(async_conn, "NCLOB")
+
+
+async def test_5710(async_conn):
+    "5710 - test retrieving data from a CLOB after multiple fetches"
+    await _perform_test(async_conn, "CLOB", oracledb.DB_TYPE_LONG, arraysize=1)
+
+
+async def test_5711(async_cursor):
+    "5711 - test cursor description is accurate for NCLOBs"
+    await async_cursor.execute("select IntCol, NClobCol from TestNCLOBs")
+    expected_value = [
+        ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, 0),
+        ("NCLOBCOL", oracledb.DB_TYPE_NCLOB, None, None, None, None, 0),
+    ]
+    assert async_cursor.description == expected_value
+
+
+async def test_5712(async_conn, async_cursor):
+    "5712 - test binding and fetching NCLOB data (with non-ASCII chars)"
+    value = "\u03b4\u4e2a"
+    await async_cursor.execute("delete from TestNCLOBs")
+    async_cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR)
+    await async_cursor.execute(
+        """
+        insert into TestNCLOBs (IntCol, NClobCol)
+        values (1, :val)
+        """,
+        val=value,
+    )
+    await async_conn.commit()
+    await async_cursor.execute("select NCLOBCol from TestNCLOBs")
+    (nclob,) = await async_cursor.fetchone()
+    async_cursor.setinputsizes(val=oracledb.DB_TYPE_NVARCHAR)
+    await async_cursor.execute(
+        "update TestNCLOBs set NCLOBCol = :val",
+        val=await nclob.read() + value,
+    )
+    await async_cursor.execute("select NCLOBCol from TestNCLOBs")
+    (nclob,) = await async_cursor.fetchone()
+    assert await nclob.read() == value + value
+
+
+async def test_5713(async_conn):
+    "5713 - test binding and fetching NCLOB data (indirectly)"
+    await _perform_test(async_conn, "NCLOB", oracledb.DB_TYPE_LONG)
+
+
+async def test_5714(async_conn, test_env):
+    "5714 - test operations on NCLOBs"
+    await _test_lob_operations(async_conn, test_env, "NCLOB")
+
+
+async def test_5715(skip_if_implicit_pooling, async_conn, async_cursor):
+    "5715 - test temporary LOBs"
+    await async_cursor.execute(
+        "select sys_context('USERENV', 'SID') from dual"
+    )
+    (sid,) = await async_cursor.fetchone()
+ temp_lobs = await _get_temp_lobs(async_conn, sid) + with async_conn.cursor() as cursor: + cursor.arraysize = 27 + assert temp_lobs == 0 + await cursor.execute( + "select extract(xmlcol, '/').getclobval() from TestXML" ) - (bfile,) = await self.cursor.fetchone() - self.assertEqual(bfile.getfilename(), (directory_name, file_name)) - with self.assertRaisesFullCode("ORA-22285"): - await bfile.fileexists() - with self.assertRaisesFullCode("ORA-22285"): - await bfile.read() - - async def test_5727(self): - "5727 - test using BFILE methods on non-BFILE LOBs" - types = [ - oracledb.DB_TYPE_BLOB, - oracledb.DB_TYPE_CLOB, - oracledb.DB_TYPE_NCLOB, - ] - for typ in types: - lob = await self.conn.createlob(typ) - with self.assertRaisesFullCode("DPY-3026"): - lob.getfilename() - with self.assertRaisesFullCode("DPY-3026"): - lob.setfilename("not_relevant", "not_relevant") - with self.assertRaisesFullCode("DPY-3026"): - await lob.fileexists() - - -if __name__ == "__main__": - test_env.run_test_cases() + async for (lob,) in cursor: + await lob.read() + del lob + temp_lobs = await _get_temp_lobs(async_conn, sid) + assert temp_lobs == 0 + + +async def test_5716(async_conn, async_cursor, test_env): + "5716 - test read/write temporary LOBs using supplemental characters" + if test_env.charset != "AL32UTF8": + pytest.skip("Database character set must be AL32UTF8") + supplemental_chars = ( + "𠜎 𠜱 𠝹 𠱓 𠱸 𠲖 𠳏 𠳕 𠴕 𠵼 𠵿 𠸎 𠸏 𠹷 𠺝 𠺢 𠻗 𠻹 𠻺 𠼭 𠼮 " + "𠽌 𠾴 𠾼 𠿪 𡁜 𡁯 𡁵 𡁶 𡁻 𡃁 𡃉 𡇙 𢃇 𢞵 𢫕 𢭃 𢯊 𢱑 𢱕 𢳂 𢴈 " + "𢵌 𢵧 𢺳 𣲷 𤓓 𤶸 𤷪 𥄫 𦉘 𦟌 𦧲 𦧺 𧨾 𨅝 𨈇 𨋢 𨳊 𨳍 𨳒 𩶘" + ) + await async_cursor.execute("delete from TestCLOBs") + lob = await async_conn.createlob(oracledb.DB_TYPE_CLOB, supplemental_chars) + await async_cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (1, :val) + """, + [lob], + ) + await async_conn.commit() + await async_cursor.execute("select ClobCol from TestCLOBs") + (lob,) = await async_cursor.fetchone() + assert await lob.read() == supplemental_chars + + +async def test_5717(disable_fetch_lobs, async_conn): + "5717 - test fetching BLOB as bytes" + await _test_fetch_lobs_direct(async_conn, "BLOB") + + +async def test_5718(disable_fetch_lobs, async_conn): + "5718 - test fetching CLOB as str" + await _test_fetch_lobs_direct(async_conn, "CLOB") + + +async def test_5719(disable_fetch_lobs, async_conn): + "5719 - test fetching NCLOB as str" + await _test_fetch_lobs_direct(async_conn, "NCLOB") + + +async def test_5720(disable_fetch_lobs, async_conn): + "5720 - test bind ordering with BLOB" + await _test_bind_ordering(async_conn, "BLOB") + + +async def test_5721(disable_fetch_lobs, async_conn): + "5721 - test bind ordering with CLOB" + await _test_bind_ordering(async_conn, "CLOB") + + +async def test_5722(disable_fetch_lobs, async_conn): + "5722 - test bind ordering with NCLOB" + await _test_bind_ordering(async_conn, "NCLOB") + + +async def test_5723(async_conn): + "5723 - test creating a lob with an invalid type" + with pytest.raises(TypeError): + await async_conn.createlob(oracledb.DB_TYPE_NUMBER) + with pytest.raises(TypeError): + await async_conn.createlob(oracledb.DB_TYPE_BFILE) + + +async def test_5724(async_conn): + "5724 - test creation of temporary LOBs with varying data" + cases = [ + (oracledb.DB_TYPE_BLOB, b"test_5724A", b"!", b"test_5724A!"), + (oracledb.DB_TYPE_BLOB, "test_5724B", "!", b"test_5724B!"), + (oracledb.DB_TYPE_CLOB, b"test_5724C", b"!", "test_5724C!"), + (oracledb.DB_TYPE_CLOB, "test_5724D", "!", "test_5724D!"), + (oracledb.DB_TYPE_NCLOB, b"test_5724E", b"!", "test_5724E!"), + 
(oracledb.DB_TYPE_NCLOB, "test_5724F", "!", "test_5724F!"), + ] + for typ, initial_data, additional_data, expected_result in cases: + lob = await async_conn.createlob(typ, initial_data) + await lob.write(additional_data, len(initial_data) + 1) + assert await lob.read() == expected_result + + +async def test_5725(test_env): + "5725 - test reading and writing a LOB with a closed connection" + types = [ + oracledb.DB_TYPE_BLOB, + oracledb.DB_TYPE_CLOB, + oracledb.DB_TYPE_NCLOB, + ] + for typ in types: + conn = await test_env.get_connection(use_async=True) + lob = await conn.createlob(typ, "Temp LOB") + await conn.close() + with test_env.assert_raises_full_code("DPY-1001"): + await lob.read() + with test_env.assert_raises_full_code("DPY-1001"): + await lob.write("x") + + +async def test_5726(async_cursor, test_env): + "5726 - test reading a non-existent directory" + directory_name = "TEST_5726_MISSING_DIR" + file_name = "test_5726_missing_file.txt" + await async_cursor.execute( + "select BFILENAME(:1, :2) from dual", [directory_name, file_name] + ) + (bfile,) = await async_cursor.fetchone() + assert bfile.getfilename() == (directory_name, file_name) + with test_env.assert_raises_full_code("ORA-22285"): + await bfile.fileexists() + with test_env.assert_raises_full_code("ORA-22285"): + await bfile.read() + + +async def test_5727(async_conn, test_env): + "5727 - test using BFILE methods on non-BFILE LOBs" + types = [ + oracledb.DB_TYPE_BLOB, + oracledb.DB_TYPE_CLOB, + oracledb.DB_TYPE_NCLOB, + ] + for typ in types: + lob = await async_conn.createlob(typ) + with test_env.assert_raises_full_code("DPY-3026"): + lob.getfilename() + with test_env.assert_raises_full_code("DPY-3026"): + lob.setfilename("not_relevant", "not_relevant") + with test_env.assert_raises_full_code("DPY-3026"): + await lob.fileexists() diff --git a/tests/test_5800_cursor_var_async.py b/tests/test_5800_cursor_var_async.py index 4047b077..73b530f1 100644 --- a/tests/test_5800_cursor_var_async.py +++ b/tests/test_5800_cursor_var_async.py @@ -27,384 +27,389 @@ """ import oracledb -import test_env +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_5800(async_conn, async_cursor, test_env): + "5800 - test binding in a cursor" + ref_cursor = async_conn.cursor() + assert ref_cursor.description is None + await async_cursor.execute( + """ + begin + open :cursor for select 'X' StringValue from dual; + end; + """, + cursor=ref_cursor, + ) + varchar_ratio, _ = test_env.charset_ratios + expected_value = [ + ( + "STRINGVALUE", + oracledb.DB_TYPE_CHAR, + 1, + varchar_ratio, + None, + None, + True, + ) + ] + assert ref_cursor.description == expected_value + assert await ref_cursor.fetchall() == [("X",)] + + +async def test_5801(async_conn, async_cursor, test_env): + "5801 - test binding in a cursor from a package" + ref_cursor = async_conn.cursor() + assert ref_cursor.description is None + await async_cursor.callproc( + "pkg_TestRefCursors.TestOutCursor", (2, ref_cursor) + ) + varchar_ratio, _ = test_env.charset_ratios + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "STRINGCOL", + oracledb.DB_TYPE_VARCHAR, + 20, + 20 * varchar_ratio, + None, + None, + False, + ), + ] + assert ref_cursor.description == expected_value + assert await ref_cursor.fetchall() == [(1, "String 1"), (2, "String 2")] + + +async def test_5802(async_cursor, test_env): + "5802 - test that binding the cursor itself is not supported" + sql = """ + begin + 
open :pcursor for + select 1 from dual; + end;""" + with test_env.assert_raises_full_code("DPY-3009"): + await async_cursor.execute(sql, pcursor=async_cursor) -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_5800(self): - "5800 - test binding in a cursor" - cursor = self.conn.cursor() - self.assertIsNone(cursor.description) - await self.cursor.execute( - """ +async def test_5803(async_conn, async_cursor): + "5803 - test returning a ref cursor after closing it" + out_cursor = async_conn.cursor() + sql = """ begin - open :cursor for select 'X' StringValue from dual; - end; - """, - cursor=cursor, - ) - varchar_ratio, _ = await test_env.get_charset_ratios_async() - expected_value = [ - ( - "STRINGVALUE", - oracledb.DB_TYPE_CHAR, - 1, - varchar_ratio, - None, - None, - True, - ) - ] - self.assertEqual(cursor.description, expected_value) - self.assertEqual(await cursor.fetchall(), [("X",)]) - - async def test_5801(self): - "5801 - test binding in a cursor from a package" - cursor = self.conn.cursor() - self.assertIsNone(cursor.description) - await self.cursor.callproc( - "pkg_TestRefCursors.TestOutCursor", (2, cursor) + open :pcursor for + select IntCol + from TestNumbers + order by IntCol; + end;""" + await async_cursor.execute(sql, pcursor=out_cursor) + rows = await out_cursor.fetchall() + out_cursor.close() + out_cursor = async_conn.cursor() + await async_cursor.execute(sql, pcursor=out_cursor) + rows2 = await out_cursor.fetchall() + assert rows == rows2 + + +async def test_5804(async_cursor): + "5804 - test fetching a cursor" + await async_cursor.execute( + """ + select IntCol, cursor(select IntCol + 1 from dual) CursorValue + from TestNumbers + order by IntCol + """ + ) + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "CURSORVALUE", + oracledb.DB_TYPE_CURSOR, + None, + None, + None, + None, + True, + ), + ] + assert async_cursor.description == expected_value + for i in range(1, 11): + number, cursor = await async_cursor.fetchone() + assert number == i + assert await cursor.fetchall() == [(i + 1,)] + + +async def test_5805(async_conn, async_cursor): + "5805 - test that ref cursor binds cannot use optimised path" + ref_cursor = async_conn.cursor() + sql = """ + begin + open :rcursor for + select IntCol, StringCol + from TestStrings where IntCol + between :start_value and :end_value; + end;""" + expected_value = [(2, "String 2"), (3, "String 3"), (4, "String 4")] + await async_cursor.execute( + sql, rcursor=ref_cursor, start_value=2, end_value=4 + ) + assert await ref_cursor.fetchall() == expected_value + ref_cursor.close() + + expected_value = [(5, "String 5"), (6, "String 6")] + ref_cursor = async_conn.cursor() + await async_cursor.execute( + sql, rcursor=ref_cursor, start_value=5, end_value=6 + ) + assert await ref_cursor.fetchall() == expected_value + + +async def test_5806(async_conn, round_trip_checker_async): + "5806 - test round trips using a REF cursor" + + # simple DDL only requires a single round trip + with async_conn.cursor() as cursor: + await cursor.execute("truncate table TestTempTable") + assert await round_trip_checker_async.get_value_async() == 1 + + # array execution only requires a single round trip + num_rows = 590 + with async_conn.cursor() as cursor: + sql = "insert into TestTempTable (IntCol) values (:1)" + data = [(n + 1,) for n in range(num_rows)] + await cursor.executemany(sql, data) + assert await round_trip_checker_async.get_value_async() == 1 + + # create REF cursor and 
execute stored procedure + # (array size set before procedure is called) + with async_conn.cursor() as cursor: + refcursor = async_conn.cursor() + refcursor.arraysize = 150 + await cursor.callproc("myrefcursorproc", [refcursor]) + await refcursor.fetchall() + assert await round_trip_checker_async.get_value_async() == 5 + + # create REF cursor and execute stored procedure + # (array size set after procedure is called) + with async_conn.cursor() as cursor: + refcursor = async_conn.cursor() + await cursor.callproc("myrefcursorproc", [refcursor]) + refcursor.arraysize = 145 + await refcursor.fetchall() + assert await round_trip_checker_async.get_value_async() == 6 + + +async def test_5807(async_conn): + "5807 - test executing different SQL after getting a REF cursor" + with async_conn.cursor() as cursor: + refcursor = async_conn.cursor() + await cursor.callproc("myrefcursorproc", [refcursor]) + var = cursor.var(int) + await refcursor.execute("begin :1 := 15; end;", [var]) + assert var.getvalue() == 15 + + +async def test_5808(async_conn): + "5808 - test calling a function that returns a REF cursor" + with async_conn.cursor() as cursor: + ref_cursor = await cursor.callfunc( + "pkg_TestRefCursors.TestReturnCursor", + oracledb.DB_TYPE_CURSOR, + [2], ) - varchar_ratio, _ = await test_env.get_charset_ratios_async() - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "STRINGCOL", - oracledb.DB_TYPE_VARCHAR, - 20, - 20 * varchar_ratio, - None, - None, - False, - ), + assert await ref_cursor.fetchall() == [ + (1, "String 1"), + (2, "String 2"), ] - self.assertEqual(cursor.description, expected_value) - self.assertEqual( - await cursor.fetchall(), [(1, "String 1"), (2, "String 2")] - ) - async def test_5802(self): - "5802 - test that binding the cursor itself is not supported" - cursor = self.conn.cursor() - sql = """ - begin - open :pcursor for - select 1 from dual; - end;""" - with self.assertRaisesFullCode("DPY-3009"): - await cursor.execute(sql, pcursor=cursor) - - async def test_5803(self): - "5803 - test returning a ref cursor after closing it" - out_cursor = self.conn.cursor() - sql = """ - begin - open :pcursor for - select IntCol - from TestNumbers - order by IntCol; - end;""" - await self.cursor.execute(sql, pcursor=out_cursor) - rows = await out_cursor.fetchall() - out_cursor.close() - out_cursor = self.conn.cursor() - await self.cursor.execute(sql, pcursor=out_cursor) - rows2 = await out_cursor.fetchall() - self.assertEqual(rows, rows2) - - async def test_5804(self): - "5804 - test fetching a cursor" - await self.cursor.execute( - """ - select IntCol, cursor(select IntCol + 1 from dual) CursorValue - from TestNumbers - order by IntCol - """ - ) - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "CURSORVALUE", - oracledb.DB_TYPE_CURSOR, - None, - None, - None, - None, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - for i in range(1, 11): - number, cursor = await self.cursor.fetchone() - self.assertEqual(number, i) - self.assertEqual(await cursor.fetchall(), [(i + 1,)]) - - async def test_5805(self): - "5805 - test that ref cursor binds cannot use optimised path" - ref_cursor = self.conn.cursor() - sql = """ - begin - open :rcursor for - select IntCol, StringCol - from TestStrings where IntCol - between :start_value and :end_value; - end;""" - expected_value = [(2, "String 2"), (3, "String 3"), (4, "String 4")] - await self.cursor.execute( - sql, rcursor=ref_cursor, start_value=2, 
end_value=4 - ) - self.assertEqual(await ref_cursor.fetchall(), expected_value) - ref_cursor.close() - expected_value = [(5, "String 5"), (6, "String 6")] - ref_cursor = self.conn.cursor() - await self.cursor.execute( - sql, rcursor=ref_cursor, start_value=5, end_value=6 - ) - self.assertEqual(await ref_cursor.fetchall(), expected_value) - - async def test_5806(self): - "5806 - test round trips using a REF cursor" - await self.setup_round_trip_checker() - - # simple DDL only requires a single round trip - with self.conn.cursor() as cursor: - await cursor.execute("truncate table TestTempTable") - await self.assertRoundTrips(1) - - # array execution only requires a single round trip - num_rows = 590 - with self.conn.cursor() as cursor: - sql = "insert into TestTempTable (IntCol) values (:1)" - data = [(n + 1,) for n in range(num_rows)] - await cursor.executemany(sql, data) - await self.assertRoundTrips(1) - - # create REF cursor and execute stored procedure - # (array size set before procedure is called) - with self.conn.cursor() as cursor: - refcursor = self.conn.cursor() - refcursor.arraysize = 150 - await cursor.callproc("myrefcursorproc", [refcursor]) - await refcursor.fetchall() - await self.assertRoundTrips(5) - - # create REF cursor and execute stored procedure - # (array size set after procedure is called) - with self.conn.cursor() as cursor: - refcursor = self.conn.cursor() - await cursor.callproc("myrefcursorproc", [refcursor]) - refcursor.arraysize = 145 - await refcursor.fetchall() - await self.assertRoundTrips(6) - - async def test_5807(self): - "5807 - test executing different SQL after getting a REF cursor" - with self.conn.cursor() as cursor: - refcursor = self.conn.cursor() - await cursor.callproc("myrefcursorproc", [refcursor]) - var = cursor.var(int) - await refcursor.execute("begin :1 := 15; end;", [var]) - self.assertEqual(var.getvalue(), 15) - - async def test_5808(self): - "5808 - test calling a function that returns a REF cursor" - with self.conn.cursor() as cursor: - ref_cursor = await cursor.callfunc( - "pkg_TestRefCursors.TestReturnCursor", - oracledb.DB_TYPE_CURSOR, - [2], - ) - self.assertEqual( - await ref_cursor.fetchall(), [(1, "String 1"), (2, "String 2")] - ) +async def test_5809(async_conn, async_cursor): + "5809 - test using an output type handler with a REF cursor" - async def test_5809(self): - "5809 - test using an output type handler with a REF cursor" + def type_handler(cursor, metadata): + return cursor.var(str, arraysize=cursor.arraysize) - def type_handler(cursor, metadata): - return cursor.var(str, arraysize=cursor.arraysize) - - self.conn.outputtypehandler = type_handler - var = self.cursor.var(oracledb.DB_TYPE_CURSOR) - string_val = "Test String - 5809" - with self.conn.cursor() as cursor: - await cursor.callproc( - "pkg_TestRefCursors.TestLobCursor", [string_val, var] - ) - ref_cursor = var.getvalue() - self.assertEqual(await ref_cursor.fetchall(), [(string_val,)]) - - async def test_5810(self): - "5810 - bind a REF cursor but never open it" - ref_cursor_var = self.cursor.var(oracledb.DB_TYPE_CURSOR) - await self.cursor.execute( - """ - begin - if false then - open :cursor for - select user - from dual; - end if; - end; - """, - cursor=ref_cursor_var, + async_conn.outputtypehandler = type_handler + var = async_cursor.var(oracledb.DB_TYPE_CURSOR) + string_val = "Test String - 5809" + with async_conn.cursor() as cursor: + await cursor.callproc( + "pkg_TestRefCursors.TestLobCursor", [string_val, var] ) - ref_cursor = ref_cursor_var.getvalue() - if 
ref_cursor is not None: - with self.assertRaisesFullCode("DPY-4025"): - await ref_cursor.fetchall() - - async def test_5811(self): - "5811 - test fetching a cursor with a custom class" - - class Counter: - num_cursors_created = 0 - - @classmethod - def cursor_created(cls): - cls.num_cursors_created += 1 - - class MyConnection(oracledb.AsyncConnection): - def cursor(self): - Counter.cursor_created() - return super().cursor() - - conn = await test_env.get_connection_async(conn_class=MyConnection) - cursor = conn.cursor() - await cursor.execute( - """ + ref_cursor = var.getvalue() + assert await ref_cursor.fetchall() == [(string_val,)] + + +async def test_5810(async_cursor, test_env): + "5810 - bind a REF cursor but never open it" + ref_cursor_var = async_cursor.var(oracledb.DB_TYPE_CURSOR) + await async_cursor.execute( + """ + begin + if false then + open :cursor for + select user + from dual; + end if; + end; + """, + cursor=ref_cursor_var, + ) + ref_cursor = ref_cursor_var.getvalue() + if ref_cursor is not None: + with test_env.assert_raises_full_code("DPY-4025"): + await ref_cursor.fetchall() + + +async def test_5811(test_env): + "5811 - test fetching a cursor with a custom class" + + class Counter: + num_cursors_created = 0 + + @classmethod + def cursor_created(cls): + cls.num_cursors_created += 1 + + class MyConnection(oracledb.AsyncConnection): + def cursor(self): + Counter.cursor_created() + return super().cursor() + + conn = await test_env.get_connection_async(conn_class=MyConnection) + cursor = conn.cursor() + await cursor.execute( + """ + select + cursor(select 1 from dual), + cursor(select 2 from dual) + from dual + """ + ) + await cursor.fetchall() + assert Counter.num_cursors_created == 3 + + +async def test_5812(async_cursor): + "5812 - test that nested cursors are fetched correctly" + sql = """ + select + 'Level 1 String', + cursor( select - cursor(select 1 from dual), - cursor(select 2 from dual) - from dual - """ - ) - await cursor.fetchall() - self.assertEqual(Counter.num_cursors_created, 3) - - async def test_5812(self): - "5812 - test that nested cursors are fetched correctly" - sql = """ - select - 'Level 1 String', + 'Level 2 String', cursor( select - 'Level 2 String', + 'Level 3 String', cursor( - select - 'Level 3 String', - cursor( - select 1, 'Level 4 String A' from dual - union all - select 2, 'Level 4 String B' from dual - union all - select 3, 'Level 4 String C' from dual - ) as nc3 - from dual - ) as nc2 + select 1, 'Level 4 String A' from dual + union all + select 2, 'Level 4 String B' from dual + union all + select 3, 'Level 4 String C' from dual + ) as nc3 from dual - ) as nc1 - from dual""" - await self.cursor.execute(sql) - - async def transform_row(r): - return tuple([await transform_fn(v) for v in r]) - - async def transform_fn(v): - if isinstance(v, oracledb.AsyncCursor): - return [await transform_row(r) async for r in v] - return v - - rows = [await transform_row(r) async for r in self.cursor] - expected_value = [ - ( - "Level 1 String", - [ - ( - "Level 2 String", - [ - ( - "Level 3 String", - [ - (1, "Level 4 String A"), - (2, "Level 4 String B"), - (3, "Level 4 String C"), - ], - ), - ], - ), - ], - ) - ] - self.assertEqual(rows, expected_value) - - async def test_5813(self): - "5813 - test fetching nested cursors with more columns than parent" - sql = """ - select - 'Top Level String', - cursor( - select - 'Nested String 1', - 'Nested String 2', - 'Nested String 3' - from dual - ) - from dual""" - await self.cursor.execute(sql) - - async def 
transform_row(r): - return tuple([await transform_fn(v) for v in r]) - - async def transform_fn(v): - if isinstance(v, oracledb.AsyncCursor): - return [await transform_row(r) async for r in v] - return v - - rows = [await transform_row(r) async for r in self.cursor] - expected_value = [ - ( - "Top Level String", - [("Nested String 1", "Nested String 2", "Nested String 3")], - ) - ] - self.assertEqual(rows, expected_value) - - async def test_5814(self): - "5814 - test reusing a closed ref cursor for executing different sql" - sql = "select 58141, 'String 58141' from dual" - ref_cursor = self.conn.cursor() - ref_cursor.prefetchrows = 0 - await ref_cursor.execute(sql) - plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" - await self.cursor.execute(plsql, rcursor=ref_cursor) - sql = "select 58142, 'String 58142' from dual" - await ref_cursor.execute(sql) - self.assertEqual( - await ref_cursor.fetchall(), - [ - (58142, "String 58142"), - ], - ) - - async def test_5815(self): - "5815 - test reusing a closed ref cursor for executing same sql" - sql = "select 5815, 'String 5815' from dual" - ref_cursor = self.conn.cursor() - ref_cursor.prefetchrows = 0 - await ref_cursor.execute(sql) - plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" - await self.cursor.execute(plsql, rcursor=ref_cursor) - await ref_cursor.execute(sql) - self.assertEqual( - await ref_cursor.fetchall(), + ) as nc2 + from dual + ) as nc1 + from dual""" + await async_cursor.execute(sql) + + async def transform_row(r): + return tuple([await transform_fn(v) for v in r]) + + async def transform_fn(v): + if isinstance(v, oracledb.AsyncCursor): + return [await transform_row(r) async for r in v] + return v + + rows = [await transform_row(r) async for r in async_cursor] + expected_value = [ + ( + "Level 1 String", [ - (5815, "String 5815"), + ( + "Level 2 String", + [ + ( + "Level 3 String", + [ + (1, "Level 4 String A"), + (2, "Level 4 String B"), + (3, "Level 4 String C"), + ], + ), + ], + ), ], ) + ] + assert rows == expected_value -if __name__ == "__main__": - test_env.run_test_cases() +async def test_5813(async_cursor): + "5813 - test fetching nested cursors with more columns than parent" + sql = """ + select + 'Top Level String', + cursor( + select + 'Nested String 1', + 'Nested String 2', + 'Nested String 3' + from dual + ) + from dual""" + await async_cursor.execute(sql) + + async def transform_row(r): + return tuple([await transform_fn(v) for v in r]) + + async def transform_fn(v): + if isinstance(v, oracledb.AsyncCursor): + return [await transform_row(r) async for r in v] + return v + + rows = [await transform_row(r) async for r in async_cursor] + expected_value = [ + ( + "Top Level String", + [("Nested String 1", "Nested String 2", "Nested String 3")], + ) + ] + assert rows == expected_value + + +async def test_5814(async_conn, async_cursor): + "5814 - test reusing a closed ref cursor for executing different sql" + sql = "select 58141, 'String 58141' from dual" + ref_cursor = async_conn.cursor() + ref_cursor.prefetchrows = 0 + await ref_cursor.execute(sql) + plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" + await async_cursor.execute(plsql, rcursor=ref_cursor) + sql = "select 58142, 'String 58142' from dual" + await ref_cursor.execute(sql) + assert await ref_cursor.fetchall() == [ + (58142, "String 58142"), + ] + + +async def test_5815(async_conn, async_cursor): + "5815 - test reusing a closed ref cursor for executing same sql" + sql = "select 5815, 'String 5815' from dual" + 
ref_cursor = async_conn.cursor() + ref_cursor.prefetchrows = 0 + await ref_cursor.execute(sql) + plsql = "begin pkg_TestRefCursors.TestCloseCursor(:rcursor); end;" + await async_cursor.execute(plsql, rcursor=ref_cursor) + await ref_cursor.execute(sql) + assert await ref_cursor.fetchall() == [ + (5815, "String 5815"), + ] diff --git a/tests/test_5900_dml_returning_async.py b/tests/test_5900_dml_returning_async.py index 96e171cd..1bb4ea2b 100644 --- a/tests/test_5900_dml_returning_async.py +++ b/tests/test_5900_dml_returning_async.py @@ -29,506 +29,525 @@ import datetime import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_5900(self): - "5900 - test insert (single row) with DML returning" - await self.cursor.execute("truncate table TestTempTable") - int_val = 5 - str_val = "A test string" - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_val=int_val, - str_val=str_val, - int_var=int_var, - str_var=str_var, - ) - self.assertEqual(int_var.values, [[int_val]]) - self.assertEqual(str_var.values, [[str_val]]) - - async def test_5901(self): - "5901 - test insert (multiple rows) with DML returning" - await self.cursor.execute("truncate table TestTempTable") - int_values = [5, 8, 17, 24, 6] - str_values = ["Test 5", "Test 8", "Test 17", "Test 24", "Test 6"] - int_var = self.cursor.var(oracledb.NUMBER, arraysize=len(int_values)) - str_var = self.cursor.var(str, arraysize=len(int_values)) - self.cursor.setinputsizes(None, None, int_var, str_var) - data = list(zip(int_values, str_values)) - await self.cursor.executemany( +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_5900(async_cursor): + "5900 - test insert (single row) with DML returning" + await async_cursor.execute("truncate table TestTempTable") + int_val = 5 + str_val = "A test string" + int_var = async_cursor.var(oracledb.NUMBER) + str_var = async_cursor.var(str) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_val=int_val, + str_val=str_val, + int_var=int_var, + str_var=str_var, + ) + assert int_var.values == [[int_val]] + assert str_var.values == [[str_val]] + + +async def test_5901(async_cursor): + "5901 - test insert (multiple rows) with DML returning" + await async_cursor.execute("truncate table TestTempTable") + int_values = [5, 8, 17, 24, 6] + str_values = ["Test 5", "Test 8", "Test 17", "Test 24", "Test 6"] + int_var = async_cursor.var(oracledb.NUMBER, arraysize=len(int_values)) + str_var = async_cursor.var(str, arraysize=len(int_values)) + async_cursor.setinputsizes(None, None, int_var, str_var) + data = list(zip(int_values, str_values)) + await async_cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol, StringCol1 into :int_var, :str_var + """, + data, + ) + assert int_var.values == [[v] for v in int_values] + assert str_var.values == [[v] for v in str_values] + + +async def test_5902(async_cursor, test_env): + "5902 - test insert with DML returning into too small a variable" + await async_cursor.execute("truncate table TestTempTable") + int_val = 6 + str_val = 
"A different test string" + int_var = async_cursor.var(oracledb.NUMBER) + str_var = async_cursor.var(str, 2) + parameters = dict( + int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var + ) + with test_env.assert_raises_full_code("DPY-4002", "DPI-1037"): + await async_cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:int_val, :str_val) returning IntCol, StringCol1 into :int_var, :str_var """, - data, - ) - self.assertEqual(int_var.values, [[v] for v in int_values]) - self.assertEqual(str_var.values, [[v] for v in str_values]) - - async def test_5902(self): - "5902 - test insert with DML returning into too small a variable" - await self.cursor.execute("truncate table TestTempTable") - int_val = 6 - str_val = "A different test string" - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str, 2) - parameters = dict( - int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var - ) - with self.assertRaisesFullCode("DPY-4002", "DPI-1037"): - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :int_var, :str_var - """, - parameters, - ) - - async def test_5903(self): - "5903 - test update single row with DML returning" - int_val = 7 - str_val = "The updated value of the string" - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (int_val, "The initial value of the string"), - ) - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - await self.cursor.execute( - """ - update TestTempTable set - StringCol1 = :str_val - where IntCol = :int_val - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_val=int_val, - str_val=str_val, - int_var=int_var, - str_var=str_var, - ) - self.assertEqual(int_var.values, [[int_val]]) - self.assertEqual(str_var.values, [[str_val]]) - - async def test_5904(self): - "5904 - test update no rows with DML returning" - int_val = 8 - str_val = "The updated value of the string" - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (int_val, "The initial value of the string"), - ) - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - await self.cursor.execute( - """ - update TestTempTable set - StringCol1 = :str_val - where IntCol = :int_val - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_val=int_val + 1, - str_val=str_val, - int_var=int_var, - str_var=str_var, - ) - self.assertEqual(int_var.values, [[]]) - self.assertEqual(str_var.values, [[]]) - self.assertEqual(int_var.getvalue(), []) - self.assertEqual(str_var.getvalue(), []) - - async def test_5905(self): - "5905 - test update multiple rows with DML returning" - await self.cursor.execute("truncate table TestTempTable") - for i in (8, 9, 10): - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) values (:1, :2) - """, - (i, f"The initial value of string {i}"), - ) - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - await self.cursor.execute( - """ - update TestTempTable set - IntCol = IntCol + 15, - StringCol1 = 'The final value of string ' || to_char(IntCol) - returning IntCol, StringCol1 into :int_var, :str_var - """, - int_var=int_var, - str_var=str_var, + parameters, ) - 
self.assertEqual(self.cursor.rowcount, 3) - self.assertEqual(int_var.values, [[23, 24, 25]]) - expected_values = [ - [ - "The final value of string 8", - "The final value of string 9", - "The final value of string 10", - ] - ] - self.assertEqual(str_var.values, expected_values) - - async def test_5906(self): - "5906 - test update multiple rows with DML returning (executemany)" - data = [(i, f"The initial value of string {i}") for i in range(1, 11)] - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - data, - ) - int_var = self.cursor.var(oracledb.NUMBER, arraysize=3) - str_var = self.cursor.var(str, arraysize=3) - self.cursor.setinputsizes(None, int_var, str_var) - await self.cursor.executemany( + + +async def test_5903(async_cursor): + "5903 - test update single row with DML returning" + int_val = 7 + str_val = "The updated value of the string" + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (int_val, "The initial value of the string"), + ) + int_var = async_cursor.var(oracledb.NUMBER) + str_var = async_cursor.var(str) + await async_cursor.execute( + """ + update TestTempTable set + StringCol1 = :str_val + where IntCol = :int_val + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_val=int_val, + str_val=str_val, + int_var=int_var, + str_var=str_var, + ) + assert int_var.values == [[int_val]] + assert str_var.values == [[str_val]] + + +async def test_5904(async_cursor): + "5904 - test update no rows with DML returning" + int_val = 8 + str_val = "The updated value of the string" + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (int_val, "The initial value of the string"), + ) + int_var = async_cursor.var(oracledb.NUMBER) + str_var = async_cursor.var(str) + await async_cursor.execute( + """ + update TestTempTable set + StringCol1 = :str_val + where IntCol = :int_val + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_val=int_val + 1, + str_val=str_val, + int_var=int_var, + str_var=str_var, + ) + assert int_var.values == [[]] + assert str_var.values == [[]] + assert int_var.getvalue() == [] + assert str_var.getvalue() == [] + + +async def test_5905(async_cursor): + "5905 - test update multiple rows with DML returning" + await async_cursor.execute("truncate table TestTempTable") + for i in (8, 9, 10): + await async_cursor.execute( """ - update TestTempTable set - IntCol = IntCol + 25, - StringCol1 = 'Updated value of string ' || to_char(IntCol) - where IntCol < :inVal - returning IntCol, StringCol1 into :int_var, :str_var + insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, - [[3], [8], [11]], + (i, f"The initial value of string {i}"), ) - expected_values = [[26, 27], [28, 29, 30, 31, 32], [33, 34, 35]] - self.assertEqual(int_var.values, expected_values) - expected_values = [ - ["Updated value of string 1", "Updated value of string 2"], - [ - "Updated value of string 3", - "Updated value of string 4", - "Updated value of string 5", - "Updated value of string 6", - "Updated value of string 7", - ], - [ - "Updated value of string 8", - "Updated value of string 9", - "Updated value of string 10", - ], + int_var = async_cursor.var(oracledb.NUMBER) + str_var = async_cursor.var(str) + await 
async_cursor.execute( + """ + update TestTempTable set + IntCol = IntCol + 15, + StringCol1 = 'The final value of string ' || to_char(IntCol) + returning IntCol, StringCol1 into :int_var, :str_var + """, + int_var=int_var, + str_var=str_var, + ) + assert async_cursor.rowcount == 3 + assert int_var.values == [[23, 24, 25]] + expected_values = [ + [ + "The final value of string 8", + "The final value of string 9", + "The final value of string 10", ] - self.assertEqual(str_var.values, expected_values) - - async def test_5907(self): - "5907 - test inserting an object with DML returning" - type_obj = await self.conn.gettype("UDT_OBJECT") - string_value = "The string that will be verified" - obj = type_obj.newobject() - obj.STRINGVALUE = string_value - out_var = self.cursor.var(type_obj) - await self.cursor.execute( - """ - insert into TestObjects (IntCol, ObjectCol) - values (4, :obj)returning ObjectCol into :outObj - """, - obj=obj, - outObj=out_var, - ) - (result,) = out_var.getvalue() - self.assertEqual(result.STRINGVALUE, string_value) - await self.conn.rollback() - - async def test_5908(self): - "5908 - test inserting a row and returning a rowid" - await self.cursor.execute("truncate table TestTempTable") - var = self.cursor.var(oracledb.ROWID) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (278, 'String 278') - returning rowid into :1 - """, - [var], - ) - (rowid,) = var.getvalue() - await self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - where rowid = :1 - """, - [rowid], - ) - self.assertEqual(await self.cursor.fetchall(), [(278, "String 278")]) - - async def test_5909(self): - "5909 - test inserting with a REF cursor and returning a rowid" - await self.cursor.execute("truncate table TestTempTable") - var = self.cursor.var(oracledb.ROWID) - in_cursor = self.conn.cursor() - await in_cursor.execute( - """ - select StringCol - from TestStrings - where IntCol >= 5 - order by IntCol - """ - ) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (187, pkg_TestRefCursors.TestInCursor(:1)) - returning rowid into :2 - """, - (in_cursor, var), - ) - (rowid,) = var.getvalue() - await self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - where rowid = :1 - """, - [rowid], - ) - self.assertEqual( - await self.cursor.fetchall(), [(187, "String 7 (Modified)")] - ) + ] + assert str_var.values == expected_values - async def test_5910(self): - "5910 - test delete returning decreasing number of rows" - data = [(i, f"Test String {i}") for i in range(1, 11)] - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data, - ) - results = [] - int_var = self.cursor.var(int) - self.cursor.setinputsizes(None, int_var) - for int_val in (5, 8, 10): - await self.cursor.execute( - """ - delete from TestTempTable - where IntCol < :1 - returning IntCol into :2 - """, - [int_val], - ) - results.append(int_var.getvalue()) - self.assertEqual(results, [[1, 2, 3, 4], [5, 6, 7], [8, 9]]) - - async def test_5911(self): - "5911 - test delete returning no rows after returning many rows" - data = [(i, f"Test String {i}") for i in range(1, 11)] - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data, - ) - int_var = self.cursor.var(int) - await 
self.cursor.execute( + +async def test_5906(async_cursor): + "5906 - test update multiple rows with DML returning (executemany)" + data = [(i, f"The initial value of string {i}") for i in range(1, 11)] + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + data, + ) + int_var = async_cursor.var(oracledb.NUMBER, arraysize=3) + str_var = async_cursor.var(str, arraysize=3) + async_cursor.setinputsizes(None, int_var, str_var) + await async_cursor.executemany( + """ + update TestTempTable set + IntCol = IntCol + 25, + StringCol1 = 'Updated value of string ' || to_char(IntCol) + where IntCol < :inVal + returning IntCol, StringCol1 into :int_var, :str_var + """, + [[3], [8], [11]], + ) + expected_values = [[26, 27], [28, 29, 30, 31, 32], [33, 34, 35]] + assert int_var.values == expected_values + expected_values = [ + ["Updated value of string 1", "Updated value of string 2"], + [ + "Updated value of string 3", + "Updated value of string 4", + "Updated value of string 5", + "Updated value of string 6", + "Updated value of string 7", + ], + [ + "Updated value of string 8", + "Updated value of string 9", + "Updated value of string 10", + ], + ] + assert str_var.values == expected_values + + +async def test_5907(async_conn, async_cursor): + "5907 - test inserting an object with DML returning" + type_obj = await async_conn.gettype("UDT_OBJECT") + string_value = "The string that will be verified" + obj = type_obj.newobject() + obj.STRINGVALUE = string_value + out_var = async_cursor.var(type_obj) + await async_cursor.execute( + """ + insert into TestObjects (IntCol, ObjectCol) + values (4, :obj)returning ObjectCol into :outObj + """, + obj=obj, + outObj=out_var, + ) + (result,) = out_var.getvalue() + assert result.STRINGVALUE == string_value + await async_conn.rollback() + + +async def test_5908(async_cursor): + "5908 - test inserting a row and returning a rowid" + await async_cursor.execute("truncate table TestTempTable") + var = async_cursor.var(oracledb.ROWID) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (278, 'String 278') + returning rowid into :1 + """, + [var], + ) + (rowid,) = var.getvalue() + await async_cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + where rowid = :1 + """, + [rowid], + ) + assert await async_cursor.fetchall() == [(278, "String 278")] + + +async def test_5909(async_conn, async_cursor): + "5909 - test inserting with a REF cursor and returning a rowid" + await async_cursor.execute("truncate table TestTempTable") + var = async_cursor.var(oracledb.ROWID) + in_cursor = async_conn.cursor() + await in_cursor.execute( + """ + select StringCol + from TestStrings + where IntCol >= 5 + order by IntCol + """ + ) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (187, pkg_TestRefCursors.TestInCursor(:1)) + returning rowid into :2 + """, + (in_cursor, var), + ) + (rowid,) = var.getvalue() + await async_cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + where rowid = :1 + """, + [rowid], + ) + assert await async_cursor.fetchall() == [(187, "String 7 (Modified)")] + + +async def test_5910(async_cursor): + "5910 - test delete returning decreasing number of rows" + data = [(i, f"Test String {i}") for i in range(1, 11)] + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.executemany( + """ + insert into TestTempTable 
(IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + results = [] + int_var = async_cursor.var(int) + async_cursor.setinputsizes(None, int_var) + for int_val in (5, 8, 10): + await async_cursor.execute( """ delete from TestTempTable where IntCol < :1 returning IntCol into :2 """, - [5, int_var], - ) - self.assertEqual(int_var.getvalue(), [1, 2, 3, 4]) - await self.cursor.execute(None, [4, int_var]) - self.assertEqual(int_var.getvalue(), []) - - async def test_5912(self): - "5912 - test DML returning when an error occurs" - await self.cursor.execute("truncate table TestTempTable") - int_val = 7 - str_val = "A" * 401 - int_var = self.cursor.var(oracledb.NUMBER) - str_var = self.cursor.var(str) - sql = """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :int_var, :str_var""" - parameters = dict( - int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var + [int_val], ) - with self.assertRaisesFullCode("ORA-12899"): - await self.cursor.execute(sql, parameters) - - async def test_5913(self): - "5913 - test DML returning with no input variables, multiple iters" - await self.cursor.execute("truncate table TestTempTable") - sql = """ - insert into TestTempTable (IntCol) - values ((select count(*) + 1 from TestTempTable)) - returning IntCol into :1""" - var = self.cursor.var(int) - await self.cursor.execute(sql, [var]) - self.assertEqual(var.getvalue(), [1]) - await self.cursor.execute(sql, [var]) - self.assertEqual(var.getvalue(), [2]) - - async def test_5914(self): - "5914 - test DML returning with a quoted bind name" - sql = """ - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol, StringCol1 into :"_val1" , :"VaL_2" """ - await self.cursor.parse(sql) - expected_bind_names = ["INT_VAL", "STR_VAL", "_val1", "VaL_2"] - self.assertEqual(self.cursor.bindnames(), expected_bind_names) - - async def test_5915(self): - "5915 - test DML returning with an invalid bind name" - sql = """ - insert into TestTempTable (IntCol) - values (:int_val) - returning IntCol, StringCol1 into :ROWID""" - with self.assertRaisesFullCode("ORA-01745"): - await self.cursor.parse(sql) - - async def test_5916(self): - "5916 - test DML returning with input bind variable data" - await self.cursor.execute("truncate table TestTempTable") - out_var = self.cursor.var(int) - await self.cursor.execute( - """ + results.append(int_var.getvalue()) + assert results == [[1, 2, 3, 4], [5, 6, 7], [8, 9]] + + +async def test_5911(async_cursor): + "5911 - test delete returning no rows after returning many rows" + data = [(i, f"Test String {i}") for i in range(1, 11)] + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data, + ) + int_var = async_cursor.var(int) + await async_cursor.execute( + """ + delete from TestTempTable + where IntCol < :1 + returning IntCol into :2 + """, + [5, int_var], + ) + assert int_var.getvalue() == [1, 2, 3, 4] + await async_cursor.execute(None, [4, int_var]) + assert int_var.getvalue() == [] + + +async def test_5912(async_cursor, test_env): + "5912 - test DML returning when an error occurs" + await async_cursor.execute("truncate table TestTempTable") + int_val = 7 + str_val = "A" * 401 + int_var = async_cursor.var(oracledb.NUMBER) + str_var = async_cursor.var(str) + sql = """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning 
IntCol, StringCol1 into :int_var, :str_var""" + parameters = dict( + int_val=int_val, str_val=str_val, int_var=int_var, str_var=str_var + ) + with test_env.assert_raises_full_code("ORA-12899"): + await async_cursor.execute(sql, parameters) + + +async def test_5913(async_cursor): + "5913 - test DML returning with no input variables, multiple iters" + await async_cursor.execute("truncate table TestTempTable") + sql = """ + insert into TestTempTable (IntCol) + values ((select count(*) + 1 from TestTempTable)) + returning IntCol into :1""" + var = async_cursor.var(int) + await async_cursor.execute(sql, [var]) + assert var.getvalue() == [1] + await async_cursor.execute(sql, [var]) + assert var.getvalue() == [2] + + +async def test_5914(async_cursor): + "5914 - test DML returning with a quoted bind name" + sql = """ + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol, StringCol1 into :"_val1" , :"VaL_2" """ + await async_cursor.parse(sql) + expected_bind_names = ["INT_VAL", "STR_VAL", "_val1", "VaL_2"] + assert async_cursor.bindnames() == expected_bind_names + + +async def test_5915(async_cursor, test_env): + "5915 - test DML returning with an invalid bind name" + sql = """ insert into TestTempTable (IntCol) values (:int_val) - returning IntCol + :add_val into :out_val - """, - int_val=5, - add_val=18, - out_val=out_var, - ) - await self.conn.commit() - self.assertEqual(out_var.getvalue(), [23]) - - async def test_5917(self): - "5917 - test DML returning with LOBs and an output converter" - await self.cursor.execute("truncate table TestCLOBs") - out_var = self.cursor.var( - oracledb.DB_TYPE_CLOB, outconverter=lambda value: value.read() - ) - lob_value = "A short CLOB - 1618" - await self.cursor.execute( - """ - insert into TestCLOBs (IntCol, ClobCol) - values (1, :in_val) - returning CLOBCol into :out_val - """, - in_val=lob_value, - out_val=out_var, - ) - await self.conn.commit() - self.assertEqual(out_var.getvalue(), [lob_value]) - - async def test_5918(self): - "5918 - test DML returning with CLOB converted to LONG" - await self.cursor.execute("truncate table TestCLOBs") - out_var = self.cursor.var(oracledb.DB_TYPE_LONG) - lob_value = "A short CLOB - 1619" - await self.cursor.execute( - """ - insert into TestCLOBs - (IntCol, ClobCol) - values (1, :in_val) - returning CLOBCol into :out_val - """, - in_val=lob_value, - out_val=out_var, - ) - await self.conn.commit() - self.assertEqual(out_var.getvalue(), [lob_value]) - - async def test_5919(self): - "5919 - test dml returning with an index organized table" - await self.cursor.execute("truncate table TestUniversalRowids") - rowid_var = self.cursor.var(oracledb.ROWID) - data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) - sql = """ - insert into TestUniversalRowids values (:1, :2, :3) - returning rowid into :4""" - await self.cursor.execute(sql, data) - (rowid_value,) = rowid_var.getvalue() - await self.cursor.execute( - """ - select * - from TestUniversalRowids - where rowid = :1 - """, - [rowid_value], - ) - (row,) = await self.cursor.fetchall() - self.assertEqual(row, data[:3]) - - async def test_5920(self): - "5920 - test plsql returning rowids with index organized table" - await self.cursor.execute("truncate table TestUniversalRowids") - rowid_var = self.cursor.var(oracledb.ROWID) - data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) - await self.cursor.execute( - """ - begin - insert into TestUniversalRowids values (:1, :2, :3) - returning rowid into :4; - end; - """, - data, 
- ) - rowid_value = rowid_var.values[0] - await self.cursor.execute( - """ - select * - from TestUniversalRowids - where rowid = :1 - """, - [rowid_value], - ) - (row,) = await self.cursor.fetchall() - self.assertEqual(row, data[:3]) - - async def test_5921(self): - "5921 - parse DML returning with no spaces" - await self.cursor.execute("truncate table TestTempTable") - sql = ( - "insert into TestTempTable (IntCol) values (:in_val)" - "returning(IntCol)into :out_val" - ) - out_val = self.cursor.var(int, arraysize=5) - await self.cursor.execute(sql, in_val=25, out_val=out_val) - self.assertEqual(out_val.getvalue(), [25]) - - async def test_5922(self): - "5922 - use bind variable in new statement after RETURNING statement" - await self.cursor.execute("truncate table TestTempTable") - sql = ( - "insert into TestTempTable (IntCol) values (:in_val)" - "returning IntCol + 15 into :out_val" - ) - out_val = self.cursor.var(int, arraysize=5) - await self.cursor.execute(sql, in_val=25, out_val=out_val) - self.assertEqual(out_val.getvalue(), [40]) - sql = "begin :out_val := :in_val + 35; end;" - await self.cursor.execute(sql, in_val=35, out_val=out_val) - self.assertEqual(out_val.getvalue(), 70) + returning IntCol, StringCol1 into :ROWID""" + with test_env.assert_raises_full_code("ORA-01745"): + await async_cursor.parse(sql) + + +async def test_5916(async_conn, async_cursor): + "5916 - test DML returning with input bind variable data" + await async_cursor.execute("truncate table TestTempTable") + out_var = async_cursor.var(int) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol) + values (:int_val) + returning IntCol + :add_val into :out_val + """, + int_val=5, + add_val=18, + out_val=out_var, + ) + await async_conn.commit() + assert out_var.getvalue() == [23] + + +async def test_5917(async_conn, async_cursor): + "5917 - test DML returning with LOBs and an output converter" + await async_cursor.execute("truncate table TestCLOBs") + out_var = async_cursor.var( + oracledb.DB_TYPE_CLOB, outconverter=lambda value: value.read() + ) + lob_value = "A short CLOB - 1618" + await async_cursor.execute( + """ + insert into TestCLOBs (IntCol, ClobCol) + values (1, :in_val) + returning CLOBCol into :out_val + """, + in_val=lob_value, + out_val=out_var, + ) + await async_conn.commit() + assert out_var.getvalue() == [lob_value] + + +async def test_5918(async_conn, async_cursor): + "5918 - test DML returning with CLOB converted to LONG" + await async_cursor.execute("truncate table TestCLOBs") + out_var = async_cursor.var(oracledb.DB_TYPE_LONG) + lob_value = "A short CLOB - 1619" + await async_cursor.execute( + """ + insert into TestCLOBs + (IntCol, ClobCol) + values (1, :in_val) + returning CLOBCol into :out_val + """, + in_val=lob_value, + out_val=out_var, + ) + await async_conn.commit() + assert out_var.getvalue() == [lob_value] + + +async def test_5919(async_cursor): + "5919 - test dml returning with an index organized table" + await async_cursor.execute("truncate table TestUniversalRowids") + rowid_var = async_cursor.var(oracledb.ROWID) + data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) + sql = """ + insert into TestUniversalRowids values (:1, :2, :3) + returning rowid into :4""" + await async_cursor.execute(sql, data) + (rowid_value,) = rowid_var.getvalue() + await async_cursor.execute( + """ + select * + from TestUniversalRowids + where rowid = :1 + """, + [rowid_value], + ) + (row,) = await async_cursor.fetchall() + assert row == data[:3] + + +async def test_5920(async_cursor): 
+ "5920 - test plsql returning rowids with index organized table" + await async_cursor.execute("truncate table TestUniversalRowids") + rowid_var = async_cursor.var(oracledb.ROWID) + data = (1, "ABC", datetime.datetime(2017, 4, 11), rowid_var) + await async_cursor.execute( + """ + begin + insert into TestUniversalRowids values (:1, :2, :3) + returning rowid into :4; + end; + """, + data, + ) + rowid_value = rowid_var.values[0] + await async_cursor.execute( + """ + select * + from TestUniversalRowids + where rowid = :1 + """, + [rowid_value], + ) + (row,) = await async_cursor.fetchall() + assert row == data[:3] + + +async def test_5921(async_cursor): + "5921 - parse DML returning with no spaces" + await async_cursor.execute("truncate table TestTempTable") + sql = ( + "insert into TestTempTable (IntCol) values (:in_val)" + "returning(IntCol)into :out_val" + ) + out_val = async_cursor.var(int, arraysize=5) + await async_cursor.execute(sql, in_val=25, out_val=out_val) + assert out_val.getvalue() == [25] -if __name__ == "__main__": - test_env.run_test_cases() +async def test_5922(async_cursor): + "5922 - use bind variable in new statement after RETURNING statement" + await async_cursor.execute("truncate table TestTempTable") + sql = ( + "insert into TestTempTable (IntCol) values (:in_val)" + "returning IntCol + 15 into :out_val" + ) + out_val = async_cursor.var(int, arraysize=5) + await async_cursor.execute(sql, in_val=25, out_val=out_val) + assert out_val.getvalue() == [40] + sql = "begin :out_val := :in_val + 35; end;" + await async_cursor.execute(sql, in_val=35, out_val=out_val) + assert out_val.getvalue() == 70 diff --git a/tests/test_6000_typehandler_async.py b/tests/test_6000_typehandler_async.py index b13ba872..366dc8fe 100644 --- a/tests/test_6000_typehandler_async.py +++ b/tests/test_6000_typehandler_async.py @@ -30,7 +30,12 @@ import json import oracledb -import test_env +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass class Building: @@ -60,236 +65,220 @@ def from_json(cls, value): return cls(**result) -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - def building_in_converter(self, value): - return value.to_json() - - def input_type_handler(self, cursor, value, num_elements): - if isinstance(value, Building): - return cursor.var( - oracledb.STRING, - arraysize=num_elements, - inconverter=self.building_in_converter, - ) +def building_in_converter(value): + return value.to_json() - def output_type_handler(self, cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_VARCHAR: - return cursor.var( - metadata.type_code, - arraysize=cursor.arraysize, - outconverter=Building.from_json, - ) - async def test_6000(self): - "6000 - binding unsupported python object without input type handler" - await self.cursor.execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - building = Building(1, "The First Building", 5) - with self.assertRaisesFullCode("DPY-3002"): - await self.cursor.execute(sql, [building.building_id, building]) - - async def test_6001(self): - "6001 - not callable input type handler" - await self.cursor.execute("truncate table TestTempTable") - building = Building(1, "The First Building", 5) - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - self.cursor.inputtypehandler = 5 - self.assertEqual(self.cursor.inputtypehandler, 5) - with self.assertRaises(TypeError): - await self.cursor.execute(sql, 
(building.building_id, building)) - - async def test_6002(self): - "6002 - binding unsupported python object with input type handler" - await self.cursor.execute("truncate table TestTempTable") - building = Building(1, "The First Building", 5) - self.cursor.inputtypehandler = self.input_type_handler - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [building.building_id, building], - ) - self.assertEqual( - self.cursor.bindvars[1].inconverter, self.building_in_converter - ) - await self.conn.commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual( - await self.cursor.fetchall(), - [(building.building_id, building.to_json())], +def input_type_handler(cursor, value, num_elements): + if isinstance(value, Building): + return cursor.var( + oracledb.STRING, + arraysize=num_elements, + inconverter=building_in_converter, ) - async def test_6003(self): - "6003 - input type handler and output type handler on cursor level" - await self.cursor.execute("truncate table TestTempTable") - building_one = Building(1, "The First Building", 5) - building_two = Building(2, "The Second Building", 87) - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - cursor_one = self.conn.cursor() - cursor_two = self.conn.cursor() - cursor_one.inputtypehandler = self.input_type_handler - await cursor_one.execute(sql, [building_one.building_id, building_one]) - await self.conn.commit() - - await cursor_one.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual( - await cursor_one.fetchall(), - [(building_one.building_id, building_one.to_json())], - ) - with self.assertRaisesFullCode("DPY-3002"): - await cursor_two.execute( - sql, (building_two.building_id, building_two) - ) - cursor_two.outputtypehandler = self.output_type_handler - await cursor_two.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual( - await cursor_two.fetchall(), - [(building_one.building_id, building_one)], +def output_type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_VARCHAR: + return cursor.var( + metadata.type_code, + arraysize=cursor.arraysize, + outconverter=Building.from_json, ) - async def test_6004(self): - "6004 - input type handler and output type handler on connection level" - await self.cursor.execute("truncate table TestTempTable") - building_one = Building(1, "The First Building", 5) - building_two = Building(2, "The Second Building", 87) - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - conn = await test_env.get_connection_async() - conn.inputtypehandler = self.input_type_handler - self.assertEqual(conn.inputtypehandler, self.input_type_handler) - - cursor_one = conn.cursor() - cursor_two = conn.cursor() - await cursor_one.execute(sql, [building_one.building_id, building_one]) - await cursor_two.execute(sql, [building_two.building_id, building_two]) - await conn.commit() - - expected_data = [ - (building_one.building_id, building_one), - (building_two.building_id, building_two), - ] - conn.outputtypehandler = self.output_type_handler - self.assertEqual(conn.outputtypehandler, self.output_type_handler) - await cursor_one.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual( - cursor_one.fetchvars[1].outconverter, Building.from_json - ) - self.assertEqual(await cursor_one.fetchall(), expected_data) - await cursor_two.execute( - "select IntCol, StringCol1 from TestTempTable" 
+async def test_6000(async_cursor, test_env): + "6000 - binding unsupported python object without input type handler" + await async_cursor.execute("truncate table TestTempTable") + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + building = Building(1, "The First Building", 5) + with test_env.assert_raises_full_code("DPY-3002"): + await async_cursor.execute(sql, [building.building_id, building]) + + +async def test_6001(async_conn, async_cursor): + "6001 - not callable input type handler" + await async_cursor.execute("truncate table TestTempTable") + building = Building(1, "The First Building", 5) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + async_cursor.inputtypehandler = 5 + assert async_cursor.inputtypehandler == 5 + with pytest.raises(TypeError): + await async_cursor.execute(sql, (building.building_id, building)) + + +async def test_6002(async_conn, async_cursor): + "6002 - binding unsupported python object with input type handler" + await async_cursor.execute("truncate table TestTempTable") + building = Building(1, "The First Building", 5) + async_cursor.inputtypehandler = input_type_handler + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [building.building_id, building], + ) + assert async_cursor.bindvars[1].inconverter == building_in_converter + await async_conn.commit() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await async_cursor.fetchall() == [ + (building.building_id, building.to_json()) + ] + + +async def test_6003(async_conn, async_cursor, test_env): + "6003 - input type handler and output type handler on cursor level" + await async_cursor.execute("truncate table TestTempTable") + building_one = Building(1, "The First Building", 5) + building_two = Building(2, "The Second Building", 87) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + cursor_one = async_conn.cursor() + cursor_two = async_conn.cursor() + cursor_one.inputtypehandler = input_type_handler + await cursor_one.execute(sql, [building_one.building_id, building_one]) + await async_conn.commit() + + await cursor_one.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor_one.fetchall() == [ + (building_one.building_id, building_one.to_json()) + ] + with test_env.assert_raises_full_code("DPY-3002"): + await cursor_two.execute(sql, (building_two.building_id, building_two)) + + cursor_two.outputtypehandler = output_type_handler + await cursor_two.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor_two.fetchall() == [ + (building_one.building_id, building_one) + ] + + +async def test_6004(async_conn, async_cursor, test_env): + "6004 - input type handler and output type handler on connection level" + await async_cursor.execute("truncate table TestTempTable") + building_one = Building(1, "The First Building", 5) + building_two = Building(2, "The Second Building", 87) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + conn = await test_env.get_connection_async() + conn.inputtypehandler = input_type_handler + assert conn.inputtypehandler == input_type_handler + + cursor_one = conn.cursor() + cursor_two = conn.cursor() + await cursor_one.execute(sql, [building_one.building_id, building_one]) + await cursor_two.execute(sql, [building_two.building_id, building_two]) + await conn.commit() + + expected_data = [ + (building_one.building_id, building_one), + 
(building_two.building_id, building_two), + ] + conn.outputtypehandler = output_type_handler + assert conn.outputtypehandler == output_type_handler + await cursor_one.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor_one.fetchvars[1].outconverter == Building.from_json + assert await cursor_one.fetchall() == expected_data + + await cursor_two.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor_two.fetchall() == expected_data + other_cursor = async_conn.cursor() + with test_env.assert_raises_full_code("DPY-3002"): + await other_cursor.execute( + sql, (building_one.building_id, building_one) ) - self.assertEqual(await cursor_two.fetchall(), expected_data) - other_cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-3002"): - await other_cursor.execute( - sql, (building_one.building_id, building_one) - ) - async def test_6005(self): - "6005 - output type handler with outconvert and null values" - await self.cursor.execute("truncate table TestTempTable") - data_to_insert = [(1, "String 1"), (2, None), (3, "String 2")] - await self.cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data_to_insert, - ) - await self.conn.commit() - def converter(value): - return "CONVERTED" +async def test_6005(async_conn, async_cursor): + "6005 - output type handler with outconvert and null values" + await async_cursor.execute("truncate table TestTempTable") + data_to_insert = [(1, "String 1"), (2, None), (3, "String 2")] + await async_cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data_to_insert, + ) + await async_conn.commit() - def output_type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_VARCHAR: - return cursor.var( - str, outconverter=converter, arraysize=cursor.arraysize - ) + def converter(value): + return "CONVERTED" + + def output_type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_VARCHAR: + return cursor.var( + str, outconverter=converter, arraysize=cursor.arraysize + ) + + async_cursor.outputtypehandler = output_type_handler + await async_cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] + assert await async_cursor.fetchall() == expected_data - self.cursor.outputtypehandler = output_type_handler - await self.cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - expected_data = [(1, "CONVERTED"), (2, None), (3, "CONVERTED")] - self.assertEqual(await self.cursor.fetchall(), expected_data) - @test_env.skip_unless_native_json_supported() - async def test_6006(self): - "6006 - output type handler for fetching 21c JSON" +async def test_6006(skip_unless_native_json_supported, async_cursor): + "6006 - output type handler for fetching 21c JSON" + + def output_type_handler(cursor, metadata): + # fetch 21c JSON datatype when using python-oracledb thin mode + if metadata.type_code is oracledb.DB_TYPE_JSON: + return cursor.var( + str, arraysize=cursor.arraysize, outconverter=json.loads + ) - def output_type_handler(cursor, metadata): - # fetch 21c JSON datatype when using python-oracledb thin mode - if metadata.type_code is oracledb.DB_TYPE_JSON: + await async_cursor.execute("truncate table TestJson") + insert_sql = "insert into TestJson values (:1, :2)" + json_data = [ + dict(name="John", city="Delhi"), + dict(name="George", 
city="Bangalore"), + dict(name="Sam", city="Mumbai"), + ] + data_to_insert = list(enumerate(json_data)) + async_cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + await async_cursor.executemany(insert_sql, data_to_insert) + async_cursor.outputtypehandler = output_type_handler + await async_cursor.execute("select * from TestJson") + assert await async_cursor.fetchall() == data_to_insert + + +async def test_6007(async_conn): + "6007 - output type handler with object implementing __call__()" + + class TimestampOutputTypeHandler: + + def __init__(self, unit="s"): + if unit == "ms": + self.factor = 1000 + else: + self.factor = 1 + + def converter(self, d): + return int(d.timestamp() * self.factor) + + def __call__(self, cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_TIMESTAMP: return cursor.var( - str, arraysize=cursor.arraysize, outconverter=json.loads + metadata.type_code, + arraysize=cursor.arraysize, + outconverter=self.converter, ) - await self.cursor.execute("truncate table TestJson") - insert_sql = "insert into TestJson values (:1, :2)" - json_data = [ - dict(name="John", city="Delhi"), - dict(name="George", city="Bangalore"), - dict(name="Sam", city="Mumbai"), - ] - data_to_insert = list(enumerate(json_data)) - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - await self.cursor.executemany(insert_sql, data_to_insert) - self.cursor.outputtypehandler = output_type_handler - await self.cursor.execute("select * from TestJson") - self.assertEqual(await self.cursor.fetchall(), data_to_insert) - - async def test_6007(self): - "6007 - output type handler with object implementing __call__()" - - class TimestampOutputTypeHandler: - - def __init__(self, unit="s"): - if unit == "ms": - self.factor = 1000 - else: - self.factor = 1 - - def converter(self, d): - return int(d.timestamp() * self.factor) - - def __call__(self, cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_TIMESTAMP: - return cursor.var( - metadata.type_code, - arraysize=cursor.arraysize, - outconverter=self.converter, - ) - - d = datetime.datetime.today() - with self.conn.cursor() as cursor: - cursor.outputtypehandler = TimestampOutputTypeHandler("ms") - cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) - await cursor.execute("select :d from dual", [d]) - (result,) = await cursor.fetchone() - self.assertEqual(result, int(d.timestamp() * 1000)) - with self.conn.cursor() as cursor: - cursor.outputtypehandler = TimestampOutputTypeHandler("s") - cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) - await cursor.execute("select :d from dual", [d]) - (result,) = await cursor.fetchone() - self.assertEqual(result, int(d.timestamp())) - - -if __name__ == "__main__": - test_env.run_test_cases() + d = datetime.datetime.today() + with async_conn.cursor() as cursor: + cursor.outputtypehandler = TimestampOutputTypeHandler("ms") + cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) + await cursor.execute("select :d from dual", [d]) + (result,) = await cursor.fetchone() + assert result == int(d.timestamp() * 1000) + with async_conn.cursor() as cursor: + cursor.outputtypehandler = TimestampOutputTypeHandler("s") + cursor.setinputsizes(oracledb.DB_TYPE_TIMESTAMP) + await cursor.execute("select :d from dual", [d]) + (result,) = await cursor.fetchone() + assert result == int(d.timestamp()) diff --git a/tests/test_6100_cursor_executemany_async.py b/tests/test_6100_cursor_executemany_async.py index f55c8785..4d86160e 100644 --- a/tests/test_6100_cursor_executemany_async.py +++ b/tests/test_6100_cursor_executemany_async.py @@ 
-29,244 +29,286 @@ import decimal import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_6100(self): - "6100 - test executing a statement multiple times (named args)" - await self.cursor.execute("truncate table TestTempTable") - rows = [{"value": n} for n in range(250)] - self.cursor.arraysize = 100 - await self.cursor.executemany( - "insert into TestTempTable (IntCol) values (:value)", - rows, - ) - await self.conn.commit() - await self.cursor.execute("select count(*) from TestTempTable") - (count,) = await self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - async def test_6101(self): - "6101 - test executing a statement multiple times (positional args)" - await self.cursor.execute("truncate table TestTempTable") - rows = [[n] for n in range(230)] - self.cursor.arraysize = 100 - await self.cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", - rows, - ) - await self.conn.commit() - await self.cursor.execute("select count(*) from TestTempTable") - (count,) = await self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - async def test_6102(self): - "6102 - test executing a statement multiple times (with prepare)" - await self.cursor.execute("truncate table TestTempTable") - rows = [[n] for n in range(225)] - self.cursor.arraysize = 100 - self.cursor.prepare("insert into TestTempTable (IntCol) values (:1)") - await self.cursor.executemany(None, rows) - await self.conn.commit() - await self.cursor.execute("select count(*) from TestTempTable") - (count,) = await self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - async def test_6103(self): - "6103 - test executing a statement multiple times (with rebind)" - await self.cursor.execute("truncate table TestTempTable") - rows = [[n] for n in range(235)] - self.cursor.arraysize = 100 - statement = "insert into TestTempTable (IntCol) values (:1)" - await self.cursor.executemany(statement, rows[:50]) - await self.cursor.executemany(statement, rows[50:]) - await self.conn.commit() - await self.cursor.execute("select count(*) from TestTempTable") - (count,) = await self.cursor.fetchone() - self.assertEqual(count, len(rows)) - - async def test_6104(self): - "6104 - test executing multiple times (with input sizes wrong)" - cursor = self.conn.cursor() - cursor.setinputsizes(oracledb.NUMBER) - data = [[decimal.Decimal("25.8")], [decimal.Decimal("30.0")]] - await cursor.executemany("declare t number; begin t := :1; end;", data) - - async def test_6105(self): - "6105 - test executing multiple times (with multiple batches)" - await self.cursor.execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - await self.cursor.executemany(sql, [(1, None), (2, None)]) - await self.cursor.executemany(sql, [(3, None), (4, "Testing")]) - - async def test_6106(self): - "6106 - test executemany() with various numeric types" - await self.cursor.execute("truncate table TestTempTable") - data = [ - (1, 5), - (2, 7.0), - (3, 6.5), - (4, 2**65), - (5, decimal.Decimal("24.5")), - ] - await self.cursor.executemany( - "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", - data, - ) - await self.cursor.execute( - "select IntCol, NumberCol from TestTempTable order by IntCol" - ) - self.assertEqual(await self.cursor.fetchall(), data) - - async def test_6107(self): - "6107 - test executing a statement multiple times (with resize)" - await self.cursor.execute("truncate table TestTempTable") - 
rows = [ - (1, "First"), - (2, "Second"), - (3, "Third"), - (4, "Fourth"), - (5, "Fifth"), - (6, "Sixth"), - (7, "Seventh and the longest one"), - ] - await self.cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - rows, - ) - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - self.assertEqual(await self.cursor.fetchall(), rows) - - async def test_6108(self): - "6108 - test executing a statement multiple times (with exception)" - await self.cursor.execute("truncate table TestTempTable") - rows = [{"value": n} for n in (1, 2, 3, 2, 5)] - statement = "insert into TestTempTable (IntCol) values (:value)" - with self.assertRaisesFullCode("ORA-00001"): - await self.cursor.executemany(statement, rows) - self.assertEqual(self.cursor.rowcount, 3) - - async def test_6109(self): - "6109 - test calling executemany() with invalid parameters" - sql = """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2)""" - with self.assertRaisesFullCode("DPY-2004"): - await self.cursor.executemany(sql, "Not valid parameters") - - async def test_6110(self): - "6110 - test calling executemany() without any bind parameters" - num_rows = 5 - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.executemany( - """ - declare - t_Id number; - begin - select nvl(count(*), 0) + 1 into t_Id - from TestTempTable; - - insert into TestTempTable (IntCol, StringCol1) - values (t_Id, 'Test String ' || t_Id); - end; - """, - num_rows, - ) - self.assertEqual(self.cursor.rowcount, 0) - await self.cursor.execute("select count(*) from TestTempTable") - (count,) = await self.cursor.fetchone() - self.assertEqual(count, num_rows) - - async def test_6111(self): - "6111 - test calling executemany() with binds performed earlier" - num_rows = 9 - await self.cursor.execute("truncate table TestTempTable") - var = self.cursor.var(int, arraysize=num_rows) - self.cursor.setinputsizes(var) - await self.cursor.executemany( - """ - declare - t_Id number; - begin - select nvl(count(*), 0) + 1 into t_Id - from TestTempTable; - - insert into TestTempTable (IntCol, StringCol1) - values (t_Id, 'Test String ' || t_Id); - - select sum(IntCol) into :1 - from TestTempTable; - end; - """, - num_rows, - ) - self.assertEqual(self.cursor.rowcount, 0) - expected_data = [1, 3, 6, 10, 15, 21, 28, 36, 45] - self.assertEqual(var.values, expected_data) - - async def test_6112(self): - "6112 - test executing plsql statements multiple times (with binds)" - var = self.cursor.var(int, arraysize=5) - self.cursor.setinputsizes(var) - data = [[25], [30], [None], [35], [None]] - exepected_data = [25, 30, None, 35, None] - await self.cursor.executemany( - "declare t number; begin t := :1; end;", data - ) - self.assertEqual(var.values, exepected_data) - - async def test_6113(self): - "6113 - test executemany with incorrect parameters" - with self.assertRaisesFullCode("DPY-2004"): - await self.cursor.executemany("select :1 from dual", [1]) - - async def test_6114(self): - "6114 - test executemany with mixed binds (pos first)" - rows = [["test"], {"value": 1}] - with self.assertRaisesFullCode("DPY-2006"): - await self.cursor.executemany("select :1 from dual", rows) - - async def test_6115(self): - "6115 - test executemany with mixed binds (name first)" - rows = [{"value": 1}, ["test"]] - with self.assertRaisesFullCode("DPY-2006"): - await self.cursor.executemany("select :value from dual", rows) - - async def test_6116(self): - "6116 - test executemany() with a 
pl/sql statement with dml returning" - num_rows = 5 - await self.cursor.execute("truncate table TestTempTable") - out_var = self.cursor.var(oracledb.NUMBER, arraysize=5) - self.cursor.setinputsizes(out_var) - await self.cursor.executemany( - """ - declare - t_Id number; - begin - select nvl(count(*), 0) + 1 into t_Id - from TestTempTable; - - insert into TestTempTable (IntCol, StringCol1) - values (t_Id, 'Test String ' || t_Id) - returning IntCol into :out_bind; - end; - """, - num_rows, - ) - self.assertEqual(out_var.values, [1, 2, 3, 4, 5]) - - async def test_6117(self): - "6117 - test executemany() with pl/sql in binds and out binds" - await self.cursor.execute("truncate table TestTempTable") - values = [5, 8, 17, 24, 6] - data = [(i, f"Test {i}") for i in values] - out_bind = self.cursor.var(oracledb.NUMBER, arraysize=5) - self.cursor.setinputsizes(None, None, out_bind) - await self.cursor.executemany( +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture +async def empty_tab(async_cursor): + await async_cursor.execute("truncate table TestTempTable") + + +async def test_6100(async_conn, async_cursor, empty_tab): + "6100 - test executing a statement multiple times (named args)" + rows = [{"value": n} for n in range(250)] + async_cursor.arraysize = 100 + await async_cursor.executemany( + "insert into TestTempTable (IntCol) values (:value)", + rows, + ) + await async_conn.commit() + await async_cursor.execute("select count(*) from TestTempTable") + (count,) = await async_cursor.fetchone() + assert count == len(rows) + + +async def test_6101(async_conn, async_cursor, empty_tab): + "6101 - test executing a statement multiple times (positional args)" + rows = [[n] for n in range(230)] + async_cursor.arraysize = 100 + await async_cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", + rows, + ) + await async_conn.commit() + await async_cursor.execute("select count(*) from TestTempTable") + (count,) = await async_cursor.fetchone() + assert count == len(rows) + + +async def test_6102(async_conn, async_cursor, empty_tab): + "6102 - test executing a statement multiple times (with prepare)" + rows = [[n] for n in range(225)] + async_cursor.arraysize = 100 + async_cursor.prepare("insert into TestTempTable (IntCol) values (:1)") + await async_cursor.executemany(None, rows) + await async_conn.commit() + await async_cursor.execute("select count(*) from TestTempTable") + (count,) = await async_cursor.fetchone() + assert count == len(rows) + + +async def test_6103(async_conn, async_cursor, empty_tab): + "6103 - test executing a statement multiple times (with rebind)" + rows = [[n] for n in range(235)] + async_cursor.arraysize = 100 + statement = "insert into TestTempTable (IntCol) values (:1)" + await async_cursor.executemany(statement, rows[:50]) + await async_cursor.executemany(statement, rows[50:]) + await async_conn.commit() + await async_cursor.execute("select count(*) from TestTempTable") + (count,) = await async_cursor.fetchone() + assert count == len(rows) + + +async def test_6104(async_conn): + "6104 - test executing multiple times (with input sizes wrong)" + cursor = async_conn.cursor() + cursor.setinputsizes(oracledb.NUMBER) + data = [[decimal.Decimal("25.8")], [decimal.Decimal("30.0")]] + await cursor.executemany("declare t number; begin t := :1; end;", data) + + +async def test_6105(async_cursor, empty_tab): + "6105 - test executing multiple times (with multiple batches)" + sql = "insert into 
TestTempTable (IntCol, StringCol1) values (:1, :2)" + await async_cursor.executemany(sql, [(1, None), (2, None)]) + await async_cursor.executemany(sql, [(3, None), (4, "Testing")]) + + +async def test_6106(async_cursor, empty_tab): + "6106 - test executemany() with various numeric types" + data = [ + (1, 5), + (2, 7.0), + (3, 6.5), + (4, 2**65), + (5, decimal.Decimal("24.5")), + ] + await async_cursor.executemany( + "insert into TestTempTable (IntCol, NumberCol) values (:1, :2)", + data, + ) + await async_cursor.execute( + "select IntCol, NumberCol from TestTempTable order by IntCol" + ) + assert await async_cursor.fetchall() == data + + +async def test_6107(async_cursor, empty_tab): + "6107 - test executing a statement multiple times (with resize)" + rows = [ + (1, "First"), + (2, "Second"), + (3, "Third"), + (4, "Fourth"), + (5, "Fifth"), + (6, "Sixth"), + (7, "Seventh and the longest one"), + ] + await async_cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + rows, + ) + await async_cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert await async_cursor.fetchall() == rows + + +async def test_6108(async_cursor, empty_tab, test_env): + "6108 - test executing a statement multiple times (with exception)" + rows = [{"value": n} for n in (1, 2, 3, 2, 5)] + statement = "insert into TestTempTable (IntCol) values (:value)" + with test_env.assert_raises_full_code("ORA-00001"): + await async_cursor.executemany(statement, rows) + assert async_cursor.rowcount == 3 + + +async def test_6109(async_cursor, test_env): + "6109 - test calling executemany() with invalid parameters" + sql = """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2)""" + with test_env.assert_raises_full_code("DPY-2004"): + await async_cursor.executemany(sql, "Not valid parameters") + + +async def test_6110(async_cursor, empty_tab): + "6110 - test calling executemany() without any bind parameters" + num_rows = 5 + await async_cursor.executemany( + """ + declare + t_Id number; + begin + select nvl(count(*), 0) + 1 into t_Id + from TestTempTable; + + insert into TestTempTable (IntCol, StringCol1) + values (t_Id, 'Test String ' || t_Id); + end; + """, + num_rows, + ) + assert async_cursor.rowcount == 0 + await async_cursor.execute("select count(*) from TestTempTable") + (count,) = await async_cursor.fetchone() + assert count == num_rows + + +async def test_6111(async_cursor, empty_tab): + "6111 - test calling executemany() with binds performed earlier" + num_rows = 9 + var = async_cursor.var(int, arraysize=num_rows) + async_cursor.setinputsizes(var) + await async_cursor.executemany( + """ + declare + t_Id number; + begin + select nvl(count(*), 0) + 1 into t_Id + from TestTempTable; + + insert into TestTempTable (IntCol, StringCol1) + values (t_Id, 'Test String ' || t_Id); + + select sum(IntCol) into :1 + from TestTempTable; + end; + """, + num_rows, + ) + assert async_cursor.rowcount == 0 + expected_data = [1, 3, 6, 10, 15, 21, 28, 36, 45] + assert var.values == expected_data + + +async def test_6112(async_cursor): + "6112 - test executing plsql statements multiple times (with binds)" + var = async_cursor.var(int, arraysize=5) + async_cursor.setinputsizes(var) + data = [[25], [30], [None], [35], [None]] + exepected_data = [25, 30, None, 35, None] + await async_cursor.executemany( + "declare t number; begin t := :1; end;", data + ) + assert var.values == exepected_data + + +async def test_6113(async_cursor, test_env): + "6113 - test 
executemany with incorrect parameters" + with test_env.assert_raises_full_code("DPY-2004"): + await async_cursor.executemany("select :1 from dual", [1]) + + +async def test_6114(async_cursor, test_env): + "6114 - test executemany with mixed binds (pos first)" + rows = [["test"], {"value": 1}] + with test_env.assert_raises_full_code("DPY-2006"): + await async_cursor.executemany("select :1 from dual", rows) + + +async def test_6115(async_cursor, test_env): + "6115 - test executemany with mixed binds (name first)" + rows = [{"value": 1}, ["test"]] + with test_env.assert_raises_full_code("DPY-2006"): + await async_cursor.executemany("select :value from dual", rows) + + +async def test_6116(async_cursor, empty_tab): + "6116 - test executemany() with a pl/sql statement with dml returning" + num_rows = 5 + out_var = async_cursor.var(oracledb.NUMBER, arraysize=5) + async_cursor.setinputsizes(out_var) + await async_cursor.executemany( + """ + declare + t_Id number; + begin + select nvl(count(*), 0) + 1 into t_Id + from TestTempTable; + + insert into TestTempTable (IntCol, StringCol1) + values (t_Id, 'Test String ' || t_Id) + returning IntCol into :out_bind; + end; + """, + num_rows, + ) + assert out_var.values == [1, 2, 3, 4, 5] + + +async def test_6117(async_cursor, empty_tab): + "6117 - test executemany() with pl/sql in binds and out binds" + values = [5, 8, 17, 24, 6] + data = [(i, f"Test {i}") for i in values] + out_bind = async_cursor.var(oracledb.NUMBER, arraysize=5) + async_cursor.setinputsizes(None, None, out_bind) + await async_cursor.executemany( + """ + begin + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol into :out_bind; + end; + """, + data, + ) + assert out_bind.values == values + + +async def test_6118(async_cursor, empty_tab): + "6118 - test executemany() with pl/sql outbinds" + out_bind = async_cursor.var(oracledb.NUMBER, arraysize=5) + async_cursor.setinputsizes(out_bind) + await async_cursor.executemany("begin :out_var := 5; end;", 5) + assert out_bind.values == [5, 5, 5, 5, 5] + + +async def test_6119(async_cursor): + "6119 - test re-executemany() with pl/sql in binds and out binds" + values = [5, 8, 17, 24, 6] + data = [(i, f"Test {i}") for i in values] + for i in range(2): + await async_cursor.execute("truncate table TestTempTable") + out_bind = async_cursor.var(oracledb.NUMBER, arraysize=5) + async_cursor.setinputsizes(None, None, out_bind) + await async_cursor.executemany( """ begin insert into TestTempTable (IntCol, StringCol1) @@ -276,117 +318,88 @@ async def test_6117(self): """, data, ) - self.assertEqual(out_bind.values, values) - - async def test_6118(self): - "6118 - test executemany() with pl/sql outbinds" - await self.cursor.execute("truncate table TestTempTable") - out_bind = self.cursor.var(oracledb.NUMBER, arraysize=5) - self.cursor.setinputsizes(out_bind) - await self.cursor.executemany("begin :out_var := 5; end;", 5) - self.assertEqual(out_bind.values, [5, 5, 5, 5, 5]) - - async def test_6119(self): - "6119 - test re-executemany() with pl/sql in binds and out binds" - values = [5, 8, 17, 24, 6] - data = [(i, f"Test {i}") for i in values] - for i in range(2): - await self.cursor.execute("truncate table TestTempTable") - out_bind = self.cursor.var(oracledb.NUMBER, arraysize=5) - self.cursor.setinputsizes(None, None, out_bind) - await self.cursor.executemany( - """ - begin - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol into :out_bind; - end; - """, - data, - ) - 
self.assertEqual(out_bind.values, values) - - async def test_6120(self): - "6120 - test PL/SQL statement with single row bind" - value = 4020 - var = self.cursor.var(int) - await self.cursor.executemany("begin :1 := :2; end;", [[var, value]]) - self.assertEqual(var.values, [value]) - - async def test_6121(self): - "6121 - test deferral of type assignment" - await self.cursor.execute("truncate table TestTempTable") - data = [(1, None), (2, 25)] - await self.cursor.executemany( - """ - insert into TestTempTable - (IntCol, NumberCol) - values (:1, :2) - """, - data, - ) - await self.conn.commit() - await self.cursor.execute( - """ - select IntCol, NumberCol - from TestTempTable - order by IntCol - """ - ) - self.assertEqual(await self.cursor.fetchall(), data) - - async def test_6122(self): - "6122 - test PL/SQL with a lerge number of binds" - parts = [] - bind_names = [] - all_bind_values = [] - out_binds = [] - for i in range(5): - all_bind_values.append([]) - for i in range(350): - n = len(parts) + 1 - bind_names.extend([f"v_out_{n}_0", f"a_{n}", f"b_{n}", f"c_{n}"]) - parts.append(f":v_out{n} := :a_{n} + :b_{n} + :c_{n};") - out_binds.append( - self.cursor.var(int, arraysize=len(all_bind_values)) + assert out_bind.values == values + + +async def test_6120(async_cursor): + "6120 - test PL/SQL statement with single row bind" + value = 4020 + var = async_cursor.var(int) + await async_cursor.executemany("begin :1 := :2; end;", [[var, value]]) + assert var.values == [value] + + +async def test_6121(async_conn, async_cursor, empty_tab): + "6121 - test deferral of type assignment" + data = [(1, None), (2, 25)] + await async_cursor.executemany( + """ + insert into TestTempTable + (IntCol, NumberCol) + values (:1, :2) + """, + data, + ) + await async_conn.commit() + await async_cursor.execute( + """ + select IntCol, NumberCol + from TestTempTable + order by IntCol + """ + ) + assert await async_cursor.fetchall() == data + + +async def test_6122(async_cursor): + "6122 - test PL/SQL with a lerge number of binds" + parts = [] + bind_names = [] + all_bind_values = [] + out_binds = [] + for i in range(5): + all_bind_values.append([]) + for i in range(350): + n = len(parts) + 1 + bind_names.extend([f"v_out_{n}_0", f"a_{n}", f"b_{n}", f"c_{n}"]) + parts.append(f":v_out{n} := :a_{n} + :b_{n} + :c_{n};") + out_binds.append(async_cursor.var(int, arraysize=len(all_bind_values))) + for j, bind_values in enumerate(all_bind_values): + bind_values.extend( + [out_binds[-1], n * 1 + j, n * 2 + j, n * 3 + j] ) - for j, bind_values in enumerate(all_bind_values): - bind_values.extend( - [out_binds[-1], n * 1 + j, n * 2 + j, n * 3 + j] - ) - lf = "\n" - sql = f"begin{lf}{lf.join(parts)}{lf}end;" - await self.cursor.executemany(sql, all_bind_values) - init_val = 6 - for var in out_binds: - expected_values = [ - init_val, - init_val + 3, - init_val + 6, - init_val + 9, - init_val + 12, - ] - self.assertEqual(var.values, expected_values) - init_val += 6 - - async def test_6123(self): - "6123 - test executing no statement" - cursor = self.conn.cursor() - with self.assertRaisesFullCode("DPY-2001"): - await cursor.executemany(None, [1, 2]) - - async def test_6124(self): - "6124 - test executemany with empty parameter set" - sql = "insert into TestTempTable values (:1)" - await self.cursor.executemany(sql, []) - - async def test_6125(self): - "6125 - test executemany with an empty statement" - with self.assertRaisesFullCode("DPY-2066"): - await self.cursor.executemany("", 5) - with self.assertRaisesFullCode("DPY-2066"): - 
await self.cursor.executemany(" ", 5) - - -if __name__ == "__main__": - test_env.run_test_cases() + lf = "\n" + sql = f"begin{lf}{lf.join(parts)}{lf}end;" + await async_cursor.executemany(sql, all_bind_values) + init_val = 6 + for var in out_binds: + expected_values = [ + init_val, + init_val + 3, + init_val + 6, + init_val + 9, + init_val + 12, + ] + assert var.values == expected_values + init_val += 6 + + +async def test_6123(async_conn, test_env): + "6123 - test executing no statement" + cursor = async_conn.cursor() + with test_env.assert_raises_full_code("DPY-2001"): + await cursor.executemany(None, [1, 2]) + + +async def test_6124(async_cursor): + "6124 - test executemany with empty parameter set" + sql = "insert into TestTempTable values (:1)" + await async_cursor.executemany(sql, []) + + +async def test_6125(async_cursor, test_env): + "6125 - test executemany with an empty statement" + with test_env.assert_raises_full_code("DPY-2066"): + await async_cursor.executemany("", 5) + with test_env.assert_raises_full_code("DPY-2066"): + await async_cursor.executemany(" ", 5) diff --git a/tests/test_6200_cursor_callproc_async.py b/tests/test_6200_cursor_callproc_async.py index f539a456..a7788d3a 100644 --- a/tests/test_6200_cursor_callproc_async.py +++ b/tests/test_6200_cursor_callproc_async.py @@ -28,107 +28,108 @@ """ import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_6200(self): - "6200 - test executing a stored procedure" - var = self.cursor.var(oracledb.NUMBER) - results = await self.cursor.callproc("proc_Test", ("hi", 5, var)) - self.assertEqual(results, ["hi", 10, 2.0]) - - async def test_6201(self): - "6201 - test executing a stored procedure with all args keyword args" - inout_value = self.cursor.var(oracledb.NUMBER) - inout_value.setvalue(0, 5) - out_value = self.cursor.var(oracledb.NUMBER) - kwargs = dict( - a_InOutValue=inout_value, a_InValue="hi", a_OutValue=out_value - ) - results = await self.cursor.callproc("proc_Test", [], kwargs) - self.assertEqual(results, []) - self.assertEqual(inout_value.getvalue(), 10) - self.assertEqual(out_value.getvalue(), 2.0) - - async def test_6202(self): - "6202 - test executing a stored procedure with last arg as keyword arg" - out_value = self.cursor.var(oracledb.NUMBER) - kwargs = dict(a_OutValue=out_value) - results = await self.cursor.callproc("proc_Test", ("hi", 5), kwargs) - self.assertEqual(results, ["hi", 10]) - self.assertEqual(out_value.getvalue(), 2.0) - - async def test_6203(self): - "6203 - test executing a stored procedure, repeated keyword arg" - kwargs = dict( - a_InValue="hi", a_OutValue=self.cursor.var(oracledb.NUMBER) - ) - with self.assertRaisesFullCode("ORA-06550"): - await self.cursor.callproc("proc_Test", ("hi", 5), kwargs) - - async def test_6204(self): - "6204 - test executing a stored procedure without any arguments" - results = await self.cursor.callproc("proc_TestNoArgs") - self.assertEqual(results, []) - - async def test_6205(self): - "6205 - test executing a stored function" - results = await self.cursor.callfunc( - "func_Test", oracledb.NUMBER, ("hi", 5) - ) - self.assertEqual(results, 7) - - async def test_6206(self): - "6206 - test executing a stored function without any arguments" - results = await self.cursor.callfunc( - "func_TestNoArgs", oracledb.NUMBER - ) - self.assertEqual(results, 712) - - async def test_6207(self): - "6207 - test executing a stored function with wrong parameters" - func_name = "func_Test" - with 
self.assertRaisesFullCode("DPY-2007"): - await self.cursor.callfunc(oracledb.NUMBER, func_name, ("hi", 5)) - with self.assertRaisesFullCode("ORA-06550"): - await self.cursor.callfunc( - func_name, oracledb.NUMBER, ("hi", 5, 7) - ) - with self.assertRaisesFullCode("DPY-2012"): - await self.cursor.callfunc(func_name, oracledb.NUMBER, "hi", 7) - with self.assertRaisesFullCode("ORA-06502"): - await self.cursor.callfunc(func_name, oracledb.NUMBER, [5, "hi"]) - with self.assertRaisesFullCode("ORA-06550"): - await self.cursor.callfunc(func_name, oracledb.NUMBER) - with self.assertRaisesFullCode("DPY-2012"): - await self.cursor.callfunc(func_name, oracledb.NUMBER, 5) - - async def test_6208(self): - "6208 - test error for keyword args with invalid type" - kwargs = [5] - with self.assertRaisesFullCode("DPY-2013"): - await self.cursor.callproc("proc_Test", [], kwargs) - with self.assertRaisesFullCode("DPY-2013"): - await self.cursor.callfunc( - "func_Test", oracledb.NUMBER, [], kwargs - ) - - async def test_6209(self): - "6209 - test calling a procedure with a string > 32767 characters" - data = "6209" * 16000 - size_var = self.cursor.var(int) - await self.cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) - self.assertEqual(size_var.getvalue(), len(data)) - - async def test_6210(self): - "6210 - test calling a procedure with raw data > 32767 bytes" - data = b"6210" * 16250 - size_var = self.cursor.var(int) - await self.cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) - self.assertEqual(size_var.getvalue(), len(data)) - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_6200(async_cursor): + "6200 - test executing a stored procedure" + var = async_cursor.var(oracledb.NUMBER) + results = await async_cursor.callproc("proc_Test", ("hi", 5, var)) + assert results == ["hi", 10, 2.0] + + +async def test_6201(async_cursor): + "6201 - test executing a stored procedure with all args keyword args" + inout_value = async_cursor.var(oracledb.NUMBER) + inout_value.setvalue(0, 5) + out_value = async_cursor.var(oracledb.NUMBER) + kwargs = dict( + a_InOutValue=inout_value, a_InValue="hi", a_OutValue=out_value + ) + results = await async_cursor.callproc("proc_Test", [], kwargs) + assert results == [] + assert inout_value.getvalue() == 10 + assert out_value.getvalue() == 2.0 + + +async def test_6202(async_cursor): + "6202 - test executing a stored procedure with last arg as keyword arg" + out_value = async_cursor.var(oracledb.NUMBER) + kwargs = dict(a_OutValue=out_value) + results = await async_cursor.callproc("proc_Test", ("hi", 5), kwargs) + assert results == ["hi", 10] + assert out_value.getvalue() == 2.0 + + +async def test_6203(async_cursor, test_env): + "6203 - test executing a stored procedure, repeated keyword arg" + kwargs = dict(a_InValue="hi", a_OutValue=async_cursor.var(oracledb.NUMBER)) + with test_env.assert_raises_full_code("ORA-06550"): + await async_cursor.callproc("proc_Test", ("hi", 5), kwargs) + + +async def test_6204(async_cursor): + "6204 - test executing a stored procedure without any arguments" + results = await async_cursor.callproc("proc_TestNoArgs") + assert results == [] + + +async def test_6205(async_cursor): + "6205 - test executing a stored function" + results = await async_cursor.callfunc( + "func_Test", oracledb.NUMBER, ("hi", 5) + ) + assert results == 7 + + +async def test_6206(async_cursor): + "6206 - test executing a stored 
function without any arguments" + results = await async_cursor.callfunc("func_TestNoArgs", oracledb.NUMBER) + assert results == 712 + + +async def test_6207(async_cursor, test_env): + "6207 - test executing a stored function with wrong parameters" + func_name = "func_Test" + with test_env.assert_raises_full_code("DPY-2007"): + await async_cursor.callfunc(oracledb.NUMBER, func_name, ("hi", 5)) + with test_env.assert_raises_full_code("ORA-06550"): + await async_cursor.callfunc(func_name, oracledb.NUMBER, ("hi", 5, 7)) + with test_env.assert_raises_full_code("DPY-2012"): + await async_cursor.callfunc(func_name, oracledb.NUMBER, "hi", 7) + with test_env.assert_raises_full_code("ORA-06502"): + await async_cursor.callfunc(func_name, oracledb.NUMBER, [5, "hi"]) + with test_env.assert_raises_full_code("ORA-06550"): + await async_cursor.callfunc(func_name, oracledb.NUMBER) + with test_env.assert_raises_full_code("DPY-2012"): + await async_cursor.callfunc(func_name, oracledb.NUMBER, 5) + + +async def test_6208(async_cursor, test_env): + "6208 - test error for keyword args with invalid type" + kwargs = [5] + with test_env.assert_raises_full_code("DPY-2013"): + await async_cursor.callproc("proc_Test", [], kwargs) + with test_env.assert_raises_full_code("DPY-2013"): + await async_cursor.callfunc("func_Test", oracledb.NUMBER, [], kwargs) + + +async def test_6209(async_cursor): + "6209 - test calling a procedure with a string > 32767 characters" + data = "6209" * 16000 + size_var = async_cursor.var(int) + await async_cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) + assert size_var.getvalue() == len(data) + + +async def test_6210(async_cursor): + "6210 - test calling a procedure with raw data > 32767 bytes" + data = b"6210" * 16250 + size_var = async_cursor.var(int) + await async_cursor.callproc("pkg_TestLobs.GetSize", [data, size_var]) + assert size_var.getvalue() == len(data) diff --git a/tests/test_6300_cursor_other_async.py b/tests/test_6300_cursor_other_async.py index 6180b6c3..f8765579 100644 --- a/tests/test_6300_cursor_other_async.py +++ b/tests/test_6300_cursor_other_async.py @@ -29,916 +29,934 @@ import decimal import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_6300(self): - "6300 - test preparing a statement and executing it multiple times" - cursor = self.conn.cursor() - self.assertEqual(cursor.statement, None) - statement = "begin :value := :value + 5; end;" - cursor.prepare(statement) - var = cursor.var(oracledb.NUMBER) - self.assertEqual(cursor.statement, statement) - var.setvalue(0, 2) - await cursor.execute(None, value=var) - self.assertEqual(var.getvalue(), 7) - await cursor.execute(None, value=var) - self.assertEqual(var.getvalue(), 12) - await cursor.execute("begin :value2 := 3; end;", value2=var) - self.assertEqual(var.getvalue(), 3) - - async def test_6301(self): - "6301 - confirm an exception is raised after closing a cursor" - self.cursor.close() - with self.assertRaisesFullCode("DPY-1006"): - await self.cursor.execute("select 1 from dual") - - async def test_6302(self): - "6302 - test iterators" - await self.cursor.execute( - """ - select IntCol - from TestNumbers - where IntCol between 1 and 3 - order by IntCol - """ - ) - rows = [v async for v, in self.cursor] - self.assertEqual(rows, [1, 2, 3]) - - async def test_6303(self): - "6303 - test iterators (with intermediate execute)" - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.execute( - """ - select 
IntCol - from TestNumbers - where IntCol between 1 and 3 - order by IntCol - """ - ) - test_iter = self.cursor.__aiter__() - (value,) = await test_iter.__anext__() - await self.cursor.execute( - "insert into TestTempTable (IntCol) values (1)" - ) - with self.assertRaisesFullCode("DPY-1003"): - await test_iter.__anext__() - - async def test_6304(self): - "6304 - test setting input sizes without any parameters" - self.cursor.setinputsizes() - await self.cursor.execute("select :val from dual", val="Test Value") - self.assertEqual(await self.cursor.fetchall(), [("Test Value",)]) - - async def test_6305(self): - "6305 - test setting input sizes with an empty dictionary" - empty_dict = {} - self.cursor.prepare("select 236 from dual") - self.cursor.setinputsizes(**empty_dict) - await self.cursor.execute(None, empty_dict) - self.assertEqual(await self.cursor.fetchall(), [(236,)]) - - async def test_6306(self): - "6306 - test setting input sizes with an empty list" - empty_list = [] - self.cursor.prepare("select 239 from dual") - self.cursor.setinputsizes(*empty_list) - await self.cursor.execute(None, empty_list) - self.assertEqual(await self.cursor.fetchall(), [(239,)]) - - async def test_6307(self): - "6307 - test setting input sizes with positional args" - var = self.cursor.var(oracledb.STRING, 100) - self.cursor.setinputsizes(None, 5, None, 10, None, oracledb.NUMBER) - await self.cursor.execute( - """ - begin - :1 := :2 || to_char(:3) || :4 || to_char(:5) || to_char(:6); - end; - """, - [var, "test_", 5, "_second_", 3, 7], +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_6300(async_cursor): + "6300 - test preparing a statement and executing it multiple times" + assert async_cursor.statement is None + statement = "begin :value := :value + 5; end;" + async_cursor.prepare(statement) + var = async_cursor.var(oracledb.NUMBER) + assert async_cursor.statement == statement + var.setvalue(0, 2) + await async_cursor.execute(None, value=var) + assert var.getvalue() == 7 + await async_cursor.execute(None, value=var) + assert var.getvalue() == 12 + await async_cursor.execute("begin :value2 := 3; end;", value2=var) + assert var.getvalue() == 3 + + +async def test_6301(async_conn, test_env): + "6301 - confirm an exception is raised after closing a cursor" + with async_conn.cursor() as cursor: + pass + with test_env.assert_raises_full_code("DPY-1006"): + await cursor.execute("select 1 from dual") + + +async def test_6302(async_cursor): + "6302 - test iterators" + await async_cursor.execute( + """ + select IntCol + from TestNumbers + where IntCol between 1 and 3 + order by IntCol + """ + ) + rows = [v async for v, in async_cursor] + assert rows == [1, 2, 3] + + +async def test_6303(async_cursor, test_env): + "6303 - test iterators (with intermediate execute)" + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.execute( + """ + select IntCol + from TestNumbers + where IntCol between 1 and 3 + order by IntCol + """ + ) + test_iter = async_cursor.__aiter__() + (value,) = await test_iter.__anext__() + await async_cursor.execute("insert into TestTempTable (IntCol) values (1)") + with test_env.assert_raises_full_code("DPY-1003"): + await test_iter.__anext__() + + +async def test_6304(async_cursor): + "6304 - test setting input sizes without any parameters" + async_cursor.setinputsizes() + await async_cursor.execute("select :val from dual", val="Test Value") + assert await async_cursor.fetchall() == 
[("Test Value",)] + + +async def test_6305(async_cursor): + "6305 - test setting input sizes with an empty dictionary" + empty_dict = {} + async_cursor.prepare("select 236 from dual") + async_cursor.setinputsizes(**empty_dict) + await async_cursor.execute(None, empty_dict) + assert await async_cursor.fetchall() == [(236,)] + + +async def test_6306(async_cursor): + "6306 - test setting input sizes with an empty list" + empty_list = [] + async_cursor.prepare("select 239 from dual") + async_cursor.setinputsizes(*empty_list) + await async_cursor.execute(None, empty_list) + assert await async_cursor.fetchall() == [(239,)] + + +async def test_6307(async_cursor): + "6307 - test setting input sizes with positional args" + var = async_cursor.var(oracledb.STRING, 100) + async_cursor.setinputsizes(None, 5, None, 10, None, oracledb.NUMBER) + await async_cursor.execute( + """ + begin + :1 := :2 || to_char(:3) || :4 || to_char(:5) || to_char(:6); + end; + """, + [var, "test_", 5, "_second_", 3, 7], + ) + assert var.getvalue() == "test_5_second_37" + + +async def test_6308(async_cursor): + "6308 - test parsing query statements" + sql = "select LongIntCol from TestNumbers where IntCol = :val" + await async_cursor.parse(sql) + assert async_cursor.statement == sql + assert async_cursor.description == [ + ("LONGINTCOL", oracledb.DB_TYPE_NUMBER, 17, None, 16, 0, 0) + ] + + +async def test_6309(async_cursor): + "6309 - test binding boolean data without the use of PL/SQL" + await async_cursor.execute("truncate table TestTempTable") + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + await async_cursor.execute(sql, (False, "Value should be 0")) + await async_cursor.execute(sql, (True, "Value should be 1")) + await async_cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + expected_value = [(0, "Value should be 0"), (1, "Value should be 1")] + assert await async_cursor.fetchall() == expected_value + + +async def test_6310(async_conn, test_env): + "6310 - test using a cursor as a context manager" + with async_conn.cursor() as cursor: + await cursor.execute("truncate table TestTempTable") + await cursor.execute("select count(*) from TestTempTable") + (count,) = await cursor.fetchone() + assert count == 0 + with test_env.assert_raises_full_code("DPY-1006"): + cursor.close() + + +async def test_6311(async_cursor): + "6311 - test that rowcount attribute is reset to zero on query execute" + for num in [0, 1, 1, 0]: + await async_cursor.execute("select * from dual where 1 = :s", [num]) + await async_cursor.fetchone() + assert async_cursor.rowcount == num + + +async def test_6312(async_conn, async_cursor): + "6312 - test that an object type can be used as type in cursor.var()" + obj_type = await async_conn.gettype("UDT_OBJECT") + var = async_cursor.var(obj_type) + await async_cursor.callproc( + "pkg_TestBindObject.BindObjectOut", (28, "Bind obj out", var) + ) + obj = var.getvalue() + result = await async_cursor.callfunc( + "pkg_TestBindObject.GetStringRep", str, (obj,) + ) + exp = "udt_Object(28, 'Bind obj out', null, null, null, null, null)" + assert result == exp + + +async def test_6313(async_cursor): + "6313 - test that fetching an XMLType returns a string" + int_val = 5 + label = "IntCol" + expected_result = f"<{label}>{int_val}" + await async_cursor.execute( + f""" + select XMLElement("{label}", IntCol) + from TestStrings + where IntCol = :int_val + """, + int_val=int_val, + ) + (result,) = await async_cursor.fetchone() + assert result == expected_result + + 
+async def test_6314(async_cursor):
+    "6314 - test last rowid"
+
+    # no statement executed: no rowid
+    assert async_cursor.lastrowid is None
+
+    # DDL statement executed: no rowid
+    await async_cursor.execute("truncate table TestTempTable")
+    assert async_cursor.lastrowid is None
+
+    # statement prepared: no rowid
+    async_cursor.prepare("insert into TestTempTable (IntCol) values (:1)")
+    assert async_cursor.lastrowid is None
+
+    # multiple rows inserted: rowid of last row inserted
+    rows = [(n,) for n in range(225)]
+    await async_cursor.executemany(None, rows)
+    rowid = async_cursor.lastrowid
+    await async_cursor.execute(
+        """
+        select rowid
+        from TestTempTable
+        where IntCol = :1
+        """,
+        rows[-1],
+    )
+    row = await async_cursor.fetchone()
+    assert row[0] == rowid
+
+    # statement executed but no rows updated: no rowid
+    await async_cursor.execute("delete from TestTempTable where 1 = 0")
+    assert async_cursor.lastrowid is None
+
+    # statement executed with one row updated: rowid of updated row
+    await async_cursor.execute(
+        """
+        update TestTempTable set StringCol1 = 'Modified'
+        where IntCol = :1
+        """,
+        rows[-2],
+    )
+    rowid = async_cursor.lastrowid
+    await async_cursor.execute(
+        "select rowid from TestTempTable where IntCol = :1",
+        rows[-2],
+    )
+    row = await async_cursor.fetchone()
+    assert row[0] == rowid
+
+    # statement executed with many rows updated: rowid of last updated row
+    await async_cursor.execute(
+        """
+        update TestTempTable set
+            StringCol1 = 'Row ' || to_char(IntCol)
+        where IntCol = :1
+        """,
+        rows[-3],
+    )
+    rowid = async_cursor.lastrowid
+    await async_cursor.execute(
+        "select StringCol1 from TestTempTable where rowid = :1",
+        [rowid],
+    )
+    row = await async_cursor.fetchone()
+    assert row[0] == "Row %s" % rows[-3]
+
+
+async def test_6315(async_conn, round_trip_checker_async):
+    "6315 - test prefetch rows"
+
+    # perform simple query and verify only one round trip is needed
+    with async_conn.cursor() as cursor:
+        await cursor.execute("select sysdate from dual")
+        await cursor.fetchall()
+    assert await round_trip_checker_async.get_value_async() == 1
+
+    # set prefetchrows to 1 and verify that two round trips are now needed
+    with async_conn.cursor() as cursor:
+        cursor.prefetchrows = 1
+        assert cursor.prefetchrows == 1
+        await cursor.execute("select sysdate from dual")
+        await cursor.fetchall()
+    assert await round_trip_checker_async.get_value_async() == 2
+
+    # simple DDL only requires a single round trip
+    with async_conn.cursor() as cursor:
+        await cursor.execute("truncate table TestTempTable")
+    assert await round_trip_checker_async.get_value_async() == 1
+
+    # array execution only requires a single round trip
+    num_rows = 590
+    with async_conn.cursor() as cursor:
+        data = [(n + 1,) for n in range(num_rows)]
+        await cursor.executemany(
+            "insert into TestTempTable (IntCol) values (:1)",
+            data,
+        )
+    assert await round_trip_checker_async.get_value_async() == 1
+
+    # setting prefetch and array size to 1 requires a round-trip for each
+    # row
+    with async_conn.cursor() as cursor:
+        cursor.arraysize = 1
+        cursor.prefetchrows = 1
+        assert cursor.prefetchrows == 1
+        await 
cursor.execute("select IntCol from TestTempTable") + await cursor.fetchall() + assert await round_trip_checker_async.get_value_async() == num_rows + 1 + + # setting prefetch and array size to 300 requires 2 round-trips + with async_conn.cursor() as cursor: + cursor.arraysize = 300 + cursor.prefetchrows = 300 + assert cursor.prefetchrows == 300 + await cursor.execute("select IntCol from TestTempTable") + await cursor.fetchall() + assert await round_trip_checker_async.get_value_async() == 2 + + +async def test_6316(async_conn, round_trip_checker_async): + "6316 - test prefetch rows using existing cursor" + + # Set prefetch rows on an existing cursor + num_rows = 590 + with async_conn.cursor() as cursor: + await cursor.execute("truncate table TestTempTable") + assert await round_trip_checker_async.get_value_async() == 1 + data = [(n + 1,) for n in range(num_rows)] + await cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", + data, ) + assert await round_trip_checker_async.get_value_async() == 1 + cursor.prefetchrows = 30 + cursor.arraysize = 100 + await cursor.execute("select IntCol from TestTempTable") + await cursor.fetchall() + assert await round_trip_checker_async.get_value_async() == 7 + + +async def test_6317(async_cursor): + "6317 - test parsing plsql statements" + sql = "begin :value := 5; end;" + await async_cursor.parse(sql) + assert async_cursor.statement == sql + assert async_cursor.description is None + + +async def test_6318(async_cursor): + "6318 - test parsing ddl statements" + sql = "truncate table TestTempTable" + await async_cursor.parse(sql) + assert async_cursor.statement == sql + assert async_cursor.description is None + + +async def test_6319(async_cursor): + "6319 - test parsing dml statements" + sql = "insert into TestTempTable (IntCol) values (1)" + await async_cursor.parse(sql) + assert async_cursor.statement == sql + assert async_cursor.description is None + + +async def test_6320(async_cursor): + "6320 - test binding by name with leading colon" + params = {":arg1": 5} + await async_cursor.execute("select :arg1 from dual", params) + (result,) = await async_cursor.fetchone() + assert result == params[":arg1"] + + +async def test_6321(async_cursor): + "6321 - test binding mixed null and not null values in a PL/SQL block" + out_vars = [async_cursor.var(str) for i in range(4)] + await async_cursor.execute( + """ + begin + :1 := null; + :2 := 'Value 1'; + :3 := null; + :4 := 'Value 2'; + end; + """, + out_vars, + ) + values = [var.getvalue() for var in out_vars] + assert values == [None, "Value 1", None, "Value 2"] + + +async def test_6322(async_conn, parse_count_checker_async): + "6322 - test excluding statement from statement cache" + num_iters = 10 + sql = "select user from dual" + + # with statement cache enabled, only one parse should take place + for i in range(num_iters): + with async_conn.cursor() as cursor: + await cursor.execute(sql) + assert await parse_count_checker_async.get_value_async() == 1 - async def test_6309(self): - "6309 - test binding boolean data without the use of PL/SQL" - await self.cursor.execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" - await self.cursor.execute(sql, (False, "Value should be 0")) - await self.cursor.execute(sql, (True, "Value should be 1")) - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - expected_value = [(0, "Value should be 0"), (1, "Value should be 1")] - self.assertEqual(await 
self.cursor.fetchall(), expected_value) - - async def test_6310(self): - "6310 - test using a cursor as a context manager" - with self.cursor as cursor: - await cursor.execute("truncate table TestTempTable") - await cursor.execute("select count(*) from TestTempTable") - (count,) = await cursor.fetchone() - self.assertEqual(count, 0) - with self.assertRaisesFullCode("DPY-1006"): - self.cursor.close() - - async def test_6311(self): - "6311 - test that rowcount attribute is reset to zero on query execute" - for num in [0, 1, 1, 0]: - await self.cursor.execute("select * from dual where 1 = :s", [num]) - await self.cursor.fetchone() - self.assertEqual(self.cursor.rowcount, num) - - async def test_6312(self): - "6312 - test that an object type can be used as type in cursor.var()" - obj_type = await self.conn.gettype("UDT_OBJECT") - var = self.cursor.var(obj_type) - await self.cursor.callproc( - "pkg_TestBindObject.BindObjectOut", (28, "Bind obj out", var) - ) - obj = var.getvalue() - result = await self.cursor.callfunc( - "pkg_TestBindObject.GetStringRep", str, (obj,) - ) - exp = "udt_Object(28, 'Bind obj out', null, null, null, null, null)" - self.assertEqual(result, exp) - - async def test_6313(self): - "6313 - test that fetching an XMLType returns a string" - int_val = 5 - label = "IntCol" - expected_result = f"<{label}>{int_val}" - await self.cursor.execute( - f""" - select XMLElement("{label}", IntCol) - from TestStrings - where IntCol = :int_val - """, - int_val=int_val, - ) - (result,) = await self.cursor.fetchone() - self.assertEqual(result, expected_result) + # with statement cache disabled for the statement, parse count should + # be the same as the number of iterations + for i in range(num_iters): + with async_conn.cursor() as cursor: + cursor.prepare(sql, cache_statement=False) + await cursor.execute(None) + assert await parse_count_checker_async.get_value_async() == num_iters - 1 - async def test_6314(self): - "6314 - test last rowid" - # no statement executed: no rowid - self.assertIsNone(self.cursor.lastrowid) +async def test_6323(async_cursor): + "6323 - test repeated DDL" + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.execute("insert into TestTempTable (IntCol) values (1)") + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.execute("insert into TestTempTable (IntCol) values (1)") - # DDL statement executed: no rowid - await self.cursor.execute("truncate table TestTempTable") - self.assertIsNone(self.cursor.lastrowid) - # statement prepared: no rowid - self.cursor.prepare("insert into TestTempTable (IntCol) values (:1)") - self.assertIsNone(self.cursor.lastrowid) +async def test_6324(async_cursor): + "6324 - test executing SQL with non-ASCII characters" + await async_cursor.execute("select 'FÖÖ' from dual") + (result,) = await async_cursor.fetchone() + assert result in ("FÖÖ", "F¿¿") - # multiple rows inserted: rowid of last row inserted - rows = [(n,) for n in range(225)] - await self.cursor.executemany(None, rows) - rowid = self.cursor.lastrowid - await self.cursor.execute( - """ - select rowid - from TestTempTable - where IntCol = :1 - """, - rows[-1], - ) - row = await self.cursor.fetchone() - self.assertEqual(row[0], rowid) - # statement executed but no rows updated: no rowid - await self.cursor.execute("delete from TestTempTable where 1 = 0") - self.assertIsNone(self.cursor.lastrowid) +async def test_6325(async_cursor): + "6325 - test case sensitivity of unquoted bind names" + await 
async_cursor.execute("select :test from dual", {"TEST": "a"}) + (result,) = await async_cursor.fetchone() + assert result == "a" - # stetement executed with one row updated: rowid of updated row - await self.cursor.execute( - """ - update TestTempTable set StringCol1 = 'Modified' - where IntCol = :1 - """, - rows[-2], - ) - rowid = self.cursor.lastrowid - await self.cursor.execute( - "select rowid from TestTempTable where IntCol = :1", - rows[-2], - ) - row = await self.cursor.fetchone() - self.assertEqual(row[0], rowid) - # statement executed with many rows updated: rowid of last updated row - await self.cursor.execute( - """ - update TestTempTable set - StringCol1 = 'Row ' || to_char(IntCol) - where IntCol = :1 - """, - rows[-3], - ) - rowid = self.cursor.lastrowid - await self.cursor.execute( - "select StringCol1 from TestTempTable where rowid = :1", - [rowid], - ) - row = await self.cursor.fetchone() - self.assertEqual(row[0], "Row %s" % rows[-3]) - - async def test_6315(self): - "6315 - test prefetch rows" - await self.setup_round_trip_checker() - - # perform simple query and verify only one round trip is needed - with self.conn.cursor() as cursor: - await cursor.execute("select sysdate from dual") - await cursor.fetchall() - await self.assertRoundTrips(1) - - # set prefetchrows to 1 and verify that two round trips are now needed - with self.conn.cursor() as cursor: - cursor.prefetchrows = 1 - self.assertEqual(cursor.prefetchrows, 1) - await cursor.execute("select sysdate from dual") - await cursor.fetchall() - await self.assertRoundTrips(2) - - # simple DDL only requires a single round trip - with self.conn.cursor() as cursor: - await cursor.execute("truncate table TestTempTable") - await self.assertRoundTrips(1) - - # array execution only requires a single round trip - num_rows = 590 - with self.conn.cursor() as cursor: - data = [(n + 1,) for n in range(num_rows)] - await cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", - data, - ) - await self.assertRoundTrips(1) +async def test_6326(async_cursor, test_env): + "6326 - test case sensitivity of quoted bind names" + with test_env.assert_raises_full_code("ORA-01036", "DPY-4008"): + await async_cursor.execute('select :"test" from dual', {'"TEST"': "a"}) - # setting prefetch and array size to 1 requires a round-trip for each - # row - with self.conn.cursor() as cursor: - cursor.arraysize = 1 - cursor.prefetchrows = 1 - self.assertEqual(cursor.prefetchrows, 1) - await cursor.execute("select IntCol from TestTempTable") - await cursor.fetchall() - await self.assertRoundTrips(num_rows + 1) - - # setting prefetch and array size to 300 requires 2 round-trips - with self.conn.cursor() as cursor: - cursor.arraysize = 300 - cursor.prefetchrows = 300 - self.assertEqual(cursor.prefetchrows, 300) - await cursor.execute("select IntCol from TestTempTable") - await cursor.fetchall() - await self.assertRoundTrips(2) - - async def test_6316(self): - "6316 - test prefetch rows using existing cursor" - await self.setup_round_trip_checker() - - # Set prefetch rows on an existing cursor - num_rows = 590 - with self.conn.cursor() as cursor: - await cursor.execute("truncate table TestTempTable") - await self.assertRoundTrips(1) - data = [(n + 1,) for n in range(num_rows)] - await cursor.executemany( - "insert into TestTempTable (IntCol) values (:1)", - data, - ) - await self.assertRoundTrips(1) - cursor.prefetchrows = 30 - cursor.arraysize = 100 - await cursor.execute("select IntCol from TestTempTable") - await cursor.fetchall() - await 
self.assertRoundTrips(7) - - async def test_6317(self): - "6317 - test parsing plsql statements" - sql = "begin :value := 5; end;" - await self.cursor.parse(sql) - self.assertEqual(self.cursor.statement, sql) - self.assertIsNone(self.cursor.description) - - async def test_6318(self): - "6318 - test parsing ddl statements" - sql = "truncate table TestTempTable" - await self.cursor.parse(sql) - self.assertEqual(self.cursor.statement, sql) - self.assertIsNone(self.cursor.description) - - async def test_6319(self): - "6319 - test parsing dml statements" - sql = "insert into TestTempTable (IntCol) values (1)" - await self.cursor.parse(sql) - self.assertEqual(self.cursor.statement, sql) - self.assertIsNone(self.cursor.description) - - async def test_6320(self): - "6320 - test binding by name with leading colon" - params = {":arg1": 5} - await self.cursor.execute("select :arg1 from dual", params) - (result,) = await self.cursor.fetchone() - self.assertEqual(result, params[":arg1"]) - - async def test_6321(self): - "6321 - test binding mixed null and not null values in a PL/SQL block" - out_vars = [self.cursor.var(str) for i in range(4)] - await self.cursor.execute( - """ - begin - :1 := null; - :2 := 'Value 1'; - :3 := null; - :4 := 'Value 2'; - end; - """, - out_vars, - ) - values = [var.getvalue() for var in out_vars] - self.assertEqual(values, [None, "Value 1", None, "Value 2"]) - - async def test_6322(self): - "6322 - test excluding statement from statement cache" - num_iters = 10 - sql = "select user from dual" - await self.setup_parse_count_checker() - - # with statement cache enabled, only one parse should take place - for i in range(num_iters): - with self.conn.cursor() as cursor: - await cursor.execute(sql) - await self.assertParseCount(1) - - # with statement cache disabled for the statement, parse count should - # be the same as the number of iterations - for i in range(num_iters): - with self.conn.cursor() as cursor: - cursor.prepare(sql, cache_statement=False) - await cursor.execute(None) - await self.assertParseCount(num_iters - 1) - - async def test_6323(self): - "6323 - test repeated DDL" - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.execute( - "insert into TestTempTable (IntCol) values (1)" - ) - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.execute( - "insert into TestTempTable (IntCol) values (1)" - ) - async def test_6324(self): - "6324 - test executing SQL with non-ASCII characters" - await self.cursor.execute("select 'FÖÖ' from dual") - (result,) = await self.cursor.fetchone() - self.assertIn(result, ("FÖÖ", "F¿¿")) - - async def test_6325(self): - "6325 - test case sensitivity of unquoted bind names" - await self.cursor.execute("select :test from dual", {"TEST": "a"}) - (result,) = await self.cursor.fetchone() - self.assertEqual(result, "a") - - async def test_6326(self): - "6326 - test case sensitivity of quoted bind names" - with self.assertRaisesFullCode("ORA-01036", "DPY-4008"): - await self.cursor.execute( - 'select :"test" from dual', {'"TEST"': "a"} - ) +async def test_6327(async_cursor, test_env): + "6327 - test using a reserved keywords as a bind name" + sql = "select :ROWID from dual" + with test_env.assert_raises_full_code("ORA-01745"): + await async_cursor.parse(sql) - async def test_6327(self): - "6327 - test using a reserved keywords as a bind name" - sql = "select :ROWID from dual" - with self.assertRaisesFullCode("ORA-01745"): - await self.cursor.parse(sql) - - async def test_6328(self): - 
"6328 - test array size less than prefetch rows" - for i in range(2): - with self.conn.cursor() as cursor: - cursor.arraysize = 1 - await cursor.execute( - "select 1 from dual union select 2 from dual" - ) - self.assertEqual(await cursor.fetchall(), [(1,), (2,)]) - - async def test_6329(self): - "6329 - test re-executing a query with blob as bytes" - - def type_handler(cursor, metadata): - if metadata.type_code is oracledb.DB_TYPE_BLOB: - return cursor.var(bytes, arraysize=cursor.arraysize) - - self.conn.outputtypehandler = type_handler - blob_data = b"An arbitrary set of blob data for test case 4348" - await self.cursor.execute("truncate table TestBLOBs") - await self.cursor.execute( - "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", - [blob_data], - ) - await self.cursor.execute("select IntCol, BlobCol from TestBLOBs") - self.assertEqual(await self.cursor.fetchall(), [(1, blob_data)]) - await self.cursor.execute("truncate table TestBLOBs") - await self.cursor.execute( - "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", - [blob_data], - ) - await self.cursor.execute("select IntCol, BlobCol from TestBLOBs") - self.assertEqual(await self.cursor.fetchall(), [(1, blob_data)]) - - async def test_6330(self): - "6330 - test re-executing a statement after raising an error" - sql = "select * from TestFakeTable" - with self.assertRaisesFullCode("ORA-00942"): - await self.cursor.execute(sql) - with self.assertRaisesFullCode("ORA-00942"): - await self.cursor.execute(sql) - - sql = "insert into TestStrings (StringCol) values (NULL)" - with self.assertRaisesFullCode("ORA-01400"): - await self.cursor.execute(sql) - with self.assertRaisesFullCode("ORA-01400"): - await self.cursor.execute(sql) - - async def test_6331(self): - "6331 - test executing a statement that raises ORA-01007" - with self.conn.cursor() as cursor: - await cursor.execute( - """ - create or replace view ora_1007 as - select 1 as SampleNumber, 'String' as SampleString, - 'Another String' as AnotherString - from dual - """ - ) - with self.conn.cursor() as cursor: - await cursor.execute("select * from ora_1007") - self.assertEqual( - await cursor.fetchone(), (1, "String", "Another String") - ) - with self.conn.cursor() as cursor: - await cursor.execute( - """ - create or replace view ora_1007 as - select 1 as SampleNumber, - 'Another String' as AnotherString - from dual - """ - ) - with self.conn.cursor() as cursor: - await cursor.execute("select * from ora_1007") - self.assertEqual(await cursor.fetchone(), (1, "Another String")) - - async def test_6332(self): - "6332 - test updating an empty row" - int_var = self.cursor.var(int) - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.execute( +async def test_6328(async_conn): + "6328 - test array size less than prefetch rows" + for i in range(2): + with async_conn.cursor() as cursor: + cursor.arraysize = 1 + await cursor.execute("select 1 from dual union select 2 from dual") + assert await cursor.fetchall() == [(1,), (2,)] + + +async def test_6329(async_conn, async_cursor): + "6329 - test re-executing a query with blob as bytes" + + def type_handler(cursor, metadata): + if metadata.type_code is oracledb.DB_TYPE_BLOB: + return cursor.var(bytes, arraysize=cursor.arraysize) + + async_conn.outputtypehandler = type_handler + blob_data = b"An arbitrary set of blob data for test case 4348" + await async_cursor.execute("truncate table TestBLOBs") + await async_cursor.execute( + "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", + 
[blob_data], + ) + await async_cursor.execute("select IntCol, BlobCol from TestBLOBs") + assert await async_cursor.fetchall() == [(1, blob_data)] + + await async_cursor.execute("truncate table TestBLOBs") + await async_cursor.execute( + "insert into TestBLOBs (IntCol, BlobCol) values (1, :data)", + [blob_data], + ) + await async_cursor.execute("select IntCol, BlobCol from TestBLOBs") + assert await async_cursor.fetchall() == [(1, blob_data)] + + +async def test_6330(async_cursor, test_env): + "6330 - test re-executing a statement after raising an error" + sql = "select * from TestFakeTable" + with test_env.assert_raises_full_code("ORA-00942"): + await async_cursor.execute(sql) + with test_env.assert_raises_full_code("ORA-00942"): + await async_cursor.execute(sql) + + sql = "insert into TestStrings (StringCol) values (NULL)" + with test_env.assert_raises_full_code("ORA-01400"): + await async_cursor.execute(sql) + with test_env.assert_raises_full_code("ORA-01400"): + await async_cursor.execute(sql) + + +async def test_6331(async_conn): + "6331 - test executing a statement that raises ORA-01007" + with async_conn.cursor() as cursor: + await cursor.execute( + """ + create or replace view ora_1007 as + select 1 as SampleNumber, 'String' as SampleString, + 'Another String' as AnotherString + from dual """ - begin - update TestTempTable set IntCol = :1 - where StringCol1 = :2 - returning IntCol into :3; - end; - """, - [1, "test string 4352", int_var], ) - self.assertEqual(int_var.values, [None]) - - async def test_6333(self): - "6333 - fetch duplicate data from query in statement cache" - sql = """ - select 'A', 'B', 'C' from dual - union all - select 'A', 'B', 'C' from dual - union all - select 'A', 'B', 'C' from dual""" - expected_data = [("A", "B", "C")] * 3 - with self.conn.cursor() as cursor: - cursor.prefetchrows = 0 - await cursor.execute(sql) - self.assertEqual(await cursor.fetchall(), expected_data) - with self.conn.cursor() as cursor: - cursor.prefetchrows = 0 - await cursor.execute(sql) - self.assertEqual(await cursor.fetchall(), expected_data) - - async def test_6334(self): - "6334 - fetch duplicate data with outconverter" - - def out_converter(value): - self.assertIs(type(value), str) - return int(value) - - def type_handler(cursor, metadata): - if metadata.name == "COL_3": - return cursor.var( - str, arraysize=cursor.arraysize, outconverter=out_converter - ) - - self.cursor.outputtypehandler = type_handler - await self.cursor.execute( + with async_conn.cursor() as cursor: + await cursor.execute("select * from ora_1007") + assert await cursor.fetchone() == (1, "String", "Another String") + with async_conn.cursor() as cursor: + await cursor.execute( """ - select 'A' as col_1, 2 as col_2, 3 as col_3 from dual - union all - select 'A' as col_1, 2 as col_2, 3 as col_3 from dual - union all - select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + create or replace view ora_1007 as + select 1 as SampleNumber, + 'Another String' as AnotherString + from dual """ ) - expected_data = [("A", 2, 3)] * 3 - self.assertEqual(await self.cursor.fetchall(), expected_data) - - @test_env.skip_if_drcp() - async def test_6335(self): - "6335 - kill connection with open cursor" - admin_conn = await test_env.get_admin_connection_async() - conn = await test_env.get_connection_async() - self.assertEqual(conn.is_healthy(), True) - sid, serial = await self.get_sid_serial(conn) - with admin_conn.cursor() as admin_cursor: - sql = f"alter system kill session '{sid},{serial}'" - await admin_cursor.execute(sql) - 
with self.assertRaisesFullCode("DPY-4011"): - with conn.cursor() as cursor: - await cursor.execute("select user from dual") - self.assertFalse(conn.is_healthy()) - - @test_env.skip_if_drcp() - async def test_6336(self): - "6336 - kill connection in cursor context manager" - admin_conn = await test_env.get_admin_connection_async() - conn = await test_env.get_connection_async() - self.assertEqual(conn.is_healthy(), True) - sid, serial = await self.get_sid_serial(conn) - with admin_conn.cursor() as admin_cursor: - await admin_cursor.execute( - f"alter system kill session '{sid},{serial}'" + with async_conn.cursor() as cursor: + await cursor.execute("select * from ora_1007") + assert await cursor.fetchone() == (1, "Another String") + + +async def test_6332(async_cursor): + "6332 - test updating an empty row" + int_var = async_cursor.var(int) + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.execute( + """ + begin + update TestTempTable set IntCol = :1 + where StringCol1 = :2 + returning IntCol into :3; + end; + """, + [1, "test string 4352", int_var], + ) + assert int_var.values == [None] + + +async def test_6333(async_conn): + "6333 - fetch duplicate data from query in statement cache" + sql = """ + select 'A', 'B', 'C' from dual + union all + select 'A', 'B', 'C' from dual + union all + select 'A', 'B', 'C' from dual""" + expected_data = [("A", "B", "C")] * 3 + with async_conn.cursor() as cursor: + cursor.prefetchrows = 0 + await cursor.execute(sql) + assert await cursor.fetchall() == expected_data + with async_conn.cursor() as cursor: + cursor.prefetchrows = 0 + await cursor.execute(sql) + assert await cursor.fetchall() == expected_data + + +async def test_6334(async_cursor): + "6334 - fetch duplicate data with outconverter" + + def out_converter(value): + assert isinstance(value, str) + return int(value) + + def type_handler(cursor, metadata): + if metadata.name == "COL_3": + return cursor.var( + str, arraysize=cursor.arraysize, outconverter=out_converter ) - with self.assertRaisesFullCode("DPY-4011"): - with conn.cursor() as cursor: - await cursor.execute("select user from dual") - self.assertEqual(conn.is_healthy(), False) - - async def test_6337(self): - "6337 - fetchmany() with and without parameters" - sql_part = "select user from dual" - sql = " union all ".join([sql_part] * 10) - with self.conn.cursor() as cursor: - cursor.arraysize = 6 - await cursor.execute(sql) - rows = await cursor.fetchmany() - self.assertEqual(len(rows), cursor.arraysize) - await cursor.execute(sql) - rows = await cursor.fetchmany(size=2) - self.assertEqual(len(rows), 2) - await cursor.execute(sql) - async def test_6338(self): - "6338 - access cursor.rowcount after closing cursor" - with self.conn.cursor() as cursor: + async_cursor.outputtypehandler = type_handler + await async_cursor.execute( + """ + select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + union all + select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + union all + select 'A' as col_1, 2 as col_2, 3 as col_3 from dual + """ + ) + expected_data = [("A", 2, 3)] * 3 + assert await async_cursor.fetchall() == expected_data + + +async def test_6335(skip_if_drcp, test_env): + "6335 - kill connection with open cursor" + admin_conn = await test_env.get_admin_connection_async() + conn = await test_env.get_connection_async() + assert conn.is_healthy() + sid, serial = (conn.session_id, conn.serial_num) + with admin_conn.cursor() as admin_cursor: + sql = f"alter system kill session '{sid},{serial}'" + await 
admin_cursor.execute(sql) + with test_env.assert_raises_full_code("DPY-4011"): + with conn.cursor() as cursor: await cursor.execute("select user from dual") - await cursor.fetchall() - self.assertEqual(cursor.rowcount, 1) - self.assertEqual(cursor.rowcount, -1) - - async def test_6339(self): - "6339 - changing bind type with define needed" - await self.cursor.execute("truncate table TestClobs") - row_for_1 = (1, "Short value 1") - row_for_56 = (56, "Short value 56") - for data in (row_for_1, row_for_56): - await self.cursor.execute( - "insert into TestClobs (IntCol, ClobCol) values (:1, :2)", - data, - ) - sql = "select IntCol, ClobCol from TestClobs where IntCol = :int_col" - with test_env.DefaultsContextManager("fetch_lobs", False): - await self.cursor.execute(sql, int_col="1") - self.assertEqual(await self.cursor.fetchone(), row_for_1) - await self.cursor.execute(sql, int_col="56") - self.assertEqual(await self.cursor.fetchone(), row_for_56) - await self.cursor.execute(sql, int_col=1) - self.assertEqual(await self.cursor.fetchone(), row_for_1) - - async def test_6340(self): - "6340 - test calling cursor.parse() twice with the same statement" - await self.cursor.execute("truncate table TestTempTable") - data = (4363, "Value for test 4363") - await self.cursor.execute( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - data, - ) - sql = "update TestTempTable set StringCol1 = :v where IntCol = :i" - for i in range(2): - await self.cursor.parse(sql) - await self.cursor.execute(sql, ("Updated value", data[0])) - - async def test_6341(self): - "6341 - test addition of column to cached query" - table_name = "test_4365" - try: - await self.cursor.execute(f"drop table {table_name}") - except oracledb.DatabaseError: - pass - data = ("val 1", "val 2") - await self.cursor.execute( - f"create table {table_name} (col1 varchar2(10))" - ) - await self.cursor.execute( - f"insert into {table_name} values (:1)", [data[0]] - ) - await self.conn.commit() - await self.cursor.execute(f"select * from {table_name}") - self.assertEqual(await self.cursor.fetchall(), [(data[0],)]) - await self.cursor.execute( - f"alter table {table_name} add col2 varchar2(10)" - ) - await self.cursor.execute( - f"update {table_name} set col2 = :1", [data[1]] - ) - await self.conn.commit() - await self.cursor.execute(f"select * from {table_name}") - self.assertEqual(await self.cursor.fetchall(), [data]) - - async def test_6342(self): - "6342 - test executemany() with PL/SQL and increasing data lengths" - sql = "begin :1 := length(:2); end;" - var = self.cursor.var(int, arraysize=3) - await self.cursor.executemany( - sql, [(var, "one"), (var, "two"), (var, "end")] - ) - self.assertEqual(var.values, [3, 3, 3]) - await self.cursor.executemany( - sql, [(var, "three"), (var, "four"), (var, "end")] - ) - self.assertEqual(var.values, [5, 4, 3]) - await self.cursor.executemany( - sql, [(var, "five"), (var, "six"), (var, "end")] - ) - self.assertEqual(var.values, [4, 3, 3]) - - async def test_6343(self): - "6343 - test cursor.rowcount values for queries" - max_rows = 93 - self.cursor.arraysize = 10 - await self.cursor.execute( - "select rownum as id from dual connect by rownum <= :1", - [max_rows], - ) - self.assertEqual(self.cursor.rowcount, 0) - batch_num = 1 - while True: - rows = await self.cursor.fetchmany() - if not rows: - break - expected_value = min(max_rows, batch_num * self.cursor.arraysize) - self.assertEqual(self.cursor.rowcount, expected_value) - batch_num += 1 - await self.cursor.fetchall() - 
self.assertEqual(self.cursor.rowcount, max_rows) - - async def test_6344(self): - "6344 - test bind order for PL/SQL" - await self.cursor.execute("truncate table TestClobs") - sql = """ - insert into TestClobs (IntCol, CLOBCol, ExtraNumCol1) - values (:1, :2, :3)""" - data = "x" * 9000 - rows = [(1, data, 5), (2, data, 6)] - await self.cursor.execute(sql, rows[0]) - plsql = f"begin {sql}; end;" - await self.cursor.execute(plsql, rows[1]) - await self.conn.commit() - with test_env.DefaultsContextManager("fetch_lobs", False): - await self.cursor.execute( - """ - select IntCol, CLOBCol, ExtraNumCol1 - from TestCLOBs - order by IntCol - """ - ) - self.assertEqual(await self.cursor.fetchall(), rows) - - async def test_6345(self): - "6345 - test rebuild of table with LOB in cached query (as string)" - table_name = "test_4370" - drop_sql = f"drop table {table_name} purge" - create_sql = f""" - create table {table_name} ( - Col1 number(9) not null, - Col2 clob not null - )""" - insert_sql = f"insert into {table_name} values (:1, :2)" - query_sql = f"select * from {table_name} order by Col1" - data = [(1, "CLOB value 1"), (2, "CLOB value 2")] - try: - await self.cursor.execute(drop_sql) - except oracledb.DatabaseError: - pass - with test_env.DefaultsContextManager("fetch_lobs", False): - await self.cursor.execute(create_sql) - await self.cursor.executemany(insert_sql, data) - await self.cursor.execute(query_sql) - self.assertEqual(await self.cursor.fetchall(), data) - await self.cursor.execute(query_sql) - self.assertEqual(await self.cursor.fetchall(), data) - await self.cursor.execute(drop_sql) - await self.cursor.execute(create_sql) - await self.cursor.executemany(insert_sql, data) - await self.cursor.execute(query_sql) - self.assertEqual(await self.cursor.fetchall(), data) - - async def test_6346(self): - "6346 - test rebuild of table with LOB in cached query (as LOB)" - table_name = "test_4371" - drop_sql = f"drop table {table_name} purge" - create_sql = f""" - create table {table_name} ( - Col1 number(9) not null, - Col2 clob not null)""" - insert_sql = f"insert into {table_name} values (:1, :2)" - query_sql = f"select * from {table_name} order by Col1" - data = [(1, "CLOB value 1"), (2, "CLOB value 2")] - try: - await self.cursor.execute(drop_sql) - except oracledb.DatabaseError: - pass - await self.cursor.execute(create_sql) - await self.cursor.executemany(insert_sql, data) - await self.cursor.execute(query_sql) - fetched_data = [(n, await c.read()) async for n, c in self.cursor] - self.assertEqual(fetched_data, data) - await self.cursor.execute(query_sql) - fetched_data = [(n, await c.read()) async for n, c in self.cursor] - self.assertEqual(fetched_data, data) - await self.cursor.execute(drop_sql) - await self.cursor.execute(create_sql) - await self.cursor.executemany(insert_sql, data) - await self.cursor.execute(query_sql) - fetched_data = [(n, await c.read()) async for n, c in self.cursor] - self.assertEqual(fetched_data, data) - - @test_env.skip_unless_domains_supported() - async def test_6347(self): - "6347 - fetch table with domain and annotations" - await self.cursor.execute( - "select * from TableWithDomainAndAnnotations" - ) - self.assertEqual(await self.cursor.fetchall(), [(1, 25)]) - column_1 = self.cursor.description[0] - self.assertIsNone(column_1.domain_schema) - self.assertIsNone(column_1.domain_name) - self.assertIsNone(column_1.annotations) - column_2 = self.cursor.description[1] - self.assertEqual( - column_2.domain_schema, test_env.get_main_user().upper() - ) - 
self.assertEqual(column_2.domain_name, "SIMPLEDOMAIN") - expected_annotations = { - "ANNO_1": "first annotation", - "ANNO_2": "second annotation", - "ANNO_3": "", - } - self.assertEqual(column_2.annotations, expected_annotations) - - async def test_6348(self): - "6348 - test fetching LOBs after an error" - sql = """ - select - to_clob(:val), - 1 / (dbms_lob.getlength(to_clob(:val)) - 1) - from dual""" - with self.assertRaisesFullCode("ORA-01476"): - await self.cursor.execute(sql, val="a") - await self.cursor.execute(sql, val="bb") - lob, num_val = await self.cursor.fetchone() - self.assertEqual(await lob.read(), "bb") - self.assertEqual(num_val, 1) - - async def test_6349(self): - "6349 - test parse() with autocommit enabled" - async with test_env.get_connection_async() as conn: - conn.autocommit = True - cursor = conn.cursor() - await cursor.execute("truncate table TestTempTable") - await cursor.parse( - "insert into TestTempTable (IntCol) values (:1)" - ) - await cursor.execute(None, [1]) - - async def test_6350(self): - "6350 - test cursor.setinputsizes() with early failed execute" - self.cursor.setinputsizes(a=int, b=str) - with self.assertRaisesFullCode("DPY-2006"): - await self.cursor.execute("select :c from dual", [5]) - value = 4368 - await self.cursor.execute("select :d from dual", [value]) - (fetched_value,) = await self.cursor.fetchone() - self.assertEqual(fetched_value, value) - - async def test_6351(self): - "6351 - fetch JSON columns as Python objects" - expected_data = [ - (1, [1, 2, 3], [4, 5, 6], [7, 8, 9]), - (2, None, None, None), - ] - await self.cursor.execute("select * from TestJsonCols order by IntCol") - self.assertEqual(await self.cursor.fetchall(), expected_data) - - async def test_6352(self): - "6352 - test fetching nested cursors repeatedly" - sql = """ - select - s.Description, - cursor(select 'Nested String for ' || s.Description from dual) - from - ( - select 'Top Level String 1' as Description - from dual - union all - select 'Top Level String 2' - from dual - union all - select 'Top Level String 3' - from dual - union all - select 'Top Level String 4' - from dual - union all - select 'Top Level String 5' - from dual - ) s""" - - for i in range(3): - with self.conn.cursor() as cursor: - cursor.arraysize = 10 - await cursor.execute(sql) - desc, nested1 = await cursor.fetchone() - self.assertEqual(desc, "Top Level String 1") - nested_rows = await nested1.fetchall() - self.assertEqual( - nested_rows, [("Nested String for Top Level String 1",)] - ) - desc, nested2 = await cursor.fetchone() - self.assertEqual(desc, "Top Level String 2") - nested_rows = await nested2.fetchall() - self.assertEqual( - nested_rows, [("Nested String for Top Level String 2",)] - ) - - async def test_6353(self): - "6353 - access cursor.rowcount after closing connection" - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - self.assertEqual(cursor.rowcount, -1) - - async def test_6354(self): - "6354 - execute PL/SQL with out vars after query with duplicate data" - await self.cursor.execute("truncate table TestTempTable") - await self.cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - [(i + 1, "test_4370") for i in range(20)], - ) - await self.conn.commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" + assert not conn.is_healthy() + + +async def test_6336(skip_if_drcp, test_env): + "6336 - kill connection in cursor context manager" + admin_conn = await test_env.get_admin_connection_async() 
+ conn = await test_env.get_connection_async() + assert conn.is_healthy() + sid, serial = (conn.session_id, conn.serial_num) + with admin_conn.cursor() as admin_cursor: + await admin_cursor.execute( + f"alter system kill session '{sid},{serial}'" ) - var = self.cursor.var(int) - await self.cursor.execute("begin :1 := 4370; end;", [var]) - self.assertEqual(var.getvalue(), 4370) - - async def test_6355(self): - "6355 - test cursor with fetch_decimals=True specified" - value = 4371 - await self.cursor.execute( - "select :1 from dual", [value], fetch_decimals=True + with test_env.assert_raises_full_code("DPY-4011"): + with conn.cursor() as cursor: + await cursor.execute("select user from dual") + assert not conn.is_healthy() + + +async def test_6337(async_conn): + "6337 - fetchmany() with and without parameters" + sql_part = "select user from dual" + sql = " union all ".join([sql_part] * 10) + with async_conn.cursor() as cursor: + cursor.arraysize = 6 + await cursor.execute(sql) + rows = await cursor.fetchmany() + assert len(rows) == cursor.arraysize + await cursor.execute(sql) + rows = await cursor.fetchmany(size=2) + assert len(rows) == 2 + await cursor.execute(sql) + + +async def test_6338(async_conn): + "6338 - access cursor.rowcount after closing cursor" + with async_conn.cursor() as cursor: + await cursor.execute("select user from dual") + await cursor.fetchall() + assert cursor.rowcount == 1 + assert cursor.rowcount == -1 + + +async def test_6339(disable_fetch_lobs, async_cursor): + "6339 - changing bind type with define needed" + await async_cursor.execute("truncate table TestClobs") + row_for_1 = (1, "Short value 1") + row_for_56 = (56, "Short value 56") + for data in (row_for_1, row_for_56): + await async_cursor.execute( + "insert into TestClobs (IntCol, ClobCol) values (:1, :2)", + data, ) - rows = await self.cursor.fetchall() - self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) - + sql = "select IntCol, ClobCol from TestClobs where IntCol = :int_col" + await async_cursor.execute(sql, int_col="1") + assert await async_cursor.fetchone() == row_for_1 + await async_cursor.execute(sql, int_col="56") + assert await async_cursor.fetchone() == row_for_56 + await async_cursor.execute(sql, int_col=1) + assert await async_cursor.fetchone() == row_for_1 + + +async def test_6340(async_cursor): + "6340 - test calling cursor.parse() twice with the same statement" + await async_cursor.execute("truncate table TestTempTable") + data = (4363, "Value for test 4363") + await async_cursor.execute( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + data, + ) + sql = "update TestTempTable set StringCol1 = :v where IntCol = :i" + for i in range(2): + await async_cursor.parse(sql) + await async_cursor.execute(sql, ("Updated value", data[0])) + + +async def test_6341(async_conn, async_cursor): + "6341 - test addition of column to cached query" + table_name = "test_4365" + try: + await async_cursor.execute(f"drop table {table_name}") + except oracledb.DatabaseError: + pass + data = ("val 1", "val 2") + await async_cursor.execute( + f"create table {table_name} (col1 varchar2(10))" + ) + await async_cursor.execute( + f"insert into {table_name} values (:1)", [data[0]] + ) + await async_conn.commit() + await async_cursor.execute(f"select * from {table_name}") + assert await async_cursor.fetchall() == [(data[0],)] + await async_cursor.execute( + f"alter table {table_name} add col2 varchar2(10)" + ) + await async_cursor.execute(f"update {table_name} set col2 = :1", [data[1]]) + await 
async_conn.commit() + await async_cursor.execute(f"select * from {table_name}") + assert await async_cursor.fetchall() == [data] + + +async def test_6342(async_cursor): + "6342 - test executemany() with PL/SQL and increasing data lengths" + sql = "begin :1 := length(:2); end;" + var = async_cursor.var(int, arraysize=3) + await async_cursor.executemany( + sql, [(var, "one"), (var, "two"), (var, "end")] + ) + assert var.values == [3, 3, 3] + await async_cursor.executemany( + sql, [(var, "three"), (var, "four"), (var, "end")] + ) + assert var.values == [5, 4, 3] + await async_cursor.executemany( + sql, [(var, "five"), (var, "six"), (var, "end")] + ) + assert var.values == [4, 3, 3] + + +async def test_6343(async_cursor): + "6343 - test cursor.rowcount values for queries" + max_rows = 93 + async_cursor.arraysize = 10 + await async_cursor.execute( + "select rownum as id from dual connect by rownum <= :1", + [max_rows], + ) + assert async_cursor.rowcount == 0 + batch_num = 1 + while True: + rows = await async_cursor.fetchmany() + if not rows: + break + expected_value = min(max_rows, batch_num * async_cursor.arraysize) + assert async_cursor.rowcount == expected_value + batch_num += 1 + await async_cursor.fetchall() + assert async_cursor.rowcount == max_rows + + +async def test_6344(disable_fetch_lobs, async_conn, async_cursor): + "6344 - test bind order for PL/SQL" + await async_cursor.execute("truncate table TestClobs") + sql = """ + insert into TestClobs (IntCol, CLOBCol, ExtraNumCol1) + values (:1, :2, :3)""" + data = "x" * 9000 + rows = [(1, data, 5), (2, data, 6)] + await async_cursor.execute(sql, rows[0]) + plsql = f"begin {sql}; end;" + await async_cursor.execute(plsql, rows[1]) + await async_conn.commit() + await async_cursor.execute( + """ + select IntCol, CLOBCol, ExtraNumCol1 + from TestCLOBs + order by IntCol + """ + ) + assert await async_cursor.fetchall() == rows + + +async def test_6345(disable_fetch_lobs, async_cursor): + "6345 - test rebuild of table with LOB in cached query (as string)" + table_name = "test_4370" + drop_sql = f"drop table {table_name} purge" + create_sql = f""" + create table {table_name} ( + Col1 number(9) not null, + Col2 clob not null + )""" + insert_sql = f"insert into {table_name} values (:1, :2)" + query_sql = f"select * from {table_name} order by Col1" + data = [(1, "CLOB value 1"), (2, "CLOB value 2")] + try: + await async_cursor.execute(drop_sql) + except oracledb.DatabaseError: + pass + await async_cursor.execute(create_sql) + await async_cursor.executemany(insert_sql, data) + await async_cursor.execute(query_sql) + assert await async_cursor.fetchall() == data + await async_cursor.execute(query_sql) + assert await async_cursor.fetchall() == data + await async_cursor.execute(drop_sql) + await async_cursor.execute(create_sql) + await async_cursor.executemany(insert_sql, data) + await async_cursor.execute(query_sql) + assert await async_cursor.fetchall() == data + + +async def test_6346(async_cursor): + "6346 - test rebuild of table with LOB in cached query (as LOB)" + table_name = "test_4371" + drop_sql = f"drop table {table_name} purge" + create_sql = f""" + create table {table_name} ( + Col1 number(9) not null, + Col2 clob not null)""" + insert_sql = f"insert into {table_name} values (:1, :2)" + query_sql = f"select * from {table_name} order by Col1" + data = [(1, "CLOB value 1"), (2, "CLOB value 2")] + try: + await async_cursor.execute(drop_sql) + except oracledb.DatabaseError: + pass + await async_cursor.execute(create_sql) + await 
async_cursor.executemany(insert_sql, data) + await async_cursor.execute(query_sql) + fetched_data = [(n, await c.read()) async for n, c in async_cursor] + assert fetched_data == data + await async_cursor.execute(query_sql) + fetched_data = [(n, await c.read()) async for n, c in async_cursor] + assert fetched_data == data + await async_cursor.execute(drop_sql) + await async_cursor.execute(create_sql) + await async_cursor.executemany(insert_sql, data) + await async_cursor.execute(query_sql) + fetched_data = [(n, await c.read()) async for n, c in async_cursor] + assert fetched_data == data + + +async def test_6347(skip_unless_domains_supported, async_cursor, test_env): + "6347 - fetch table with domain and annotations" + await async_cursor.execute("select * from TableWithDomainAndAnnotations") + assert await async_cursor.fetchall() == [(1, 25)] + column_1 = async_cursor.description[0] + assert column_1.domain_schema is None + assert column_1.domain_name is None + assert column_1.annotations is None + column_2 = async_cursor.description[1] + assert column_2.domain_schema == test_env.main_user.upper() + assert column_2.domain_name == "SIMPLEDOMAIN" + expected_annotations = { + "ANNO_1": "first annotation", + "ANNO_2": "second annotation", + "ANNO_3": "", + } + assert column_2.annotations == expected_annotations + + +async def test_6348(async_cursor, test_env): + "6348 - test fetching LOBs after an error" + sql = """ + select + to_clob(:val), + 1 / (dbms_lob.getlength(to_clob(:val)) - 1) + from dual""" + with test_env.assert_raises_full_code("ORA-01476"): + await async_cursor.execute(sql, val="a") + await async_cursor.execute(sql, val="bb") + lob, num_val = await async_cursor.fetchone() + assert await lob.read() == "bb" + assert num_val == 1 + + +async def test_6349(test_env): + "6349 - test parse() with autocommit enabled" + async with test_env.get_connection_async() as conn: + conn.autocommit = True + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + await cursor.parse("insert into TestTempTable (IntCol) values (:1)") + await cursor.execute(None, [1]) + + +async def test_6350(async_cursor, test_env): + "6350 - test cursor.setinputsizes() with early failed execute" + async_cursor.setinputsizes(a=int, b=str) + with test_env.assert_raises_full_code("DPY-2006"): + await async_cursor.execute("select :c from dual", [5]) + value = 4368 + await async_cursor.execute("select :d from dual", [value]) + (fetched_value,) = await async_cursor.fetchone() + assert fetched_value == value + + +async def test_6351(async_cursor): + "6351 - fetch JSON columns as Python objects" + expected_data = [ + (1, [1, 2, 3], [4, 5, 6], [7, 8, 9]), + (2, None, None, None), + ] + await async_cursor.execute("select * from TestJsonCols order by IntCol") + assert await async_cursor.fetchall() == expected_data + + +async def test_6352(async_conn): + "6352 - test fetching nested cursors repeatedly" + sql = """ + select + s.Description, + cursor(select 'Nested String for ' || s.Description from dual) + from + ( + select 'Top Level String 1' as Description + from dual + union all + select 'Top Level String 2' + from dual + union all + select 'Top Level String 3' + from dual + union all + select 'Top Level String 4' + from dual + union all + select 'Top Level String 5' + from dual + ) s""" -if __name__ == "__main__": - test_env.run_test_cases() + for i in range(3): + with async_conn.cursor() as cursor: + cursor.arraysize = 10 + await cursor.execute(sql) + desc, nested1 = await cursor.fetchone() + assert 
desc == "Top Level String 1" + nested_rows = await nested1.fetchall() + assert nested_rows == [("Nested String for Top Level String 1",)] + desc, nested2 = await cursor.fetchone() + assert desc == "Top Level String 2" + nested_rows = await nested2.fetchall() + assert nested_rows == [("Nested String for Top Level String 2",)] + + +async def test_6353(test_env): + "6353 - access cursor.rowcount after closing connection" + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + assert cursor.rowcount == -1 + + +async def test_6354(async_conn, async_cursor): + "6354 - execute PL/SQL with out vars after query with duplicate data" + await async_cursor.execute("truncate table TestTempTable") + await async_cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + [(i + 1, "test_4370") for i in range(20)], + ) + await async_conn.commit() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + var = async_cursor.var(int) + await async_cursor.execute("begin :1 := 4370; end;", [var]) + assert var.getvalue() == 4370 + + +async def test_6355(async_cursor): + "6355 - test cursor with fetch_decimals=True specified" + value = 4371 + await async_cursor.execute( + "select :1 from dual", [value], fetch_decimals=True + ) + rows = await async_cursor.fetchall() + assert isinstance(rows[0][0], decimal.Decimal) diff --git a/tests/test_6400_vector_var.py b/tests/test_6400_vector_var.py index f1e0bcc7..57642ea4 100644 --- a/tests/test_6400_vector_var.py +++ b/tests/test_6400_vector_var.py @@ -28,700 +28,722 @@ import array import json -import unittest + import oracledb -import test_env +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(skip_unless_vectors_supported): + pass + + +def _test_insert_and_fetch(cursor, value, column_name, expected_typecode): + """ + Test inserting and fetching a vector. 
+ """ + cursor.execute("delete from TestVectors") + if isinstance(value, list): + cursor.setinputsizes(value=oracledb.DB_TYPE_VECTOR) + cursor.execute( + f""" + insert into TestVectors (IntCol, {column_name}) + values(1, :value) + """, + value=value, + ) + cursor.connection.commit() + cursor.execute(f"select {column_name} from TestVectors") + (fetched_value,) = cursor.fetchone() + if expected_typecode == "b": + expected_value = array.array("b", [int(i) for i in value]) + else: + expected_value = array.array(expected_typecode, value) + assert fetched_value == expected_value + assert fetched_value.typecode == expected_typecode + + +def _test_plsql_insert_and_fetch(cursor, vec1, vec2, expected_distance): + in_out_vec = cursor.var(oracledb.DB_TYPE_VECTOR) + in_out_vec.setvalue(0, vec2) + + distance = cursor.var(oracledb.DB_TYPE_BINARY_DOUBLE) + output_vec = cursor.var(oracledb.DB_TYPE_VECTOR) + + plsql_block = """ + BEGIN + select + vector_distance(:in_vec, :in_out_vec, euclidean) + into :distance; + :output_vec := :in_out_vec; + :in_out_vec := :in_vec; + END; + """ + + cursor.execute( + plsql_block, + in_vec=vec1, + in_out_vec=in_out_vec, + distance=distance, + output_vec=output_vec, + ) + assert output_vec.getvalue() == vec2 + assert in_out_vec.getvalue() == vec1 + assert distance.getvalue() == pytest.approx(expected_distance, abs=0.01) + + +def test_6400(cursor): + "6400 - test binding in a vector from a Python list" + value = [1, 2] + cursor.setinputsizes(oracledb.DB_TYPE_VECTOR) + cursor.execute("select :1 from dual", [value]) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, array.array) + assert fetched_value.typecode == "d" + assert fetched_value == array.array("d", value) + + +def test_6401(cursor): + "6401 - test binding in a vector from a Python array of type float64" + value = array.array("d", [3, 4, 5]) + cursor.execute("select :1 from dual", [value]) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, array.array) + assert fetched_value.typecode == "d" + assert fetched_value == value + + +def test_6402(cursor): + "6402 - test binding in a vector from a Python array of type float32" + value = array.array("f", [6, 7, 8, 9]) + cursor.execute("select :1 from dual", [value]) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, array.array) + assert fetched_value.typecode == "f" + assert fetched_value == value + + +def test_6403(cursor): + "6402 - test binding in a vector from a Python array of type int8" + value = array.array("b", [-10, 11, -12, 13, -14]) + cursor.execute("select :1 from dual", [value]) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, array.array) + assert fetched_value.typecode == "b" + assert fetched_value == value + + +def test_6404(cursor, test_env): + "6404 - unspported array type for vector" + with test_env.assert_raises_full_code("DPY-3013"): + cursor.execute("select :1 from dual", [array.array("L", [4, 5])]) + + +def test_6405(cursor): + "6405 - insert a float32 vector into a float32 column" + value = array.array( + "f", + [ + 1.23, + 4.56, + -7.89, + 10.11, + -12.13, + 14.15, + -16.17, + 18.19, + -20.21, + 9.23, + -2.54, + 6.5, + 4.21, + -1.96, + 3.54, + 2.6, + ], + ) + _test_insert_and_fetch(cursor, value, "Vector32Col", "f") + + +def test_6406(cursor): + "6406 - insert a float32 vector into a float64 column" + value = array.array( + "f", + [ + 1.23, + 4.56, + -7.89, + 10.11, + -12.13, + 14.15, + -16.17, + 18.19, + -20.21, + 9.23, + -2.54, + 6.5, + 4.21, + -1.96, + 
3.54, + 2.6, + ], + ) + _test_insert_and_fetch(cursor, value, "Vector64Col", "d") + + +def test_6407(cursor): + "6407 - insert a float32 vector into a flexible format column" + value = array.array( + "f", + [ + 1.23, + 4.56, + -7.89, + 10.11, + -12.13, + 14.15, + -16.17, + 18.19, + -20.21, + 9.23, + -2.54, + 6.5, + 4.21, + -1.96, + 3.54, + 2.6, + ], + ) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "f") + + +def test_6408(cursor): + "6408 - insert a float64 vector into a float64 column" + value = array.array( + "d", + [ + -0.0375, + 0.625, + -0.025, + 0.125, + -0.75, + 0.0, + -0.3625, + 0.125, + -0.5, + 0.03125, + -2.50, + -0.75, + 1.625, + 1.025, + 0.125, + 0.725, + ], + ) + _test_insert_and_fetch(cursor, value, "Vector64Col", "d") + + +def test_6409(cursor): + "6409 - insert float64 vector into a float32 column" + value = array.array( + "d", + [ + -0.0375, + 0.625, + -0.025, + 0.125, + -0.75, + 0.0, + -0.3625, + 0.125, + -0.5, + 0.03125, + -2.50, + -0.75, + 1.625, + 1.025, + 0.125, + 0.725, + ], + ) + _test_insert_and_fetch(cursor, value, "Vector32Col", "f") + + +def test_6410(cursor): + "6410 - insert float64 vector into a flexible type column" + value = array.array( + "d", + [ + -0.0375, + 0.625, + -0.025, + 0.125, + -0.75, + 0.0, + -0.3625, + 0.125, + -0.5, + 0.03125, + -2.50, + -0.75, + 1.625, + 1.025, + 0.125, + 0.725, + ], + ) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "d") + + +def test_6411(cursor, test_env): + "6411 - insert a vector with an invalid size" + cursor.execute("truncate table TestVectors") + for num_elems in [4, 20]: + statement = """ + insert into TestVectors (IntCol, Vector64Col) + values(2, :1)""" + vector = array.array("d", [i * 0.625 for i in range(num_elems)]) + with test_env.assert_raises_full_code("ORA-51803"): + cursor.execute(statement, [vector]) + + +def test_6412(cursor): + "6412 - verify fetch info for vectors" + attr_names = [ + "name", + "type_code", + "vector_dimensions", + "vector_format", + "vector_is_sparse", + ] + expected_values = [ + ["INTCOL", oracledb.DB_TYPE_NUMBER, None, None, None], + ["VECTORFLEXALLCOL", oracledb.DB_TYPE_VECTOR, None, None, False], + ["VECTORFLEXTYPECOL", oracledb.DB_TYPE_VECTOR, 2, None, False], + [ + "VECTORFLEX8COL", + oracledb.DB_TYPE_VECTOR, + None, + oracledb.VECTOR_FORMAT_INT8, + False, + ], + [ + "VECTORFLEX32COL", + oracledb.DB_TYPE_VECTOR, + None, + oracledb.VECTOR_FORMAT_FLOAT32, + False, + ], + [ + "VECTORFLEX64COL", + oracledb.DB_TYPE_VECTOR, + None, + oracledb.VECTOR_FORMAT_FLOAT64, + False, + ], + [ + "VECTOR8COL", + oracledb.DB_TYPE_VECTOR, + 16, + oracledb.VECTOR_FORMAT_INT8, + False, + ], + [ + "VECTOR32COL", + oracledb.DB_TYPE_VECTOR, + 16, + oracledb.VECTOR_FORMAT_FLOAT32, + False, + ], + [ + "VECTOR64COL", + oracledb.DB_TYPE_VECTOR, + 16, + oracledb.VECTOR_FORMAT_FLOAT64, + False, + ], + ] + cursor.execute("select * from TestVectors") + values = [[getattr(i, n) for n in attr_names] for i in cursor.description] + assert values == expected_values + assert cursor.description[3].vector_format is oracledb.VectorFormat.INT8 + + +def test_6413(cursor): + "6413 - insert an int8 vector into an int8 column" + value = array.array( + "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] + ) + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + + +def test_6414(cursor): + "6414 - insert an int8 vector into a float32 column" + value = array.array( + "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] + ) + _test_insert_and_fetch(cursor, value, 
"Vector32Col", "f") + + +def test_6415(cursor): + "6415 - insert an int8 vector into a float64 column" + value = array.array( + "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] + ) + _test_insert_and_fetch(cursor, value, "Vector64Col", "d") + + +def test_6416(cursor): + "6416 - insert an int8 vector into a flexible column" + value = array.array( + "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] + ) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "b") + + +def test_6417(cursor): + "6417 - insert a float32 vector into an int8 column" + value = array.array( + "f", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] + ) + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + + +def test_6418(cursor): + "6418 - insert a float64 vector into an int8 column" + value = array.array( + "d", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] + ) + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + + +def test_6419(conn, cursor): + "6419 - test dml returning vector type" + value = array.array("d", [6423.5, 6423.625]) + out_var = cursor.var(oracledb.DB_TYPE_VECTOR) + cursor.execute("delete from TestVectors") + cursor.execute( + """ + insert into TestVectors (IntCol, VectorFlexTypeCol) + values (1, :value) + returning VectorFlexTypeCol into :out_value + """, + [value, out_var], + ) + conn.commit() + assert value == out_var.getvalue()[0] + + +def test_6420(conn, cursor): + "6420 - test handling of NULL vector value" + cursor.execute("delete from TestVectors") + cursor.execute("insert into TestVectors (IntCol) values (1)") + conn.commit() + cursor.execute("select VectorFlexTypeCol from TestVectors") + (fetched_value,) = cursor.fetchone() + assert fetched_value is None + + +def test_6421(cursor, test_env): + "6421 - insert a float32 vector into an int8 column (negative)" + value = array.array( + "f", + [-130, -129, 0, 1, 2, 3, 127, 128, 129, 348, 12, 49, 78, 12, 9, 2], + ) + with test_env.assert_raises_full_code("ORA-51806"): + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + + +def test_6422(cursor): + "6422 - insert a float64 vector with 65,533 dimensions" + value = array.array("d", [2.5] * 65533) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "d") + + +def test_6423(cursor): + "6423 - insert a float32 vector with 65,533 dimensions" + value = array.array("f", [2.5] * 65533) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "f") + + +def test_6424(cursor): + "6424 - insert an int8 vector with 65,533 dimensions" + value = array.array("b", [2] * 65533) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "b") + + +def test_6425(cursor): + "6425 - insert vectors with different dimensions" + for dim in [30, 70, 255, 256, 65534, 65535]: + for typ in ["f", "d", "b"]: + element_value = 3 if typ == "b" else 1.5 + value = array.array(typ, [element_value] * dim) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", typ) + + +@pytest.mark.skip("awaiting database support") +def test_6426(conn, cursor): + "6426 - insert and fetch VECTOR data using CLOB" + value = [6426, -15.75, 283.125, -8.625] + clob = conn.createlob(oracledb.DB_TYPE_CLOB) + clob.write(json.dumps(value)) + cursor.execute("delete from TestVectors") + cursor.execute( + """ + insert into TestVectors (IntCol, VectorFlexAllCol) + values(1, :value) + """, + value=clob, + ) + + def type_handler(cursor, metadata): + if metadata.name == "VECTORFLEXALLCOL": + return cursor.var( + oracledb.DB_TYPE_CLOB, arraysize=cursor.arraysize + ) + 
cursor.outputtypehandler = type_handler -@test_env.skip_unless_vectors_supported() -class TestCase(test_env.BaseTestCase): - def __test_insert_and_fetch(self, value, column_name, expected_typecode): + cursor.execute("select VectorFlexAllCol from TestVectors") + (clob_data,) = cursor.fetchone() + fetched_value = json.loads(clob_data.read()) + assert fetched_value == value + + +def test_6427(cursor): + "6427 - insert and fetch VECTOR data using strings" + value = [6427, -25.75, 383.125, -18.625] + cursor.execute("delete from TestVectors") + cursor.execute( """ - Test inserting and fetching a vector. + insert into TestVectors (IntCol, VectorFlexAllCol) + values(1, :value) + """, + value=json.dumps(value), + ) + + def type_handler(cursor, metadata): + if metadata.name == "VECTORFLEXALLCOL": + return cursor.var( + oracledb.DB_TYPE_LONG, arraysize=cursor.arraysize + ) + + cursor.outputtypehandler = type_handler + + cursor.execute("select VectorFlexAllCol from TestVectors") + (fetched_value,) = cursor.fetchone() + assert json.loads(fetched_value) == value + + +def test_6428(cursor): + "6428 - insert vectors with flexible dimensions and conversion" + for dim in [30, 255, 256, 257, 32768, 65535]: + for source_type in ["f", "d", "b"]: + for target_type in ["f", "d", "b"]: + if target_type == "f": + target_col = "VectorFlex32Col" + elif target_type == "d": + target_col = "VectorFlex64Col" + else: + target_col = "VectorFlex8Col" + element_value = 4 if source_type == "b" else 2.5 + value = array.array(source_type, [element_value] * dim) + _test_insert_and_fetch(cursor, value, target_col, target_type) + + +@pytest.mark.skip("awaiting database support") +def test_6429(cursor): + "6427 - insert and fetch large VECTOR data using strings" + value = [0.12345678925] * 35625 + cursor.execute("delete from TestVectors") + cursor.execute( """ - self.cursor.execute("delete from TestVectors") - if isinstance(value, list): - self.cursor.setinputsizes(value=oracledb.DB_TYPE_VECTOR) - self.cursor.execute( - f""" - insert into TestVectors (IntCol, {column_name}) - values(1, :value) - """, - value=value, - ) - self.conn.commit() - self.cursor.execute(f"select {column_name} from TestVectors") - (fetched_value,) = self.cursor.fetchone() - if expected_typecode == "b": - expected_value = array.array("b", [int(i) for i in value]) - else: - expected_value = array.array(expected_typecode, value) - self.assertEqual(fetched_value, expected_value) - self.assertEqual(fetched_value.typecode, expected_typecode) - - def __test_plsql_insert_and_fetch(self, vec1, vec2, expected_distance): - in_out_vec = self.cursor.var(oracledb.DB_TYPE_VECTOR) - in_out_vec.setvalue(0, vec2) - - distance = self.cursor.var(oracledb.DB_TYPE_BINARY_DOUBLE) - output_vec = self.cursor.var(oracledb.DB_TYPE_VECTOR) - - plsql_block = """ - BEGIN - select - vector_distance(:in_vec, :in_out_vec, euclidean) - into :distance; - :output_vec := :in_out_vec; - :in_out_vec := :in_vec; - END; - """ - - self.cursor.execute( - plsql_block, - in_vec=vec1, - in_out_vec=in_out_vec, - distance=distance, - output_vec=output_vec, - ) - self.assertEqual(output_vec.getvalue(), vec2) - self.assertEqual(in_out_vec.getvalue(), vec1) - self.assertAlmostEqual( - distance.getvalue(), expected_distance, places=2 - ) - - def test_6400(self): - "6400 - test binding in a vector from a Python list" - value = [1, 2] - self.cursor.setinputsizes(oracledb.DB_TYPE_VECTOR) - self.cursor.execute("select :1 from dual", [value]) - (fetched_value,) = self.cursor.fetchone() - 
self.assertIsInstance(fetched_value, array.array) - self.assertEqual(fetched_value.typecode, "d") - self.assertEqual(fetched_value, array.array("d", value)) - - def test_6401(self): - "6401 - test binding in a vector from a Python array of type float64" - value = array.array("d", [3, 4, 5]) - self.cursor.execute("select :1 from dual", [value]) - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, array.array) - self.assertEqual(fetched_value.typecode, "d") - self.assertEqual(fetched_value, value) - - def test_6402(self): - "6402 - test binding in a vector from a Python array of type float32" - value = array.array("f", [6, 7, 8, 9]) - self.cursor.execute("select :1 from dual", [value]) - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, array.array) - self.assertEqual(fetched_value.typecode, "f") - self.assertEqual(fetched_value, value) - - def test_6403(self): - "6402 - test binding in a vector from a Python array of type int8" - value = array.array("b", [-10, 11, -12, 13, -14]) - self.cursor.execute("select :1 from dual", [value]) - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, array.array) - self.assertEqual(fetched_value.typecode, "b") - self.assertEqual(fetched_value, value) - - def test_6404(self): - "6404 - unspported array type for vector" - with self.assertRaisesFullCode("DPY-3013"): - self.cursor.execute( - "select :1 from dual", [array.array("L", [4, 5])] + insert into TestVectors (IntCol, VectorFlex64Col) + values(1, :value) + """, + value=json.dumps(value), + ) + + def type_handler(cursor, metadata): + if metadata.name == "VECTORFLEX64COL": + return cursor.var( + oracledb.DB_TYPE_LONG, arraysize=cursor.arraysize ) - def test_6405(self): - "6405 - insert a float32 vector into a float32 column" - value = array.array( - "f", - [ - 1.23, - 4.56, - -7.89, - 10.11, - -12.13, - 14.15, - -16.17, - 18.19, - -20.21, - 9.23, - -2.54, - 6.5, - 4.21, - -1.96, - 3.54, - 2.6, - ], - ) - self.__test_insert_and_fetch(value, "Vector32Col", "f") - - def test_6406(self): - "6406 - insert a float32 vector into a float64 column" - value = array.array( - "f", - [ - 1.23, - 4.56, - -7.89, - 10.11, - -12.13, - 14.15, - -16.17, - 18.19, - -20.21, - 9.23, - -2.54, - 6.5, - 4.21, - -1.96, - 3.54, - 2.6, - ], - ) - self.__test_insert_and_fetch(value, "Vector64Col", "d") - - def test_6407(self): - "6407 - insert a float32 vector into a flexible format column" - value = array.array( - "f", - [ - 1.23, - 4.56, - -7.89, - 10.11, - -12.13, - 14.15, - -16.17, - 18.19, - -20.21, - 9.23, - -2.54, - 6.5, - 4.21, - -1.96, - 3.54, - 2.6, - ], - ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "f") - - def test_6408(self): - "6408 - insert a float64 vector into a float64 column" - value = array.array( - "d", - [ - -0.0375, - 0.625, - -0.025, - 0.125, - -0.75, - 0.0, - -0.3625, - 0.125, - -0.5, - 0.03125, - -2.50, - -0.75, - 1.625, - 1.025, - 0.125, - 0.725, - ], - ) - self.__test_insert_and_fetch(value, "Vector64Col", "d") - - def test_6409(self): - "6409 - insert float64 vector into a float32 column" - value = array.array( - "d", - [ - -0.0375, - 0.625, - -0.025, - 0.125, - -0.75, - 0.0, - -0.3625, - 0.125, - -0.5, - 0.03125, - -2.50, - -0.75, - 1.625, - 1.025, - 0.125, - 0.725, - ], - ) - self.__test_insert_and_fetch(value, "Vector32Col", "f") - - def test_6410(self): - "6410 - insert float64 vector into a flexible type column" - value = array.array( - "d", - [ - -0.0375, - 0.625, - -0.025, - 0.125, - -0.75, - 
0.0, - -0.3625, - 0.125, - -0.5, - 0.03125, - -2.50, - -0.75, - 1.625, - 1.025, - 0.125, - 0.725, - ], - ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "d") - - def test_6411(self): - "6411 - insert a vector with an invalid size" - self.cursor.execute("truncate table TestVectors") - for num_elems in [4, 20]: - statement = """ - insert into TestVectors (IntCol, Vector64Col) - values(2, :1)""" - vector = array.array("d", [i * 0.625 for i in range(num_elems)]) - with self.assertRaisesFullCode("ORA-51803"): - self.cursor.execute(statement, [vector]) - - def test_6412(self): - "6412 - verify fetch info for vectors" - attr_names = [ - "name", - "type_code", - "vector_dimensions", - "vector_format", - "vector_is_sparse", - ] - expected_values = [ - ["INTCOL", oracledb.DB_TYPE_NUMBER, None, None, None], - ["VECTORFLEXALLCOL", oracledb.DB_TYPE_VECTOR, None, None, False], - ["VECTORFLEXTYPECOL", oracledb.DB_TYPE_VECTOR, 2, None, False], - [ - "VECTORFLEX8COL", - oracledb.DB_TYPE_VECTOR, - None, - oracledb.VECTOR_FORMAT_INT8, - False, - ], - [ - "VECTORFLEX32COL", - oracledb.DB_TYPE_VECTOR, - None, - oracledb.VECTOR_FORMAT_FLOAT32, - False, - ], - [ - "VECTORFLEX64COL", - oracledb.DB_TYPE_VECTOR, - None, - oracledb.VECTOR_FORMAT_FLOAT64, - False, - ], - [ - "VECTOR8COL", - oracledb.DB_TYPE_VECTOR, - 16, - oracledb.VECTOR_FORMAT_INT8, - False, - ], - [ - "VECTOR32COL", - oracledb.DB_TYPE_VECTOR, - 16, - oracledb.VECTOR_FORMAT_FLOAT32, - False, - ], - [ - "VECTOR64COL", - oracledb.DB_TYPE_VECTOR, - 16, - oracledb.VECTOR_FORMAT_FLOAT64, - False, - ], - ] - self.cursor.execute("select * from TestVectors") - values = [ - [getattr(i, n) for n in attr_names] - for i in self.cursor.description - ] - self.assertEqual(values, expected_values) - self.assertIs( - self.cursor.description[3].vector_format, - oracledb.VectorFormat.INT8, - ) - - def test_6413(self): - "6413 - insert an int8 vector into an int8 column" - value = array.array( - "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] - ) - self.__test_insert_and_fetch(value, "Vector8Col", "b") - - def test_6414(self): - "6414 - insert an int8 vector into a float32 column" - value = array.array( - "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] - ) - self.__test_insert_and_fetch(value, "Vector32Col", "f") - - def test_6415(self): - "6415 - insert an int8 vector into a float64 column" - value = array.array( - "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] - ) - self.__test_insert_and_fetch(value, "Vector64Col", "d") - - def test_6416(self): - "6416 - insert an int8 vector into a flexible column" - value = array.array( - "b", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] - ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "b") - - def test_6417(self): - "6417 - insert a float32 vector into an int8 column" - value = array.array( - "f", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] - ) - self.__test_insert_and_fetch(value, "Vector8Col", "b") - - def test_6418(self): - "6418 - insert a float64 vector into an int8 column" - value = array.array( - "d", [-5, 4, -7, 6, -9, 8, -127, 127, 0, -128, 1, 4, -3, 2, -8, 0] - ) - self.__test_insert_and_fetch(value, "Vector8Col", "b") - - def test_6419(self): - "6419 - test dml returning vector type" - value = array.array("d", [6423.5, 6423.625]) - out_var = self.cursor.var(oracledb.DB_TYPE_VECTOR) - self.cursor.execute("delete from TestVectors") - self.cursor.execute( - """ - insert into TestVectors (IntCol, 
VectorFlexTypeCol) - values (1, :value) - returning VectorFlexTypeCol into :out_value - """, - [value, out_var], - ) - self.conn.commit() - self.assertEqual(value, out_var.getvalue()[0]) - - def test_6420(self): - "6420 - test handling of NULL vector value" - self.cursor.execute("delete from TestVectors") - self.cursor.execute("insert into TestVectors (IntCol) values (1)") - self.conn.commit() - self.cursor.execute("select VectorFlexTypeCol from TestVectors") - (fetched_value,) = self.cursor.fetchone() - self.assertIsNone(fetched_value) - - def test_6421(self): - "6421 - insert a float32 vector into an int8 column (negative)" - value = array.array( - "f", - [-130, -129, 0, 1, 2, 3, 127, 128, 129, 348, 12, 49, 78, 12, 9, 2], - ) - with self.assertRaisesFullCode("ORA-51806"): - self.__test_insert_and_fetch(value, "Vector8Col", "b") - - def test_6422(self): - "6422 - insert a float64 vector with 65,533 dimensions" - value = array.array("d", [2.5] * 65533) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "d") - - def test_6423(self): - "6423 - insert a float32 vector with 65,533 dimensions" - value = array.array("f", [2.5] * 65533) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "f") - - def test_6424(self): - "6424 - insert an int8 vector with 65,533 dimensions" - value = array.array("b", [2] * 65533) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "b") - - def test_6425(self): - "6425 - insert vectors with different dimensions" - for dim in [30, 70, 255, 256, 65534, 65535]: - for typ in ["f", "d", "b"]: - with self.subTest(dim=dim, typ=typ): - element_value = 3 if typ == "b" else 1.5 - value = array.array(typ, [element_value] * dim) - self.__test_insert_and_fetch( - value, "VectorFlexAllCol", typ - ) - - @unittest.skip("awaiting database support") - def test_6426(self): - "6426 - insert and fetch VECTOR data using CLOB" - value = [6426, -15.75, 283.125, -8.625] - clob = self.conn.createlob(oracledb.DB_TYPE_CLOB) - clob.write(json.dumps(value)) - self.cursor.execute("delete from TestVectors") - self.cursor.execute( - """ - insert into TestVectors (IntCol, VectorFlexAllCol) - values(1, :value) - """, - value=clob, - ) - - def type_handler(cursor, metadata): - if metadata.name == "VECTORFLEXALLCOL": - return cursor.var( - oracledb.DB_TYPE_CLOB, arraysize=cursor.arraysize - ) - - self.cursor.outputtypehandler = type_handler - - self.cursor.execute("select VectorFlexAllCol from TestVectors") - (clob_data,) = self.cursor.fetchone() - fetched_value = json.loads(clob_data.read()) - self.assertEqual(fetched_value, value) - - def test_6427(self): - "6427 - insert and fetch VECTOR data using strings" - value = [6427, -25.75, 383.125, -18.625] - self.cursor.execute("delete from TestVectors") - self.cursor.execute( - """ - insert into TestVectors (IntCol, VectorFlexAllCol) - values(1, :value) - """, - value=json.dumps(value), - ) - - def type_handler(cursor, metadata): - if metadata.name == "VECTORFLEXALLCOL": - return cursor.var( - oracledb.DB_TYPE_LONG, arraysize=cursor.arraysize - ) - - self.cursor.outputtypehandler = type_handler - - self.cursor.execute("select VectorFlexAllCol from TestVectors") - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(json.loads(fetched_value), value) - - def test_6428(self): - "6428 - insert vectors with flexible dimensions and conversion" - for dim in [30, 255, 256, 257, 32768, 65535]: - for source_type in ["f", "d", "b"]: - for target_type in ["f", "d", "b"]: - with self.subTest( - dim=dim, - source_type=source_type, - 
target_type=target_type, - ): - if target_type == "f": - target_col = "VectorFlex32Col" - elif target_type == "d": - target_col = "VectorFlex64Col" - else: - target_col = "VectorFlex8Col" - element_value = 4 if source_type == "b" else 2.5 - value = array.array(source_type, [element_value] * dim) - self.__test_insert_and_fetch( - value, target_col, target_type - ) - - @unittest.skip("awaiting database support") - def test_6429(self): - "6427 - insert and fetch large VECTOR data using strings" - value = [0.12345678925] * 35625 - self.cursor.execute("delete from TestVectors") - self.cursor.execute( - """ - insert into TestVectors (IntCol, VectorFlex64Col) - values(1, :value) - """, - value=json.dumps(value), - ) - - def type_handler(cursor, metadata): - if metadata.name == "VECTORFLEX64COL": - return cursor.var( - oracledb.DB_TYPE_LONG, arraysize=cursor.arraysize - ) - - self.cursor.outputtypehandler = type_handler - - self.cursor.execute("select VectorFlex64Col from TestVectors") - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(json.loads(fetched_value), value) - - def test_6430(self): - "6430 - test binding a vector with inf values (negative)" - value = array.array( - "d", [float("inf") if i % 2 else float("-inf") for i in range(16)] - ) - with self.assertRaisesFullCode("ORA-51805"): - self.cursor.execute("select :1 from dual", [value]) - - def test_6431(self): - "6431 - test setting an invalid type to a vector" - var = self.cursor.var(oracledb.DB_TYPE_VECTOR) - self.assertRaises(TypeError, var.setvalue, 0, [[i] for i in range(16)]) - - def test_6432(self): - "6432 - fetch JSON value with an embedded vector" - self.cursor.execute( - """ - select json_object( - 'id' : 6432, - 'vector' : to_vector('[1, 2, 3]') - returning json - ) from dual - """ - ) - (result,) = self.cursor.fetchone() - expected_val = dict(id=6432, vector=array.array("f", [1, 2, 3])) - self.assertEqual(result, expected_val) - - def test_6433(self): - "6433 - bind JSON value with an embedded vector" - value = dict(id=6433, vector=array.array("d", [6433, 6433.25, 6433.5])) - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) - self.cursor.execute( - "insert into TestJson values (:1, :2)", [6433, value] - ) - self.conn.commit() - self.cursor.execute("select JsonCol from TestJson") - (fetched_val,) = self.cursor.fetchone() - self.assertEqual(fetched_val, value) - - def test_6434(self): - "6434 - executemany() without setinputsizes()" - self.cursor.execute("delete from TestVectors") - values = [array.array("f", [3.1416, 4]), [3.14159, 5]] - self.cursor.executemany( - """ - insert into TestVectors (IntCol, VectorFlexTypeCol) - values (:1, :2) - """, - list(enumerate(values)), - ) - self.cursor.execute( - "select VectorFlexTypeCol from TestVectors order by IntCol" - ) - expected_value = [ - (array.array("f", [3.1416, 4.0]),), - (array.array("d", [3.14159, 5.0]),), - ] - self.assertEqual(self.cursor.fetchall(), expected_value) - - def test_6435(self): - "6435 - executemany() with setinputsizes()" - self.cursor.execute("delete from TestVectors") - values = [[144, 1000], array.array("d", [66.0, 7.14])] - self.cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR) - self.cursor.executemany( - """ - insert into TestVectors (IntCol, VectorFlex64Col) - values (:1, :2) - """, - list(enumerate(values)), - ) - self.cursor.execute( - "select VectorFlex64Col from TestVectors order by IntCol" - ) - expected_value = [ - (array.array("d", [144.0, 1000.0]),), - (array.array("d", [66.0, 
7.14]),), - ] - self.assertEqual(self.cursor.fetchall(), expected_value) - - def test_6436(self): - "6436 - vector with zero dimensions" - with self.assertRaisesFullCode("DPY-4031"): - self.cursor.setinputsizes(oracledb.DB_TYPE_VECTOR) - self.cursor.execute("select :1", [[]]) - with self.assertRaisesFullCode("DPY-4031"): - self.cursor.execute("select :1", [array.array("d", [])]) - - def test_6437(self): - "6437 - insert a list vector into a flexible format column" - value = [1.5, 9.9] - self.__test_insert_and_fetch(value, "VectorFlexTypeCol", "d") - - def test_6438(self): - "6438 - insert a list vector into a flexible size column" - value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "d") - - def test_6439(self): - "6439 - insert a list vector into a flexible float32 column" - value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] - self.__test_insert_and_fetch(value, "VectorFlex32Col", "f") - - def test_6440(self): - "6440 - insert a list vector into a flexible float64 column" - value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] - self.__test_insert_and_fetch(value, "VectorFlex64Col", "d") - - def test_6441(self): - "6441 - insert a list vector into a float32 column" - value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] - self.__test_insert_and_fetch(value, "Vector32Col", "f") - - def test_6442(self): - "6442 - insert a list vector into a float64 column" - value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] - self.__test_insert_and_fetch(value, "Vector64Col", "d") - - def test_6443(self): - "6443 - insert a list vector into a flexible int8 column" - value = [1, 9, 3, 8, 4, 7, 5, 6, 0, 2, 6, 4, 5, 6, 7, 8] - self.__test_insert_and_fetch(value, "VectorFlex8Col", "b") - - def test_6444(self): - "6444 - insert a list vector into an int8 column" - value = [1, 9, 3, 8, 4, 7, 5, 6, 0, 2, 6, 4, 5, 6, 7, 8] - self.__test_insert_and_fetch(value, "Vector8Col", "b") - - def test_6445(self): - "6445 - test setting a PL-SQL type to a float32 vector" - vec1 = array.array("f", [1, 1.5, 2, 2.5]) - vec2 = array.array("f", [4, 4.5, 5, 5.5]) - self.__test_plsql_insert_and_fetch(vec1, vec2, 6) - - vec3 = array.array("f", [3.5] * 65535) - vec4 = array.array("f", [2.5] * 65535) - self.__test_plsql_insert_and_fetch(vec3, vec4, 256) - - def test_6446(self): - "6446 - test setting a PL-SQL type to a float64 vector" - vec1 = array.array("d", [1, 1.5, 2, 2.5]) - vec2 = array.array("d", [4, 4.5, 5, 5.5]) - self.__test_plsql_insert_and_fetch(vec1, vec2, 6) - - vec3 = array.array("d", [3.5] * 65535) - vec4 = array.array("d", [2.5] * 65535) - self.__test_plsql_insert_and_fetch(vec3, vec4, 256) - - def test_6447(self): - "6447 - test setting a PL-SQL type to a int8 vector" - vec1 = array.array("b", [1, 2, 3, 4]) - vec2 = array.array("b", [5, 6, 7, 8]) - self.__test_plsql_insert_and_fetch(vec1, vec2, 8) - - vec3 = array.array("b", [3] * 65535) - vec4 = array.array("b", [2] * 65535) - self.__test_plsql_insert_and_fetch(vec3, vec4, 256) - - -if __name__ == "__main__": - test_env.run_test_cases() + cursor.outputtypehandler = type_handler + + cursor.execute("select VectorFlex64Col from TestVectors") + (fetched_value,) = cursor.fetchone() + assert json.loads(fetched_value) == value + + +def test_6430(cursor, test_env): + "6430 - test binding a vector with inf values (negative)" + value = array.array( + "d", [float("inf") if i % 2 else float("-inf") for i in range(16)] + ) + with 
test_env.assert_raises_full_code("ORA-51805"): + cursor.execute("select :1 from dual", [value]) + + +def test_6431(cursor): + "6431 - test setting an invalid type to a vector" + var = cursor.var(oracledb.DB_TYPE_VECTOR) + pytest.raises(TypeError, var.setvalue, 0, [[i] for i in range(16)]) + + +def test_6432(cursor): + "6432 - fetch JSON value with an embedded vector" + cursor.execute( + """ + select json_object( + 'id' : 6432, + 'vector' : to_vector('[1, 2, 3]') + returning json + ) from dual + """ + ) + (result,) = cursor.fetchone() + expected_val = dict(id=6432, vector=array.array("f", [1, 2, 3])) + assert result == expected_val + + +def test_6433(conn, cursor): + "6433 - bind JSON value with an embedded vector" + value = dict(id=6433, vector=array.array("d", [6433, 6433.25, 6433.5])) + cursor.execute("delete from TestJson") + cursor.setinputsizes(None, oracledb.DB_TYPE_JSON) + cursor.execute("insert into TestJson values (:1, :2)", [6433, value]) + conn.commit() + cursor.execute("select JsonCol from TestJson") + (fetched_val,) = cursor.fetchone() + assert fetched_val == value + + +def test_6434(cursor): + "6434 - executemany() without setinputsizes()" + cursor.execute("delete from TestVectors") + values = [array.array("f", [3.1416, 4]), [3.14159, 5]] + cursor.executemany( + """ + insert into TestVectors (IntCol, VectorFlexTypeCol) + values (:1, :2) + """, + list(enumerate(values)), + ) + cursor.execute("select VectorFlexTypeCol from TestVectors order by IntCol") + expected_value = [ + (array.array("f", [3.1416, 4.0]),), + (array.array("d", [3.14159, 5.0]),), + ] + assert cursor.fetchall() == expected_value + + +def test_6435(cursor): + "6435 - executemany() with setinputsizes()" + cursor.execute("delete from TestVectors") + values = [[144, 1000], array.array("d", [66.0, 7.14])] + cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR) + cursor.executemany( + """ + insert into TestVectors (IntCol, VectorFlex64Col) + values (:1, :2) + """, + list(enumerate(values)), + ) + cursor.execute("select VectorFlex64Col from TestVectors order by IntCol") + expected_value = [ + (array.array("d", [144.0, 1000.0]),), + (array.array("d", [66.0, 7.14]),), + ] + assert cursor.fetchall() == expected_value + + +def test_6436(cursor, test_env): + "6436 - vector with zero dimensions" + with test_env.assert_raises_full_code("DPY-4031"): + cursor.setinputsizes(oracledb.DB_TYPE_VECTOR) + cursor.execute("select :1", [[]]) + with test_env.assert_raises_full_code("DPY-4031"): + cursor.execute("select :1", [array.array("d", [])]) + + +def test_6437(cursor): + "6437 - insert a list vector into a flexible format column" + value = [1.5, 9.9] + _test_insert_and_fetch(cursor, value, "VectorFlexTypeCol", "d") + + +def test_6438(cursor): + "6438 - insert a list vector into a flexible size column" + value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "d") + + +def test_6439(cursor): + "6439 - insert a list vector into a flexible float32 column" + value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] + _test_insert_and_fetch(cursor, value, "VectorFlex32Col", "f") + + +def test_6440(cursor): + "6440 - insert a list vector into a flexible float64 column" + value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] + _test_insert_and_fetch(cursor, value, "VectorFlex64Col", "d") + + +def test_6441(cursor): + "6441 - insert a list vector into a float32 column" + value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] + 
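# ---------------------------------------------------------------------------
# Illustrative aside, not part of this patch: plain Python lists carry no
# element type, so the tests bind them with an explicit
# setinputsizes(oracledb.DB_TYPE_VECTOR), while array.array values have a
# typecode and bind as vectors directly (see test_6400 and test_6401 above).
# A standalone sketch of the same idea, assuming an Oracle Database 23ai
# instance with VECTOR support:
# ---------------------------------------------------------------------------
import array

import oracledb


def vector_bind_roundtrip(cursor):
    # a list needs an explicit VECTOR bind type before execute()
    cursor.setinputsizes(oracledb.DB_TYPE_VECTOR)
    cursor.execute("select :1 from dual", [[1.5, 2.5, 3.5]])
    (from_list,) = cursor.fetchone()

    # an array.array of float64 ("d") binds as a vector without setinputsizes
    cursor.execute("select :1 from dual", [array.array("d", [1.5, 2.5, 3.5])])
    (from_array,) = cursor.fetchone()

    # both values are fetched back as array.array("d", ...) instances
    return from_list, from_array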
_test_insert_and_fetch(cursor, value, "Vector32Col", "f") + + +def test_6442(cursor): + "6442 - insert a list vector into a float64 column" + value = [1.5, 9.9, 3, 8, 4.25, 7, 5, 6.125, 0, 2, 6, 4, 5, 6, 7, 8] + _test_insert_and_fetch(cursor, value, "Vector64Col", "d") + + +def test_6443(cursor): + "6443 - insert a list vector into a flexible int8 column" + value = [1, 9, 3, 8, 4, 7, 5, 6, 0, 2, 6, 4, 5, 6, 7, 8] + _test_insert_and_fetch(cursor, value, "VectorFlex8Col", "b") + + +def test_6444(cursor): + "6444 - insert a list vector into an int8 column" + value = [1, 9, 3, 8, 4, 7, 5, 6, 0, 2, 6, 4, 5, 6, 7, 8] + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + + +def test_6445(cursor): + "6445 - test setting a PL-SQL type to a float32 vector" + vec1 = array.array("f", [1, 1.5, 2, 2.5]) + vec2 = array.array("f", [4, 4.5, 5, 5.5]) + _test_plsql_insert_and_fetch(cursor, vec1, vec2, 6) + + vec3 = array.array("f", [3.5] * 65535) + vec4 = array.array("f", [2.5] * 65535) + _test_plsql_insert_and_fetch(cursor, vec3, vec4, 256) + + +def test_6446(cursor): + "6446 - test setting a PL-SQL type to a float64 vector" + vec1 = array.array("d", [1, 1.5, 2, 2.5]) + vec2 = array.array("d", [4, 4.5, 5, 5.5]) + _test_plsql_insert_and_fetch(cursor, vec1, vec2, 6) + + vec3 = array.array("d", [3.5] * 65535) + vec4 = array.array("d", [2.5] * 65535) + _test_plsql_insert_and_fetch(cursor, vec3, vec4, 256) + + +def test_6447(cursor): + "6447 - test setting a PL-SQL type to a int8 vector" + vec1 = array.array("b", [1, 2, 3, 4]) + vec2 = array.array("b", [5, 6, 7, 8]) + _test_plsql_insert_and_fetch(cursor, vec1, vec2, 8) + + vec3 = array.array("b", [3] * 65535) + vec4 = array.array("b", [2] * 65535) + _test_plsql_insert_and_fetch(cursor, vec3, vec4, 256) diff --git a/tests/test_6500_vector_interop.py b/tests/test_6500_vector_interop.py index 5ccb6549..9d291846 100644 --- a/tests/test_6500_vector_interop.py +++ b/tests/test_6500_vector_interop.py @@ -27,221 +27,219 @@ """ import json -import unittest import oracledb -import test_env - - -@unittest.skipIf( - test_env.has_client_version(23, 4), - "client supports vectors directly", -) -@unittest.skipUnless(test_env.has_server_version(23, 4), "unsupported server") -class TestCase(test_env.BaseTestCase): - def test_6500(self): - "6500 - verify fetch information for older clients" - attr_names = ["name", "type_code", "is_json"] - expected_values = [ - ["INTCOL", oracledb.DB_TYPE_NUMBER, False], - ["VECTORFLEXALLCOL", oracledb.DB_TYPE_CLOB, True], - ["VECTORFLEXTYPECOL", oracledb.DB_TYPE_CLOB, True], - ["VECTORFLEX8COL", oracledb.DB_TYPE_CLOB, True], - ["VECTORFLEX32COL", oracledb.DB_TYPE_CLOB, True], - ["VECTORFLEX64COL", oracledb.DB_TYPE_CLOB, True], - ["VECTOR32COL", oracledb.DB_TYPE_CLOB, True], - ["VECTOR64COL", oracledb.DB_TYPE_CLOB, True], - ["VECTOR8COL", oracledb.DB_TYPE_CLOB, True], - ] - self.cursor.execute("select * from TestVectors") - values = [ - [getattr(i, n) for n in attr_names] - for i in self.cursor.description - ] - self.assertEqual(values, expected_values) - - def test_6501(self): - "6501 - verify default fetched value is a Python list" - expected_data = ( - 1, - [6501, 25.25, 18.125, -3.5], - [11, -12.5], - [-5.25, -1.75, 0, 18.375], - [-1, 1, -2, 2, -3, 3, -4, 4, -5, 5], - [-10, 10, -20, 20, -30, 30, -40, 40, -50, 50], - [-5, 5, -10, 10, -15, 15, -20, 20, -25, 25], - ) - self.cursor.execute("delete from TestVectors") - frag = ", ".join(f"'{d}'" for d in expected_data) - sql = f""" - insert into TestVectors - (IntCol, VectorFlexAllCol, 
VectorFlexTypeCol, - VectorFlex64Col, Vector32Col, Vector64Col, Vector8Col) - values ({frag})""" - self.cursor.execute(sql) - self.conn.commit() - self.cursor.execute( - """ - select - IntCol, - VectorFlexAllCol, - VectorFlexTypeCol, - VectorFlex64Col, - Vector32Col, - Vector64Col, - Vector8Col - from TestVectors - """ - ) - fetched_data = self.cursor.fetchone() - self.assertEqual(fetched_data, expected_data) - - def test_6502(self): - "6502 - verify fetched value as intermediate long value" - expected_data = ( - 1, - [6501, 25.25, 18.125, -3.5], - [11, -12.5], - [-5.25, -1.75, 0, 18.375], - [-1, 1, -2, 2, -3, 3, -4, 4, -5, 5], - [-10, 10, -20, 20, -30, 30, -40, 40, -50, 50], - [-5, 5, -10, 10, -15, 15, -20, 20, -25, 25], - ) - self.cursor.execute("delete from TestVectors") - frag = ", ".join(f"'{d}'" for d in expected_data) - sql = f""" - insert into TestVectors - (IntCol, VectorFlexAllCol, VectorFlexTypeCol, - VectorFlex64Col, Vector32Col, Vector64Col, Vector8Col) - values ({frag})""" - self.cursor.execute(sql) - self.conn.commit() - executions = [0] - - def type_handler(cursor, fetch_info): - executions[0] += 1 - if fetch_info.type_code is oracledb.DB_TYPE_CLOB: - return cursor.var( - oracledb.DB_TYPE_LONG, - arraysize=cursor.arraysize, - outconverter=lambda x: json.loads(x), - ) - - self.cursor.outputtypehandler = type_handler - self.cursor.execute( - """ - select - IntCol, - VectorFlexAllCol, - VectorFlexTypeCol, - VectorFlex64Col, - Vector32Col, - Vector64Col, - Vector8Col - from TestVectors - """ - ) - fetched_data = self.cursor.fetchone() - self.assertEqual(fetched_data, expected_data) - self.assertEqual(executions[0], 7) - - def test_6503(self): - "6503 - verify fetched value as intermediate string value" - expected_data = ( - 1, - [6501, 25.25, 18.125, -3.5], - [11, -12.5], - [-5.25, -1.75, 0, 18.375], - [-1, 1, -2, 2, -3, 3, -4, 4, -5, 5], - [-10, 10, -20, 20, -30, 30, -40, 40, -50, 50], - [-5, 5, -10, 10, -15, 15, -20, 20, -25, 25], - ) - self.cursor.execute("delete from TestVectors") - frag = ", ".join(f"'{d}'" for d in expected_data) - sql = f""" - insert into TestVectors - (IntCol, VectorFlexAllCol, VectorFlexTypeCol, - VectorFlex64Col, Vector32Col, Vector64Col, Vector8Col) - values ({frag})""" - self.cursor.execute(sql) - self.conn.commit() - executions = [0] - - def type_handler(cursor, fetch_info): - executions[0] += 1 - if fetch_info.type_code is oracledb.DB_TYPE_CLOB: - return cursor.var( - oracledb.DB_TYPE_VARCHAR, - arraysize=cursor.arraysize, - outconverter=lambda x: json.loads(x), - ) - - self.cursor.outputtypehandler = type_handler - self.cursor.execute( - """ - select - IntCol, - VectorFlexAllCol, - VectorFlexTypeCol, - VectorFlex64Col, - Vector32Col, - Vector64Col, - Vector8Col - from TestVectors - """ - ) - fetched_data = self.cursor.fetchone() - self.assertEqual(fetched_data, expected_data) - self.assertEqual(executions[0], 7) - - @unittest.skip("awaiting database support") - def test_6504(self): - "6502 - verify fetching large vector as intermediate long value" - num_dimensions = 35655 - expected_data = ( - 1, - [4] * num_dimensions, - [12.5] * num_dimensions, - [128.625] * num_dimensions, - ) - self.cursor.execute("delete from TestVectors") - sql = """ - insert into TestVectors - (IntCol, VectorFlex8Col, VectorFlex32Col, VectorFlex64Col) - values (:1, :2, :3, :4)""" - bind_data = [ - expected_data[0], - json.dumps(expected_data[1]), - json.dumps(expected_data[2]), - json.dumps(expected_data[3]), - ] - self.cursor.execute(sql, bind_data) - self.conn.commit() - 
executions = [0] - - def type_handler(cursor, fetch_info): - executions[0] += 1 - if fetch_info.type_code is oracledb.DB_TYPE_CLOB: - return cursor.var( - oracledb.DB_TYPE_LONG, - arraysize=cursor.arraysize, - outconverter=lambda x: json.loads(x), - ) - - self.cursor.outputtypehandler = type_handler - self.cursor.execute( - """" - select - IntCol, - VectorFlex8Col, - VectorFlex32Col, - VectorFlex64Col - from TestVectors""" - ) - fetched_data = self.cursor.fetchone() - self.assertEqual(fetched_data, expected_data) - self.assertEqual(executions[0], 7) - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(test_env): + if test_env.has_client_version(23, 4): + pytest.skip("client supports vectors directly") + if not test_env.has_server_version(23, 4): + pytest.skip("unsupported server") + + +def test_6500(cursor): + "6500 - verify fetch information for older clients" + attr_names = ["name", "type_code", "is_json"] + expected_values = [ + ["INTCOL", oracledb.DB_TYPE_NUMBER, False], + ["VECTORFLEXALLCOL", oracledb.DB_TYPE_CLOB, True], + ["VECTORFLEXTYPECOL", oracledb.DB_TYPE_CLOB, True], + ["VECTORFLEX8COL", oracledb.DB_TYPE_CLOB, True], + ["VECTORFLEX32COL", oracledb.DB_TYPE_CLOB, True], + ["VECTORFLEX64COL", oracledb.DB_TYPE_CLOB, True], + ["VECTOR32COL", oracledb.DB_TYPE_CLOB, True], + ["VECTOR64COL", oracledb.DB_TYPE_CLOB, True], + ["VECTOR8COL", oracledb.DB_TYPE_CLOB, True], + ] + cursor.execute("select * from TestVectors") + values = [[getattr(i, n) for n in attr_names] for i in cursor.description] + assert values == expected_values + + +def test_6501(conn, cursor): + "6501 - verify default fetched value is a Python list" + expected_data = ( + 1, + [6501, 25.25, 18.125, -3.5], + [11, -12.5], + [-5.25, -1.75, 0, 18.375], + [-1, 1, -2, 2, -3, 3, -4, 4, -5, 5], + [-10, 10, -20, 20, -30, 30, -40, 40, -50, 50], + [-5, 5, -10, 10, -15, 15, -20, 20, -25, 25], + ) + cursor.execute("delete from TestVectors") + frag = ", ".join(f"'{d}'" for d in expected_data) + sql = f""" + insert into TestVectors + (IntCol, VectorFlexAllCol, VectorFlexTypeCol, + VectorFlex64Col, Vector32Col, Vector64Col, Vector8Col) + values ({frag})""" + cursor.execute(sql) + conn.commit() + cursor.execute( + """ + select + IntCol, + VectorFlexAllCol, + VectorFlexTypeCol, + VectorFlex64Col, + Vector32Col, + Vector64Col, + Vector8Col + from TestVectors + """ + ) + fetched_data = cursor.fetchone() + assert fetched_data == expected_data + + +def test_6502(conn, cursor): + "6502 - verify fetched value as intermediate long value" + expected_data = ( + 1, + [6501, 25.25, 18.125, -3.5], + [11, -12.5], + [-5.25, -1.75, 0, 18.375], + [-1, 1, -2, 2, -3, 3, -4, 4, -5, 5], + [-10, 10, -20, 20, -30, 30, -40, 40, -50, 50], + [-5, 5, -10, 10, -15, 15, -20, 20, -25, 25], + ) + cursor.execute("delete from TestVectors") + frag = ", ".join(f"'{d}'" for d in expected_data) + sql = f""" + insert into TestVectors + (IntCol, VectorFlexAllCol, VectorFlexTypeCol, + VectorFlex64Col, Vector32Col, Vector64Col, Vector8Col) + values ({frag})""" + cursor.execute(sql) + conn.commit() + executions = [0] + + def type_handler(cursor, fetch_info): + executions[0] += 1 + if fetch_info.type_code is oracledb.DB_TYPE_CLOB: + return cursor.var( + oracledb.DB_TYPE_LONG, + arraysize=cursor.arraysize, + outconverter=lambda x: json.loads(x), + ) + + cursor.outputtypehandler = type_handler + cursor.execute( + """ + select + IntCol, + VectorFlexAllCol, + VectorFlexTypeCol, + VectorFlex64Col, + 
Vector32Col,
+            Vector64Col,
+            Vector8Col
+        from TestVectors
+        """
+    )
+    fetched_data = cursor.fetchone()
+    assert fetched_data == expected_data
+    assert executions[0] == 7
+
+
+def test_6503(conn, cursor):
+    "6503 - verify fetched value as intermediate string value"
+    expected_data = (
+        1,
+        [6501, 25.25, 18.125, -3.5],
+        [11, -12.5],
+        [-5.25, -1.75, 0, 18.375],
+        [-1, 1, -2, 2, -3, 3, -4, 4, -5, 5],
+        [-10, 10, -20, 20, -30, 30, -40, 40, -50, 50],
+        [-5, 5, -10, 10, -15, 15, -20, 20, -25, 25],
+    )
+    cursor.execute("delete from TestVectors")
+    frag = ", ".join(f"'{d}'" for d in expected_data)
+    sql = f"""
+        insert into TestVectors
+        (IntCol, VectorFlexAllCol, VectorFlexTypeCol,
+        VectorFlex64Col, Vector32Col, Vector64Col, Vector8Col)
+        values ({frag})"""
+    cursor.execute(sql)
+    conn.commit()
+    executions = [0]
+
+    def type_handler(cursor, fetch_info):
+        executions[0] += 1
+        if fetch_info.type_code is oracledb.DB_TYPE_CLOB:
+            return cursor.var(
+                oracledb.DB_TYPE_VARCHAR,
+                arraysize=cursor.arraysize,
+                outconverter=lambda x: json.loads(x),
+            )
+
+    cursor.outputtypehandler = type_handler
+    cursor.execute(
+        """
+        select
+            IntCol,
+            VectorFlexAllCol,
+            VectorFlexTypeCol,
+            VectorFlex64Col,
+            Vector32Col,
+            Vector64Col,
+            Vector8Col
+        from TestVectors
+        """
+    )
+    fetched_data = cursor.fetchone()
+    assert fetched_data == expected_data
+    assert executions[0] == 7
+
+
+@pytest.mark.skip("awaiting database support")
+def test_6504(conn, cursor):
+    "6504 - verify fetching large vector as intermediate long value"
+    num_dimensions = 35655
+    expected_data = (
+        1,
+        [4] * num_dimensions,
+        [12.5] * num_dimensions,
+        [128.625] * num_dimensions,
+    )
+    cursor.execute("delete from TestVectors")
+    sql = """
+        insert into TestVectors
+        (IntCol, VectorFlex8Col, VectorFlex32Col, VectorFlex64Col)
+        values (:1, :2, :3, :4)"""
+    bind_data = [
+        expected_data[0],
+        json.dumps(expected_data[1]),
+        json.dumps(expected_data[2]),
+        json.dumps(expected_data[3]),
+    ]
+    cursor.execute(sql, bind_data)
+    conn.commit()
+    executions = [0]
+
+    def type_handler(cursor, fetch_info):
+        executions[0] += 1
+        if fetch_info.type_code is oracledb.DB_TYPE_CLOB:
+            return cursor.var(
+                oracledb.DB_TYPE_LONG,
+                arraysize=cursor.arraysize,
+                outconverter=lambda x: json.loads(x),
+            )
+
+    cursor.outputtypehandler = type_handler
+    cursor.execute(
+        """
+        select
+            IntCol,
+            VectorFlex8Col,
+            VectorFlex32Col,
+            VectorFlex64Col
+        from TestVectors"""
+    )
+    fetched_data = cursor.fetchone()
+    assert fetched_data == expected_data
+    assert executions[0] == 7
diff --git a/tests/test_6600_defaults.py b/tests/test_6600_defaults.py
index e9078a19..f7c94cf2 100644
--- a/tests/test_6600_defaults.py
+++ b/tests/test_6600_defaults.py
@@ -31,201 +31,200 @@
 import tempfile
 
 import oracledb
-import test_env
-
-
-class TestCase(test_env.BaseTestCase):
-    def __verify_network_name_attr(self, name):
-        """
-        Verify that a default attribute is handled properly in both valid
-        and invalid cases.
- """ - for value, ok in [ - ("valid_value", True), - ("'contains_quotes'", False), - ('"contains_double_quotes"', False), - ("contains_opening_paren (", False), - ("contains_closing_paren )", False), - ("contains_equals =", False), - ("contains_trailing_slash\\", False), - ]: - args = {} - args[name] = value - if ok: - with test_env.DefaultsContextManager(name, value): - cp = oracledb.ConnectParams(**args) - self.assertEqual(getattr(cp, name), value) - else: - with self.assertRaisesFullCode("DPY-3029"): - with test_env.DefaultsContextManager(name, value): - pass - - def test_6600(self): - "6600 - test setting defaults.arraysize" - with test_env.DefaultsContextManager("arraysize", 50): - conn = test_env.get_connection() - cursor = conn.cursor() - self.assertEqual(cursor.arraysize, oracledb.defaults.arraysize) - - def test_6601(self): - "6601 - test getting decimals with defaults.fetch_decimals=True" - with test_env.DefaultsContextManager("fetch_decimals", True): - self.cursor.execute("select 9 from dual") - (result,) = self.cursor.fetchone() - self.assertIsInstance(result, decimal.Decimal) - - def test_6602(self): - "6602 - test getting string lob with defaults.fetch_lobs=False" - with test_env.DefaultsContextManager("fetch_lobs", False): - self.cursor.execute("select to_clob('Hello world') from dual") - (result,) = self.cursor.fetchone() - self.assertIsInstance(result, str) - - def test_6603(self): - "6603 - test setting defaults.prefetchrows" - with test_env.DefaultsContextManager("prefetchrows", 20): - conn = test_env.get_connection() - cursor = conn.cursor() - self.assertEqual( - cursor.prefetchrows, oracledb.defaults.prefetchrows - ) - - def test_6604(self): - "6604 - test setting defaults.stmtcachesize (pool)" - new_stmtcachesize = 15 - with test_env.DefaultsContextManager("stmtcachesize", 40): - pool = test_env.get_pool() - self.assertEqual( - pool.stmtcachesize, oracledb.defaults.stmtcachesize - ) - conn = pool.acquire() - self.assertEqual( - conn.stmtcachesize, oracledb.defaults.stmtcachesize - ) - pool = test_env.get_pool(stmtcachesize=new_stmtcachesize) - self.assertEqual(pool.stmtcachesize, new_stmtcachesize) - conn = pool.acquire() - self.assertEqual(conn.stmtcachesize, new_stmtcachesize) - - def test_6605(self): - "6605 - test setting defaults.stmtcachesize (standalone connection)" - new_stmtcachesize = 25 - with test_env.DefaultsContextManager("stmtcachesize", 50): - conn = test_env.get_connection() - self.assertEqual( - conn.stmtcachesize, oracledb.defaults.stmtcachesize - ) - conn = test_env.get_connection(stmtcachesize=new_stmtcachesize) - self.assertEqual(conn.stmtcachesize, new_stmtcachesize) - - def test_6606(self): - "6606 - fetch_lobs does not affect LOBS returned as OUT binds" - with test_env.DefaultsContextManager("fetch_lobs", False): - var = self.cursor.var(oracledb.DB_TYPE_CLOB) - self.cursor.execute( - "begin :value := to_clob('test clob'); end;", - value=var, - ) - self.assertIsInstance(var.getvalue(), oracledb.LOB) - - def test_6607(self): - "6607 - test setting defaults.config_dir" - with tempfile.TemporaryDirectory() as temp_dir: - new_temp_dir = os.path.join(temp_dir, "subdir") - with test_env.DefaultsContextManager("config_dir", temp_dir): - self.assertEqual(oracledb.defaults.config_dir, temp_dir) - params = oracledb.ConnectParams() - self.assertEqual(params.config_dir, temp_dir) - params = oracledb.ConnectParams(config_dir=new_temp_dir) - self.assertEqual(params.config_dir, new_temp_dir) - - def test_6608(self): - "6608 - test setting 
defaults.stmtcachesize (ConnectParams)" - new_stmtcachesize = 35 - with test_env.DefaultsContextManager("stmtcachesize", 60): + + +def _verify_network_name_attr(test_env, name): + """ + Verify that a default attribute is handled properly in both valid + and invalid cases. + """ + for value, ok in [ + ("valid_value", True), + ("'contains_quotes'", False), + ('"contains_double_quotes"', False), + ("contains_opening_paren (", False), + ("contains_closing_paren )", False), + ("contains_equals =", False), + ("contains_trailing_slash\\", False), + ]: + args = {} + args[name] = value + if ok: + with test_env.defaults_context_manager(name, value): + cp = oracledb.ConnectParams(**args) + assert getattr(cp, name) == value + else: + with test_env.assert_raises_full_code("DPY-3029"): + with test_env.defaults_context_manager(name, value): + pass + + +def test_6600(test_env): + "6600 - test setting defaults.arraysize" + with test_env.defaults_context_manager("arraysize", 50): + conn = test_env.get_connection() + cursor = conn.cursor() + assert cursor.arraysize == oracledb.defaults.arraysize + + +def test_6601(cursor, test_env): + "6601 - test getting decimals with defaults.fetch_decimals=True" + with test_env.defaults_context_manager("fetch_decimals", True): + cursor.execute("select 9 from dual") + (result,) = cursor.fetchone() + assert isinstance(result, decimal.Decimal) + + +def test_6602(cursor, test_env): + "6602 - test getting string lob with defaults.fetch_lobs=False" + with test_env.defaults_context_manager("fetch_lobs", False): + cursor.execute("select to_clob('Hello world') from dual") + (result,) = cursor.fetchone() + assert isinstance(result, str) + + +def test_6603(test_env): + "6603 - test setting defaults.prefetchrows" + with test_env.defaults_context_manager("prefetchrows", 20): + conn = test_env.get_connection() + cursor = conn.cursor() + assert cursor.prefetchrows == oracledb.defaults.prefetchrows + + +def test_6604(test_env): + "6604 - test setting defaults.stmtcachesize (pool)" + new_stmtcachesize = 15 + with test_env.defaults_context_manager("stmtcachesize", 40): + pool = test_env.get_pool() + assert pool.stmtcachesize == oracledb.defaults.stmtcachesize + conn = pool.acquire() + assert conn.stmtcachesize == oracledb.defaults.stmtcachesize + pool = test_env.get_pool(stmtcachesize=new_stmtcachesize) + assert pool.stmtcachesize == new_stmtcachesize + conn = pool.acquire() + assert conn.stmtcachesize == new_stmtcachesize + + +def test_6605(test_env): + "6605 - test setting defaults.stmtcachesize (standalone connection)" + new_stmtcachesize = 25 + with test_env.defaults_context_manager("stmtcachesize", 50): + conn = test_env.get_connection() + assert conn.stmtcachesize == oracledb.defaults.stmtcachesize + conn = test_env.get_connection(stmtcachesize=new_stmtcachesize) + assert conn.stmtcachesize == new_stmtcachesize + + +def test_6606(cursor, test_env): + "6606 - fetch_lobs does not affect LOBS returned as OUT binds" + with test_env.defaults_context_manager("fetch_lobs", False): + var = cursor.var(oracledb.DB_TYPE_CLOB) + cursor.execute( + "begin :value := to_clob('test clob'); end;", + value=var, + ) + assert isinstance(var.getvalue(), oracledb.LOB) + + +def test_6607(test_env): + "6607 - test setting defaults.config_dir" + with tempfile.TemporaryDirectory() as temp_dir: + new_temp_dir = os.path.join(temp_dir, "subdir") + with test_env.defaults_context_manager("config_dir", temp_dir): + assert oracledb.defaults.config_dir == temp_dir params = oracledb.ConnectParams() - self.assertEqual( 
- params.stmtcachesize, oracledb.defaults.stmtcachesize - ) - params = oracledb.ConnectParams(stmtcachesize=new_stmtcachesize) - self.assertEqual(params.stmtcachesize, new_stmtcachesize) - - def test_6609(self): - "6609 - test defaults.stmtcachesize persists after setting it again" - value = 50 - new_value = 29 - with test_env.DefaultsContextManager("stmtcachesize", value): + assert params.config_dir == temp_dir + params = oracledb.ConnectParams(config_dir=new_temp_dir) + assert params.config_dir == new_temp_dir + + +def test_6608(test_env): + "6608 - test setting defaults.stmtcachesize (ConnectParams)" + new_stmtcachesize = 35 + with test_env.defaults_context_manager("stmtcachesize", 60): + params = oracledb.ConnectParams() + assert params.stmtcachesize == oracledb.defaults.stmtcachesize + params = oracledb.ConnectParams(stmtcachesize=new_stmtcachesize) + assert params.stmtcachesize == new_stmtcachesize + + +def test_6609(test_env): + "6609 - test defaults.stmtcachesize persists after setting it again" + value = 50 + new_value = 29 + with test_env.defaults_context_manager("stmtcachesize", value): + pool = test_env.get_pool() + pooled_conn = pool.acquire() + params = oracledb.ConnectParams() + standalone_conn = test_env.get_connection() + with test_env.defaults_context_manager("stmtcachesize", new_value): + assert pool.stmtcachesize == value + assert pooled_conn.stmtcachesize == value + assert params.stmtcachesize == value + assert standalone_conn.stmtcachesize == value pool = test_env.get_pool() pooled_conn = pool.acquire() params = oracledb.ConnectParams() standalone_conn = test_env.get_connection() - with test_env.DefaultsContextManager("stmtcachesize", new_value): - self.assertEqual(pool.stmtcachesize, value) - self.assertEqual(pooled_conn.stmtcachesize, value) - self.assertEqual(params.stmtcachesize, value) - self.assertEqual(standalone_conn.stmtcachesize, value) - pool = test_env.get_pool() - pooled_conn = pool.acquire() - params = oracledb.ConnectParams() - standalone_conn = test_env.get_connection() - self.assertEqual(pool.stmtcachesize, new_value) - self.assertEqual(pooled_conn.stmtcachesize, new_value) - self.assertEqual(params.stmtcachesize, new_value) - self.assertEqual(standalone_conn.stmtcachesize, new_value) - - def test_6610(self): - "6610 - test setting defaults.terminal" - with test_env.DefaultsContextManager("terminal", "newterminal"): - params = oracledb.ConnectParams() - self.assertEqual(params.terminal, oracledb.defaults.terminal) - - def test_6611(self): - "6611 - test setting defaults.driver_name" - with test_env.DefaultsContextManager("driver_name", "newdriver"): - params = oracledb.ConnectParams() - self.assertEqual(params.driver_name, oracledb.defaults.driver_name) - - def test_6612(self): - "6612 - test setting defaults.program attribute" - self.__verify_network_name_attr("program") - - def test_6613(self): - "6613 - test setting defaults.machine attribute" - self.__verify_network_name_attr("machine") - - def test_6614(self): - "6614 - test setting defaults.osuser attribute" - self.__verify_network_name_attr("osuser") - - @test_env.skip_unless_thin_mode() - def test_6615(self): - "6615 - test program with two pools" - default_value = "defaultprogram" - new_value = "newprogram" - verify_sql = ( - "select program from v$session " - "where sid = sys_context('userenv', 'sid')" - ) - with test_env.DefaultsContextManager("program", default_value): - - # create pool using default value - pool = test_env.get_pool() - with pool.acquire() as conn: - with conn.cursor() as 
cursor: - cursor.execute(verify_sql) - (fetched_value,) = cursor.fetchone() - self.assertEqual(fetched_value, default_value) - pool.close() - - # create pool using new value - pool = test_env.get_pool(program=new_value) - with pool.acquire() as conn: - with conn.cursor() as cursor: - cursor.execute(verify_sql) - (fetched_value,) = cursor.fetchone() - self.assertEqual(fetched_value, new_value) - pool.close() - - -if __name__ == "__main__": - test_env.run_test_cases() + assert pool.stmtcachesize == new_value + assert pooled_conn.stmtcachesize == new_value + assert params.stmtcachesize == new_value + assert standalone_conn.stmtcachesize == new_value + + +def test_6610(test_env): + "6610 - test setting defaults.terminal" + with test_env.defaults_context_manager("terminal", "newterminal"): + params = oracledb.ConnectParams() + assert params.terminal == oracledb.defaults.terminal + + +def test_6611(test_env): + "6611 - test setting defaults.driver_name" + with test_env.defaults_context_manager("driver_name", "newdriver"): + params = oracledb.ConnectParams() + assert params.driver_name == oracledb.defaults.driver_name + + +def test_6612(test_env): + "6612 - test setting defaults.program attribute" + _verify_network_name_attr(test_env, "program") + + +def test_6613(test_env): + "6613 - test setting defaults.machine attribute" + _verify_network_name_attr(test_env, "machine") + + +def test_6614(test_env): + "6614 - test setting defaults.osuser attribute" + _verify_network_name_attr(test_env, "osuser") + + +def test_6615(skip_unless_thin_mode, test_env): + "6615 - test program with two pools" + default_value = "defaultprogram" + new_value = "newprogram" + verify_sql = ( + "select program from v$session " + "where sid = sys_context('userenv', 'sid')" + ) + with test_env.defaults_context_manager("program", default_value): + + # create pool using default value + pool = test_env.get_pool() + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute(verify_sql) + (fetched_value,) = cursor.fetchone() + assert fetched_value == default_value + pool.close() + + # create pool using new value + pool = test_env.get_pool(program=new_value) + with pool.acquire() as conn: + with conn.cursor() as cursor: + cursor.execute(verify_sql) + (fetched_value,) = cursor.fetchone() + assert fetched_value == new_value + pool.close() diff --git a/tests/test_6700_json_23.py b/tests/test_6700_json_23.py index ef3365d7..62766960 100644 --- a/tests/test_6700_json_23.py +++ b/tests/test_6700_json_23.py @@ -29,151 +29,165 @@ import json import oracledb -import test_env - - -@test_env.skip_unless_native_json_extensions_supported() -class TestCase(test_env.BaseTestCase): - def __test_fetch_json(self, value, table_name="TestJson"): - """ - Tests fetching JSON encoded by the database. - """ - self.cursor.execute(f"delete from {table_name}") - self.cursor.execute( - f"insert into {table_name} values (1, :1)", [json.dumps(value)] - ) - self.cursor.execute(f"select JsonCol from {table_name}") - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(fetched_value, value) - - def __test_round_trip_json(self, value): - """ - Tests fetching JSON encoded by the driver. 
- """ - self.cursor.execute("delete from TestJson") - self.cursor.setinputsizes(oracledb.DB_TYPE_JSON) - self.cursor.execute("insert into TestJson values (1, :1)", [value]) - self.cursor.execute("select JsonCol from TestJson") - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(fetched_value, value) - - def test_6700(self): - "6700 - fetch JSON with a field name greater than 255 bytes" - fname_long = "A" * 256 - value = {} - value[fname_long] = 6700 - self.__test_fetch_json(value) - - def test_6701(self): - "6701 - fetch JSON with field names greater and less than 255 bytes" - fname_short = "short_name" - fname_long = "A" * 256 - value = {} - value[fname_short] = "Short name" - value[fname_long] = 6701 - self.__test_fetch_json(value) - - def test_6702(self): - "6702 - fetch JSON with many field names greater than 255 bytes" - value = {} - for i in range(26): - for j in range(26): - fname = chr(i + ord("A")) + chr(j + ord("A")) + "X" * 254 - value[fname] = 12.25 - self.__test_fetch_json(value) - - def test_6703(self): - "6703 - fetch JSON with many field names (large and small)" - value = {} - for i in range(26): - for j in range(26): - short_name = chr(i + ord("A")) + chr(j + ord("A")) - value[short_name] = 6.75 - long_name = short_name + "X" * 254 - value[long_name] = 12.25 - self.__test_fetch_json(value) - - def test_6704(self): - "6704 - fetch JSON with many field names (one large and many small)" - value = {} - long_name = "B" * 256 - value[long_name] = 6704 - for i in range(26): - for j in range(26): - short_name = chr(i + ord("A")) + chr(j + ord("A")) - value[short_name] = 8.625 - self.__test_fetch_json(value) - - def test_6705(self): - "6705 - round trip JSON with a field name greater than 255 bytes" - fname_long = "A" * 256 - value = {} - value[fname_long] = 6705 - self.__test_round_trip_json(value) - - def test_6706(self): - "6706 - round trip JSON with field names (small and large)" - fname_short = "short_name" - fname_long = "A" * 256 - value = {} - value[fname_short] = "Short name" - value[fname_long] = 6706 - self.__test_round_trip_json(value) - - def test_6707(self): - "6707 - round trip JSON with many field names greater than 255 bytes" - value = {} - for i in range(26): - for j in range(26): - fname = chr(i + ord("A")) + chr(j + ord("A")) + "X" * 254 - value[fname] = 12.25 - self.__test_round_trip_json(value) - - def test_6708(self): - "6708 - round trip JSON with many field names (large and small)" - value = {} - for i in range(26): - for j in range(26): - short_name = chr(i + ord("A")) + chr(j + ord("A")) - value[short_name] = 6.75 - long_name = short_name + "X" * 254 - value[long_name] = 12.25 - self.__test_round_trip_json(value) - - def test_6709(self): - "6709 - round trip JSON with many field names (1 large and many small)" - value = {} - long_name = "B" * 256 - value[long_name] = 6704 - for i in range(26): - for j in range(26): - short_name = chr(i + ord("A")) + chr(j + ord("A")) - value[short_name] = 8.625 - self.__test_round_trip_json(value) - - def test_6710(self): - "6710 - fetch JSON with relative offsets" - value = {} - fname_long = "C" * 256 - value[fname_long] = 6710 - value["num_list"] = [1.5, 2.25, 3.75, 5.5] - value["str_list"] = ["string 1", "string 2"] - self.__test_fetch_json(value, "TestCompressedJson") - - def test_6711(self): - "6711 - fetch JSON with relative offsets and shared fields and values" - value = [] - for i in range(15): - value.append(dict(a=6711, b="String Value")) - self.__test_fetch_json(value, "TestCompressedJson") - - def 
test_6712(self): - "6712 - fetch JSON with relative offsets and shared fields, not values" - value = [] - for i in range(15): - value.append(dict(a=6711 + i, b=f"String Value {i}")) - self.__test_fetch_json(value, "TestCompressedJson") - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(test_env): + if not test_env.has_client_and_server_version(23): + pytest.skip("no native JSON extensions support") + + +def _test_fetch_json(cursor, value, table_name="TestJson"): + """ + Tests fetching JSON encoded by the database. + """ + cursor.execute(f"delete from {table_name}") + cursor.execute( + f"insert into {table_name} values (1, :1)", [json.dumps(value)] + ) + cursor.execute(f"select JsonCol from {table_name}") + (fetched_value,) = cursor.fetchone() + assert fetched_value == value + + +def _test_round_trip_json(cursor, value): + """ + Tests fetching JSON encoded by the driver. + """ + cursor.execute("delete from TestJson") + cursor.setinputsizes(oracledb.DB_TYPE_JSON) + cursor.execute("insert into TestJson values (1, :1)", [value]) + cursor.execute("select JsonCol from TestJson") + (fetched_value,) = cursor.fetchone() + assert fetched_value == value + + +def test_6700(cursor): + "6700 - fetch JSON with a field name greater than 255 bytes" + fname_long = "A" * 256 + value = {} + value[fname_long] = 6700 + _test_fetch_json(cursor, value) + + +def test_6701(cursor): + "6701 - fetch JSON with field names greater and less than 255 bytes" + fname_short = "short_name" + fname_long = "A" * 256 + value = {} + value[fname_short] = "Short name" + value[fname_long] = 6701 + _test_fetch_json(cursor, value) + + +def test_6702(cursor): + "6702 - fetch JSON with many field names greater than 255 bytes" + value = {} + for i in range(26): + for j in range(26): + fname = chr(i + ord("A")) + chr(j + ord("A")) + "X" * 254 + value[fname] = 12.25 + _test_fetch_json(cursor, value) + + +def test_6703(cursor): + "6703 - fetch JSON with many field names (large and small)" + value = {} + for i in range(26): + for j in range(26): + short_name = chr(i + ord("A")) + chr(j + ord("A")) + value[short_name] = 6.75 + long_name = short_name + "X" * 254 + value[long_name] = 12.25 + _test_fetch_json(cursor, value) + + +def test_6704(cursor): + "6704 - fetch JSON with many field names (one large and many small)" + value = {} + long_name = "B" * 256 + value[long_name] = 6704 + for i in range(26): + for j in range(26): + short_name = chr(i + ord("A")) + chr(j + ord("A")) + value[short_name] = 8.625 + _test_fetch_json(cursor, value) + + +def test_6705(cursor): + "6705 - round trip JSON with a field name greater than 255 bytes" + fname_long = "A" * 256 + value = {} + value[fname_long] = 6705 + _test_round_trip_json(cursor, value) + + +def test_6706(cursor): + "6706 - round trip JSON with field names (small and large)" + fname_short = "short_name" + fname_long = "A" * 256 + value = {} + value[fname_short] = "Short name" + value[fname_long] = 6706 + _test_round_trip_json(cursor, value) + + +def test_6707(cursor): + "6707 - round trip JSON with many field names greater than 255 bytes" + value = {} + for i in range(26): + for j in range(26): + fname = chr(i + ord("A")) + chr(j + ord("A")) + "X" * 254 + value[fname] = 12.25 + _test_round_trip_json(cursor, value) + + +def test_6708(cursor): + "6708 - round trip JSON with many field names (large and small)" + value = {} + for i in range(26): + for j in range(26): + short_name = chr(i + ord("A")) + chr(j + ord("A")) + 
value[short_name] = 6.75 + long_name = short_name + "X" * 254 + value[long_name] = 12.25 + _test_round_trip_json(cursor, value) + + +def test_6709(cursor): + "6709 - round trip JSON with many field names (1 large and many small)" + value = {} + long_name = "B" * 256 + value[long_name] = 6704 + for i in range(26): + for j in range(26): + short_name = chr(i + ord("A")) + chr(j + ord("A")) + value[short_name] = 8.625 + _test_round_trip_json(cursor, value) + + +def test_6710(cursor): + "6710 - fetch JSON with relative offsets" + value = {} + fname_long = "C" * 256 + value[fname_long] = 6710 + value["num_list"] = [1.5, 2.25, 3.75, 5.5] + value["str_list"] = ["string 1", "string 2"] + _test_fetch_json(cursor, value, "TestCompressedJson") + + +def test_6711(cursor): + "6711 - fetch JSON with relative offsets and shared fields and values" + value = [] + for i in range(15): + value.append(dict(a=6711, b="String Value")) + _test_fetch_json(cursor, value, "TestCompressedJson") + + +def test_6712(cursor): + "6712 - fetch JSON with relative offsets and shared fields, not values" + value = [] + for i in range(15): + value.append(dict(a=6711 + i, b=f"String Value {i}")) + _test_fetch_json(cursor, value, "TestCompressedJson") diff --git a/tests/test_6800_error_async.py b/tests/test_6800_error_async.py index 79a765c1..79e8f904 100644 --- a/tests/test_6800_error_async.py +++ b/tests/test_6800_error_async.py @@ -29,195 +29,200 @@ import pickle import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_6800(self): - "6800 - test parse error returns offset correctly" - with self.assertRaises(oracledb.Error) as cm: - await self.cursor.execute("begin t_Missing := 5; end;") - (error_obj,) = cm.exception.args - self.assertEqual(error_obj.full_code, "ORA-06550") - self.assertEqual(error_obj.offset, 6) - - async def test_6801(self): - "6801 - test picking/unpickling an error object" - with self.assertRaises(oracledb.Error) as cm: - await self.cursor.execute( - """ - begin - raise_application_error(-20101, 'Test!'); - end; - """ - ) - (error_obj,) = cm.exception.args - self.assertIsInstance(error_obj, oracledb._Error) - self.assertIn("Test!", error_obj.message) - self.assertEqual(error_obj.code, 20101) - self.assertEqual(error_obj.offset, 0) - self.assertIsInstance(error_obj.isrecoverable, bool) - self.assertFalse(error_obj.isrecoverable) - new_error_obj = pickle.loads(pickle.dumps(error_obj)) - self.assertIsInstance(new_error_obj, oracledb._Error) - self.assertEqual(new_error_obj.message, error_obj.message) - self.assertEqual(new_error_obj.code, error_obj.code) - self.assertEqual(new_error_obj.offset, error_obj.offset) - self.assertEqual(new_error_obj.context, error_obj.context) - self.assertEqual(new_error_obj.isrecoverable, error_obj.isrecoverable) - - async def test_6802(self): - "6802 - test generation of full_code for ORA, DPI and DPY errors" - cursor = self.conn.cursor() - with self.assertRaises(oracledb.Error) as cm: - await cursor.execute(None) - (error_obj,) = cm.exception.args - self.assertEqual(error_obj.full_code, "DPY-2001") - - async def test_6803(self): - "6803 - test generation of error help portal URL" - cursor = self.conn.cursor() - with self.assertRaises(oracledb.Error) as cm: - await cursor.execute("select 1 / 0 from dual") - (error_obj,) = cm.exception.args - to_check = "Help: https://docs.oracle.com/error-help/db/ora-01476/" - self.assertIn(to_check, error_obj.message) - - async def test_6804(self): - "6804 - verify 
warning is generated when creating a procedure" - proc_name = "bad_proc_1704" - self.assertIsNone(self.cursor.warning) - await self.cursor.execute( - f""" - create or replace procedure {proc_name} as - begin - null - end; - """ - ) - self.assertEqual(self.cursor.warning.full_code, "DPY-7000") - await self.cursor.execute( - f""" - create or replace procedure {proc_name} as - begin - null; - end; - """ - ) - self.assertIsNone(self.cursor.warning) - await self.cursor.execute(f"drop procedure {proc_name}") - - async def test_6805(self): - "6805 - verify warning is generated when creating a function" - func_name = "bad_func_1705" - await self.cursor.execute( - f""" - create or replace function {func_name} - return number as - begin - return null - end; - """ - ) - self.assertEqual(self.cursor.warning.full_code, "DPY-7000") - await self.cursor.execute(f"drop function {func_name}") - self.assertIsNone(self.cursor.warning) - - async def test_6806(self): - "6806 - verify warning is generated when creating a type" - type_name = "bad_type_1706" - await self.cursor.execute( - f""" - create or replace type {type_name} as object ( - x bad_type - ); - """ - ) - self.assertEqual(self.cursor.warning.full_code, "DPY-7000") - await self.cursor.execute(f"drop type {type_name}") - self.assertIsNone(self.cursor.warning) - - async def test_6807(self): - "6807 - verify warning is saved in a pipeline" - proc_name = "bad_proc_1704" - func_name = "bad_func_1705" - type_name = "bad_type_1706" - pipeline = oracledb.create_pipeline() - pipeline.add_execute( - f""" - create or replace procedure {proc_name} as - begin - null - end; - """ - ) - pipeline.add_execute( - f""" - create or replace procedure {proc_name} as - begin - null; - end; - """ - ) - pipeline.add_execute(f"drop procedure {proc_name}") - pipeline.add_execute( - f""" - create or replace function {func_name} - return number as - begin - return null - end; - """ - ) - pipeline.add_execute(f"drop function {func_name}") - pipeline.add_execute( - f""" - create or replace type {type_name} as object ( - x bad_type - ); +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_6800(async_cursor): + "6800 - test parse error returns offset correctly" + with pytest.raises(oracledb.Error) as excinfo: + await async_cursor.execute("begin t_Missing := 5; end;") + (error_obj,) = excinfo.value.args + assert error_obj.full_code == "ORA-06550" + assert error_obj.offset == 6 + + +async def test_6801(async_cursor): + "6801 - test picking/unpickling an error object" + with pytest.raises(oracledb.Error) as excinfo: + await async_cursor.execute( """ - ) - pipeline.add_execute(f"drop type {type_name}") - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[0].warning.full_code, "DPY-7000") - self.assertIsNone(results[1].warning) - self.assertIsNone(results[2].warning) - self.assertEqual(results[3].warning.full_code, "DPY-7000") - self.assertIsNone(results[4].warning) - self.assertEqual(results[5].warning.full_code, "DPY-7000") - self.assertIsNone(results[6].warning) - - async def test_6808(self): - "6808 - verify warning is saved in a pipeline with a single operation" - proc_name = "bad_proc_6808" - pipeline = oracledb.create_pipeline() - pipeline.add_execute( - f""" - create or replace procedure {proc_name} as begin - null + raise_application_error(-20101, 'Test!'); end; """ ) - (result,) = await self.conn.run_pipeline(pipeline) - self.assertEqual(result.warning.full_code, 
"DPY-7000") - await self.cursor.execute(f"drop procedure {proc_name}") - - @test_env.skip_if_drcp() - async def test_6809(self): - "6809 - error from killed connection is deemed recoverable" - admin_conn = await test_env.get_admin_connection_async() - conn = await test_env.get_connection_async() - sid, serial = await self.get_sid_serial(conn) - with admin_conn.cursor() as admin_cursor: - sql = f"alter system kill session '{sid},{serial}'" - await admin_cursor.execute(sql) - with self.assertRaisesFullCode("DPY-4011") as cm: - with conn.cursor() as cursor: - await cursor.execute("select user from dual") - self.assertTrue(cm.error_obj.isrecoverable) - - -if __name__ == "__main__": - test_env.run_test_cases() + (error_obj,) = excinfo.value.args + assert isinstance(error_obj, oracledb._Error) + assert "Test!" in error_obj.message + assert error_obj.code == 20101 + assert error_obj.offset == 0 + assert isinstance(error_obj.isrecoverable, bool) + assert not error_obj.isrecoverable + new_error_obj = pickle.loads(pickle.dumps(error_obj)) + assert isinstance(new_error_obj, oracledb._Error) + assert new_error_obj.message == error_obj.message + assert new_error_obj.code == error_obj.code + assert new_error_obj.offset == error_obj.offset + assert new_error_obj.context == error_obj.context + assert new_error_obj.isrecoverable == error_obj.isrecoverable + + +async def test_6802(async_cursor): + "6802 - test generation of full_code for ORA, DPI and DPY errors" + with pytest.raises(oracledb.Error) as excinfo: + await async_cursor.execute(None) + (error_obj,) = excinfo.value.args + assert error_obj.full_code == "DPY-2001" + + +async def test_6803(async_cursor): + "6803 - test generation of error help portal URL" + with pytest.raises(oracledb.Error) as excinfo: + await async_cursor.execute("select 1 / 0 from dual") + (error_obj,) = excinfo.value.args + to_check = "Help: https://docs.oracle.com/error-help/db/ora-01476/" + assert to_check in error_obj.message + + +async def test_6804(async_cursor): + "6804 - verify warning is generated when creating a procedure" + proc_name = "bad_proc_1704" + assert async_cursor.warning is None + await async_cursor.execute( + f""" + create or replace procedure {proc_name} as + begin + null + end; + """ + ) + assert async_cursor.warning.full_code == "DPY-7000" + await async_cursor.execute( + f""" + create or replace procedure {proc_name} as + begin + null; + end; + """ + ) + assert async_cursor.warning is None + await async_cursor.execute(f"drop procedure {proc_name}") + + +async def test_6805(async_cursor): + "6805 - verify warning is generated when creating a function" + func_name = "bad_func_1705" + await async_cursor.execute( + f""" + create or replace function {func_name} + return number as + begin + return null + end; + """ + ) + assert async_cursor.warning.full_code == "DPY-7000" + await async_cursor.execute(f"drop function {func_name}") + assert async_cursor.warning is None + + +async def test_6806(async_cursor): + "6806 - verify warning is generated when creating a type" + type_name = "bad_type_1706" + await async_cursor.execute( + f""" + create or replace type {type_name} as object ( + x bad_type + ); + """ + ) + assert async_cursor.warning.full_code == "DPY-7000" + await async_cursor.execute(f"drop type {type_name}") + assert async_cursor.warning is None + + +async def test_6807(async_conn): + "6807 - verify warning is saved in a pipeline" + proc_name = "bad_proc_1704" + func_name = "bad_func_1705" + type_name = "bad_type_1706" + pipeline = 
oracledb.create_pipeline() + pipeline.add_execute( + f""" + create or replace procedure {proc_name} as + begin + null + end; + """ + ) + pipeline.add_execute( + f""" + create or replace procedure {proc_name} as + begin + null; + end; + """ + ) + pipeline.add_execute(f"drop procedure {proc_name}") + pipeline.add_execute( + f""" + create or replace function {func_name} + return number as + begin + return null + end; + """ + ) + pipeline.add_execute(f"drop function {func_name}") + pipeline.add_execute( + f""" + create or replace type {type_name} as object ( + x bad_type + ); + """ + ) + pipeline.add_execute(f"drop type {type_name}") + results = await async_conn.run_pipeline(pipeline) + assert results[0].warning.full_code == "DPY-7000" + assert results[1].warning is None + assert results[2].warning is None + assert results[3].warning.full_code == "DPY-7000" + assert results[4].warning is None + assert results[5].warning.full_code == "DPY-7000" + assert results[6].warning is None + + +async def test_6808(async_conn, async_cursor): + "6808 - verify warning is saved in a pipeline with a single operation" + proc_name = "bad_proc_6808" + pipeline = oracledb.create_pipeline() + pipeline.add_execute( + f""" + create or replace procedure {proc_name} as + begin + null + end; + """ + ) + (result,) = await async_conn.run_pipeline(pipeline) + assert result.warning.full_code == "DPY-7000" + await async_cursor.execute(f"drop procedure {proc_name}") + + +async def test_6809(skip_if_drcp, test_env): + "6809 - error from killed connection is deemed recoverable" + admin_conn = await test_env.get_admin_connection_async() + conn = await test_env.get_connection_async() + sid, serial = (conn.session_id, conn.serial_num) + with admin_conn.cursor() as admin_cursor: + sql = f"alter system kill session '{sid},{serial}'" + await admin_cursor.execute(sql) + with test_env.assert_raises_full_code("DPY-4011") as cm: + with conn.cursor() as cursor: + await cursor.execute("select user from dual") + assert cm.error_obj.isrecoverable diff --git a/tests/test_6900_oson.py b/tests/test_6900_oson.py index 767e3197..d773eb3c 100644 --- a/tests/test_6900_oson.py +++ b/tests/test_6900_oson.py @@ -27,86 +27,87 @@ """ import oracledb -import test_env - - -@test_env.skip_unless_native_json_supported() -class TestCase(test_env.BaseTestCase): - def test_6900(self): - "6900 - test OSON metadata" - self.cursor.execute("select * from TestOsonCols") - int_col_metadata, oson_col_metadata = self.cursor.description - self.assertFalse(int_col_metadata.is_oson) - self.assertTrue(oson_col_metadata.is_oson) - - def test_6901(self): - "6901 - test simple query of OSON encoded bytes" - self.cursor.execute("delete from TestOsonCols") - self.cursor.execute( - """ - insert into TestOsonCols (IntCol, OsonCol) - values (1, '{"id": 6901, "value" : "string 6901"}')""" - ) - self.conn.commit() - self.cursor.execute("select OsonCol from TestOsonCols") - (oson_val,) = self.cursor.fetchone() - expected_val = dict(id=6901, value="string 6901") - self.assertEqual(oson_val, expected_val) - - def test_6902(self): - "6902 - test round trip of OSON encoded bytes" - value = dict(id=6902, value="string 6902") - self.cursor.execute("delete from TestOsonCols") - encoded_oson = self.conn.encode_oson(value) - self.cursor.execute( - "insert into TestOsonCols values (1, :data)", [encoded_oson] - ) - self.conn.commit() - self.cursor.execute("select OsonCol from TestOsonCols") - (oson_val,) = self.cursor.fetchone() - self.assertEqual(oson_val, value) - - def test_6903(self): - 
"6903 - test encoding and decoding a value" - value = dict(id=6903, value="string 6903") - out_value = self.conn.decode_oson(self.conn.encode_oson(value)) - self.assertEqual(out_value, value) - - def test_6904(self): - "6904 - test decoding a non encoded value" - value = b"{'not a previous encoded value': 3}" - with self.assertRaisesFullCode("DPY-5004"): - self.conn.decode_oson(value) - - def test_6905(self): - "6905 - test inserting oson inside a lob" - value = dict(id=6905, value="string 6905") - self.cursor.execute("delete from TestOsonCols") - encoded_oson = self.conn.encode_oson(value) - lob = self.conn.createlob(oracledb.DB_TYPE_BLOB, encoded_oson) - self.cursor.execute( - "insert into TestOsonCols values (1, :data)", [lob] - ) - self.conn.commit() - self.cursor.execute("select OsonCol from TestOsonCols") - (oson_val,) = self.cursor.fetchone() - self.assertEqual(oson_val, value) - - def test_6906(self): - "6906 - test inserting oson as json" - self.cursor.execute("delete from TestOsonCols") - value = dict(id=6906, value="string 6906") - oson = self.conn.encode_oson(value) - self.cursor.setinputsizes(oracledb.DB_TYPE_JSON) - self.cursor.execute( - "insert into TestOsonCols values (1, :data)", [oson] - ) - self.conn.commit() - self.cursor.execute("select OsonCol from TestOsonCols") - (oson_val,) = self.cursor.fetchone() - oson_val = self.conn.decode_oson(oson_val) - self.assertEqual(oson_val, value) - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(skip_unless_native_json_supported): + pass + + +def test_6900(cursor): + "6900 - test OSON metadata" + cursor.execute("select * from TestOsonCols") + int_col_metadata, oson_col_metadata = cursor.description + assert not int_col_metadata.is_oson + assert oson_col_metadata.is_oson + + +def test_6901(conn, cursor): + "6901 - test simple query of OSON encoded bytes" + cursor.execute("delete from TestOsonCols") + cursor.execute( + """ + insert into TestOsonCols (IntCol, OsonCol) + values (1, '{"id": 6901, "value" : "string 6901"}')""" + ) + conn.commit() + cursor.execute("select OsonCol from TestOsonCols") + (oson_val,) = cursor.fetchone() + expected_val = dict(id=6901, value="string 6901") + assert oson_val == expected_val + + +def test_6902(conn, cursor): + "6902 - test round trip of OSON encoded bytes" + value = dict(id=6902, value="string 6902") + cursor.execute("delete from TestOsonCols") + encoded_oson = conn.encode_oson(value) + cursor.execute( + "insert into TestOsonCols values (1, :data)", [encoded_oson] + ) + conn.commit() + cursor.execute("select OsonCol from TestOsonCols") + (oson_val,) = cursor.fetchone() + assert oson_val == value + + +def test_6903(conn): + "6903 - test encoding and decoding a value" + value = dict(id=6903, value="string 6903") + out_value = conn.decode_oson(conn.encode_oson(value)) + assert out_value == value + + +def test_6904(conn, test_env): + "6904 - test decoding a non encoded value" + value = b"{'not a previous encoded value': 3}" + with test_env.assert_raises_full_code("DPY-5004"): + conn.decode_oson(value) + + +def test_6905(conn, cursor): + "6905 - test inserting oson inside a lob" + value = dict(id=6905, value="string 6905") + cursor.execute("delete from TestOsonCols") + encoded_oson = conn.encode_oson(value) + lob = conn.createlob(oracledb.DB_TYPE_BLOB, encoded_oson) + cursor.execute("insert into TestOsonCols values (1, :data)", [lob]) + conn.commit() + cursor.execute("select OsonCol from TestOsonCols") + (oson_val,) = 
cursor.fetchone() + assert oson_val == value + + +def test_6906(conn, cursor): + "6906 - test inserting oson as json" + cursor.execute("delete from TestOsonCols") + value = dict(id=6906, value="string 6906") + oson = conn.encode_oson(value) + cursor.setinputsizes(oracledb.DB_TYPE_JSON) + cursor.execute("insert into TestOsonCols values (1, :data)", [oson]) + conn.commit() + cursor.execute("select OsonCol from TestOsonCols") + (oson_val,) = cursor.fetchone() + oson_val = conn.decode_oson(oson_val) + assert oson_val == value diff --git a/tests/test_7000_connection_async_shortcut_methods.py b/tests/test_7000_connection_async_shortcut_methods.py index 6581bfbc..3b384f9a 100644 --- a/tests/test_7000_connection_async_shortcut_methods.py +++ b/tests/test_7000_connection_async_shortcut_methods.py @@ -29,326 +29,343 @@ import decimal import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_7000(self): - "7000 - test execute() and fetchall()" - await self.conn.execute("truncate table TestTempTable") - await self.conn.execute( - "insert into TestTempTable (IntCol) values (:1)", [77] - ) - await self.conn.execute( - "insert into TestTempTable (IntCol) values (:val)", dict(val=15) - ) - await self.conn.commit() - - res = await self.conn.fetchall( - "select IntCol from TestTempTable order by IntCol" - ) - self.assertEqual(res, [(15,), (77,)]) - - async def test_7001(self): - "7001 - test executemany()" - await self.conn.execute("truncate table TestTempTable") - await self.conn.executemany( - "insert into TestTempTable (IntCol) values (:1)", [(2,), (3,)] - ) - await self.conn.executemany( - "insert into TestTempTable (IntCol) values (:data)", - [{"data": 4}, {"data": 5}], - ) - await self.conn.commit() - res = await self.conn.fetchall( - "select IntCol from TestTempTable order by IntCol" - ) - self.assertEqual(res, [(2,), (3,), (4,), (5,)]) - - async def test_7002(self): - "7002 - test fetchall() with arraysize" - await self.conn.execute("truncate table TestTempTable") - data = [(1,), (2,), (3,), (4,)] - await self.conn.executemany( - "insert into TestTempTable (IntCol) values (:value)", - [{"value": i} for i, in data], - ) - await self.conn.commit() - - await self.setup_round_trip_checker() - res = await self.conn.fetchall( - "select IntCol from TestTempTable order by IntCol", arraysize=1 - ) - self.assertEqual(res, data) - await self.assertRoundTrips(5) - - res = await self.conn.fetchall( - "select IntCol from TestTempTable order by IntCol", - arraysize=len(data), - ) - self.assertEqual(res, data) - await self.assertRoundTrips(2) - - async def test_7003(self): - "7003 - test fetchall() with rowfactory" - await self.conn.execute("truncate table TestTempTable") - await self.conn.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'test_7003') - """ - ) - await self.conn.commit() - - column_names = ["INTCOL", "STRINGCOL1"] - - def rowfactory(*row): - return dict(zip(column_names, row)) - - res = await self.conn.fetchall( - "select IntCol, StringCol1 from TestTempTable", - rowfactory=rowfactory, - ) - expected_value = [{"INTCOL": 1, "STRINGCOL1": "test_7003"}] - self.assertEqual(res, expected_value) - - async def test_7004(self): - "7004 - test fetchone()" - await self.conn.execute("truncate table TestTempTable") - await self.conn.executemany( - "insert into TestTempTable (IntCol) values (:1)", [(9,), (10,)] - ) - await self.conn.commit() - - res = await self.conn.fetchone( - "select IntCol from 
TestTempTable order by IntCol" - ) - self.assertEqual(res, (9,)) - - res = await self.conn.fetchone("select :1 from dual", [23]) - self.assertEqual(res, (23,)) - - res = await self.conn.fetchone("select :val from dual", {"val": 5}) - self.assertEqual(res, (5,)) - - async def test_7005(self): - "7005 - test fetchone() with rowfactory" - await self.conn.execute("truncate table TestTempTable") - await self.conn.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int, :str) - """, - [{"int": 3, "str": "Mac"}, {"int": 4, "str": "Doc"}], - ) - await self.conn.commit() - - column_names = ["INT", "STRING"] - - def rowfactory(*row): - return dict(zip(column_names, row)) - - res = await self.conn.fetchone( - "select IntCol, StringCol1 from TestTempTable order by IntCol", - rowfactory=rowfactory, - ) - self.assertEqual(res, {"INT": 3, "STRING": "Mac"}) - - async def test_7006(self): - "7006 - test fetchmany()" - data = [(i,) for i in range(10)] - await self.conn.execute("truncate table TestTempTable") - await self.conn.executemany( - "insert into TestTempTable (IntCol) values (:1)", data - ) - await self.conn.commit() - res = await self.conn.fetchmany( - "select IntCol from TestTempTable order by IntCol" - ) - self.assertEqual(res, data) - - res = await self.conn.fetchmany("select :1 from dual", [1099]) - self.assertEqual(res, [(1099,)]) - - res = await self.conn.fetchmany("select :val from dual", {"val": 366}) - self.assertEqual(res, [(366,)]) - - async def test_7007(self): - "7007 - test fetchmany() with num_rows" - data = [(i,) for i in range(10)] - await self.conn.execute("truncate table TestTempTable") - await self.conn.executemany( - "insert into TestTempTable (IntCol) values (:1)", data - ) - num_rows = 7 - res = await self.conn.fetchmany( - "select IntCol from TestTempTable order by IntCol", +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_7000(async_conn): + "7000 - test execute() and fetchall()" + await async_conn.execute("truncate table TestTempTable") + await async_conn.execute( + "insert into TestTempTable (IntCol) values (:1)", [77] + ) + await async_conn.execute( + "insert into TestTempTable (IntCol) values (:val)", dict(val=15) + ) + await async_conn.commit() + + res = await async_conn.fetchall( + "select IntCol from TestTempTable order by IntCol" + ) + assert res == [(15,), (77,)] + + +async def test_7001(async_conn): + "7001 - test executemany()" + await async_conn.execute("truncate table TestTempTable") + await async_conn.executemany( + "insert into TestTempTable (IntCol) values (:1)", [(2,), (3,)] + ) + await async_conn.executemany( + "insert into TestTempTable (IntCol) values (:data)", + [{"data": 4}, {"data": 5}], + ) + await async_conn.commit() + res = await async_conn.fetchall( + "select IntCol from TestTempTable order by IntCol" + ) + assert res == [(2,), (3,), (4,), (5,)] + + +async def test_7002(async_conn, round_trip_checker_async): + "7002 - test fetchall() with arraysize" + await async_conn.execute("truncate table TestTempTable") + data = [(1,), (2,), (3,), (4,)] + await async_conn.executemany( + "insert into TestTempTable (IntCol) values (:value)", + [{"value": i} for i, in data], + ) + await async_conn.commit() + + await round_trip_checker_async.get_value_async() + res = await async_conn.fetchall( + "select IntCol from TestTempTable order by IntCol", arraysize=1 + ) + assert res == data + assert await round_trip_checker_async.get_value_async() == 5 + + res = 
await async_conn.fetchall( + "select IntCol from TestTempTable order by IntCol", + arraysize=len(data), + ) + assert res == data + assert await round_trip_checker_async.get_value_async() == 2 + + +async def test_7003(async_conn): + "7003 - test fetchall() with rowfactory" + await async_conn.execute("truncate table TestTempTable") + await async_conn.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'test_7003') + """ + ) + await async_conn.commit() + + column_names = ["INTCOL", "STRINGCOL1"] + + def rowfactory(*row): + return dict(zip(column_names, row)) + + res = await async_conn.fetchall( + "select IntCol, StringCol1 from TestTempTable", + rowfactory=rowfactory, + ) + expected_value = [{"INTCOL": 1, "STRINGCOL1": "test_7003"}] + assert res == expected_value + + +async def test_7004(async_conn): + "7004 - test fetchone()" + await async_conn.execute("truncate table TestTempTable") + await async_conn.executemany( + "insert into TestTempTable (IntCol) values (:1)", [(9,), (10,)] + ) + await async_conn.commit() + + res = await async_conn.fetchone( + "select IntCol from TestTempTable order by IntCol" + ) + assert res == (9,) + + res = await async_conn.fetchone("select :1 from dual", [23]) + assert res == (23,) + + res = await async_conn.fetchone("select :val from dual", {"val": 5}) + assert res == (5,) + + +async def test_7005(async_conn): + "7005 - test fetchone() with rowfactory" + await async_conn.execute("truncate table TestTempTable") + await async_conn.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int, :str) + """, + [{"int": 3, "str": "Mac"}, {"int": 4, "str": "Doc"}], + ) + await async_conn.commit() + + column_names = ["INT", "STRING"] + + def rowfactory(*row): + return dict(zip(column_names, row)) + + res = await async_conn.fetchone( + "select IntCol, StringCol1 from TestTempTable order by IntCol", + rowfactory=rowfactory, + ) + assert res == {"INT": 3, "STRING": "Mac"} + + +async def test_7006(async_conn): + "7006 - test fetchmany()" + data = [(i,) for i in range(10)] + await async_conn.execute("truncate table TestTempTable") + await async_conn.executemany( + "insert into TestTempTable (IntCol) values (:1)", data + ) + await async_conn.commit() + res = await async_conn.fetchmany( + "select IntCol from TestTempTable order by IntCol" + ) + assert res == data + + res = await async_conn.fetchmany("select :1 from dual", [1099]) + assert res == [(1099,)] + + res = await async_conn.fetchmany("select :val from dual", {"val": 366}) + assert res == [(366,)] + + +async def test_7007(async_conn): + "7007 - test fetchmany() with num_rows" + data = [(i,) for i in range(10)] + await async_conn.execute("truncate table TestTempTable") + await async_conn.executemany( + "insert into TestTempTable (IntCol) values (:1)", data + ) + num_rows = 7 + res = await async_conn.fetchmany( + "select IntCol from TestTempTable order by IntCol", + num_rows=num_rows, + ) + assert res == data[:num_rows] + + +async def test_7008(test_env): + "7008 - test fetchmany() with rowfactory and num_rows" + conn = await test_env.get_connection_async() + await conn.execute("truncate table TestTempTable") + await conn.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int, :str) + """, + [{"int": 29, "str": "Feb"}, {"int": 4, "str": "Monday"}], + ) + await conn.commit() + + column_names = ["INT", "STRING"] + + def rowfactory(*row): + return dict(zip(column_names, row)) + + res = await conn.fetchmany( + "select IntCol, StringCol1 from 
TestTempTable order by IntCol", + rowfactory=rowfactory, + ) + expected_value = [ + {"INT": 4, "STRING": "Monday"}, + {"INT": 29, "STRING": "Feb"}, + ] + assert res == expected_value + + res = await conn.fetchmany( + "select IntCol, StringCol1 from TestTempTable order by IntCol", + rowfactory=rowfactory, + num_rows=1, + ) + assert res == [{"INT": 4, "STRING": "Monday"}] + + +async def test_7009(async_conn): + "7009 - test callfunc()" + # parameters + res = await async_conn.callfunc("func_Test", oracledb.NUMBER, ("Yes", 7)) + assert res == 10 + + # keyword parameters + kwargs = {"a_String": "Keyword", "a_ExtraAmount": 12} + res = await async_conn.callfunc( + "func_Test", oracledb.NUMBER, keyword_parameters=kwargs + ) + assert res == 19 + + # paramters and keyword parameters + kwargs = {"a_ExtraAmount": 25} + res = await async_conn.callfunc( + "func_Test", oracledb.NUMBER, ["Mixed"], kwargs + ) + assert res == 30 + + +async def test_7010(async_conn, async_cursor): + "7010 - test callproc() with parameters" + var = async_cursor.var(oracledb.NUMBER) + results = await async_conn.callproc("proc_Test", ("hi", 5, var)) + assert results == ["hi", 10, 2.0] + + +async def test_7011(async_conn, async_cursor): + "7011 - test callproc() with keyword_parameters" + in_out_value = async_cursor.var(oracledb.NUMBER) + in_out_value.setvalue(0, 7) + out_value = async_cursor.var(oracledb.NUMBER) + kwargs = dict( + a_InValue="Peace", a_InOutValue=in_out_value, a_OutValue=out_value + ) + results = await async_conn.callproc("proc_Test", [], kwargs) + assert results == [] + assert in_out_value.getvalue() == 35 + assert out_value.getvalue() == 5 + + +async def test_7012(async_conn, async_cursor): + "7012 - test callproc() with parameters and keyword_parameters" + in_out_value = async_cursor.var(oracledb.NUMBER) + in_out_value.setvalue(0, 8) + out_value = async_cursor.var(oracledb.NUMBER) + kwargs = dict(a_InOutValue=in_out_value, a_OutValue=out_value) + results = await async_conn.callproc("proc_Test", ["Input_7712"], kwargs) + assert results == ["Input_7712"] + assert in_out_value.getvalue() == 80 + assert out_value.getvalue() == 10 + + +async def test_7013(async_conn): + "7013 - test fetchmany() num_rows with 0 and negative values" + data = [(i,) for i in range(10)] + await async_conn.execute("truncate table TestTempTable") + await async_conn.executemany( + "insert into TestTempTable (IntCol) values (:1)", data + ) + await async_conn.commit() + for num_rows in (0, -1, -10): + res = await async_conn.fetchmany( + "select IntCol from TestTempTable", num_rows=num_rows, ) - self.assertEqual(res, data[:num_rows]) - - async def test_7008(self): - "7008 - test fetchmany() with rowfactory and num_rows" - conn = await test_env.get_connection_async() - await conn.execute("truncate table TestTempTable") - await conn.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int, :str) - """, - [{"int": 29, "str": "Feb"}, {"int": 4, "str": "Monday"}], - ) - await conn.commit() - - column_names = ["INT", "STRING"] - - def rowfactory(*row): - return dict(zip(column_names, row)) - - res = await conn.fetchmany( - "select IntCol, StringCol1 from TestTempTable order by IntCol", - rowfactory=rowfactory, - ) - expected_value = [ - {"INT": 4, "STRING": "Monday"}, - {"INT": 29, "STRING": "Feb"}, - ] - self.assertEqual(res, expected_value) - - res = await conn.fetchmany( - "select IntCol, StringCol1 from TestTempTable order by IntCol", - rowfactory=rowfactory, - num_rows=1, - ) - self.assertEqual(res, [{"INT": 4, 
"STRING": "Monday"}]) - - async def test_7009(self): - "7009 - test callfunc()" - # parameters - res = await self.conn.callfunc( - "func_Test", oracledb.NUMBER, ("Yes", 7) - ) - self.assertEqual(res, 10) - - # keyword parameters - kwargs = {"a_String": "Keyword", "a_ExtraAmount": 12} - res = await self.conn.callfunc( - "func_Test", oracledb.NUMBER, keyword_parameters=kwargs - ) - self.assertEqual(res, 19) - - # paramters and keyword parameters - kwargs = {"a_ExtraAmount": 25} - res = await self.conn.callfunc( - "func_Test", oracledb.NUMBER, ["Mixed"], kwargs - ) - self.assertEqual(res, 30) - - async def test_7010(self): - "7010 - test callproc() with parameters" - var = self.cursor.var(oracledb.NUMBER) - results = await self.conn.callproc("proc_Test", ("hi", 5, var)) - self.assertEqual(results, ["hi", 10, 2.0]) - - async def test_7011(self): - "7011 - test callproc() with keyword_parameters" - in_out_value = self.cursor.var(oracledb.NUMBER) - in_out_value.setvalue(0, 7) - out_value = self.cursor.var(oracledb.NUMBER) - kwargs = dict( - a_InValue="Peace", a_InOutValue=in_out_value, a_OutValue=out_value - ) - results = await self.conn.callproc("proc_Test", [], kwargs) - self.assertEqual(results, []) - self.assertEqual(in_out_value.getvalue(), 35) - self.assertEqual(out_value.getvalue(), 5) - - async def test_7012(self): - "7012 - test callproc() with parameters and keyword_parameters" - in_out_value = self.cursor.var(oracledb.NUMBER) - in_out_value.setvalue(0, 8) - out_value = self.cursor.var(oracledb.NUMBER) - kwargs = dict(a_InOutValue=in_out_value, a_OutValue=out_value) - results = await self.conn.callproc("proc_Test", ["Input_7712"], kwargs) - self.assertEqual(results, ["Input_7712"]) - self.assertEqual(in_out_value.getvalue(), 80) - self.assertEqual(out_value.getvalue(), 10) - - async def test_7013(self): - "7013 - test fetchmany() num_rows with 0 and negative values" - data = [(i,) for i in range(10)] - await self.conn.execute("truncate table TestTempTable") - await self.conn.executemany( - "insert into TestTempTable (IntCol) values (:1)", data - ) - await self.conn.commit() - for num_rows in (0, -1, -10): - res = await self.conn.fetchmany( - "select IntCol from TestTempTable", - num_rows=num_rows, - ) - self.assertEqual(res, []) - - async def test_7014(self): - "7014 - test shortcut methods with transaction_in_progress" - await self.conn.execute("truncate table TestTempTable") - self.assertFalse(self.conn.transaction_in_progress) - await self.conn.execute( - "insert into TestTempTable (IntCol) values (5)", - ) - self.assertTrue(self.conn.transaction_in_progress) - await self.conn.commit() - self.assertFalse(self.conn.transaction_in_progress) - - async def test_7015(self): - "7015 - test fetchone() with fetch_lobs=False" - value = "test_7015" - (result,) = await self.conn.fetchone( - "select to_clob(:1) from dual", [value], fetch_lobs=False - ) - self.assertEqual(result, value) - - async def test_7016(self): - "7016 - test fetchmany() with fetch_lobs=False" - value = "test_7016" - rows = await self.conn.fetchmany( - "select to_clob(:1) from dual", [value], fetch_lobs=False - ) - self.assertEqual(rows, [(value,)]) - - async def test_7017(self): - "7017 - test fetchall() with fetch_lobs=False" - value = "test_7017" - rows = await self.conn.fetchall( - "select to_clob(:1) from dual", [value], fetch_lobs=False - ) - self.assertEqual(rows, [(value,)]) - - async def test_7018(self): - "7018 - test fetchone() with fetch_decimals=True" - value = 7018 - (result,) = await self.conn.fetchone( - 
"select :1 from dual", [value], fetch_decimals=True - ) - self.assertTrue(isinstance(result, decimal.Decimal)) - - async def test_7019(self): - "7019 - test fetchmany() with fetch_decimals=True" - value = 7019 - rows = await self.conn.fetchmany( - "select :1 from dual", [value], fetch_decimals=True - ) - self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) - - async def test_7020(self): - "7020 - test fetchall() with fetch_decimals=True" - value = 7020 - rows = await self.conn.fetchall( - "select :1 from dual", [value], fetch_decimals=True - ) - self.assertTrue(isinstance(rows[0][0], decimal.Decimal)) - - -if __name__ == "__main__": - test_env.run_test_cases() + assert res == [] + + +async def test_7014(async_conn): + "7014 - test shortcut methods with transaction_in_progress" + await async_conn.execute("truncate table TestTempTable") + assert not async_conn.transaction_in_progress + await async_conn.execute( + "insert into TestTempTable (IntCol) values (5)", + ) + assert async_conn.transaction_in_progress + await async_conn.commit() + assert not async_conn.transaction_in_progress + + +async def test_7015(async_conn): + "7015 - test fetchone() with fetch_lobs=False" + value = "test_7015" + (result,) = await async_conn.fetchone( + "select to_clob(:1) from dual", [value], fetch_lobs=False + ) + assert result == value + + +async def test_7016(async_conn): + "7016 - test fetchmany() with fetch_lobs=False" + value = "test_7016" + rows = await async_conn.fetchmany( + "select to_clob(:1) from dual", [value], fetch_lobs=False + ) + assert rows == [(value,)] + + +async def test_7017(async_conn): + "7017 - test fetchall() with fetch_lobs=False" + value = "test_7017" + rows = await async_conn.fetchall( + "select to_clob(:1) from dual", [value], fetch_lobs=False + ) + assert rows == [(value,)] + + +async def test_7018(async_conn): + "7018 - test fetchone() with fetch_decimals=True" + value = 7018 + (result,) = await async_conn.fetchone( + "select :1 from dual", [value], fetch_decimals=True + ) + assert isinstance(result, decimal.Decimal) + + +async def test_7019(async_conn): + "7019 - test fetchmany() with fetch_decimals=True" + value = 7019 + rows = await async_conn.fetchmany( + "select :1 from dual", [value], fetch_decimals=True + ) + assert isinstance(rows[0][0], decimal.Decimal) + + +async def test_7020(async_conn): + "7020 - test fetchall() with fetch_decimals=True" + value = 7020 + rows = await async_conn.fetchall( + "select :1 from dual", [value], fetch_decimals=True + ) + assert isinstance(rows[0][0], decimal.Decimal) diff --git a/tests/test_7100_interval_ym_var.py b/tests/test_7100_interval_ym_var.py index f182571e..e6f0722e 100644 --- a/tests/test_7100_interval_ym_var.py +++ b/tests/test_7100_interval_ym_var.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -27,174 +27,183 @@ """ import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - def setUp(self): - super().setUp() - self.raw_data = [] - self.data_by_key = {} - for i in range(1, 11): - delta = oracledb.IntervalYM(i - 5, -i if i - 5 < 0 else i) - if i % 2 == 0: - nullable_delta = None - else: - nullable_delta = oracledb.IntervalYM(i + 5, i + 2) - precision_col = oracledb.IntervalYM(3, 8) - data_tuple = (i, delta, nullable_delta, precision_col) - self.raw_data.append(data_tuple) - self.data_by_key[i] = data_tuple - - def test_7100(self): - "7100 - test binding in an interval" - value = oracledb.IntervalYM(1, 6) - self.cursor.execute( - "select * from TestIntervalYMs where IntervalCol = :value", - value=value, - ) - self.assertEqual(self.cursor.fetchall(), [self.data_by_key[6]]) - - def test_7101(self): - "7101 - test binding in a null" - self.cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_YM) - self.cursor.execute( - "select * from TestIntervalYMs where IntervalCol = :value", - value=None, - ) - self.assertEqual(self.cursor.fetchall(), []) - - def test_7102(self): - "7102 - test binding out with set input sizes defined" - bind_vars = self.cursor.setinputsizes( - value=oracledb.DB_TYPE_INTERVAL_YM - ) - self.cursor.execute( - """ - begin - :value := to_yminterval('-25-7'); - end; - """ - ) - expected_value = oracledb.IntervalYM(years=-25, months=-7) - self.assertEqual(bind_vars["value"].getvalue(), expected_value) - - def test_7103(self): - "7103 - test binding in/out with set input sizes defined" - bind_vars = self.cursor.setinputsizes( - value=oracledb.DB_TYPE_INTERVAL_YM - ) - self.cursor.execute( - """ - begin - :value := :value + to_yminterval('3-8'); - end; - """, - value=oracledb.IntervalYM(years=8, months=4), - ) - expected_value = oracledb.IntervalYM(years=12, months=0) - self.assertEqual(bind_vars["value"].getvalue(), expected_value) - - def test_7104(self): - "7104 - test binding out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_INTERVAL_YM) - self.cursor.execute( - """ - begin - :value := to_yminterval('1-9'); - end; - """, - value=var, - ) - expected_value = oracledb.IntervalYM(years=1, months=9) - self.assertEqual(var.getvalue(), expected_value) - - def test_7105(self): - "7105 - test binding in/out with cursor.var() method" - var = self.cursor.var(oracledb.DB_TYPE_INTERVAL_YM) - var.setvalue(0, oracledb.IntervalYM(years=3, months=10)) - self.cursor.execute( - """ - begin - :value := :value + to_yminterval('2-5'); - end; - """, - value=var, - ) - expected_value = oracledb.IntervalYM(years=6, months=3) - self.assertEqual(var.getvalue(), expected_value) - - def test_7106(self): - "7106 - test cursor description is accurate" - self.cursor.execute("select * from TestIntervalYMs") - expected_value = [ - ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), - ( - "INTERVALCOL", - oracledb.DB_TYPE_INTERVAL_YM, - None, - None, - 2, - 0, - False, - ), - ( - "NULLABLECOL", - oracledb.DB_TYPE_INTERVAL_YM, - None, - None, - 2, - 0, - True, - ), - ( - "INTERVALPRECISIONCOL", - oracledb.DB_TYPE_INTERVAL_YM, - None, - None, - 3, - 0, - True, - ), - ] - self.assertEqual(self.cursor.description, expected_value) - - def test_7107(self): - "7107 - test that fetching all of the data returns the correct results" - self.cursor.execute("select * From TestIntervalYMs order by IntCol") - 
self.assertEqual(self.cursor.fetchall(), self.raw_data) - self.assertEqual(self.cursor.fetchall(), []) - - def test_7108(self): - "7108 - test that fetching data in chunks returns the correct results" - self.cursor.execute("select * From TestIntervalYMs order by IntCol") - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[0:3]) - self.assertEqual(self.cursor.fetchmany(2), self.raw_data[3:5]) - self.assertEqual(self.cursor.fetchmany(4), self.raw_data[5:9]) - self.assertEqual(self.cursor.fetchmany(3), self.raw_data[9:]) - self.assertEqual(self.cursor.fetchmany(3), []) - - def test_7109(self): - "7109 - test that fetching a single row returns the correct results" - self.cursor.execute( - """ - select * - from TestIntervalYMs - where IntCol in (3, 4) - order by IntCol - """ - ) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[3]) - self.assertEqual(self.cursor.fetchone(), self.data_by_key[4]) - self.assertIsNone(self.cursor.fetchone()) - - def test_7110(self): - "7110 - test binding and fetching a negative interval" - value = oracledb.IntervalYM(years=-12, months=-5) - self.cursor.execute("select :1 from dual", [value]) - (result,) = self.cursor.fetchone() - self.assertEqual(result, value) - - -if __name__ == "__main__": - test_env.run_test_cases() +import pytest + + +@pytest.fixture(scope="module") +def module_data(): + data = [] + for i in range(1, 11): + delta = oracledb.IntervalYM(i - 5, -i if i - 5 < 0 else i) + if i % 2 == 0: + nullable_delta = None + else: + nullable_delta = oracledb.IntervalYM(i + 5, i + 2) + precision_col = oracledb.IntervalYM(3, 8) + data_tuple = (i, delta, nullable_delta, precision_col) + data.append(data_tuple) + return data + + +@pytest.fixture(scope="module") +def module_data_by_key(module_data): + data_by_key = {} + for row in module_data: + data_by_key[row[0]] = row + return data_by_key + + +def test_7100(cursor, module_data_by_key): + "7100 - test binding in an interval" + value = oracledb.IntervalYM(1, 6) + cursor.execute( + "select * from TestIntervalYMs where IntervalCol = :value", + value=value, + ) + assert cursor.fetchall() == [module_data_by_key[6]] + + +def test_7101(cursor): + "7101 - test binding in a null" + cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_YM) + cursor.execute( + "select * from TestIntervalYMs where IntervalCol = :value", + value=None, + ) + assert cursor.fetchall() == [] + + +def test_7102(cursor): + "7102 - test binding out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_YM) + cursor.execute( + """ + begin + :value := to_yminterval('-25-7'); + end; + """ + ) + expected_value = oracledb.IntervalYM(years=-25, months=-7) + assert bind_vars["value"].getvalue() == expected_value + + +def test_7103(cursor): + "7103 - test binding in/out with set input sizes defined" + bind_vars = cursor.setinputsizes(value=oracledb.DB_TYPE_INTERVAL_YM) + cursor.execute( + """ + begin + :value := :value + to_yminterval('3-8'); + end; + """, + value=oracledb.IntervalYM(years=8, months=4), + ) + expected_value = oracledb.IntervalYM(years=12, months=0) + assert bind_vars["value"].getvalue() == expected_value + + +def test_7104(cursor): + "7104 - test binding out with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_INTERVAL_YM) + cursor.execute( + """ + begin + :value := to_yminterval('1-9'); + end; + """, + value=var, + ) + expected_value = oracledb.IntervalYM(years=1, months=9) + assert var.getvalue() == expected_value + + +def test_7105(cursor): + "7105 - test binding in/out 
with cursor.var() method" + var = cursor.var(oracledb.DB_TYPE_INTERVAL_YM) + var.setvalue(0, oracledb.IntervalYM(years=3, months=10)) + cursor.execute( + """ + begin + :value := :value + to_yminterval('2-5'); + end; + """, + value=var, + ) + expected_value = oracledb.IntervalYM(years=6, months=3) + assert var.getvalue() == expected_value + + +def test_7106(cursor): + "7106 - test cursor description is accurate" + cursor.execute("select * from TestIntervalYMs") + expected_value = [ + ("INTCOL", oracledb.DB_TYPE_NUMBER, 10, None, 9, 0, False), + ( + "INTERVALCOL", + oracledb.DB_TYPE_INTERVAL_YM, + None, + None, + 2, + 0, + False, + ), + ( + "NULLABLECOL", + oracledb.DB_TYPE_INTERVAL_YM, + None, + None, + 2, + 0, + True, + ), + ( + "INTERVALPRECISIONCOL", + oracledb.DB_TYPE_INTERVAL_YM, + None, + None, + 3, + 0, + True, + ), + ] + assert cursor.description == expected_value + + +def test_7107(cursor, module_data): + "7107 - test that fetching all of the data returns the correct results" + cursor.execute("select * From TestIntervalYMs order by IntCol") + assert cursor.fetchall() == module_data + assert cursor.fetchall() == [] + + +def test_7108(cursor, module_data): + "7108 - test that fetching data in chunks returns the correct results" + cursor.execute("select * From TestIntervalYMs order by IntCol") + assert cursor.fetchmany(3) == module_data[0:3] + assert cursor.fetchmany(2) == module_data[3:5] + assert cursor.fetchmany(4) == module_data[5:9] + assert cursor.fetchmany(3) == module_data[9:] + assert cursor.fetchmany(3) == [] + + +def test_7109(cursor, module_data_by_key): + "7109 - test that fetching a single row returns the correct results" + cursor.execute( + """ + select * + from TestIntervalYMs + where IntCol in (3, 4) + order by IntCol + """ + ) + assert cursor.fetchone() == module_data_by_key[3] + assert cursor.fetchone() == module_data_by_key[4] + assert cursor.fetchone() is None + + +def test_7110(cursor): + "7110 - test binding and fetching a negative interval" + value = oracledb.IntervalYM(years=-12, months=-5) + cursor.execute("select :1 from dual", [value]) + (result,) = cursor.fetchone() + assert result == value diff --git a/tests/test_7200_tnsnames.py b/tests/test_7200_tnsnames.py index 0126b465..7526bfbf 100644 --- a/tests/test_7200_tnsnames.py +++ b/tests/test_7200_tnsnames.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# Copyright (c) 2024, Oracle and/or its affiliates. +# Copyright (c) 2024, 2025, Oracle and/or its affiliates. 
# # This software is dual-licensed to you under the Universal Permissive License # (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License @@ -30,517 +30,523 @@ import tempfile import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - requires_connection = False - - def test_7200(self): - "7200 - test simple tnsnames.ora entry" - host = "host_7200" - port = 7200 - service_name = "service_7200" - network_service_name = "nsn_7200" - connect_string = f""" - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) - (CONNECT_DATA=(SERVICE_NAME={service_name})))""" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string}") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - def test_7201(self): - "7201 - test missing entry in tnsnames.ora" - with tempfile.TemporaryDirectory() as temp_dir: - params = oracledb.ConnectParams(config_dir=temp_dir) - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write("# no entries") - with self.assertRaisesFullCode("DPY-4000"): - params.parse_connect_string("nsn_7201") - self.assertEqual(params.get_network_service_names(), []) - - def test_7202(self): - "7202 - test missing tnsnames.ora in configuration directory" - with tempfile.TemporaryDirectory() as temp_dir: - params = oracledb.ConnectParams(config_dir=temp_dir) - with self.assertRaisesFullCode("DPY-4026"): - params.parse_connect_string("nsn_7202") - with self.assertRaisesFullCode("DPY-4026"): - params.get_network_service_names() - - def test_7203(self): - "7203 - test tnsnames.ora with invalid entries" - host = "host_7203" - port = 7203 - service_name = "service_7203" - connect_string = f""" - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) - (CONNECT_DATA=(SERVICE_NAME={service_name})))""" - network_service_name = "nsn_7203" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write("some garbage data which is not a valid entry\n") - f.write(f"{network_service_name} = {connect_string}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - - def test_7204(self): - "7204 - test tnsnames.ora with multiple aliases on one line" - host = "host_7204" - port = 7204 - service_name = "service_7204" - connect_string = f""" - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) - (CONNECT_DATA=(SERVICE_NAME={service_name})))""" - network_service_names = "nsn_7204a,nsn_7204b" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_names} = {connect_string}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - for name in network_service_names.split(","): - params.parse_connect_string(name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_network_service_names(), - 
network_service_names.upper().split(","), - ) - def test_7205(self): - "7205 - test easy connect string in tnsnames.ora" - host = "host_7205" - port = 7205 - service_name = "service_7205" - connect_string = f"tcp://{host}:{port}/{service_name}" - network_service_name = "nsn_7205" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string}") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual( - params.get_network_service_names(), - [network_service_name.upper()], - ) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - - def test_7206(self): - "7206 - parse connect descriptor with / character in tnsnames.ora" - host = "host_7206" - port = 7206 - service_name = "service_7206" - wallet_location = "/some/dir/7206" - connect_string = f""" - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) - (CONNECT_DATA=(SERVICE_NAME={service_name})) - (SECURITY=(MY_WALLET_DIRECTORY={wallet_location})))""" - network_service_name = "nsn_7206" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string}") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual(params.wallet_location, wallet_location) - - def test_7207(self): - "7207 - parse IFILE with files in same directory" - host_a = "host_7207a" - host_b = "host_7207b" - port_a = 72071 - port_b = 72072 - service_name_a = "service_7207a" - service_name_b = "service_7207b" - connect_string_a = f"{host_a}:{port_a}/{service_name_a}" - connect_string_b = f"{host_b}:{port_b}/{service_name_b}" - network_service_name_a = "nsn_7207a" - network_service_name_b = "nsn_7207b" - include_file_name = "inc_7207.ora" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, include_file_name) - with open(file_name, "w") as f: - f.write(f"{network_service_name_b} = {connect_string_b}") - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_name_a} = {connect_string_a}\n") - f.write(f"ifile = {include_file_name}") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name_a) - self.assertEqual(params.host, host_a) - self.assertEqual(params.port, port_a) - self.assertEqual(params.service_name, service_name_a) - params.parse_connect_string(network_service_name_b) - self.assertEqual(params.host, host_b) - self.assertEqual(params.port, port_b) - self.assertEqual(params.service_name, service_name_b) - self.assertEqual( - params.get_network_service_names(), - [ - network_service_name_a.upper(), - network_service_name_b.upper(), - ], - ) +def test_7200(): + "7200 - test simple tnsnames.ora entry" + host = "host_7200" + port = 7200 + service_name = "service_7200" + network_service_name = "nsn_7200" + connect_string = f""" + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) + (CONNECT_DATA=(SERVICE_NAME={service_name})))""" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, 
"tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string}") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_7201(test_env): + "7201 - test missing entry in tnsnames.ora" + with tempfile.TemporaryDirectory() as temp_dir: + params = oracledb.ConnectParams(config_dir=temp_dir) + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write("# no entries") + with test_env.assert_raises_full_code("DPY-4000"): + params.parse_connect_string("nsn_7201") + assert params.get_network_service_names() == [] + + +def test_7202(test_env): + "7202 - test missing tnsnames.ora in configuration directory" + with tempfile.TemporaryDirectory() as temp_dir: + params = oracledb.ConnectParams(config_dir=temp_dir) + with test_env.assert_raises_full_code("DPY-4026"): + params.parse_connect_string("nsn_7202") + with test_env.assert_raises_full_code("DPY-4026"): + params.get_network_service_names() + + +def test_7203(): + "7203 - test tnsnames.ora with invalid entries" + host = "host_7203" + port = 7203 + service_name = "service_7203" + connect_string = f""" + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) + (CONNECT_DATA=(SERVICE_NAME={service_name})))""" + network_service_name = "nsn_7203" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write("some garbage data which is not a valid entry\n") + f.write(f"{network_service_name} = {connect_string}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_7204(): + "7204 - test tnsnames.ora with multiple aliases on one line" + host = "host_7204" + port = 7204 + service_name = "service_7204" + connect_string = f""" + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) + (CONNECT_DATA=(SERVICE_NAME={service_name})))""" + network_service_names = "nsn_7204a,nsn_7204b" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_names} = {connect_string}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + for name in network_service_names.split(","): + params.parse_connect_string(name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert ( + params.get_network_service_names() + == network_service_names.upper().split(",") + ) + + +def test_7205(): + "7205 - test easy connect string in tnsnames.ora" + host = "host_7205" + port = 7205 + service_name = "service_7205" + connect_string = f"tcp://{host}:{port}/{service_name}" + network_service_name = "nsn_7205" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string}") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.get_network_service_names() == [ + network_service_name.upper() + ] + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_7206(): + "7206 - 
parse connect descriptor with / character in tnsnames.ora" + host = "host_7206" + port = 7206 + service_name = "service_7206" + wallet_location = "/some/dir/7206" + connect_string = f""" + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) + (CONNECT_DATA=(SERVICE_NAME={service_name})) + (SECURITY=(MY_WALLET_DIRECTORY={wallet_location})))""" + network_service_name = "nsn_7206" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string}") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.wallet_location == wallet_location + + +def test_7207(): + "7207 - parse IFILE with files in same directory" + host_a = "host_7207a" + host_b = "host_7207b" + port_a = 72071 + port_b = 72072 + service_name_a = "service_7207a" + service_name_b = "service_7207b" + connect_string_a = f"{host_a}:{port_a}/{service_name_a}" + connect_string_b = f"{host_b}:{port_b}/{service_name_b}" + network_service_name_a = "nsn_7207a" + network_service_name_b = "nsn_7207b" + include_file_name = "inc_7207.ora" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, include_file_name) + with open(file_name, "w") as f: + f.write(f"{network_service_name_b} = {connect_string_b}") + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_name_a} = {connect_string_a}\n") + f.write(f"ifile = {include_file_name}") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name_a) + assert params.host == host_a + assert params.port == port_a + assert params.service_name == service_name_a + params.parse_connect_string(network_service_name_b) + assert params.host == host_b + assert params.port == port_b + assert params.service_name == service_name_b + assert params.get_network_service_names() == [ + network_service_name_a.upper(), + network_service_name_b.upper(), + ] - def test_7208(self): - "7208 - parse IFILE with files in different directories" - host_a = "host_7208a" - host_b = "host_7208b" - port_a = 72081 - port_b = 72082 - service_name_a = "service_7208a" - service_name_b = "service_7208b" - connect_string_a = f"{host_a}:{port_a}/{service_name_a}" - connect_string_b = f"{host_b}:{port_b}/{service_name_b}" - network_service_name_a = "nsn_7208a" - network_service_name_b = "nsn_7208b" - include_file_name = "inc_7208.ora" - dir_1 = tempfile.TemporaryDirectory() - dir_2 = tempfile.TemporaryDirectory() - with dir_1 as primary_temp_dir, dir_2 as included_temp_dir: - file_name = os.path.join(included_temp_dir, include_file_name) - with open(file_name, "w") as f: - f.write(f"{network_service_name_b} = {connect_string_b}") - primary_file_name = os.path.join(primary_temp_dir, "tnsnames.ora") - with open(primary_file_name, "w") as f: - f.write(f"{network_service_name_a} = {connect_string_a}\n") - f.write(f"ifile = {file_name}") - params = oracledb.ConnectParams(config_dir=primary_temp_dir) - params.parse_connect_string(network_service_name_a) - self.assertEqual(params.host, host_a) - self.assertEqual(params.port, port_a) - self.assertEqual(params.service_name, service_name_a) - params.parse_connect_string(network_service_name_b) - self.assertEqual(params.host, host_b) - self.assertEqual(params.port, 
port_b) - self.assertEqual(params.service_name, service_name_b) - self.assertEqual( - params.get_network_service_names(), - [ - network_service_name_a.upper(), - network_service_name_b.upper(), - ], - ) - def test_7209(self): - "7209 - cycle detection in same file" - with tempfile.TemporaryDirectory() as temp_dir: - network_service_name = "nsn_7209" - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_name} = some_host/some_service\n") - f.write("IFILE = tnsnames.ora") - params = oracledb.ConnectParams(config_dir=temp_dir) - with self.assertRaisesFullCode("DPY-4030"): - params.parse_connect_string(network_service_name) - - def test_7210(self): - "7210 - cycle detection in directly included file" - with tempfile.TemporaryDirectory() as temp_dir: - network_service_name = "nsn_7210" - include_name = "included_7210.ora" - primary_file_name = os.path.join(temp_dir, "tnsnames.ora") - include_file_name = os.path.join(temp_dir, include_name) - with open(primary_file_name, "w") as f: - f.write(f"{network_service_name} = some_host/some_service\n") - f.write(f"IFILE = {include_name}") - with open(include_file_name, "w") as f: - f.write("IFILE = tnsnames.ora") - params = oracledb.ConnectParams(config_dir=temp_dir) - with self.assertRaisesFullCode("DPY-4030"): - params.parse_connect_string(network_service_name) - - def test_7211(self): - "7211 - cycle detection in indirectly included file" - with tempfile.TemporaryDirectory() as temp_dir: - network_service_name = "nsn_7211" - include_name_a = "included_7211_a.ora" - include_name_b = "included_7211_b.ora" - primary_file_name = os.path.join(temp_dir, "tnsnames.ora") - include_file_name_a = os.path.join(temp_dir, include_name_a) - include_file_name_b = os.path.join(temp_dir, include_name_b) - with open(primary_file_name, "w") as f: - f.write(f"{network_service_name} = some_host/some_service\n") - f.write(f"IFILE = {include_name_a}") - with open(include_file_name_a, "w") as f: - f.write(f"IFILE = {include_name_b}") - with open(include_file_name_b, "w") as f: - f.write("IFILE = tnsnames.ora") - params = oracledb.ConnectParams(config_dir=temp_dir) - with self.assertRaisesFullCode("DPY-4030"): - params.parse_connect_string(network_service_name) - - def test_7212(self): - "7212 - duplicate entry in same file, but identical connect strings" - host = "host_7212" - port = 7212 - service_name = "service_7212" - connect_string = f"{host}:{port}/{service_name}" - network_service_name = "nsn_7212" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string}\n") - f.write("some_other_nsn = some_host/some_service\n") - f.write(f"{network_service_name} = {connect_string}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - - def test_7213(self): - "7213 - duplicate entry in same file, but different connect strings" - host_a = "host_7213a" - port = 7213 - service_name_a = "service_7213a" - host_b = "host_7213b" - service_name_b = "service_7213b" - connect_string_a = f"{host_a}:{port}/{service_name_a}" - connect_string_b = f"{host_b}:{port}/{service_name_b}" - network_service_name = "nsn_7213" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - 
with open(file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string_a}\n") - f.write("some_other_nsn = some_host/some_service\n") - f.write(f"{network_service_name} = {connect_string_b}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host_b) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name_b) - - def test_7214(self): - "7214 - duplicate entry in other file, but identical connect strings" - host = "host_7214" - port = 7214 - service_name = "service_7214" - connect_string = f"{host}:{port}/{service_name}" - network_service_name = "nsn_7214" - include_name = "inc_7214.ora" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - include_file_name = os.path.join(temp_dir, include_name) - with open(file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string}\n") - f.write(f"IFILE = {include_name}") - with open(include_file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - - def test_7215(self): - "7215 - duplicate entry in other file, but different connect strings" - host_a = "host_7215a" - port = 7215 - service_name_a = "service_7215a" - host_b = "host_7215b" - service_name_b = "service_7215b" - connect_string_a = f"{host_a}:{port}/{service_name_a}" - connect_string_b = f"{host_b}:{port}/{service_name_b}" - network_service_name = "nsn_7215" - include_name = "inc_7215.ora" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - include_file_name = os.path.join(temp_dir, include_name) - with open(file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string_a}\n") - f.write(f"IFILE = {include_name}") - with open(include_file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string_b}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host_b) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name_b) - - def test_7216(self): - "7216 - test missing IFILE in tnsnames.ora" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write("IFILE = missing.ora\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - with self.assertRaisesFullCode("DPY-4026"): - params.parse_connect_string("anything") - - def test_7217(self): - "7217 - test duplicate IFILE, same file" - host = "host_7217" - port = 7217 - service_name = "service_7217" - connect_string = f"{host}:{port}/{service_name}" - network_service_name = "nsn_7217" - include_name = "inc_7217.ora" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - include_file_name = os.path.join(temp_dir, include_name) - with open(file_name, "w") as f: - f.write(f"IFILE = {include_name}\n") - f.write("another_nsn = some_host/some_service\n") - f.write(f"IFILE = {include_name}\n") - with open(include_file_name, "w") as f: - f.write(f"{network_service_name} = {connect_string}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) +def test_7208(): + "7208 - 
parse IFILE with files in different directories" + host_a = "host_7208a" + host_b = "host_7208b" + port_a = 72081 + port_b = 72082 + service_name_a = "service_7208a" + service_name_b = "service_7208b" + connect_string_a = f"{host_a}:{port_a}/{service_name_a}" + connect_string_b = f"{host_b}:{port_b}/{service_name_b}" + network_service_name_a = "nsn_7208a" + network_service_name_b = "nsn_7208b" + include_file_name = "inc_7208.ora" + dir_1 = tempfile.TemporaryDirectory() + dir_2 = tempfile.TemporaryDirectory() + with dir_1 as primary_temp_dir, dir_2 as included_temp_dir: + file_name = os.path.join(included_temp_dir, include_file_name) + with open(file_name, "w") as f: + f.write(f"{network_service_name_b} = {connect_string_b}") + primary_file_name = os.path.join(primary_temp_dir, "tnsnames.ora") + with open(primary_file_name, "w") as f: + f.write(f"{network_service_name_a} = {connect_string_a}\n") + f.write(f"ifile = {file_name}") + params = oracledb.ConnectParams(config_dir=primary_temp_dir) + params.parse_connect_string(network_service_name_a) + assert params.host == host_a + assert params.port == port_a + assert params.service_name == service_name_a + params.parse_connect_string(network_service_name_b) + assert params.host == host_b + assert params.port == port_b + assert params.service_name == service_name_b + assert params.get_network_service_names() == [ + network_service_name_a.upper(), + network_service_name_b.upper(), + ] + + +def test_7209(test_env): + "7209 - cycle detection in same file" + with tempfile.TemporaryDirectory() as temp_dir: + network_service_name = "nsn_7209" + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_name} = some_host/some_service\n") + f.write("IFILE = tnsnames.ora") + params = oracledb.ConnectParams(config_dir=temp_dir) + with test_env.assert_raises_full_code("DPY-4030"): params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_network_service_names(), - [network_service_name.upper(), "ANOTHER_NSN"], - ) - def test_7218(self): - "7218 - test duplicate IFILE, different files" - host = "host_7218" - port = 7218 - service_name = "service_7218" - connect_string = f"{host}:{port}/{service_name}" - network_service_name = "nsn_7218" - include_name_a = "inc_7218_a.ora" - include_name_b = "inc_7218_b.ora" - include_name_c = "inc_7218_c.ora" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - include_file_name_a = os.path.join(temp_dir, include_name_a) - include_file_name_b = os.path.join(temp_dir, include_name_b) - include_file_name_c = os.path.join(temp_dir, include_name_c) - with open(file_name, "w") as f: - f.write(f"IFILE = {include_name_a}\n") - f.write("another_nsn = some_host/some_service\n") - f.write(f"IFILE = {include_name_b}\n") - with open(include_file_name_a, "w") as f: - f.write("in_a = some_host/some_service\n") - f.write(f"IFILE = {include_name_c}\n") - with open(include_file_name_b, "w") as f: - f.write("in_b = some_host/some_service\n") - f.write(f"IFILE = {include_name_c}\n") - with open(include_file_name_c, "w") as f: - f.write(f"{network_service_name} = {connect_string}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) + +def test_7210(test_env): + "7210 - cycle detection in directly included file" + with tempfile.TemporaryDirectory() as temp_dir: + 
network_service_name = "nsn_7210" + include_name = "included_7210.ora" + primary_file_name = os.path.join(temp_dir, "tnsnames.ora") + include_file_name = os.path.join(temp_dir, include_name) + with open(primary_file_name, "w") as f: + f.write(f"{network_service_name} = some_host/some_service\n") + f.write(f"IFILE = {include_name}") + with open(include_file_name, "w") as f: + f.write("IFILE = tnsnames.ora") + params = oracledb.ConnectParams(config_dir=temp_dir) + with test_env.assert_raises_full_code("DPY-4030"): params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_network_service_names(), - ["IN_A", network_service_name.upper(), "ANOTHER_NSN", "IN_B"], - ) - def test_7219(self): - "7219 - test tnsnames.ora with multiple aliases on different lines" - host = "host_7219" - port = 7219 - service_name = "service_7219" - connect_string = f"{host}:{port}/{service_name}" - network_service_names = ["nsn_7219a", "nsn_7219b", "nsn_7219c"] - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(",\n".join(network_service_names)) - f.write(f" = {connect_string}") - params = oracledb.ConnectParams(config_dir=temp_dir) - for name in network_service_names: - params.parse_connect_string(name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_network_service_names(), - [n.upper() for n in network_service_names], - ) - def test_7220(self): - "7220 - test tnsnames.ora with comment embedded in dsn" - host = "host_7220" - port = 7220 - service_name = "service_7220" - network_service_name = "nsn_7220" - connect_string = f""" - (DESCRIPTION= - (ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) - (CONNECT_DATA= - (SERVICE_NAME={service_name}) - # embedded comment - ) - )""" - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - print(f"{network_service_name} = {connect_string}", file=f) - params = oracledb.ConnectParams(config_dir=temp_dir) +def test_7211(test_env): + "7211 - cycle detection in indirectly included file" + with tempfile.TemporaryDirectory() as temp_dir: + network_service_name = "nsn_7211" + include_name_a = "included_7211_a.ora" + include_name_b = "included_7211_b.ora" + primary_file_name = os.path.join(temp_dir, "tnsnames.ora") + include_file_name_a = os.path.join(temp_dir, include_name_a) + include_file_name_b = os.path.join(temp_dir, include_name_b) + with open(primary_file_name, "w") as f: + f.write(f"{network_service_name} = some_host/some_service\n") + f.write(f"IFILE = {include_name_a}") + with open(include_file_name_a, "w") as f: + f.write(f"IFILE = {include_name_b}") + with open(include_file_name_b, "w") as f: + f.write("IFILE = tnsnames.ora") + params = oracledb.ConnectParams(config_dir=temp_dir) + with test_env.assert_raises_full_code("DPY-4030"): params.parse_connect_string(network_service_name) - self.assertEqual(params.host, host) - self.assertEqual(params.port, port) - self.assertEqual(params.service_name, service_name) - self.assertEqual( - params.get_network_service_names(), - [network_service_name.upper()], - ) - def test_7221(self): - "7221 - test tnsnames.ora with a comment between aliases" - test_values = [ - ("nsn_7221_1", 
"tcp://host_7221:7221/service_7222_1"), - ("nsn_7221_2", "tcp://host_7222:7222/service_7222_2"), + +def test_7212(): + "7212 - duplicate entry in same file, but identical connect strings" + host = "host_7212" + port = 7212 + service_name = "service_7212" + connect_string = f"{host}:{port}/{service_name}" + network_service_name = "nsn_7212" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string}\n") + f.write("some_other_nsn = some_host/some_service\n") + f.write(f"{network_service_name} = {connect_string}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_7213(): + "7213 - duplicate entry in same file, but different connect strings" + host_a = "host_7213a" + port = 7213 + service_name_a = "service_7213a" + host_b = "host_7213b" + service_name_b = "service_7213b" + connect_string_a = f"{host_a}:{port}/{service_name_a}" + connect_string_b = f"{host_b}:{port}/{service_name_b}" + network_service_name = "nsn_7213" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string_a}\n") + f.write("some_other_nsn = some_host/some_service\n") + f.write(f"{network_service_name} = {connect_string_b}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host_b + assert params.port == port + assert params.service_name == service_name_b + + +def test_7214(): + "7214 - duplicate entry in other file, but identical connect strings" + host = "host_7214" + port = 7214 + service_name = "service_7214" + connect_string = f"{host}:{port}/{service_name}" + network_service_name = "nsn_7214" + include_name = "inc_7214.ora" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + include_file_name = os.path.join(temp_dir, include_name) + with open(file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string}\n") + f.write(f"IFILE = {include_name}") + with open(include_file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + + +def test_7215(): + "7215 - duplicate entry in other file, but different connect strings" + host_a = "host_7215a" + port = 7215 + service_name_a = "service_7215a" + host_b = "host_7215b" + service_name_b = "service_7215b" + connect_string_a = f"{host_a}:{port}/{service_name_a}" + connect_string_b = f"{host_b}:{port}/{service_name_b}" + network_service_name = "nsn_7215" + include_name = "inc_7215.ora" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + include_file_name = os.path.join(temp_dir, include_name) + with open(file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string_a}\n") + f.write(f"IFILE = {include_name}") + with open(include_file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string_b}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + 
assert params.host == host_b + assert params.port == port + assert params.service_name == service_name_b + + +def test_7216(test_env): + "7216 - test missing IFILE in tnsnames.ora" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write("IFILE = missing.ora\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + with test_env.assert_raises_full_code("DPY-4026"): + params.parse_connect_string("anything") + + +def test_7217(): + "7217 - test duplicate IFILE, same file" + host = "host_7217" + port = 7217 + service_name = "service_7217" + connect_string = f"{host}:{port}/{service_name}" + network_service_name = "nsn_7217" + include_name = "inc_7217.ora" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + include_file_name = os.path.join(temp_dir, include_name) + with open(file_name, "w") as f: + f.write(f"IFILE = {include_name}\n") + f.write("another_nsn = some_host/some_service\n") + f.write(f"IFILE = {include_name}\n") + with open(include_file_name, "w") as f: + f.write(f"{network_service_name} = {connect_string}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_network_service_names() == [ + network_service_name.upper(), + "ANOTHER_NSN", ] - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - for i in range(3): - entries = [f"{n} = {c}\n" for n, c in test_values] - entries.insert(i, "# COMMENT \n") - with open(file_name, "w") as f: - f.writelines(entries) - params = oracledb.ConnectParams(config_dir=temp_dir) - self.assertEqual( - params.get_network_service_names(), - [n.upper() for n, _ in test_values], - ) - - def test_7222(self): - "7222 - test tnsnames.ora with easy connect and connect descriptors" - network_service_name1 = "nsn_7222_1" - connect_string1 = """ - (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=host_7220)(PORT=7222)) - (CONNECT_DATA=(SERVICE_NAME=service_7222_1)))""" - - network_service_name2 = "nsn_7222_2" - connect_string2 = "tcp://host_7222:7222/service_7222_2" - - with tempfile.TemporaryDirectory() as temp_dir: - file_name = os.path.join(temp_dir, "tnsnames.ora") - with open(file_name, "w") as f: - f.write(f"{network_service_name1} = {connect_string1}\n") - f.write(f"{network_service_name2} = {connect_string2}\n") - params = oracledb.ConnectParams(config_dir=temp_dir) - self.assertEqual( - params.get_network_service_names(), - [network_service_name1.upper(), network_service_name2.upper()], + + +def test_7218(): + "7218 - test duplicate IFILE, different files" + host = "host_7218" + port = 7218 + service_name = "service_7218" + connect_string = f"{host}:{port}/{service_name}" + network_service_name = "nsn_7218" + include_name_a = "inc_7218_a.ora" + include_name_b = "inc_7218_b.ora" + include_name_c = "inc_7218_c.ora" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + include_file_name_a = os.path.join(temp_dir, include_name_a) + include_file_name_b = os.path.join(temp_dir, include_name_b) + include_file_name_c = os.path.join(temp_dir, include_name_c) + with open(file_name, "w") as f: + f.write(f"IFILE = {include_name_a}\n") + f.write("another_nsn = some_host/some_service\n") + f.write(f"IFILE = {include_name_b}\n") + with 
open(include_file_name_a, "w") as f: + f.write("in_a = some_host/some_service\n") + f.write(f"IFILE = {include_name_c}\n") + with open(include_file_name_b, "w") as f: + f.write("in_b = some_host/some_service\n") + f.write(f"IFILE = {include_name_c}\n") + with open(include_file_name_c, "w") as f: + f.write(f"{network_service_name} = {connect_string}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_network_service_names() == [ + "IN_A", + network_service_name.upper(), + "ANOTHER_NSN", + "IN_B", + ] + + +def test_7219(): + "7219 - test tnsnames.ora with multiple aliases on different lines" + host = "host_7219" + port = 7219 + service_name = "service_7219" + connect_string = f"{host}:{port}/{service_name}" + network_service_names = ["nsn_7219a", "nsn_7219b", "nsn_7219c"] + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + f.write(",\n".join(network_service_names)) + f.write(f" = {connect_string}") + params = oracledb.ConnectParams(config_dir=temp_dir) + for name in network_service_names: + params.parse_connect_string(name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_network_service_names() == [ + n.upper() for n in network_service_names + ] + + +def test_7220(): + "7220 - test tnsnames.ora with comment embedded in dsn" + host = "host_7220" + port = 7220 + service_name = "service_7220" + network_service_name = "nsn_7220" + connect_string = f""" + (DESCRIPTION= + (ADDRESS=(PROTOCOL=TCP)(HOST={host})(PORT={port})) + (CONNECT_DATA= + (SERVICE_NAME={service_name}) + # embedded comment ) + )""" + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + print(f"{network_service_name} = {connect_string}", file=f) + params = oracledb.ConnectParams(config_dir=temp_dir) + params.parse_connect_string(network_service_name) + assert params.host == host + assert params.port == port + assert params.service_name == service_name + assert params.get_network_service_names() == [ + network_service_name.upper() + ] -if __name__ == "__main__": - test_env.run_test_cases() +def test_7221(): + "7221 - test tnsnames.ora with a comment between aliases" + test_values = [ + ("nsn_7221_1", "tcp://host_7221:7221/service_7222_1"), + ("nsn_7221_2", "tcp://host_7222:7222/service_7222_2"), + ] + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + for i in range(3): + entries = [f"{n} = {c}\n" for n, c in test_values] + entries.insert(i, "# COMMENT \n") + with open(file_name, "w") as f: + f.writelines(entries) + params = oracledb.ConnectParams(config_dir=temp_dir) + assert params.get_network_service_names() == [ + n.upper() for n, _ in test_values + ] + + +def test_7222(): + "7222 - test tnsnames.ora with easy connect and connect descriptors" + network_service_name1 = "nsn_7222_1" + connect_string1 = """ + (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=host_7220)(PORT=7222)) + (CONNECT_DATA=(SERVICE_NAME=service_7222_1)))""" + + network_service_name2 = "nsn_7222_2" + connect_string2 = "tcp://host_7222:7222/service_7222_2" + + with tempfile.TemporaryDirectory() as temp_dir: + file_name = os.path.join(temp_dir, "tnsnames.ora") + with open(file_name, "w") as f: + 
f.write(f"{network_service_name1} = {connect_string1}\n") + f.write(f"{network_service_name2} = {connect_string2}\n") + params = oracledb.ConnectParams(config_dir=temp_dir) + assert params.get_network_service_names() == [ + network_service_name1.upper(), + network_service_name2.upper(), + ] diff --git a/tests/test_7300_unsupported_features_thin.py b/tests/test_7300_unsupported_features_thin.py index d7c4b6bd..439bf70f 100644 --- a/tests/test_7300_unsupported_features_thin.py +++ b/tests/test_7300_unsupported_features_thin.py @@ -27,45 +27,46 @@ """ import oracledb -import test_env +import pytest -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseTestCase): +@pytest.fixture(autouse=True) +def module_checks(skip_unless_thin_mode): + pass - def test_7300(self): - "7300 - test getting and setting thick attributes" - pool = test_env.get_pool() - with self.assertRaisesFullCode("DPY-3001"): - pool.soda_metadata_cache - with self.assertRaisesFullCode("DPY-3001"): - pool.soda_metadata_cache = True - with self.assertRaisesFullCode("DPY-3001"): - pool.max_sessions_per_shard - with self.assertRaisesFullCode("DPY-3001"): - pool.max_sessions_per_shard = 2 - def test_7302(self): - "7302 - test connection with sharding and supersharding keys" - with self.assertRaisesFullCode("DPY-3001"): - test_env.get_connection(shardingkey=[27]) - with self.assertRaisesFullCode("DPY-3001"): - test_env.get_connection(supershardingkey=[17, 23]) +def test_7300(test_env): + "7300 - test getting and setting thick attributes" + pool = test_env.get_pool() + with test_env.assert_raises_full_code("DPY-3001"): + pool.soda_metadata_cache + with test_env.assert_raises_full_code("DPY-3001"): + pool.soda_metadata_cache = True + with test_env.assert_raises_full_code("DPY-3001"): + pool.max_sessions_per_shard + with test_env.assert_raises_full_code("DPY-3001"): + pool.max_sessions_per_shard = 2 - def test_7303(self): - "7303 - test connect() without a connect string (bequeath)" - with self.assertRaisesFullCode("DPY-3001"): - oracledb.connect( - user=test_env.get_main_user(), - password=test_env.get_main_password(), - ) - def test_7304(self): - "7304 - test acquire() from a pool with a session tag" - pool = test_env.get_pool() - with self.assertRaisesFullCode("DPY-3001"): - pool.acquire(tag="unimportant") +def test_7302(test_env): + "7302 - test connection with sharding and supersharding keys" + with test_env.assert_raises_full_code("DPY-3001"): + test_env.get_connection(shardingkey=[27]) + with test_env.assert_raises_full_code("DPY-3001"): + test_env.get_connection(supershardingkey=[17, 23]) -if __name__ == "__main__": - test_env.run_test_cases() +def test_7303(test_env): + "7303 - test connect() without a connect string (bequeath)" + with test_env.assert_raises_full_code("DPY-3001"): + oracledb.connect( + user=test_env.main_user, + password=test_env.main_password, + ) + + +def test_7304(test_env): + "7304 - test acquire() from a pool with a session tag" + pool = test_env.get_pool() + with test_env.assert_raises_full_code("DPY-3001"): + pool.acquire(tag="unimportant") diff --git a/tests/test_7400_tpc_async.py b/tests/test_7400_tpc_async.py index d2b50c70..dc4933ef 100644 --- a/tests/test_7400_tpc_async.py +++ b/tests/test_7400_tpc_async.py @@ -27,338 +27,345 @@ """ import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_7400(self): - "7400 - test begin, prepare, roll back global transaction" - await self.cursor.execute("truncate table 
TestTempTable") - xid = self.conn.xid(3900, b"txn3900", b"branchId") - await self.conn.tpc_begin(xid) - self.assertEqual(await self.conn.tpc_prepare(), False) - await self.conn.tpc_begin(xid) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ - ) - self.assertEqual(await self.conn.tpc_prepare(), True) - await self.conn.tpc_rollback() - await self.cursor.execute("select count(*) from TestTempTable") - (count,) = await self.cursor.fetchone() - self.assertEqual(count, 0) - - async def test_7401(self): - "7401 - test begin, prepare, commit global transaction" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(3901, "txn3901", "branchId") - await self.conn.tpc_begin(xid) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ - ) - self.assertEqual(await self.conn.tpc_prepare(), True) - await self.conn.tpc_commit() - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await self.cursor.fetchall(), [(1, "tesName")]) - - async def test_7402(self): - "7402 - test multiple global transactions on the same connection" - await self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(3902, "txn3902", "branch1") - xid2 = self.conn.xid(3902, b"txn3902", b"branch2") - await self.conn.tpc_begin(xid1) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'tesName') - """ - ) - await self.conn.tpc_end() - await self.conn.tpc_begin(xid2) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (2, 'tesName') - """ - ) - await self.conn.tpc_end() - needs_commit1 = await self.conn.tpc_prepare(xid1) - needs_commit2 = await self.conn.tpc_prepare(xid2) - if needs_commit1: - await self.conn.tpc_commit(xid1) - if needs_commit2: - await self.conn.tpc_commit(xid2) - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - expected_rows = [(1, "tesName"), (2, "tesName")] - self.assertEqual(await self.cursor.fetchall(), expected_rows) - - async def test_7403(self): - "7403 - test rollback with parameter xid" - await self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(3901, b"txn3901", b"branch1") - xid2 = self.conn.xid(3902, "txn3902", "branch2") - for count, xid in enumerate([xid1, xid2]): - await self.conn.tpc_begin(xid) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:id, 'tesName') - """, - id=count, - ) - await self.conn.tpc_end() - await self.conn.tpc_rollback(xid1) - - with self.assertRaisesFullCode("ORA-24756"): - await self.conn.tpc_prepare(xid1) - needs_commit = await self.conn.tpc_prepare(xid2) - if needs_commit: - await self.conn.tpc_commit(xid2) - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - self.assertEqual(await self.cursor.fetchall(), [(1, "tesName")]) - - async def test_7404(self): - "7404 - test resuming a transaction" - await self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(3939, "txn3939", "branch39") - xid2 = self.conn.xid(3940, "txn3940", "branch40") - values = [[xid1, (1, "User Info")], [xid2, (2, "Other User Info")]] - for xid, data in values: - await self.conn.tpc_begin(xid) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data, - ) - await 
self.conn.tpc_end() - for xid, data in values: - await self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME) - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - (res,) = await self.cursor.fetchall() - self.assertEqual(res, data) - await self.conn.tpc_rollback(xid) - - async def test_7405(self): - "7405 - test promoting a local transaction to a tpc transaction" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(3941, "txn3941", "branch41") - values = (1, "String 1") - await self.cursor.execute( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - values, - ) - with self.assertRaisesFullCode("ORA-24776"): - await self.conn.tpc_begin(xid) - await self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_PROMOTE) - await self.cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - (res,) = await self.cursor.fetchall() - self.assertEqual(res, values) - await self.conn.tpc_rollback(xid) - - async def test_7406(self): - "7406 - test ending a transaction with parameter xid" - await self.cursor.execute("truncate table TestTempTable") - xid1 = self.conn.xid(7406, "txn7406a", "branch3") - xid2 = self.conn.xid(7406, b"txn7406b", b"branch4") - await self.conn.tpc_begin(xid1) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'test7406a') - """ - ) - await self.conn.tpc_begin(xid2) - with self.assertRaisesFullCode("ORA-24758"): - await self.conn.tpc_end(xid1) - await self.cursor.execute( +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_7400(async_conn, async_cursor): + "7400 - test begin, prepare, roll back global transaction" + await async_cursor.execute("truncate table TestTempTable") + xid = async_conn.xid(3900, b"txn3900", b"branchId") + await async_conn.tpc_begin(xid) + assert not await async_conn.tpc_prepare() + await async_conn.tpc_begin(xid) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'tesName') + """ + ) + assert await async_conn.tpc_prepare() + await async_conn.tpc_rollback() + await async_cursor.execute("select count(*) from TestTempTable") + (count,) = await async_cursor.fetchone() + assert count == 0 + + +async def test_7401(async_conn, async_cursor): + "7401 - test begin, prepare, commit global transaction" + await async_cursor.execute("truncate table TestTempTable") + xid = async_conn.xid(3901, "txn3901", "branchId") + await async_conn.tpc_begin(xid) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'tesName') + """ + ) + assert await async_conn.tpc_prepare() + await async_conn.tpc_commit() + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await async_cursor.fetchall() == [(1, "tesName")] + + +async def test_7402(async_conn, async_cursor): + "7402 - test multiple global transactions on the same connection" + await async_cursor.execute("truncate table TestTempTable") + xid1 = async_conn.xid(3902, "txn3902", "branch1") + xid2 = async_conn.xid(3902, b"txn3902", b"branch2") + await async_conn.tpc_begin(xid1) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'tesName') + """ + ) + await async_conn.tpc_end() + await async_conn.tpc_begin(xid2) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (2, 'tesName') + """ + ) + await async_conn.tpc_end() + needs_commit1 = 
await async_conn.tpc_prepare(xid1) + needs_commit2 = await async_conn.tpc_prepare(xid2) + if needs_commit1: + await async_conn.tpc_commit(xid1) + if needs_commit2: + await async_conn.tpc_commit(xid2) + await async_cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + expected_rows = [(1, "tesName"), (2, "tesName")] + assert await async_cursor.fetchall() == expected_rows + + +async def test_7403(async_conn, async_cursor, test_env): + "7403 - test rollback with parameter xid" + await async_cursor.execute("truncate table TestTempTable") + xid1 = async_conn.xid(3901, b"txn3901", b"branch1") + xid2 = async_conn.xid(3902, "txn3902", "branch2") + for count, xid in enumerate([xid1, xid2]): + await async_conn.tpc_begin(xid) + await async_cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) - values (2, 'test7406b') - """ + values (:id, 'tesName') + """, + id=count, ) - await self.conn.tpc_end(xid2) - with self.assertRaisesFullCode("ORA-25352"): - await self.conn.tpc_end(xid1) - await self.conn.tpc_rollback(xid1) - await self.conn.tpc_rollback(xid2) - - async def test_7407(self): - "7407 - test tpc_recover()" - await self.cursor.execute("truncate table TestTempTable") - n_xids = 10 - for i in range(n_xids): - xid = self.conn.xid(7407 + i, f"txn7407{i}", f"branch{i}") - await self.conn.tpc_begin(xid) - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, 'test7407') - """, - [i + 1], - ) - await self.conn.tpc_prepare(xid) - recovers = await self.conn.tpc_recover() - self.assertEqual(len(recovers), n_xids) - - await self.cursor.execute("select * from DBA_PENDING_TRANSACTIONS") - self.assertEqual(await self.cursor.fetchall(), recovers) - - for xid in recovers: - if xid.format_id % 2 == 0: - await self.conn.tpc_commit(xid) - recovers = await self.conn.tpc_recover() - self.assertEqual(len(recovers), n_xids // 2) - - for xid in recovers: - await self.conn.tpc_rollback(xid) - recovers = await self.conn.tpc_recover() - self.assertEqual(len(recovers), 0) - - async def test_7408(self): - "7408 - test tpc_recover() with read-only transaction" - await self.cursor.execute("truncate table TestTempTable") - for i in range(4): - xid = self.conn.xid(7408 + i, f"txn7408{i}", f"branch{i}") - await self.conn.tpc_begin(xid) - await self.cursor.execute("select * from TestTempTable") - await self.conn.tpc_prepare(xid) - recovers = await self.conn.tpc_recover() - self.assertEqual(len(recovers), 0) - - async def test_7409(self): - "7409 - test tpc_commit() with one_phase parameter" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(7409, "txn7409", "branch1") - await self.conn.tpc_begin(xid) - values = (1, "test7409") - await self.cursor.execute( + await async_conn.tpc_end() + await async_conn.tpc_rollback(xid1) + + with test_env.assert_raises_full_code("ORA-24756"): + await async_conn.tpc_prepare(xid1) + needs_commit = await async_conn.tpc_prepare(xid2) + if needs_commit: + await async_conn.tpc_commit(xid2) + await async_cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert await async_cursor.fetchall() == [(1, "tesName")] + + +async def test_7404(async_conn, async_cursor): + "7404 - test resuming a transaction" + await async_cursor.execute("truncate table TestTempTable") + xid1 = async_conn.xid(3939, "txn3939", "branch39") + xid2 = async_conn.xid(3940, "txn3940", "branch40") + values = [[xid1, (1, "User Info")], [xid2, (2, "Other User Info")]] + for xid, data in 
values: + await async_conn.tpc_begin(xid) + await async_cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, - values, + data, ) - await self.cursor.execute( + await async_conn.tpc_end() + for xid, data in values: + await async_conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME) + await async_cursor.execute( "select IntCol, StringCol1 from TestTempTable" ) - await self.conn.tpc_commit(xid, one_phase=True) - self.assertEqual(await self.cursor.fetchall(), [values]) - - async def test_7410(self): - "7410 - test negative cases for tpc_commit()" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(7410, "txn7410", "branch1") - await self.conn.tpc_begin(xid) - await self.cursor.execute( + (res,) = await async_cursor.fetchall() + assert res == data + await async_conn.tpc_rollback(xid) + + +async def test_7405(async_conn, async_cursor, test_env): + "7405 - test promoting a local transaction to a tpc transaction" + await async_cursor.execute("truncate table TestTempTable") + xid = async_conn.xid(3941, "txn3941", "branch41") + values = (1, "String 1") + await async_cursor.execute( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + values, + ) + with test_env.assert_raises_full_code("ORA-24776"): + await async_conn.tpc_begin(xid) + await async_conn.tpc_begin(xid, oracledb.TPC_BEGIN_PROMOTE) + await async_cursor.execute("select IntCol, StringCol1 from TestTempTable") + (res,) = await async_cursor.fetchall() + assert res == values + await async_conn.tpc_rollback(xid) + + +async def test_7406(async_conn, async_cursor, test_env): + "7406 - test ending a transaction with parameter xid" + await async_cursor.execute("truncate table TestTempTable") + xid1 = async_conn.xid(7406, "txn7406a", "branch3") + xid2 = async_conn.xid(7406, b"txn7406b", b"branch4") + await async_conn.tpc_begin(xid1) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'test7406a') + """ + ) + await async_conn.tpc_begin(xid2) + with test_env.assert_raises_full_code("ORA-24758"): + await async_conn.tpc_end(xid1) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (2, 'test7406b') + """ + ) + await async_conn.tpc_end(xid2) + with test_env.assert_raises_full_code("ORA-25352"): + await async_conn.tpc_end(xid1) + await async_conn.tpc_rollback(xid1) + await async_conn.tpc_rollback(xid2) + + +async def test_7407(async_conn, async_cursor): + "7407 - test tpc_recover()" + await async_cursor.execute("truncate table TestTempTable") + n_xids = 10 + for i in range(n_xids): + xid = async_conn.xid(7407 + i, f"txn7407{i}", f"branch{i}") + await async_conn.tpc_begin(xid) + await async_cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) - values (1, 'test7410') - """ - ) - with self.assertRaises(TypeError): - await self.conn.tpc_commit("invalid xid") - await self.conn.tpc_prepare(xid) - with self.assertRaisesFullCode("ORA-02053"): - await self.conn.tpc_commit(xid, one_phase=True) - with self.assertRaisesFullCode("ORA-24756"): - await self.conn.tpc_commit(xid) - - async def test_7411(self): - "7411 - test starting an already created transaction" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(7411, "txn7411", "branch1") - await self.conn.tpc_begin(xid) - with self.assertRaisesFullCode("ORA-24757"): - await self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_NEW) - with self.assertRaisesFullCode("ORA-24797"): - await self.conn.tpc_begin(xid, 
oracledb.TPC_BEGIN_PROMOTE) - await self.conn.tpc_end() - for flag in [oracledb.TPC_BEGIN_NEW, oracledb.TPC_BEGIN_PROMOTE]: - with self.assertRaisesFullCode("ORA-24757"): - await self.conn.tpc_begin(xid, flag) - await self.conn.tpc_rollback(xid) - - async def test_7412(self): - "7412 - test resuming a prepared transaction" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(7412, "txn7412", "branch1") - await self.conn.tpc_begin(xid) - await self.conn.tpc_prepare(xid) - with self.assertRaisesFullCode("ORA-24756"): - await self.conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME) - - async def test_7413(self): - "7413 - test tpc_begin and tpc_end with invalid parameters" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(7413, "txn7413", "branch1") - test_values = [ - (self.conn.tpc_begin, "DPY-2050"), - (self.conn.tpc_end, "DPY-2051"), - ] - for tpc_function, error_code in test_values: - with self.assertRaises(TypeError): - await tpc_function("invalid xid") - with self.assertRaisesFullCode(error_code): - await tpc_function(xid, "invalid flag") - with self.assertRaisesFullCode(error_code): - await tpc_function(xid, 70) - - async def test_7414(self): - "7414 - test commiting transaction without tpc_commit" - xid = self.conn.xid(7414, "txn7409", "branch1") - await self.conn.tpc_begin(xid) - with self.assertRaisesFullCode("ORA-02089"): - await self.cursor.execute("truncate table TestTempTable") - - async def test_7415(self): - "7415 - test tpc_commit when a commit is not needed" - xid = self.conn.xid(7416, "txn7416", "branch1") - await self.conn.tpc_begin(xid) - await self.cursor.execute("select * from TestTempTable") - await self.conn.tpc_end(xid) - await self.conn.tpc_prepare(xid) - with self.assertRaisesFullCode("ORA-24756"): - await self.conn.tpc_commit(xid) - - async def test_7416(self): - "7416 - test transaction_in_progress" - await self.cursor.execute("truncate table TestTempTable") - xid = self.conn.xid(7415, "txn7415", "branch1") - self.assertFalse(self.conn.transaction_in_progress) - - await self.conn.tpc_begin(xid) - self.assertTrue(self.conn.transaction_in_progress) - await self.cursor.execute( - "insert into TestTempTable (IntCol) values (2)" + values (:1, 'test7407') + """, + [i + 1], ) + await async_conn.tpc_prepare(xid) + recovers = await async_conn.tpc_recover() + assert len(recovers) == n_xids + + await async_cursor.execute("select * from DBA_PENDING_TRANSACTIONS") + assert await async_cursor.fetchall() == recovers + + for xid in recovers: + if xid.format_id % 2 == 0: + await async_conn.tpc_commit(xid) + recovers = await async_conn.tpc_recover() + assert len(recovers) == n_xids // 2 + + for xid in recovers: + await async_conn.tpc_rollback(xid) + recovers = await async_conn.tpc_recover() + assert len(recovers) == 0 + + +async def test_7408(async_conn, async_cursor): + "7408 - test tpc_recover() with read-only transaction" + await async_cursor.execute("truncate table TestTempTable") + for i in range(4): + xid = async_conn.xid(7408 + i, f"txn7408{i}", f"branch{i}") + await async_conn.tpc_begin(xid) + await async_cursor.execute("select * from TestTempTable") + await async_conn.tpc_prepare(xid) + recovers = await async_conn.tpc_recover() + assert len(recovers) == 0 + + +async def test_7409(async_conn, async_cursor): + "7409 - test tpc_commit() with one_phase parameter" + await async_cursor.execute("truncate table TestTempTable") + xid = async_conn.xid(7409, "txn7409", "branch1") + await async_conn.tpc_begin(xid) + values = 
(1, "test7409")
+    await async_cursor.execute(
+        """
+        insert into TestTempTable (IntCol, StringCol1)
+        values (:1, :2)
+        """,
+        values,
+    )
+    await async_cursor.execute("select IntCol, StringCol1 from TestTempTable")
+    await async_conn.tpc_commit(xid, one_phase=True)
+    assert await async_cursor.fetchall() == [values]
+
+
+async def test_7410(async_conn, async_cursor, test_env):
+    "7410 - test negative cases for tpc_commit()"
+    await async_cursor.execute("truncate table TestTempTable")
+    xid = async_conn.xid(7410, "txn7410", "branch1")
+    await async_conn.tpc_begin(xid)
+    await async_cursor.execute(
+        """
+        insert into TestTempTable (IntCol, StringCol1)
+        values (1, 'test7410')
+        """
+    )
+    with pytest.raises(TypeError):
+        await async_conn.tpc_commit("invalid xid")
+    await async_conn.tpc_prepare(xid)
+    with test_env.assert_raises_full_code("ORA-02053"):
+        await async_conn.tpc_commit(xid, one_phase=True)
+    with test_env.assert_raises_full_code("ORA-24756"):
+        await async_conn.tpc_commit(xid)
+
+
+async def test_7411(async_conn, async_cursor, test_env):
+    "7411 - test starting an already created transaction"
+    await async_cursor.execute("truncate table TestTempTable")
+    xid = async_conn.xid(7411, "txn7411", "branch1")
+    await async_conn.tpc_begin(xid)
+    with test_env.assert_raises_full_code("ORA-24757"):
+        await async_conn.tpc_begin(xid, oracledb.TPC_BEGIN_NEW)
+    with test_env.assert_raises_full_code("ORA-24797"):
+        await async_conn.tpc_begin(xid, oracledb.TPC_BEGIN_PROMOTE)
+    await async_conn.tpc_end()
+    for flag in [oracledb.TPC_BEGIN_NEW, oracledb.TPC_BEGIN_PROMOTE]:
+        with test_env.assert_raises_full_code("ORA-24757"):
+            await async_conn.tpc_begin(xid, flag)
+    await async_conn.tpc_rollback(xid)
+
+
+async def test_7412(async_conn, async_cursor, test_env):
+    "7412 - test resuming a prepared transaction"
+    await async_cursor.execute("truncate table TestTempTable")
+    xid = async_conn.xid(7412, "txn7412", "branch1")
+    await async_conn.tpc_begin(xid)
+    await async_conn.tpc_prepare(xid)
+    with test_env.assert_raises_full_code("ORA-24756"):
+        await async_conn.tpc_begin(xid, oracledb.TPC_BEGIN_RESUME)
+
+
+async def test_7413(async_conn, async_cursor, test_env):
+    "7413 - test tpc_begin and tpc_end with invalid parameters"
+    await async_cursor.execute("truncate table TestTempTable")
+    xid = async_conn.xid(7413, "txn7413", "branch1")
+    test_values = [
+        (async_conn.tpc_begin, "DPY-2050"),
+        (async_conn.tpc_end, "DPY-2051"),
+    ]
+    for tpc_function, error_code in test_values:
+        with pytest.raises(TypeError):
+            await tpc_function("invalid xid")
+        with test_env.assert_raises_full_code(error_code):
+            await tpc_function(xid, "invalid flag")
+        with test_env.assert_raises_full_code(error_code):
+            await tpc_function(xid, 70)
+
+
+async def test_7414(async_conn, async_cursor, test_env):
+    "7414 - test committing transaction without tpc_commit"
+    xid = async_conn.xid(7414, "txn7409", "branch1")
+    await async_conn.tpc_begin(xid)
+    with test_env.assert_raises_full_code("ORA-02089"):
+        await async_cursor.execute("truncate table TestTempTable")
+
+
+async def test_7415(async_conn, async_cursor, test_env):
+    "7415 - test tpc_commit when a commit is not needed"
+    xid = async_conn.xid(7416, "txn7416", "branch1")
+    await async_conn.tpc_begin(xid)
+    await async_cursor.execute("select * from TestTempTable")
+    await async_conn.tpc_end(xid)
+    await async_conn.tpc_prepare(xid)
+    with test_env.assert_raises_full_code("ORA-24756"):
+        await async_conn.tpc_commit(xid)
+
-        await self.conn.tpc_end(xid)
-
self.assertFalse(self.conn.transaction_in_progress) +async def test_7416(async_conn, async_cursor): + "7416 - test transaction_in_progress" + await async_cursor.execute("truncate table TestTempTable") + xid = async_conn.xid(7415, "txn7415", "branch1") + assert not async_conn.transaction_in_progress - await self.conn.tpc_prepare(xid) - self.assertFalse(self.conn.transaction_in_progress) + await async_conn.tpc_begin(xid) + assert async_conn.transaction_in_progress + await async_cursor.execute("insert into TestTempTable (IntCol) values (2)") - await self.conn.tpc_commit(xid) - self.assertFalse(self.conn.transaction_in_progress) + await async_conn.tpc_end(xid) + assert not async_conn.transaction_in_progress + await async_conn.tpc_prepare(xid) + assert not async_conn.transaction_in_progress -if __name__ == "__main__": - test_env.run_test_cases() + await async_conn.tpc_commit(xid) + assert not async_conn.transaction_in_progress diff --git a/tests/test_7500_binary_vector.py b/tests/test_7500_binary_vector.py index 625a093d..04905618 100644 --- a/tests/test_7500_binary_vector.py +++ b/tests/test_7500_binary_vector.py @@ -30,80 +30,74 @@ import array import oracledb -import test_env +import pytest -@test_env.skip_unless_binary_vectors_supported() -class TestCase(test_env.BaseTestCase): +@pytest.fixture(autouse=True) +def module_checks(skip_unless_binary_vectors_supported): + pass - def test_7500(self): - "7500 - test binding and fetching a BINARY format vector." - value = array.array("B", [4, 8, 12, 4, 98, 127, 25, 78]) - self.cursor.execute("delete from TestBinaryVectors") - self.cursor.execute( - """ - insert into TestBinaryVectors (IntCol, VectorBinaryCol) - values(1, :value) - """, - value=value, - ) - self.conn.commit() - self.cursor.execute("select VectorBinaryCol from TestBinaryVectors") - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, array.array) - self.assertEqual(fetched_value.typecode, "B") - self.assertEqual(fetched_value, value) - def test_7501(self): - "7501 - verify fetch info contents" - attr_names = [ - "name", - "type_code", - "vector_dimensions", - "vector_format", - ] - expected_values = [ - ["INTCOL", oracledb.DB_TYPE_NUMBER, None, None], - [ - "VECTORBINARYCOL", - oracledb.DB_TYPE_VECTOR, - 64, - oracledb.VECTOR_FORMAT_BINARY, - ], - ] - self.cursor.execute("select * from TestBinaryVectors") - values = [ - [getattr(i, n) for n in attr_names] - for i in self.cursor.description - ] - self.assertEqual(values, expected_values) - self.assertIs( - self.cursor.description[1].vector_format, - oracledb.VectorFormat.BINARY, - ) +def test_7500(conn, cursor): + "7500 - test binding and fetching a BINARY format vector." 
+ value = array.array("B", [4, 8, 12, 4, 98, 127, 25, 78]) + cursor.execute("delete from TestBinaryVectors") + cursor.execute( + """ + insert into TestBinaryVectors (IntCol, VectorBinaryCol) + values(1, :value) + """, + value=value, + ) + conn.commit() + cursor.execute("select VectorBinaryCol from TestBinaryVectors") + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, array.array) + assert fetched_value.typecode == "B" + assert fetched_value == value - def test_7502(self): - "7502 - test comparing BINARY vectors" - value = array.array("B", [20, 9, 15, 34, 108, 125, 35, 88]) - self.cursor.execute("delete from TestBinaryVectors") - self.cursor.execute( - """ - insert into TestBinaryVectors (IntCol, VectorBinaryCol) - values(1, :value) - """, - value=value, - ) - self.conn.commit() - self.cursor.execute( - """ - select vector_distance(VectorBinaryCol, :value) - from TestBinaryVectors - """, - value=value, - ) - (result,) = self.cursor.fetchone() - self.assertAlmostEqual(result, 0) +def test_7501(cursor): + "7501 - verify fetch info contents" + attr_names = [ + "name", + "type_code", + "vector_dimensions", + "vector_format", + ] + expected_values = [ + ["INTCOL", oracledb.DB_TYPE_NUMBER, None, None], + [ + "VECTORBINARYCOL", + oracledb.DB_TYPE_VECTOR, + 64, + oracledb.VECTOR_FORMAT_BINARY, + ], + ] + cursor.execute("select * from TestBinaryVectors") + values = [[getattr(i, n) for n in attr_names] for i in cursor.description] + assert values == expected_values + assert cursor.description[1].vector_format is oracledb.VectorFormat.BINARY -if __name__ == "__main__": - test_env.run_test_cases() + +def test_7502(conn, cursor): + "7502 - test comparing BINARY vectors" + value = array.array("B", [20, 9, 15, 34, 108, 125, 35, 88]) + cursor.execute("delete from TestBinaryVectors") + cursor.execute( + """ + insert into TestBinaryVectors (IntCol, VectorBinaryCol) + values(1, :value) + """, + value=value, + ) + conn.commit() + cursor.execute( + """ + select vector_distance(VectorBinaryCol, :value) + from TestBinaryVectors + """, + value=value, + ) + (result,) = cursor.fetchone() + assert result == pytest.approx(0) diff --git a/tests/test_7600_pipelining_async.py b/tests/test_7600_pipelining_async.py index 40017749..ad84328c 100644 --- a/tests/test_7600_pipelining_async.py +++ b/tests/test_7600_pipelining_async.py @@ -30,49 +30,740 @@ import decimal import oracledb -import test_env +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_7600(async_conn): + "7600 - test execute() and fetchall()." 
+ pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute("insert into TestTempTable (IntCol) values (:1)", [1]) + pipeline.add_execute( + "insert into TestTempTable (IntCol) values (:val)", dict(val=2) + ) + pipeline.add_commit() + pipeline.add_fetchall("select IntCol from TestTempTable order by IntCol") + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == [(1,), (2,)] + + +async def test_7601(async_conn): + "7601 - test executemany()" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:1)", [(2,), (3,)] + ) + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:data)", + [{"data": 4}, {"data": 5}], + ) + pipeline.add_commit() + pipeline.add_fetchall("select IntCol from TestTempTable order by IntCol") + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == [(2,), (3,), (4,), (5,)] + + +async def test_7602(async_conn): + "7602 - test fetchall() with arraysize" + data = [(1,), (2,), (3,), (4,)] + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:value)", + [{"value": i} for i, in data], + ) + pipeline.add_commit() + arraysize = 1 + op = pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + arraysize=arraysize, + ) + assert op.arraysize == arraysize + arraysize = len(data) + op = pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + arraysize=arraysize, + ) + assert op.arraysize == arraysize + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == data + assert results[-2].rows == data + + +async def test_7603(async_conn): + "7603 - test fetchall() with rowfactory" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'test_7003') + """ + ) + pipeline.add_commit() + + def rowfactory(*row): + column_names = ["INTCOL", "STRINGCOL1"] + return dict(zip(column_names, row)) + + pipeline.add_fetchall( + "select IntCol, StringCol1 from TestTempTable", + rowfactory=rowfactory, + ) + results = await async_conn.run_pipeline(pipeline) + expected_value = [{"INTCOL": 1, "STRINGCOL1": "test_7003"}] + assert results[-1].rows == expected_value + + +async def test_7604(async_conn): + "7604 - test fetchone()" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:1)", [(9,), (10,)] + ) + pipeline.add_commit() + pipeline.add_fetchone("select IntCol from TestTempTable order by IntCol") + pipeline.add_fetchone("select :1 from dual", [23]) + pipeline.add_fetchone("select :val from dual", {"val": 5}) + results = await async_conn.run_pipeline(pipeline) + assert results[-3].rows == [(9,)] + assert results[-2].rows == [(23,)] + assert results[-1].rows == [(5,)] + + +async def test_7605(async_conn): + "7605 - test fetchone() with rowfactory" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:int, :str) + """, + [{"int": 3, "str": "Mac"}, {"int": 4, "str": "Doc"}], + ) + pipeline.add_commit() + + def 
rowfactory(*row):
+        column_names = ["INT", "STRING"]
+        return dict(zip(column_names, row))
+
+    op = pipeline.add_fetchone(
+        "select IntCol, StringCol1 from TestTempTable order by IntCol",
+        rowfactory=rowfactory,
+    )
+    assert op.rowfactory == rowfactory
+    results = await async_conn.run_pipeline(pipeline)
+    assert results[-1].rows == [{"INT": 3, "STRING": "Mac"}]
+
+
+async def test_7606(async_conn):
+    "7606 - test fetchmany()"
+    data = [(i,) for i in range(10)]
+    pipeline = oracledb.create_pipeline()
+    pipeline.add_execute("truncate table TestTempTable")
+    pipeline.add_executemany(
+        "insert into TestTempTable (IntCol) values (:1)", data
+    )
+    pipeline.add_commit()
+    pipeline.add_fetchmany("select IntCol from TestTempTable order by IntCol")
+    pipeline.add_fetchmany("select :1 from dual", [1099])
+    pipeline.add_fetchmany("select :val from dual", {"val": 366})
+    results = await async_conn.run_pipeline(pipeline)
+    assert results[-3].rows == data
+    assert results[-2].rows == [(1099,)]
+    assert results[-1].rows == [(366,)]
+
+
+async def test_7607(async_conn):
+    "7607 - test fetchmany() with num_rows"
+    pipeline = oracledb.create_pipeline()
+    pipeline.add_execute("truncate table TestTempTable")
+    data = [(i,) for i in range(10)]
+    pipeline.add_executemany(
+        "insert into TestTempTable (IntCol) values (:1)", data
+    )
+    pipeline.add_commit()
+    num_rows = 7
+    op = pipeline.add_fetchmany(
+        "select IntCol from TestTempTable order by IntCol",
+        num_rows=num_rows,
+    )
+    assert op.num_rows == num_rows
+    results = await async_conn.run_pipeline(pipeline)
+    assert results[-1].rows == data[:num_rows]
+
+
+async def test_7608(async_conn):
+    "7608 - test fetchmany() with rowfactory and num_rows"
+    pipeline = oracledb.create_pipeline()
+    pipeline.add_execute("truncate table TestTempTable")
+    pipeline.add_executemany(
+        """
+        insert into TestTempTable (IntCol, StringCol1)
+        values (:int, :str)
+        """,
+        [{"int": 29, "str": "Feb"}, {"int": 4, "str": "Monday"}],
+    )
+    pipeline.add_commit()
+
+    def rowfactory(*row):
+        column_names = ["INT", "STRING"]
+        return dict(zip(column_names, row))
+
+    num_rows = 2
+    op = pipeline.add_fetchmany(
+        "select IntCol, StringCol1 from TestTempTable order by IntCol",
+        num_rows=num_rows,
+        rowfactory=rowfactory,
+    )
+    assert op.num_rows == num_rows
+    assert op.rowfactory == rowfactory
+    expected_value = [
+        {"INT": 4, "STRING": "Monday"},
+        {"INT": 29, "STRING": "Feb"},
+    ]
+    num_rows = 1
+    op = pipeline.add_fetchmany(
+        "select IntCol, StringCol1 from TestTempTable order by IntCol",
+        rowfactory=rowfactory,
+        num_rows=num_rows,
+    )
+    assert op.num_rows == num_rows
+    assert op.rowfactory == rowfactory
+    results = await async_conn.run_pipeline(pipeline)
+    assert results[-2].rows == expected_value
+    assert results[-1].rows == [{"INT": 4, "STRING": "Monday"}]
+
+
+async def test_7609(async_conn):
+    "7609 - test callfunc(), return_value and return_type"
+    pipeline = oracledb.create_pipeline()
+    pipeline.add_callfunc("func_Test", oracledb.DB_TYPE_NUMBER, ("Yes", 7))
+    kwargs = {"a_String": "Keyword", "a_ExtraAmount": 12}
+    pipeline.add_callfunc(
+        "func_Test", oracledb.DB_TYPE_NUMBER, keyword_parameters=kwargs
+    )
+
+    # parameters and keyword parameters
+    kwargs = {"a_ExtraAmount": 25}
+    func_name = "func_Test"
+    op = pipeline.add_callfunc(
+        func_name, oracledb.DB_TYPE_NUMBER, ["Mixed"], kwargs
+    )
+    assert op.name == func_name
+    assert op.return_type == oracledb.DB_TYPE_NUMBER
+    assert op.statement is None
+    results = await async_conn.run_pipeline(pipeline)
+    assert
results[0].return_value == 10 + assert results[1].return_value == 19 + assert results[2].return_value == 30 + + +async def test_7610(async_conn, async_cursor): + "7610 - test callproc() with parameters" + pipeline = oracledb.create_pipeline() + var = async_cursor.var(oracledb.DB_TYPE_NUMBER) + proc_name = "proc_Test" + params = ("hi", 5, var) + op = pipeline.add_callproc(proc_name, params) + assert op.name == proc_name + assert op.parameters == params + assert op.keyword_parameters is None + assert op.statement is None + assert op.arraysize == 0 + assert op.num_rows == 0 + await async_conn.run_pipeline(pipeline) + assert var.getvalue() == 2 + + +async def test_7611(async_conn, async_cursor): + "7611 - test callproc() with keyword_parameters" + in_out_value = async_cursor.var(oracledb.DB_TYPE_NUMBER) + in_out_value.setvalue(0, 7) + out_value = async_cursor.var(oracledb.DB_TYPE_NUMBER) + params = [] + kwargs = dict( + a_InValue="Peace", a_InOutValue=in_out_value, a_OutValue=out_value + ) + pipeline = oracledb.create_pipeline() + op = pipeline.add_callproc("proc_Test", params, kwargs) + assert op.parameters == params + assert op.keyword_parameters == kwargs + await async_conn.run_pipeline(pipeline) + assert in_out_value.getvalue() == 35 + assert out_value.getvalue() == 5 + + +async def test_7612(async_conn, async_cursor): + "7612 - test callproc() with parameters and keyword_parameters" + in_out_value = async_cursor.var(oracledb.DB_TYPE_NUMBER) + in_out_value.setvalue(0, 8) + out_value = async_cursor.var(oracledb.DB_TYPE_NUMBER) + params = ["Input_7612"] + kwargs = dict(a_InOutValue=in_out_value, a_OutValue=out_value) + pipeline = oracledb.create_pipeline() + op = pipeline.add_callproc("proc_Test", params, kwargs) + assert op.parameters == params + assert op.keyword_parameters == kwargs + await async_conn.run_pipeline(pipeline) + assert in_out_value.getvalue() == 80 + assert out_value.getvalue() == 10 + + +async def test_7613(async_conn): + "7613 - test fetchmany() num_rows with 0 and negative values" + data = [(i,) for i in range(10)] + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:1)", data + ) + sql = "select IntCol from TestTempTable" + op = pipeline.add_fetchmany(sql, num_rows=0) + assert op.statement == sql + with pytest.raises(OverflowError): + pipeline.add_fetchmany(sql, num_rows=-1) + with pytest.raises(OverflowError): + pipeline.add_fetchmany(sql, num_rows=-10) + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == [] + + +async def test_7614(async_conn): + "7614 - test add_commit with transaction_in_progress" + assert not async_conn.transaction_in_progress + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + "insert into TestTempTable (IntCol) values (5)", + ) + await async_conn.run_pipeline(pipeline) + assert async_conn.transaction_in_progress + pipeline = oracledb.create_pipeline() + pipeline.add_commit() + await async_conn.run_pipeline(pipeline) + assert not async_conn.transaction_in_progress + + +async def test_7615(async_conn): + "7615 - test getting an error in the middle of pipeline" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute("insert into TestTempTable (IntCol) values (:1)", [5]) + pipeline.add_commit() + pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + ) + 
pipeline.add_execute( + "insert into TestTempTable (IntCol) values (9, 'too many values')" + ) + pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + ) + results = await async_conn.run_pipeline(pipeline, continue_on_error=True) + expected_value = [(5,)] + assert results[-3].rows == expected_value + assert results[-2].error.full_code == "ORA-00913" + assert results[-1].rows == expected_value + + +async def test_7617(async_conn): + "7617 - test insert and update the inserted row" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute("insert into TestTempTable (IntCol) values (5)") + pipeline.add_execute("update TestTempTable set IntCol=25 where IntCol=5") + pipeline.add_commit() + pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + ) + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == [(25,)] + + +async def test_7618(async_conn): + "7618 - test insert and update inserted rows" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:1)", + [(i,) for i in range(100)], + ) + pipeline.add_execute("update TestTempTable set StringCol1 = 'UPD'") + pipeline.add_commit() + pipeline.add_fetchall( + "select IntCol, StringCol1 from TestTempTable order by IntCol", + ) + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == [(i, "UPD") for i in range(100)] + + +async def test_7619(async_conn): + "7619 - test insert many rows twice" + values1 = [(i,) for i in range(100)] + values2 = [(i,) for i in range(200, 205)] + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + sql = "insert into TestTempTable (IntCol) values (:1)" + pipeline.add_executemany(sql, values1) + pipeline.add_executemany(sql, values2) + pipeline.add_commit() + pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + ) + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == values1 + values2 + + +async def test_7620(async_conn): + "7620 - test insert and delete value" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:1)", + [(i,) for i in range(100)], + ) + pipeline.add_execute("delete TestTempTable") + pipeline.add_commit() + pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + ) + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == [] + + +async def test_7621(async_conn): + "7621 - test PipelineOp op_type" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable (IntCol) values (:1)", + [(1,), (2,)], + ) + pipeline.add_commit() + sql = "select IntCol from TestTempTable" + pipeline.add_fetchone(sql) + pipeline.add_fetchall(sql) + pipeline.add_fetchmany(sql) + pipeline.add_callproc("proc_Test", ("hi", 5, 2)) + pipeline.add_callfunc("func_Test", oracledb.DB_TYPE_NUMBER, ("Yes", 7)) + results = await async_conn.run_pipeline(pipeline) + expected_values = [ + oracledb.PIPELINE_OP_TYPE_EXECUTE, + oracledb.PIPELINE_OP_TYPE_EXECUTE_MANY, + oracledb.PIPELINE_OP_TYPE_COMMIT, + oracledb.PIPELINE_OP_TYPE_FETCH_ONE, + oracledb.PIPELINE_OP_TYPE_FETCH_ALL, + oracledb.PIPELINE_OP_TYPE_FETCH_MANY, + 
oracledb.PIPELINE_OP_TYPE_CALL_PROC, + oracledb.PIPELINE_OP_TYPE_CALL_FUNC, + ] + for result, expected_value in zip(results, expected_values): + assert result.operation.op_type == expected_value + + +async def test_7622(async_conn): + "7622 - test Pipeline, PipelineOp and PipelineOpResult repr()" + pipeline = oracledb.create_pipeline() + assert repr(pipeline) == "" + op = pipeline.add_commit() + assert repr(pipeline) == "" + assert repr(op) == "" + results = await async_conn.run_pipeline(pipeline) + assert ( + repr(results[0]) + == "" + ) + + +async def test_7623(async_conn): + "7623 - test getting an error at the beginning of a pipeline" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table NonExistentTable") + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute("insert into TestTempTable (IntCol) values (:1)", [5]) + pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + ) + results = await async_conn.run_pipeline(pipeline, continue_on_error=True) + expected_value = [(5,)] + assert results[0].error.full_code == "ORA-00942" + assert results[-1].rows == expected_value + + +async def test_7624(async_conn): + "7624 - test getting an error at the end of pipeline" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute("insert into TestTempTable (IntCol) values (:1)", [5]) + pipeline.add_fetchall( + "select IntCol from TestTempTable order by IntCol", + ) + pipeline.add_execute("insert into TestTempTable (IntCol) values (:1)", [5]) + results = await async_conn.run_pipeline(pipeline, continue_on_error=True) + expected_value = [(5,)] + assert results[-2].rows == expected_value + assert results[-1].error.full_code == "ORA-00001" + + +async def test_7625(async_conn): + "7625 - test pipeline with clobs" + clob = await async_conn.createlob(oracledb.DB_TYPE_CLOB, "Temp CLOB") + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestCLOBs") + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", ["CLOB"] + ) + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] + ) + pipeline.add_fetchall( + "select CLOBCol from TestCLOBs order by IntCol", + ) + results = await async_conn.run_pipeline(pipeline) + rows = results[-1].rows + + assert [await lob.read() for lob, in rows] == ["CLOB", "Temp CLOB"] + + +async def test_7626(async_conn): + "7626 - test nested cursors" + sql = """ + select 'Level 1 String', + cursor( + select 'Level 2 String', + cursor( + select 'Level3 String' from dual + ) from dual + ) from dual + """ + pipeline = oracledb.create_pipeline() + pipeline.add_fetchone(sql) + pipeline.add_fetchone("select user from dual") + results = await async_conn.run_pipeline(pipeline) + rows = results[0].rows + + async def transform_row(r): + return tuple([await transform_fn(v) for v in r]) + + async def transform_fn(v): + if isinstance(v, oracledb.AsyncCursor): + return [await transform_row(r) async for r in v] + return v + + rows = [await transform_row(r) async for r in rows[0][1]] + assert rows == [("Level 2 String", [("Level3 String",)])] + + +async def test_7627(async_conn): + "7627 - test executemany with number of iterations" + num_iterations = 4 + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestAllTypes") + pipeline.add_executemany( + "insert into TestAllTypes (NumberValue) values (1)", num_iterations + ) + pipeline.add_commit() + pipeline.add_fetchall( 
+ "select NumberValue from TestAllTypes order by NumberValue", + ) + results = await async_conn.run_pipeline(pipeline) + expected_value = [(1,) for _ in range(num_iterations)] + assert results[-1].rows == expected_value + + +async def test_7628(async_conn, async_cursor): + "7628 - test anonymous PL/SQL" + var = async_cursor.var(int) + pipeline = oracledb.create_pipeline() + sql = "begin :var := :value; end;" + pipeline.add_execute(sql, [var, 5]) + pipeline.add_execute(sql, [var, 10]) + pipeline.add_execute(sql, [var, 15]) + await async_conn.run_pipeline(pipeline) + assert var.getvalue() == 15 + + +async def test_7629(async_conn, async_cursor): + "7629 - test executemany() with PL/SQL" + values = [31, 6, 21, 17, 43] + out_bind = async_cursor.var(oracledb.DB_TYPE_NUMBER, arraysize=len(values)) + data = [(i, f"Test {i}", out_bind) for i in values] + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + """ + begin + insert into TestTempTable (IntCol, StringCol1) + values (:int_val, :str_val) + returning IntCol into :out_bind; + end; + """, + data, + ) + pipeline.add_commit() + await async_conn.run_pipeline(pipeline) + assert out_bind.values == values + + +async def test_7630(disable_fetch_lobs, async_conn): + "7630 - test fetch_lobs with add_fetchone()" + clob_value = "CLOB Data 7630" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestCLOBs") + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", + [clob_value], + ) + pipeline.add_fetchone("select CLOBCol from TestCLOBs order by IntCol") + pipeline.add_fetchone( + "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False + ) + res = await async_conn.run_pipeline(pipeline) + assert [res[-2].rows] == [[(clob_value,)]] + assert [res[-1].rows] == [[(clob_value,)]] + + +async def test_7631(async_conn): + "7631 - test pipeline with lobs > 32K" + blob_1_data = b"T" * 33000 + blob_2_data = b"B" * 33000 + blob = await async_conn.createlob(oracledb.DB_TYPE_BLOB, blob_1_data) + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestBLOBs") + pipeline.add_execute( + "insert into TestBLOBs (IntCol, BLOBCol) values (1, :1)", [blob] + ) + pipeline.add_execute( + "insert into TestBLOBs (IntCol, BLOBCol) values (2, :1)", + [blob_2_data], + ) + pipeline.add_fetchall( + "select BLOBCol from TestBLOBs order by IntCol", + ) + res = await async_conn.run_pipeline(pipeline) + expected_value = [blob_1_data, blob_2_data] + fetched_value = [await lob.read() for lob, in res[-1].rows] + assert fetched_value == expected_value + + +async def test_7632(async_conn): + "7632 - test ref cursor" + ref_cursor1 = async_conn.cursor() + ref_cursor2 = async_conn.cursor() + sql = """ + begin + open :pcursor for + select IntCol + from TestNumbers + order by IntCol; + end;""" + pipeline = oracledb.create_pipeline() + pipeline.add_execute(sql, [ref_cursor1]) + pipeline.add_execute(sql, [ref_cursor2]) + await async_conn.run_pipeline(pipeline) + assert await ref_cursor1.fetchall() == await ref_cursor2.fetchall() + + +async def test_7633(async_conn): + "7633 - test add_callproc() with ref cursor" + values = [(2, None, None, None), (3, None, None, None)] + ref_cursor = async_conn.cursor() + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_executemany( + "insert into TestTempTable values (:1, :2, :3, :4)", values + ) + pipeline.add_callproc("myrefcursorproc", [ref_cursor]) + 
await async_conn.run_pipeline(pipeline) + assert await ref_cursor.fetchall() == values + + +async def test_7634(async_conn): + "7634 - test empty pipeline" + pipeline = oracledb.create_pipeline() + results = await async_conn.run_pipeline(pipeline) + assert results == [] + + +async def test_7635(async_conn): + "7635 - test alter session" + sql = """ + select value FROM nls_session_parameters + WHERE parameter = 'NLS_DATE_FORMAT' + """ + (default_date_format,) = await async_conn.fetchone(sql) + date = datetime.datetime(2000, 12, 15, 7, 3) + pipeline = oracledb.create_pipeline() + pipeline.add_fetchone("select to_char(:1) from dual", [date]) + pipeline.add_execute( + "alter session set NLS_DATE_FORMAT='YYYY-MM-DD HH24:MI'" + ) + pipeline.add_fetchone("select to_char(:1) from dual", [date]) + pipeline.add_execute( + f"alter session set NLS_DATE_FORMAT='{default_date_format}'" + ) + pipeline.add_fetchone("select to_char(:1) from dual", [date]) + pipeline.add_fetchone(sql) + results = await async_conn.run_pipeline(pipeline) + assert results[2].rows == [("2000-12-15 07:03",)] + assert results[4].rows == results[0].rows + assert results[-1].rows == [(default_date_format,)] + + +async def test_7636(async_conn): + "7636 - test connection inputtypehandler" + + def input_type_handler(cursor, value, num_elements): + if isinstance(value, str): + return cursor.var( + oracledb.DB_TYPE_NUMBER, + arraysize=num_elements, + inconverter=lambda x: int(x), + ) + async_conn.inputtypehandler = input_type_handler + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + "insert into TestTempTable (IntCol) values (:1)", ["12"] + ) + pipeline.add_commit() + pipeline.add_fetchall("select IntCol from TestTempTable") + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == [(12,)] -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_7600(self): - "7600 - test execute() and fetchall()." 
+async def test_7637(async_conn, test_env): + "7637 - test fetch_decimals with add_fetchone()" + value = 7637 + with test_env.defaults_context_manager("fetch_decimals", True): pipeline = oracledb.create_pipeline() pipeline.add_execute("truncate table TestTempTable") pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [1] - ) - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:val)", dict(val=2) + "insert into TestTempTable (IntCol) values (:1)", [value] ) - pipeline.add_commit() - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol" + pipeline.add_fetchone("select IntCol from TestTempTable") + pipeline.add_fetchone( + "select IntCol from TestTempTable", fetch_decimals=False ) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, [(1,), (2,)]) + res = await async_conn.run_pipeline(pipeline) + assert isinstance(res[-2].rows[0][0], decimal.Decimal) + assert isinstance(res[-1].rows[0][0], int) - async def test_7601(self): - "7601 - test executemany()" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", [(2,), (3,)] - ) - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:data)", - [{"data": 4}, {"data": 5}], - ) - pipeline.add_commit() - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol" - ) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, [(2,), (3,), (4,), (5,)]) - async def test_7602(self): - "7602 - test fetchall() with arraysize" +async def test_7638(async_conn, test_env): + "7638 - test oracledb.defaults.arraysize" + arraysize = 1 + with test_env.defaults_context_manager("arraysize", arraysize): data = [(1,), (2,), (3,), (4,)] pipeline = oracledb.create_pipeline() pipeline.add_execute("truncate table TestTempTable") @@ -81,932 +772,240 @@ async def test_7602(self): [{"value": i} for i, in data], ) pipeline.add_commit() - arraysize = 1 op = pipeline.add_fetchall( "select IntCol from TestTempTable order by IntCol", - arraysize=arraysize, ) - self.assertEqual(op.arraysize, arraysize) - arraysize = len(data) + assert op.arraysize == arraysize + new_arraysize = 4 + oracledb.defaults.arraysize = new_arraysize op = pipeline.add_fetchall( "select IntCol from TestTempTable order by IntCol", - arraysize=arraysize, - ) - self.assertEqual(op.arraysize, arraysize) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, data) - self.assertEqual(results[-2].rows, data) - - async def test_7603(self): - "7603 - test fetchall() with rowfactory" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'test_7003') - """ - ) - pipeline.add_commit() - - def rowfactory(*row): - column_names = ["INTCOL", "STRINGCOL1"] - return dict(zip(column_names, row)) - - pipeline.add_fetchall( - "select IntCol, StringCol1 from TestTempTable", - rowfactory=rowfactory, - ) - results = await self.conn.run_pipeline(pipeline) - expected_value = [{"INTCOL": 1, "STRINGCOL1": "test_7003"}] - self.assertEqual(results[-1].rows, expected_value) - - async def test_7604(self): - "7604 - test fetchone()" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", 
[(9,), (10,)] - ) - pipeline.add_commit() - pipeline.add_fetchone( - "select IntCol from TestTempTable order by IntCol" - ) - pipeline.add_fetchone("select :1 from dual", [23]) - pipeline.add_fetchone("select :val from dual", {"val": 5}) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-3].rows, [(9,)]) - self.assertEqual(results[-2].rows, [(23,)]) - self.assertEqual(results[-1].rows, [(5,)]) - - async def test_7605(self): - "7605 - test fetchone() with rowfactory" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int, :str) - """, - [{"int": 3, "str": "Mac"}, {"int": 4, "str": "Doc"}], ) - pipeline.add_commit() + assert op.arraysize == new_arraysize + results = await async_conn.run_pipeline(pipeline) + assert results[-1].rows == data + assert results[-2].rows == data - def rowfactory(*row): - column_names = ["INT", "STRING"] - return dict(zip(column_names, row)) - op = pipeline.add_fetchone( - "select IntCol, StringCol1 from TestTempTable order by IntCol", - rowfactory=rowfactory, - ) - self.assertEqual(op.rowfactory, rowfactory) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, [{"INT": 3, "STRING": "Mac"}]) +async def test_7639(test_env): + "7639 - test autocommit" + conn1 = await test_env.get_connection_async() + conn1.autocommit = True + conn2 = await test_env.get_connection_async() - async def test_7606(self): - "7606 - test fetchmany()" - data = [(i,) for i in range(10)] - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", data - ) - pipeline.add_commit() - pipeline.add_fetchmany( - "select IntCol from TestTempTable order by IntCol" - ) - pipeline.add_fetchmany("select :1 from dual", [1099]) - pipeline.add_fetchmany("select :val from dual", {"val": 366}) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-3].rows, data) - self.assertEqual(results[-2].rows, [(1099,)]) - self.assertEqual(results[-1].rows, [(366,)]) - - async def test_7607(self): - "7607 - test fetchmany() with num_rows" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - data = [(i,) for i in range(10)] - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", data - ) - pipeline.add_commit() - num_rows = 7 - op = pipeline.add_fetchmany( - "select IntCol from TestTempTable order by IntCol", - num_rows=num_rows, - ) - self.assertEqual(op.num_rows, num_rows) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, data[:num_rows]) + pipeline1 = oracledb.create_pipeline() + pipeline1.add_execute("truncate table TestTempTable") + pipeline1.add_execute("insert into TestTempTable (IntCol) values (1)") - async def test_7608(self): - "7608 - test fetchmany() with rowfactory and num_rows" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:int, :str) - """, - [{"int": 29, "str": "Feb"}, {"int": 4, "str": "Monday"}], - ) - pipeline.add_commit() + pipeline2 = oracledb.create_pipeline() + pipeline2.add_execute("insert into TestTempTable (IntCol) values (2)") + pipeline2.add_commit() + pipeline2.add_fetchall("select IntCol from TestTempTable order by 
IntCol") - def rowfactory(*row): - column_names = ["INT", "STRING"] - return dict(zip(column_names, row)) + await conn1.run_pipeline(pipeline1) + results = await conn2.run_pipeline(pipeline2) + assert results[-1].rows == [(1,), (2,)] - num_rows = 2 - op = pipeline.add_fetchmany( - "select IntCol, StringCol1 from TestTempTable order by IntCol", - num_rows=num_rows, - rowfactory=rowfactory, - ) - self.assertEqual(op.num_rows, num_rows) - self.assertEqual(op.rowfactory, rowfactory) - expected_value = [ - {"INT": 4, "STRING": "Monday"}, - {"INT": 29, "STRING": "Feb"}, - ] - num_rows = 1 - op = pipeline.add_fetchmany( - "select IntCol, StringCol1 from TestTempTable order by IntCol", - rowfactory=rowfactory, - num_rows=num_rows, - ) - self.assertEqual(op.num_rows, num_rows) - self.assertEqual(op.rowfactory, rowfactory) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-2].rows, expected_value) - self.assertEqual(results[-1].rows, [{"INT": 4, "STRING": "Monday"}]) - - async def test_7609(self): - "7609 - test callfunc(), return_value and return_type" - pipeline = oracledb.create_pipeline() - pipeline.add_callfunc("func_Test", oracledb.DB_TYPE_NUMBER, ("Yes", 7)) - kwargs = {"a_String": "Keyword", "a_ExtraAmount": 12} - pipeline.add_callfunc( - "func_Test", oracledb.DB_TYPE_NUMBER, keyword_parameters=kwargs - ) - # paramters and keyword parameters - kwargs = {"a_ExtraAmount": 25} - func_name = "func_Test" - op = pipeline.add_callfunc( - func_name, oracledb.DB_TYPE_NUMBER, ["Mixed"], kwargs - ) - self.assertEqual(op.name, func_name) - self.assertEqual(op.return_type, oracledb.DB_TYPE_NUMBER) - self.assertIsNone(op.statement) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[0].return_value, 10) - self.assertEqual(results[1].return_value, 19) - self.assertEqual(results[2].return_value, 30) - - async def test_7610(self): - "7610 - test callproc() with parameters" - pipeline = oracledb.create_pipeline() - var = self.cursor.var(oracledb.DB_TYPE_NUMBER) - proc_name = "proc_Test" - params = ("hi", 5, var) - op = pipeline.add_callproc(proc_name, params) - self.assertEqual(op.name, proc_name) - self.assertEqual(op.parameters, params) - self.assertIsNone(op.keyword_parameters) - self.assertIsNone(op.statement) - self.assertEqual(op.arraysize, 0) - self.assertEqual(op.num_rows, 0) - await self.conn.run_pipeline(pipeline) - self.assertEqual(var.getvalue(), 2) - - async def test_7611(self): - "7611 - test callproc() with keyword_parameters" - in_out_value = self.cursor.var(oracledb.DB_TYPE_NUMBER) - in_out_value.setvalue(0, 7) - out_value = self.cursor.var(oracledb.DB_TYPE_NUMBER) - params = [] - kwargs = dict( - a_InValue="Peace", a_InOutValue=in_out_value, a_OutValue=out_value - ) - pipeline = oracledb.create_pipeline() - op = pipeline.add_callproc("proc_Test", params, kwargs) - self.assertEqual(op.parameters, params) - self.assertEqual(op.keyword_parameters, kwargs) - await self.conn.run_pipeline(pipeline) - self.assertEqual(in_out_value.getvalue(), 35) - self.assertEqual(out_value.getvalue(), 5) - - async def test_7612(self): - "7612 - test callproc() with parameters and keyword_parameters" - in_out_value = self.cursor.var(oracledb.DB_TYPE_NUMBER) - in_out_value.setvalue(0, 8) - out_value = self.cursor.var(oracledb.DB_TYPE_NUMBER) - params = ["Input_7612"] - kwargs = dict(a_InOutValue=in_out_value, a_OutValue=out_value) - pipeline = oracledb.create_pipeline() - op = pipeline.add_callproc("proc_Test", params, kwargs) - self.assertEqual(op.parameters, 
params) - self.assertEqual(op.keyword_parameters, kwargs) - await self.conn.run_pipeline(pipeline) - self.assertEqual(in_out_value.getvalue(), 80) - self.assertEqual(out_value.getvalue(), 10) - - async def test_7613(self): - "7613 - test fetchmany() num_rows with 0 and negative values" - data = [(i,) for i in range(10)] - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", data - ) - sql = "select IntCol from TestTempTable" - op = pipeline.add_fetchmany(sql, num_rows=0) - self.assertEqual(op.statement, sql) - with self.assertRaises(OverflowError): - pipeline.add_fetchmany(sql, num_rows=-1) - with self.assertRaises(OverflowError): - pipeline.add_fetchmany(sql, num_rows=-10) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, []) - - async def test_7614(self): - "7614 - test add_commit with transaction_in_progress" - self.assertFalse(self.conn.transaction_in_progress) - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (5)", - ) - await self.conn.run_pipeline(pipeline) - self.assertTrue(self.conn.transaction_in_progress) - pipeline = oracledb.create_pipeline() - pipeline.add_commit() - await self.conn.run_pipeline(pipeline) - self.assertFalse(self.conn.transaction_in_progress) - - async def test_7615(self): - "7615 - test getting an error in the middle of pipeline" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [5] - ) - pipeline.add_commit() - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (9, 'too many values')" - ) - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - results = await self.conn.run_pipeline( - pipeline, continue_on_error=True - ) - expected_value = [(5,)] - self.assertEqual(results[-3].rows, expected_value) - self.assertEqual(results[-2].error.full_code, "ORA-00913") - self.assertEqual(results[-1].rows, expected_value) - - async def test_7617(self): - "7617 - test insert and update the inserted row" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute("insert into TestTempTable (IntCol) values (5)") - pipeline.add_execute( - "update TestTempTable set IntCol=25 where IntCol=5" - ) - pipeline.add_commit() - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, [(25,)]) - - async def test_7618(self): - "7618 - test insert and update inserted rows" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", - [(i,) for i in range(100)], - ) - pipeline.add_execute("update TestTempTable set StringCol1 = 'UPD'") - pipeline.add_commit() - pipeline.add_fetchall( - "select IntCol, StringCol1 from TestTempTable order by IntCol", - ) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, [(i, "UPD") for i in range(100)]) - - async def test_7619(self): - "7619 - test insert many rows twice" - values1 = [(i,) for i in range(100)] - values2 = [(i,) 
for i in range(200, 205)] - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - sql = "insert into TestTempTable (IntCol) values (:1)" - pipeline.add_executemany(sql, values1) - pipeline.add_executemany(sql, values2) - pipeline.add_commit() - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, values1 + values2) - - async def test_7620(self): - "7620 - test insert and delete value" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", - [(i,) for i in range(100)], - ) - pipeline.add_execute("delete TestTempTable") - pipeline.add_commit() - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, []) - - async def test_7621(self): - "7621 - test PipelineOp op_type" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:1)", - [(1,), (2,)], - ) - pipeline.add_commit() - sql = "select IntCol from TestTempTable" - pipeline.add_fetchone(sql) - pipeline.add_fetchall(sql) - pipeline.add_fetchmany(sql) - pipeline.add_callproc("proc_Test", ("hi", 5, 2)) - pipeline.add_callfunc("func_Test", oracledb.DB_TYPE_NUMBER, ("Yes", 7)) - results = await self.conn.run_pipeline(pipeline) - expected_values = [ - oracledb.PIPELINE_OP_TYPE_EXECUTE, - oracledb.PIPELINE_OP_TYPE_EXECUTE_MANY, - oracledb.PIPELINE_OP_TYPE_COMMIT, - oracledb.PIPELINE_OP_TYPE_FETCH_ONE, - oracledb.PIPELINE_OP_TYPE_FETCH_ALL, - oracledb.PIPELINE_OP_TYPE_FETCH_MANY, - oracledb.PIPELINE_OP_TYPE_CALL_PROC, - oracledb.PIPELINE_OP_TYPE_CALL_FUNC, - ] - for result, expected_value in zip(results, expected_values): - self.assertEqual(result.operation.op_type, expected_value) - - async def test_7622(self): - "7622 - test Pipeline, PipelineOp and PipelineOpResult repr()" - pipeline = oracledb.create_pipeline() - self.assertEqual( - repr(pipeline), "" - ) - op = pipeline.add_commit() - self.assertEqual( - repr(pipeline), "" - ) - self.assertEqual(repr(op), "") - results = await self.conn.run_pipeline(pipeline) - self.assertEqual( - repr(results[0]), - "", - ) - - async def test_7623(self): - "7623 - test getting an error at the beginning of a pipeline" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table NonExistentTable") - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [5] - ) - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - results = await self.conn.run_pipeline( - pipeline, continue_on_error=True - ) - expected_value = [(5,)] - self.assertEqual(results[0].error.full_code, "ORA-00942") - self.assertEqual(results[-1].rows, expected_value) - - async def test_7624(self): - "7624 - test getting an error at the end of pipeline" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [5] - ) - pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [5] - ) - results = await self.conn.run_pipeline( - 
pipeline, continue_on_error=True - ) - expected_value = [(5,)] - self.assertEqual(results[-2].rows, expected_value) - self.assertEqual(results[-1].error.full_code, "ORA-00001") - - async def test_7625(self): - "7625 - test pipeline with clobs" - clob = await self.conn.createlob(oracledb.DB_TYPE_CLOB, "Temp CLOB") - pipeline = oracledb.create_pipeline() - pipeline.add_execute("delete from TestCLOBs") - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", ["CLOB"] - ) - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] - ) - pipeline.add_fetchall( - "select CLOBCol from TestCLOBs order by IntCol", - ) - results = await self.conn.run_pipeline(pipeline) - rows = results[-1].rows - - self.assertEqual( - [await lob.read() for lob, in rows], ["CLOB", "Temp CLOB"] - ) - - async def test_7626(self): - "7626 - test nested cursors" - sql = """ - select 'Level 1 String', - cursor( - select 'Level 2 String', - cursor( - select 'Level3 String' from dual - ) from dual - ) from dual +async def test_7640(async_conn, async_cursor): + "7640 - test DML returning" + out_value = async_cursor.var(str, arraysize=2) + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( """ - pipeline = oracledb.create_pipeline() - pipeline.add_fetchone(sql) - pipeline.add_fetchone("select user from dual") - results = await self.conn.run_pipeline(pipeline) - rows = results[0].rows - - async def transform_row(r): - return tuple([await transform_fn(v) for v in r]) - - async def transform_fn(v): - if isinstance(v, oracledb.AsyncCursor): - return [await transform_row(r) async for r in v] - return v - - rows = [await transform_row(r) async for r in rows[0][1]] - self.assertEqual(rows, [("Level 2 String", [("Level3 String",)])]) - - async def test_7627(self): - "7627 - test executemany with number of iterations" - num_iterations = 4 - pipeline = oracledb.create_pipeline() - pipeline.add_execute("delete from TestAllTypes") - pipeline.add_executemany( - "insert into TestAllTypes (NumberValue) values (1)", num_iterations - ) - pipeline.add_commit() - pipeline.add_fetchall( - "select NumberValue from TestAllTypes order by NumberValue", - ) - results = await self.conn.run_pipeline(pipeline) - expected_value = [(1,) for _ in range(num_iterations)] - self.assertEqual(results[-1].rows, expected_value) - - async def test_7628(self): - "7628 - test anonymous PL/SQL" - var = self.cursor.var(int) - pipeline = oracledb.create_pipeline() - sql = "begin :var := :value; end;" - pipeline.add_execute(sql, [var, 5]) - pipeline.add_execute(sql, [var, 10]) - pipeline.add_execute(sql, [var, 15]) - await self.conn.run_pipeline(pipeline) - self.assertEqual(var.getvalue(), 15) - - async def test_7629(self): - "7629 - test executemany() with PL/SQL" - values = [31, 6, 21, 17, 43] - out_bind = self.cursor.var( - oracledb.DB_TYPE_NUMBER, arraysize=len(values) - ) - data = [(i, f"Test {i}", out_bind) for i in values] - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - """ - begin - insert into TestTempTable (IntCol, StringCol1) - values (:int_val, :str_val) - returning IntCol into :out_bind; - end; - """, - data, - ) - pipeline.add_commit() - await self.conn.run_pipeline(pipeline) - self.assertEqual(out_bind.values, values) - - async def test_7630(self): - "7630 - test fetch_lobs with add_fetchone()" - clob_value = "CLOB Data 7630" - pipeline = 
oracledb.create_pipeline() - pipeline.add_execute("delete from TestCLOBs") - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", - [clob_value], - ) - with test_env.DefaultsContextManager("fetch_lobs", False): - pipeline.add_fetchone( - "select CLOBCol from TestCLOBs order by IntCol", - ) - pipeline.add_fetchone( - "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False - ) - res = await self.conn.run_pipeline(pipeline) - self.assertEqual([res[-2].rows], [[(clob_value,)]]) - self.assertEqual([res[-1].rows], [[(clob_value,)]]) - - async def test_7631(self): - "7631 - test pipeline with lobs > 32K" - blob_1_data = b"T" * 33000 - blob_2_data = b"B" * 33000 - blob = await self.conn.createlob(oracledb.DB_TYPE_BLOB, blob_1_data) - pipeline = oracledb.create_pipeline() - pipeline.add_execute("delete from TestBLOBs") - pipeline.add_execute( - "insert into TestBLOBs (IntCol, BLOBCol) values (1, :1)", [blob] - ) - pipeline.add_execute( - "insert into TestBLOBs (IntCol, BLOBCol) values (2, :1)", - [blob_2_data], - ) - pipeline.add_fetchall( - "select BLOBCol from TestBLOBs order by IntCol", - ) - res = await self.conn.run_pipeline(pipeline) - expected_value = [blob_1_data, blob_2_data] - fetched_value = [await lob.read() for lob, in res[-1].rows] - self.assertEqual(fetched_value, expected_value) - - async def test_7632(self): - "7632 - test ref cursor" - ref_cursor1 = self.conn.cursor() - ref_cursor2 = self.conn.cursor() - sql = """ - begin - open :pcursor for - select IntCol - from TestNumbers - order by IntCol; - end;""" - pipeline = oracledb.create_pipeline() - pipeline.add_execute(sql, [ref_cursor1]) - pipeline.add_execute(sql, [ref_cursor2]) - await self.conn.run_pipeline(pipeline) - self.assertEqual( - await ref_cursor1.fetchall(), await ref_cursor2.fetchall() - ) - - async def test_7633(self): - "7633 - test add_callproc() with ref cursor" - values = [(2, None, None, None), (3, None, None, None)] - ref_cursor = self.conn.cursor() - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable values (:1, :2, :3, :4)", values - ) - pipeline.add_callproc("myrefcursorproc", [ref_cursor]) - await self.conn.run_pipeline(pipeline) - self.assertEqual(await ref_cursor.fetchall(), values) - - async def test_7634(self): - "7634 - test empty pipeline" - pipeline = oracledb.create_pipeline() - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results, []) - - async def test_7635(self): - "7635 - test alter session" - sql = """ - select value FROM nls_session_parameters - WHERE parameter = 'NLS_DATE_FORMAT' + insert into TestTempTable (IntCol, StringCol1) + values (1, 'Value for first row') """ - (default_date_format,) = await self.conn.fetchone(sql) - date = datetime.datetime(2000, 12, 15, 7, 3) - pipeline = oracledb.create_pipeline() - pipeline.add_fetchone("select to_char(:1) from dual", [date]) - pipeline.add_execute( - "alter session set NLS_DATE_FORMAT='YYYY-MM-DD HH24:MI'" - ) - pipeline.add_fetchone("select to_char(:1) from dual", [date]) - pipeline.add_execute( - f"alter session set NLS_DATE_FORMAT='{default_date_format}'" - ) - pipeline.add_fetchone("select to_char(:1) from dual", [date]) - pipeline.add_fetchone(sql) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[2].rows, [("2000-12-15 07:03",)]) - self.assertEqual(results[4].rows, results[0].rows) - self.assertEqual(results[-1].rows, [(default_date_format,)]) - - async 
def test_7636(self): - "7636 - test connection inputtypehandler" - - def input_type_handler(cursor, value, num_elements): - if isinstance(value, str): - return cursor.var( - oracledb.DB_TYPE_NUMBER, - arraysize=num_elements, - inconverter=lambda x: int(x), - ) - - self.conn.inputtypehandler = input_type_handler + ) + pipeline.add_execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (2, 'Value for second row') + """ + ) + pipeline.add_execute( + """ + update TestTempTable set + StringCol1 = StringCol1 || ' (Modified)' + returning StringCol1 into :1 + """, + [out_value], + ) + pipeline.add_execute("update TestTempTable set StringCol1 = 'Fixed'") + pipeline.add_commit() + pipeline.add_fetchall( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + results = await async_conn.run_pipeline(pipeline) + expected_data = [(1, "Fixed"), (2, "Fixed")] + assert results[-1].rows == expected_data + assert out_value.getvalue() == [ + "Value for first row (Modified)", + "Value for second row (Modified)", + ] + + +async def test_7641(async_conn): + "7641 - test the columns attribute on results" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'Value for first row') + """ + ) + pipeline.add_commit() + pipeline.add_fetchone("select IntCol, StringCol1 from TestTempTable") + results = await async_conn.run_pipeline(pipeline) + assert results[0].columns is None + assert results[1].columns is None + assert results[2].columns is None + names = [i.name for i in results[3].columns] + assert names == ["INTCOL", "STRINGCOL1"] + + +async def test_7642(async_conn): + "7642 - test the columns attribute on single operation" + pipeline = oracledb.create_pipeline() + pipeline.add_fetchone("select user from dual") + results = await async_conn.run_pipeline(pipeline) + names = [i.name for i in results[0].columns] + assert names == ["USER"] + + +async def test_7643(async_conn, async_cursor, test_env): + "7643 - test DML returning with error - pipeline error" + out_value = async_cursor.var(oracledb.DB_TYPE_RAW) + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'Value for first row') + returning StringCol1 into :1 + """, + [out_value], + ) + pipeline.add_commit() + pipeline.add_fetchone("select user from dual") + with test_env.assert_raises_full_code("ORA-01465"): + await async_conn.run_pipeline(pipeline) + await async_cursor.execute("select user from dual") + (fetched_value,) = await async_cursor.fetchone() + assert fetched_value == test_env.main_user.upper() + + +async def test_7644(async_conn, async_cursor, test_env): + "7644 - test DML returning with error - pipeline continue" + out_value = async_cursor.var(oracledb.DB_TYPE_RAW) + pipeline = oracledb.create_pipeline() + pipeline.add_execute("truncate table TestTempTable") + pipeline.add_execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (1, 'Value for first row') + returning StringCol1 into :1 + """, + [out_value], + ) + pipeline.add_commit() + pipeline.add_fetchone("select user from dual") + results = await async_conn.run_pipeline(pipeline, continue_on_error=True) + assert results[1].error.full_code == "ORA-01465" + user = test_env.main_user.upper() + assert results[3].rows == [(user,)] + await async_cursor.execute("select user from dual") + 
(fetched_value,) = await async_cursor.fetchone() + assert fetched_value == test_env.main_user.upper() + + +async def test_7645(async_conn, test_env): + "7645 - test fetch_decimals with add_fetchmany()" + value = 7645 + with test_env.defaults_context_manager("fetch_decimals", True): pipeline = oracledb.create_pipeline() pipeline.add_execute("truncate table TestTempTable") pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", ["12"] + "insert into TestTempTable (IntCol) values (:1)", [value] ) - pipeline.add_commit() - pipeline.add_fetchall("select IntCol from TestTempTable") - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, [(12,)]) - - async def test_7637(self): - "7637 - test fetch_decimals with add_fetchone()" - value = 7637 - with test_env.DefaultsContextManager("fetch_decimals", True): - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [value] - ) - pipeline.add_fetchone("select IntCol from TestTempTable") - pipeline.add_fetchone( - "select IntCol from TestTempTable", fetch_decimals=False - ) - res = await self.conn.run_pipeline(pipeline) - self.assertTrue(isinstance(res[-2].rows[0][0], decimal.Decimal)) - self.assertTrue(isinstance(res[-1].rows[0][0], int)) - - async def test_7638(self): - "7638 - test oracledb.defaults.arraysize" - arraysize = 1 - with test_env.DefaultsContextManager("arraysize", arraysize): - data = [(1,), (2,), (3,), (4,)] - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_executemany( - "insert into TestTempTable (IntCol) values (:value)", - [{"value": i} for i, in data], - ) - pipeline.add_commit() - op = pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - self.assertEqual(op.arraysize, arraysize) - new_arraysize = 4 - oracledb.defaults.arraysize = new_arraysize - op = pipeline.add_fetchall( - "select IntCol from TestTempTable order by IntCol", - ) - self.assertEqual(op.arraysize, new_arraysize) - results = await self.conn.run_pipeline(pipeline) - self.assertEqual(results[-1].rows, data) - self.assertEqual(results[-2].rows, data) - - async def test_7639(self): - "7639 - test autocommit" - conn1 = await test_env.get_connection_async() - conn1.autocommit = True - conn2 = await test_env.get_connection_async() - - pipeline1 = oracledb.create_pipeline() - pipeline1.add_execute("truncate table TestTempTable") - pipeline1.add_execute("insert into TestTempTable (IntCol) values (1)") - - pipeline2 = oracledb.create_pipeline() - pipeline2.add_execute("insert into TestTempTable (IntCol) values (2)") - pipeline2.add_commit() - pipeline2.add_fetchall( - "select IntCol from TestTempTable order by IntCol" + pipeline.add_fetchmany("select IntCol from TestTempTable") + pipeline.add_fetchmany( + "select IntCol from TestTempTable", fetch_decimals=False ) + res = await async_conn.run_pipeline(pipeline) + assert isinstance(res[-2].rows[0][0], decimal.Decimal) + assert isinstance(res[-1].rows[0][0], int) - await conn1.run_pipeline(pipeline1) - results = await conn2.run_pipeline(pipeline2) - self.assertEqual(results[-1].rows, [(1,), (2,)]) - async def test_7640(self): - "7640 - test DML returning" - out_value = self.cursor.var(str, arraysize=2) +async def test_7646(async_conn, test_env): + "7646 - test fetch_decimals with add_fetchall()" + value = 7646 + with test_env.defaults_context_manager("fetch_decimals", True): 
pipeline = oracledb.create_pipeline() pipeline.add_execute("truncate table TestTempTable") pipeline.add_execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'Value for first row') - """ - ) - pipeline.add_execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (2, 'Value for second row') - """ + "insert into TestTempTable (IntCol) values (:1)", [value] ) - pipeline.add_execute( - """ - update TestTempTable set - StringCol1 = StringCol1 || ' (Modified)' - returning StringCol1 into :1 - """, - [out_value], - ) - pipeline.add_execute("update TestTempTable set StringCol1 = 'Fixed'") - pipeline.add_commit() + pipeline.add_fetchall("select IntCol from TestTempTable") pipeline.add_fetchall( - "select IntCol, StringCol1 from TestTempTable order by IntCol" - ) - results = await self.conn.run_pipeline(pipeline) - expected_data = [(1, "Fixed"), (2, "Fixed")] - self.assertEqual(results[-1].rows, expected_data) - self.assertEqual( - out_value.getvalue(), - [ - "Value for first row (Modified)", - "Value for second row (Modified)", - ], - ) - - async def test_7641(self): - "7641 - test the columns attribute on results" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'Value for first row') - """ - ) - pipeline.add_commit() - pipeline.add_fetchone("select IntCol, StringCol1 from TestTempTable") - results = await self.conn.run_pipeline(pipeline) - self.assertIsNone(results[0].columns) - self.assertIsNone(results[1].columns) - self.assertIsNone(results[2].columns) - names = [i.name for i in results[3].columns] - self.assertEqual(names, ["INTCOL", "STRINGCOL1"]) - - async def test_7642(self): - "7642 - test the columns attribute on single operation" - pipeline = oracledb.create_pipeline() - pipeline.add_fetchone("select user from dual") - results = await self.conn.run_pipeline(pipeline) - names = [i.name for i in results[0].columns] - self.assertEqual(names, ["USER"]) - - async def test_7643(self): - "7643 - test DML returning with error - pipeline error" - out_value = self.cursor.var(oracledb.DB_TYPE_RAW) - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'Value for first row') - returning StringCol1 into :1 - """, - [out_value], - ) - pipeline.add_commit() - pipeline.add_fetchone("select user from dual") - with self.assertRaisesFullCode("ORA-01465"): - await self.conn.run_pipeline(pipeline) - await self.cursor.execute("select user from dual") - (fetched_value,) = await self.cursor.fetchone() - self.assertEqual(fetched_value, test_env.get_main_user().upper()) - - async def test_7644(self): - "7644 - test DML returning with error - pipeline continue" - out_value = self.cursor.var(oracledb.DB_TYPE_RAW) - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (1, 'Value for first row') - returning StringCol1 into :1 - """, - [out_value], - ) - pipeline.add_commit() - pipeline.add_fetchone("select user from dual") - results = await self.conn.run_pipeline( - pipeline, continue_on_error=True - ) - self.assertEqual(results[1].error.full_code, "ORA-01465") - user = test_env.get_main_user().upper() - self.assertEqual(results[3].rows, [(user,)]) - await self.cursor.execute("select 
user from dual") - (fetched_value,) = await self.cursor.fetchone() - self.assertEqual(fetched_value, test_env.get_main_user().upper()) - - async def test_7645(self): - "7645 - test fetch_decimals with add_fetchmany()" - value = 7645 - with test_env.DefaultsContextManager("fetch_decimals", True): - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [value] - ) - pipeline.add_fetchmany("select IntCol from TestTempTable") - pipeline.add_fetchmany( - "select IntCol from TestTempTable", fetch_decimals=False - ) - res = await self.conn.run_pipeline(pipeline) - self.assertTrue(isinstance(res[-2].rows[0][0], decimal.Decimal)) - self.assertTrue(isinstance(res[-1].rows[0][0], int)) - - async def test_7646(self): - "7646 - test fetch_decimals with add_fetchall()" - value = 7646 - with test_env.DefaultsContextManager("fetch_decimals", True): - pipeline = oracledb.create_pipeline() - pipeline.add_execute("truncate table TestTempTable") - pipeline.add_execute( - "insert into TestTempTable (IntCol) values (:1)", [value] - ) - pipeline.add_fetchall("select IntCol from TestTempTable") - pipeline.add_fetchall( - "select IntCol from TestTempTable", fetch_decimals=False - ) - res = await self.conn.run_pipeline(pipeline) - self.assertTrue(isinstance(res[-2].rows[0][0], decimal.Decimal)) - self.assertTrue(isinstance(res[-1].rows[0][0], int)) - - async def test_7647(self): - "7647 - test fetch_lobs with add_fetchmany()" - clob_1_value = "CLOB Data 7647 - One" - clob_2_value = "CLOB Data 7647 - Two" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("delete from TestCLOBs") - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", - [clob_1_value], - ) - clob = await self.conn.createlob(oracledb.DB_TYPE_CLOB, clob_2_value) - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] - ) - with test_env.DefaultsContextManager("fetch_lobs", False): - pipeline.add_fetchmany( - "select CLOBCol from TestCLOBs order by IntCol", - ) + "select IntCol from TestTempTable", fetch_decimals=False + ) + res = await async_conn.run_pipeline(pipeline) + assert isinstance(res[-2].rows[0][0], decimal.Decimal) + assert isinstance(res[-1].rows[0][0], int) + + +async def test_7647(async_conn, test_env): + "7647 - test fetch_lobs with add_fetchmany()" + clob_1_value = "CLOB Data 7647 - One" + clob_2_value = "CLOB Data 7647 - Two" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestCLOBs") + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", + [clob_1_value], + ) + clob = await async_conn.createlob(oracledb.DB_TYPE_CLOB, clob_2_value) + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] + ) + with test_env.defaults_context_manager("fetch_lobs", False): pipeline.add_fetchmany( - "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False - ) - - res = await self.conn.run_pipeline(pipeline) - self.assertEqual([res[-1].rows], [[(clob_1_value,), (clob_2_value,)]]) - self.assertEqual([res[-2].rows], [[(clob_1_value,), (clob_2_value,)]]) - - async def test_7648(self): - "7648 - test fetch_lobs with add_fetchall()" - clob_1_value = "CLOB Data 7648 - One" - clob_2_value = "CLOB Data 7648 - Two" - pipeline = oracledb.create_pipeline() - pipeline.add_execute("delete from TestCLOBs") - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (1, 
:1)", - [clob_1_value], - ) - clob = await self.conn.createlob(oracledb.DB_TYPE_CLOB, clob_2_value) - pipeline.add_execute( - "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] + "select CLOBCol from TestCLOBs order by IntCol", ) - with test_env.DefaultsContextManager("fetch_lobs", False): - pipeline.add_fetchall( - "select CLOBCol from TestCLOBs order by IntCol", - ) + pipeline.add_fetchmany( + "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False + ) + + res = await async_conn.run_pipeline(pipeline) + assert [res[-1].rows] == [[(clob_1_value,), (clob_2_value,)]] + assert [res[-2].rows] == [[(clob_1_value,), (clob_2_value,)]] + + +async def test_7648(async_conn, test_env): + "7648 - test fetch_lobs with add_fetchall()" + clob_1_value = "CLOB Data 7648 - One" + clob_2_value = "CLOB Data 7648 - Two" + pipeline = oracledb.create_pipeline() + pipeline.add_execute("delete from TestCLOBs") + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (1, :1)", + [clob_1_value], + ) + clob = await async_conn.createlob(oracledb.DB_TYPE_CLOB, clob_2_value) + pipeline.add_execute( + "insert into TestCLOBs (IntCol, CLOBCol) values (2, :1)", [clob] + ) + with test_env.defaults_context_manager("fetch_lobs", False): pipeline.add_fetchall( - "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False + "select CLOBCol from TestCLOBs order by IntCol", ) + pipeline.add_fetchall( + "select CLOBCol from TestCLOBs order by IntCol", fetch_lobs=False + ) - res = await self.conn.run_pipeline(pipeline) - self.assertEqual([res[-1].rows], [[(clob_1_value,), (clob_2_value,)]]) - self.assertEqual([res[-2].rows], [[(clob_1_value,), (clob_2_value,)]]) - - -if __name__ == "__main__": - test_env.run_test_cases() + res = await async_conn.run_pipeline(pipeline) + assert [res[-1].rows] == [[(clob_1_value,), (clob_2_value,)]] + assert [res[-2].rows] == [[(clob_1_value,), (clob_2_value,)]] diff --git a/tests/test_7700_sparse_vector.py b/tests/test_7700_sparse_vector.py index 5b706d8c..41481155 100644 --- a/tests/test_7700_sparse_vector.py +++ b/tests/test_7700_sparse_vector.py @@ -29,764 +29,737 @@ import array import json -import oracledb -import test_env - -@test_env.skip_unless_sparse_vectors_supported() -class TestCase(test_env.BaseTestCase): - def __test_insert_and_fetch(self, vector, column_name, expected_typecode): - """ - Test inserting sparse and fetching from a dense vector column. - """ - self.cursor.execute("delete from TestVectors") - self.cursor.execute( - f""" - insert into TestVectors (IntCol, {column_name}) - values(1, :vector) - """, - vector=vector, - ) - self.conn.commit() - self.cursor.execute(f"select {column_name} from TestVectors") - (fetched_value,) = self.cursor.fetchone() - dense_values = [0 for _ in range(vector.num_dimensions)] - for i, index in enumerate(vector.indices): - if expected_typecode == "b": - dense_values[index] = int(vector.values[i]) - else: - dense_values[index] = vector.values[i] - expected_value = array.array(expected_typecode, dense_values) - self.assertEqual(fetched_value, expected_value) - self.assertEqual(fetched_value.typecode, expected_typecode) - - def __test_insert_and_fetch_sparse( - self, vector, column_name, expected_typecode - ): +import oracledb +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(skip_unless_sparse_vectors_supported): + pass + + +def _test_insert_and_fetch(cursor, vector, column_name, expected_typecode): + """ + Test inserting sparse and fetching from a dense vector column. 
+ """ + cursor.execute("delete from TestVectors") + cursor.execute( + f""" + insert into TestVectors (IntCol, {column_name}) + values(1, :vector) + """, + vector=vector, + ) + cursor.connection.commit() + cursor.execute(f"select {column_name} from TestVectors") + (fetched_value,) = cursor.fetchone() + dense_values = [0 for _ in range(vector.num_dimensions)] + for i, index in enumerate(vector.indices): + if expected_typecode == "b": + dense_values[index] = int(vector.values[i]) + else: + dense_values[index] = vector.values[i] + expected_value = array.array(expected_typecode, dense_values) + assert fetched_value == expected_value + assert fetched_value.typecode == expected_typecode + + +def _test_insert_and_fetch_sparse( + cursor, vector, column_name, expected_typecode +): + """ + Test inserting and fetching from a sparse vector column. + """ + cursor.execute("delete from TestSparseVectors") + cursor.execute( + f""" + insert into TestSparseVectors (IntCol, {column_name}) + values(1, :vector) + """, + vector=vector, + ) + cursor.connection.commit() + cursor.execute(f"select {column_name} from TestSparseVectors") + (fetched_value,) = cursor.fetchone() + expected_value = vector.values + if fetched_value.values.typecode == "b": + expected_value = array.array("b", [int(i) for i in vector.values]) + expected_indices = vector.indices + expected_num_dimensions = vector.num_dimensions + assert fetched_value.values == expected_value + assert fetched_value.indices == expected_indices + assert fetched_value.num_dimensions == expected_num_dimensions + + +def _fetch_with_vector( + cursor, + vector, + column_name, + dimensions, + vector_format, + expected_typecode, +): + """ + Test fetching a vector with vector() function. + """ + cursor.execute("delete from TestSparseVectors") + cursor.execute( + f""" + insert into TestSparseVectors (IntCol, {column_name}) + values(1, :vector) + """, + vector=vector, + ) + cursor.execute( + f""" + select + vector({column_name}, {dimensions}, {vector_format}, DENSE) + from TestSparseVectors """ - Test inserting and fetching from a sparse vector column. 
+ ) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, array.array) + assert fetched_value.typecode == expected_typecode + + cursor.execute( + f""" + select + vector({column_name}, {dimensions}, {vector_format}, SPARSE) + from TestSparseVectors """ - self.cursor.execute("delete from TestSparseVectors") - self.cursor.execute( - f""" - insert into TestSparseVectors (IntCol, {column_name}) - values(1, :vector) - """, - vector=vector, - ) - self.conn.commit() - self.cursor.execute(f"select {column_name} from TestSparseVectors") - (fetched_value,) = self.cursor.fetchone() - expected_value = vector.values - if fetched_value.values.typecode == "b": - expected_value = array.array("b", [int(i) for i in vector.values]) - expected_indices = vector.indices - expected_num_dimensions = vector.num_dimensions - self.assertEqual(fetched_value.values, expected_value) - self.assertEqual(fetched_value.indices, expected_indices) - self.assertEqual(fetched_value.num_dimensions, expected_num_dimensions) - - def __fetch_with_vector( - self, - vector, - column_name, - dimensions, - vector_format, - expected_typecode, - ): + ) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, oracledb.SparseVector) + assert fetched_value.values.typecode == expected_typecode + + +def test_7700(cursor): + "7700 - test binding in a sparse vector with oracledb.SparseVector" + vector = oracledb.SparseVector(3, [1], [9]) + cursor.execute("select :1 from dual", [vector]) + (fetched_value,) = cursor.fetchone() + assert isinstance(fetched_value, oracledb.SparseVector) + assert fetched_value.num_dimensions == vector.num_dimensions + assert fetched_value.indices == vector.indices + assert fetched_value.values == vector.values + + +def test_7701(cursor): + "7701 - test binding in a sparse vector of type float32" + vector = oracledb.SparseVector(3, [1], array.array("f", [0.5])) + cursor.execute("select :1 from dual", [vector]) + (fetched_value,) = cursor.fetchone() + assert fetched_value.values == vector.values + assert fetched_value.indices == vector.indices + assert fetched_value.num_dimensions == vector.num_dimensions + assert fetched_value.values.typecode == "f" + + +def test_7702(cursor): + "7702 - test binding in a sparse vector of type float64" + vector = oracledb.SparseVector(3, [1], array.array("d", [0.25])) + cursor.execute("select :1 from dual", [vector]) + (fetched_value,) = cursor.fetchone() + assert fetched_value.values == vector.values + assert fetched_value.indices == vector.indices + assert fetched_value.num_dimensions == vector.num_dimensions + assert fetched_value.values.typecode == "d" + assert isinstance(fetched_value, oracledb.SparseVector) + + +def test_7703(cursor): + "7703 - test binding in a sparse vector of type int8" + vector = oracledb.SparseVector(3, [1], array.array("b", [3])) + cursor.execute("select :1 from dual", [vector]) + (fetched_value,) = cursor.fetchone() + assert fetched_value.values == vector.values + assert fetched_value.indices == vector.indices + assert fetched_value.num_dimensions == vector.num_dimensions + assert fetched_value.values.typecode == "b" + assert isinstance(fetched_value, oracledb.SparseVector) + + +def test_7704(cursor): + "7704 - insert a float32 sparse vector into a float32 column" + value = oracledb.SparseVector( + 16, [1, 3, 5], array.array("f", [1.5, 0.25, 0.5]) + ) + _test_insert_and_fetch(cursor, value, "Vector32Col", "f") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector32Col", "f") + + +def test_7705(cursor): + "7705 - 
insert a float32 vector into a float64 column" + value = oracledb.SparseVector( + 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) + ) + _test_insert_and_fetch(cursor, value, "Vector64Col", "d") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector64Col", "d") + + +def test_7706(cursor): + "7706 - insert a float32 vector into a flexible format column" + value = oracledb.SparseVector( + 16, [1, 3, 5], array.array("f", [1.5, 0.25, 0.5]) + ) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "f") + _test_insert_and_fetch_sparse(cursor, value, "SparseVectorFlexAllCol", "f") + + +def test_7707(cursor): + "7707 - insert a float64 vector into a float64 column" + value = oracledb.SparseVector( + 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) + ) + _test_insert_and_fetch(cursor, value, "Vector64Col", "d") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector64Col", "d") + + +def test_7708(cursor): + "7708 - insert float64 vector into a float32 column" + value = oracledb.SparseVector( + 16, [1, 3, 5], array.array("f", [1.5, 0.25, 0.5]) + ) + _test_insert_and_fetch(cursor, value, "Vector32Col", "f") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector32Col", "f") + + +def test_7709(cursor): + "7709 - insert float64 vector into a flexible type column" + value = oracledb.SparseVector( + 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) + ) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "d") + _test_insert_and_fetch_sparse(cursor, value, "SparseVectorFlexAllCol", "d") + + +def test_7710(cursor, test_env): + "7710 - insert a vector with an invalid size" + cursor.execute("delete from TestVectors") + cursor.execute("delete from TestSparseVectors") + statements = [ """ - Test fetching a vector with vector() function. + insert into TestVectors (IntCol, Vector64Col) + values(1, :1) + """, """ - self.cursor.execute("delete from TestSparseVectors") - self.cursor.execute( - f""" - insert into TestSparseVectors (IntCol, {column_name}) - values(1, :vector) - """, - vector=vector, - ) - self.cursor.execute( - f""" - select - vector({column_name}, {dimensions}, {vector_format}, DENSE) - from TestSparseVectors - """ - ) - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, array.array) - self.assertEqual(fetched_value.typecode, expected_typecode) - - self.cursor.execute( - f""" - select - vector({column_name}, {dimensions}, {vector_format}, SPARSE) - from TestSparseVectors - """ - ) - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, oracledb.SparseVector) - self.assertEqual(fetched_value.values.typecode, expected_typecode) - - def test_7700(self): - "7700 - test binding in a sparse vector with oracledb.SparseVector" - vector = oracledb.SparseVector(3, [1], [9]) - self.cursor.execute("select :1 from dual", [vector]) - (fetched_value,) = self.cursor.fetchone() - self.assertIsInstance(fetched_value, oracledb.SparseVector) - self.assertEqual(fetched_value.num_dimensions, vector.num_dimensions) - self.assertEqual(fetched_value.indices, vector.indices) - self.assertEqual(fetched_value.values, vector.values) - - def test_7701(self): - "7701 - test binding in a sparse vector of type float32" - vector = oracledb.SparseVector(3, [1], array.array("f", [0.5])) - self.cursor.execute("select :1 from dual", [vector]) - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(fetched_value.values, vector.values) - self.assertEqual(fetched_value.indices, vector.indices) - self.assertEqual(fetched_value.num_dimensions, 
vector.num_dimensions) - self.assertEqual(fetched_value.values.typecode, "f") - - def test_7702(self): - "7702 - test binding in a sparse vector of type float64" - vector = oracledb.SparseVector(3, [1], array.array("d", [0.25])) - self.cursor.execute("select :1 from dual", [vector]) - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(fetched_value.values, vector.values) - self.assertEqual(fetched_value.indices, vector.indices) - self.assertEqual(fetched_value.num_dimensions, vector.num_dimensions) - self.assertEqual(fetched_value.values.typecode, "d") - self.assertIsInstance(fetched_value, oracledb.SparseVector) - - def test_7703(self): - "7703 - test binding in a sparse vector of type int8" - vector = oracledb.SparseVector(3, [1], array.array("b", [3])) - self.cursor.execute("select :1 from dual", [vector]) - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(fetched_value.values, vector.values) - self.assertEqual(fetched_value.indices, vector.indices) - self.assertEqual(fetched_value.num_dimensions, vector.num_dimensions) - self.assertEqual(fetched_value.values.typecode, "b") - self.assertIsInstance(fetched_value, oracledb.SparseVector) - - def test_7704(self): - "7704 - insert a float32 sparse vector into a float32 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1.5, 0.25, 0.5]) - ) - self.__test_insert_and_fetch(value, "Vector32Col", "f") - self.__test_insert_and_fetch_sparse(value, "SparseVector32Col", "f") + insert into TestSparseVectors (IntCol, SparseVector64Col) + values(2, :1) + """, + ] + for statement in statements: + for num_elems in [4, 20]: + vector = oracledb.SparseVector( + num_elems, [2, 3], array.array("f", [6.54, 9.6]) + ) + with test_env.assert_raises_full_code("ORA-51803"): + cursor.execute(statement, [vector]) + + +def test_7711(cursor): + "7711 - verify fetch info for vectors" + attr_names = [ + "name", + "type_code", + "vector_dimensions", + "vector_format", + "vector_is_sparse", + ] + expected_values = [ + ["INTCOL", oracledb.DB_TYPE_NUMBER, None, None, None], + [ + "SPARSEVECTORFLEXALLCOL", + oracledb.DB_TYPE_VECTOR, + None, + None, + True, + ], + [ + "SPARSEVECTORFLEXTYPECOL", + oracledb.DB_TYPE_VECTOR, + 2, + None, + True, + ], + [ + "SPARSEVECTORFLEX8COL", + oracledb.DB_TYPE_VECTOR, + None, + oracledb.VECTOR_FORMAT_INT8, + True, + ], + [ + "SPARSEVECTORFLEX32COL", + oracledb.DB_TYPE_VECTOR, + None, + oracledb.VECTOR_FORMAT_FLOAT32, + True, + ], + [ + "SPARSEVECTORFLEX64COL", + oracledb.DB_TYPE_VECTOR, + None, + oracledb.VECTOR_FORMAT_FLOAT64, + True, + ], + [ + "SPARSEVECTOR8COL", + oracledb.DB_TYPE_VECTOR, + 16, + oracledb.VECTOR_FORMAT_INT8, + True, + ], + [ + "SPARSEVECTOR32COL", + oracledb.DB_TYPE_VECTOR, + 16, + oracledb.VECTOR_FORMAT_FLOAT32, + True, + ], + [ + "SPARSEVECTOR64COL", + oracledb.DB_TYPE_VECTOR, + 16, + oracledb.VECTOR_FORMAT_FLOAT64, + True, + ], + ] + cursor.execute("select * from TestSparseVectors") + values = [[getattr(i, n) for n in attr_names] for i in cursor.description] + assert values == expected_values + assert cursor.description[6].vector_format is oracledb.VectorFormat.INT8 - def test_7705(self): - "7705 - insert a float32 vector into a float64 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) - ) - self.__test_insert_and_fetch(value, "Vector64Col", "d") - self.__test_insert_and_fetch_sparse(value, "SparseVector64Col", "d") - def test_7706(self): - "7706 - insert a float32 vector into a flexible format column" - value = 
oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1.5, 0.25, 0.5]) - ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "f") - self.__test_insert_and_fetch_sparse( - value, "SparseVectorFlexAllCol", "f" - ) +def test_7712(cursor): + "7712 - insert an int8 vector into an int8 column" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("f", [1, 0, 5])) + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector8Col", "b") - def test_7707(self): - "7707 - insert a float64 vector into a float64 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) - ) - self.__test_insert_and_fetch(value, "Vector64Col", "d") - self.__test_insert_and_fetch_sparse(value, "SparseVector64Col", "d") - def test_7708(self): - "7708 - insert float64 vector into a float32 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1.5, 0.25, 0.5]) - ) - self.__test_insert_and_fetch(value, "Vector32Col", "f") - self.__test_insert_and_fetch_sparse(value, "SparseVector32Col", "f") +def test_7713(cursor): + "7713 - insert an int8 vector into a float32 column" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("f", [1, 0, 5])) + _test_insert_and_fetch(cursor, value, "Vector32Col", "f") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector32Col", "f") - def test_7709(self): - "7709 - insert float64 vector into a flexible type column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) - ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "d") - self.__test_insert_and_fetch_sparse( - value, "SparseVectorFlexAllCol", "d" - ) - def test_7710(self): - "7710 - insert a vector with an invalid size" - self.cursor.execute("delete from TestVectors") - self.cursor.execute("delete from TestSparseVectors") - statements = [ - """ - insert into TestVectors (IntCol, Vector64Col) - values(1, :1) - """, - """ - insert into TestSparseVectors (IntCol, SparseVector64Col) - values(2, :1) - """, - ] - for statement in statements: - for num_elems in [4, 20]: - vector = oracledb.SparseVector( - num_elems, [2, 3], array.array("f", [6.54, 9.6]) - ) - with self.assertRaisesFullCode("ORA-51803"): - self.cursor.execute(statement, [vector]) - - def test_7711(self): - "7711 - verify fetch info for vectors" - attr_names = [ - "name", - "type_code", - "vector_dimensions", - "vector_format", - "vector_is_sparse", - ] - expected_values = [ - ["INTCOL", oracledb.DB_TYPE_NUMBER, None, None, None], - [ - "SPARSEVECTORFLEXALLCOL", - oracledb.DB_TYPE_VECTOR, - None, - None, - True, - ], - [ - "SPARSEVECTORFLEXTYPECOL", - oracledb.DB_TYPE_VECTOR, - 2, - None, - True, - ], - [ - "SPARSEVECTORFLEX8COL", - oracledb.DB_TYPE_VECTOR, - None, - oracledb.VECTOR_FORMAT_INT8, - True, - ], - [ - "SPARSEVECTORFLEX32COL", - oracledb.DB_TYPE_VECTOR, - None, - oracledb.VECTOR_FORMAT_FLOAT32, - True, - ], - [ - "SPARSEVECTORFLEX64COL", - oracledb.DB_TYPE_VECTOR, - None, - oracledb.VECTOR_FORMAT_FLOAT64, - True, - ], - [ - "SPARSEVECTOR8COL", - oracledb.DB_TYPE_VECTOR, - 16, - oracledb.VECTOR_FORMAT_INT8, - True, - ], - [ - "SPARSEVECTOR32COL", - oracledb.DB_TYPE_VECTOR, - 16, - oracledb.VECTOR_FORMAT_FLOAT32, - True, - ], - [ - "SPARSEVECTOR64COL", - oracledb.DB_TYPE_VECTOR, - 16, - oracledb.VECTOR_FORMAT_FLOAT64, - True, - ], - ] - self.cursor.execute("select * from TestSparseVectors") - values = [ - [getattr(i, n) for n in attr_names] - for i in self.cursor.description - ] - 
self.assertEqual(values, expected_values) - self.assertIs( - self.cursor.description[6].vector_format, - oracledb.VectorFormat.INT8, - ) +def test_7714(cursor): + "7714 - insert an int8 vector into a float64 column" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("b", [1, 0, 5])) + _test_insert_and_fetch(cursor, value, "Vector64Col", "d") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector64Col", "d") - def test_7712(self): - "7712 - insert an int8 vector into an int8 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1, 0, 5]) - ) - self.__test_insert_and_fetch(value, "Vector8Col", "b") - self.__test_insert_and_fetch_sparse(value, "SparseVector8Col", "b") - def test_7713(self): - "7713 - insert an int8 vector into a float32 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1, 0, 5]) - ) - self.__test_insert_and_fetch(value, "Vector32Col", "f") - self.__test_insert_and_fetch_sparse(value, "SparseVector32Col", "f") +def test_7715(cursor): + "7715 - insert an int8 vector into a flexible column" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("b", [1, 0, 5])) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "b") + _test_insert_and_fetch_sparse(cursor, value, "SparseVectorFlexAllCol", "b") - def test_7714(self): - "7714 - insert an int8 vector into a float64 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("b", [1, 0, 5]) - ) - self.__test_insert_and_fetch(value, "Vector64Col", "d") - self.__test_insert_and_fetch_sparse(value, "SparseVector64Col", "d") - def test_7715(self): - "7715 - insert an int8 vector into a flexible column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("b", [1, 0, 5]) - ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "b") - self.__test_insert_and_fetch_sparse( - value, "SparseVectorFlexAllCol", "b" - ) +def test_7716(cursor): + "7716 - insert a float32 vector into an int8 column" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("f", [1, 0, 5])) + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector8Col", "b") - def test_7716(self): - "7716 - insert a float32 vector into an int8 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1, 0, 5]) - ) - self.__test_insert_and_fetch(value, "Vector8Col", "b") - self.__test_insert_and_fetch_sparse(value, "SparseVector8Col", "b") - def test_7717(self): - "7717 - insert a float64 vector into an int8 column" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("d", [1, 0, 5]) - ) - self.__test_insert_and_fetch(value, "Vector8Col", "b") - self.__test_insert_and_fetch_sparse(value, "SparseVector8Col", "b") +def test_7717(cursor): + "7717 - insert a float64 vector into an int8 column" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("d", [1, 0, 5])) + _test_insert_and_fetch(cursor, value, "Vector8Col", "b") + _test_insert_and_fetch_sparse(cursor, value, "SparseVector8Col", "b") - def test_7718(self): - "7718 - test dml returning vector type" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1, 0, 5]) - ) - out_var = self.cursor.var(oracledb.DB_TYPE_VECTOR) - self.cursor.execute("delete from TestSparseVectors") - self.cursor.execute( - """ - insert into TestSparseVectors (IntCol, SparseVectorFlex32Col) - values (1, :value) - returning SparseVectorFlex32Col into :out_value - """, - [value, out_var], - ) - self.conn.commit() - vector = out_var.getvalue()[0] - 
self.assertEqual(vector.values, value.values) - self.assertEqual(vector.indices, value.indices) - self.assertEqual(vector.num_dimensions, value.num_dimensions) - - def test_7719(self): - "7719 - test handling of NULL vector value" - self.cursor.execute("delete from TestSparseVectors") - self.cursor.execute( - "insert into TestSparseVectors (IntCol) values (1)" - ) - self.conn.commit() - self.cursor.execute( - "select SparseVectorFlexTypeCol from TestSparseVectors" - ) - (fetched_value,) = self.cursor.fetchone() - self.assertIsNone(fetched_value) - - def test_7720(self): - "7720 - insert a float32 vector into an int8 column (negative)" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [-130, 400, 5]) - ) - with self.assertRaisesFullCode("ORA-51806"): - self.__test_insert_and_fetch(value, "Vector8Col", "b") - with self.assertRaisesFullCode("ORA-51806"): - self.__test_insert_and_fetch_sparse(value, "SparseVector8Col", "b") - - def test_7721(self): - "7721 - insert a float32 vector with 65,533 dimensions" - value = oracledb.SparseVector( - 65533, [1, 3, 5], array.array("f", [1, 0, 5]) - ) - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "f") - self.__test_insert_and_fetch_sparse( - value, "SparseVectorFlexAllCol", "f" - ) - def test_7722(self): - "7722 - insert vectors with different dimensions" - for dim in [30, 70, 255, 256, 65534, 65535]: - for typ in ["f", "d", "b"]: - with self.subTest(dim=dim, typ=typ): - element_value = 3 if typ == "b" else 1.5 - value = oracledb.SparseVector( - dim, [1, 3, 5], array.array(typ, [element_value] * 3) - ) - self.__test_insert_and_fetch( - value, "VectorFlexAllCol", typ - ) - self.__test_insert_and_fetch_sparse( - value, "SparseVectorFlexAllCol", typ - ) - - def test_7723(self): - "7723 - insert and fetch VECTOR data using strings" - values = [16, [1, 3, 5], [1, 0, 5]] - vector = oracledb.SparseVector(*values) - self.cursor.execute("delete from TestSparseVectors") - self.cursor.execute( - """ - insert into TestSparseVectors (IntCol, SparseVectorFlexAllCol) - values(1, :value) - """, - value=str(vector), - ) - - def type_handler(cursor, metadata): - if metadata.name == "SPARSEVECTORFLEXALLCOL": - return cursor.var( - oracledb.DB_TYPE_LONG, arraysize=cursor.arraysize - ) - - self.cursor.outputtypehandler = type_handler +def test_7718(conn, cursor): + "7718 - test dml returning vector type" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("f", [1, 0, 5])) + out_var = cursor.var(oracledb.DB_TYPE_VECTOR) + cursor.execute("delete from TestSparseVectors") + cursor.execute( + """ + insert into TestSparseVectors (IntCol, SparseVectorFlex32Col) + values (1, :value) + returning SparseVectorFlex32Col into :out_value + """, + [value, out_var], + ) + conn.commit() + vector = out_var.getvalue()[0] + assert vector.values == value.values + assert vector.indices == value.indices + assert vector.num_dimensions == value.num_dimensions + + +def test_7719(conn, cursor): + "7719 - test handling of NULL vector value" + cursor.execute("delete from TestSparseVectors") + cursor.execute("insert into TestSparseVectors (IntCol) values (1)") + conn.commit() + cursor.execute("select SparseVectorFlexTypeCol from TestSparseVectors") + (fetched_value,) = cursor.fetchone() + assert fetched_value is None + + +def test_7720(cursor, test_env): + "7720 - insert a float32 vector into an int8 column (negative)" + value = oracledb.SparseVector( + 16, [1, 3, 5], array.array("f", [-130, 400, 5]) + ) + with test_env.assert_raises_full_code("ORA-51806"): + 
_test_insert_and_fetch(cursor, value, "Vector8Col", "b") + with test_env.assert_raises_full_code("ORA-51806"): + _test_insert_and_fetch_sparse(cursor, value, "SparseVector8Col", "b") + + +def test_7721(cursor): + "7721 - insert a float32 vector with 65,533 dimensions" + value = oracledb.SparseVector( + 65533, [1, 3, 5], array.array("f", [1, 0, 5]) + ) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "f") + _test_insert_and_fetch_sparse(cursor, value, "SparseVectorFlexAllCol", "f") + + +def test_7722(cursor): + "7722 - insert vectors with different dimensions" + for dim in [30, 70, 255, 256, 65534, 65535]: + for typ in ["f", "d", "b"]: + element_value = 3 if typ == "b" else 1.5 + value = oracledb.SparseVector( + dim, [1, 3, 5], array.array(typ, [element_value] * 3) + ) + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", typ) + _test_insert_and_fetch_sparse( + cursor, value, "SparseVectorFlexAllCol", typ + ) - self.cursor.execute( - "select SparseVectorFlexAllCol from TestSparseVectors" - ) - (fetched_value,) = self.cursor.fetchone() - self.assertEqual(json.loads(fetched_value), values) - - def test_7724(self): - "7724 - insert vectors with flexible dimensions and conversion" - for dim in [30, 255, 256, 257, 32768, 65535]: - for source_type in ["f", "d", "b"]: - for target_type in ["f", "d", "b"]: - with self.subTest( - dim=dim, - source_type=source_type, - target_type=target_type, - ): - if target_type == "f": - target_col = "VectorFlex32Col" - elif target_type == "d": - target_col = "VectorFlex64Col" - else: - target_col = "VectorFlex8Col" - element_value = 4 if source_type == "b" else 2.25 - value = oracledb.SparseVector( - dim, - [1, 3, 7, 9], - array.array(source_type, [element_value] * 4), - ) - self.__test_insert_and_fetch( - value, target_col, target_type - ) - self.__test_insert_and_fetch_sparse( - value, f"Sparse{target_col}", target_type - ) - - def test_7725(self): - "7725 - test binding a vector with inf values (negative)" - value = oracledb.SparseVector( - 16, - [1, 3, 5], - array.array("d", [float("inf"), float("-inf"), float("-inf")]), - ) - with self.assertRaisesFullCode("ORA-51805", "ORA-51831"): - self.cursor.execute("select :1 from dual", [value]) - def test_7726(self): - "7726 - test setting a sparse vector to a vector variable" - value = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1, 0, 5]) - ) - var = self.cursor.var(oracledb.DB_TYPE_VECTOR) - var.setvalue(0, value) - vector = var.getvalue() - self.assertEqual(vector.values, value.values) - self.assertEqual(vector.indices, value.indices) - self.assertEqual(vector.num_dimensions, value.num_dimensions) - - def test_7727(self): - "7727 - fetch JSON value with an embedded vector" - self.cursor.execute("delete from TestSparseVectors") - vector = oracledb.SparseVector( - 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) - ) - self.cursor.execute( - """ - insert into TestSparseVectors (IntCol, SparseVector64Col) - values (1, :1) - """, - [vector], - ) - self.cursor.execute( - """ - select json_object( - 'id': 7732, - 'vector' : vector(SparseVector64Col, 16, float64, sparse) - returning json - ) from TestSparseVectors - """ - ) - (result,) = self.cursor.fetchone() - fetched_vector = result["vector"] - self.assertIsInstance(fetched_vector, oracledb.SparseVector) - self.assertEqual(fetched_vector.indices, vector.indices) - self.assertEqual(fetched_vector.values, vector.values) - self.assertEqual(fetched_vector.num_dimensions, vector.num_dimensions) - - def test_7728(self): - "7728 - executemany() 
without setinputsizes()" - self.cursor.execute("delete from TestSparseVectors") - vector = oracledb.SparseVector( - 16, [1, 3, 5], array.array("f", [1, 0, 5]) - ) - values = [vector, [0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0]] - self.cursor.executemany( - """ - insert into TestSparseVectors (IntCol, SparseVector32Col) - values (:1, :2) - """, - list(enumerate(values)), - ) - self.cursor.execute( - "select SparseVector32Col from TestSparseVectors order by IntCol" - ) - (fetched_vector1,), (fetched_vector2,) = self.cursor.fetchall() - self.assertEqual(fetched_vector1.values, vector.values) - self.assertEqual(fetched_vector1.indices, vector.indices) - self.assertEqual(fetched_vector1.num_dimensions, vector.num_dimensions) - self.assertEqual( - fetched_vector2.values, array.array("f", [2.0, 1.0, 4.0]) - ) - self.assertEqual(fetched_vector2.indices, array.array("I", [3, 9, 11])) - self.assertEqual(fetched_vector2.num_dimensions, 16) - - def test_7729(self): - "7729 - executemany() with setinputsizes()" - self.cursor.execute("delete from TestSparseVectors") - vector = oracledb.SparseVector( - 16, [1, 3, 5], array.array("d", [1, 0, 5]) - ) - values = [[144, 0, 1000], vector] - self.cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR) - self.cursor.executemany( - """ - insert into TestSparseVectors (IntCol, SparseVectorFlex64Col) - values (:1, :2) - """, - list(enumerate(values)), - ) - self.cursor.execute( - """ - select SparseVectorFlex64Col - from TestSparseVectors order by IntCol - """ - ) - (fetched_vector1,), (fetched_vector2,) = self.cursor.fetchall() - self.assertEqual( - fetched_vector1.values, array.array("d", [144.0, 1000.0]) - ) - self.assertEqual(fetched_vector1.indices, array.array("I", [0, 2])) - self.assertEqual(fetched_vector1.num_dimensions, 3) - self.assertEqual(fetched_vector2.values, vector.values) - self.assertEqual(fetched_vector2.indices, vector.indices) - self.assertEqual(fetched_vector2.num_dimensions, vector.num_dimensions) - - def test_7730(self): - "7730 - vector with zero dimensions" - self.cursor.setinputsizes(oracledb.DB_TYPE_VECTOR) - vector = oracledb.SparseVector(4, [], []) - with self.assertRaisesFullCode("ORA-51803", "ORA-21560"): - self.cursor.execute("select :1", [vector]) - - def test_7731(self): - "7731 - test inserting a vector as a string and fetching it" - self.cursor.execute("delete from TestSparseVectors") - self.cursor.execute( - """ - insert into TestSparseVectors (IntCol, SparseVectorFlexAllCol) - values (1, '[4, [1, 3], [1.0, 2.0]]') - """ - ) - self.cursor.execute( - "select SparseVectorFlexAllCol from TestSparseVectors" - ) - vector = self.cursor.fetchone()[0] - self.assertEqual(vector.values, array.array("f", [1, 2])) - self.assertEqual(vector.num_dimensions, 4) - self.assertEqual(vector.indices, array.array("I", [1, 3])) - - def test_7732(self): - "7732 - SparseVector() with invalid values" - # pass strings instead of number or list/array.array - with self.assertRaises(TypeError): - oracledb.SparseVector("10", [1, 2], [1.5, 3.5]) - with self.assertRaises(TypeError): - oracledb.SparseVector(10, "[1, 2]", [1.5, 3.5]) - with self.assertRaises(TypeError): - oracledb.SparseVector(10, [1, 2], "[1.5, 3.5]") - - # insert matrix - with self.assertRaises(TypeError): - oracledb.SparseVector(10, [[1, 2]], [1.5, 3.5]) - with self.assertRaises(TypeError): - oracledb.SparseVector(10, [1, 2], [[1.5, 3.5]]) - # use num_dimensions as a list - with self.assertRaises(TypeError): - oracledb.SparseVector([10], [1, 2], [1.5, 3.5]) - # use num_dimensions as a 
float - value = oracledb.SparseVector(10.4, [1, 2], [1.5, 3.5]) - self.assertEqual(value.num_dimensions, 10) - - # negative index - with self.assertRaises(OverflowError): - oracledb.SparseVector(10, [-1], [1.5]) - # negative num_dimensions - with self.assertRaises(OverflowError): - oracledb.SparseVector(-10, [1], [3.5]) - # use float index - with self.assertRaises(TypeError): - oracledb.SparseVector(10, [2.4], [3.5]) - - def test_7733(self): - "7733 - SparseVector() with indices and values of different length" - with self.assertRaises(TypeError): - oracledb.SparseVector(10, [1], [1.5, 3.5]) - with self.assertRaises(TypeError): - oracledb.SparseVector(10, [1, 2, 3, 4], [6.75]) - - def test_7734(self): - "7734 - declare and insert an empty SparseVector" - value = oracledb.SparseVector(0, [], []) - self.assertEqual(value.values, array.array("d")) - self.assertEqual(value.indices, array.array("I")) - self.assertEqual(value.num_dimensions, 0) - with self.assertRaisesFullCode("ORA-51803", "ORA-21560", "ORA-51862"): - self.__test_insert_and_fetch(value, "VectorFlexAllCol", "d") - with self.assertRaisesFullCode("ORA-51803", "ORA-21560", "ORA-51862"): - self.__test_insert_and_fetch_sparse( - value, "SparseVectorFlexAllCol", "d" +def test_7723(cursor): + "7723 - insert and fetch VECTOR data using strings" + values = [16, [1, 3, 5], [1, 0, 5]] + vector = oracledb.SparseVector(*values) + cursor.execute("delete from TestSparseVectors") + cursor.execute( + """ + insert into TestSparseVectors (IntCol, SparseVectorFlexAllCol) + values(1, :value) + """, + value=str(vector), + ) + + def type_handler(cursor, metadata): + if metadata.name == "SPARSEVECTORFLEXALLCOL": + return cursor.var( + oracledb.DB_TYPE_LONG, arraysize=cursor.arraysize ) - def test_7735(self): - "7735 - select with vector()" - dense_vector = array.array( - "f", [1, 2, 3, 4, 0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0] - ) - sparse_vector = oracledb.SparseVector(16, [1], array.array("f", [9])) - column_names = [ - "SparseVector8Col", - "SparseVector32Col", - "SparseVector64Col", - "SparseVectorFlex8Col", - "SparseVectorFlex32Col", - "SparseVectorFlex64Col", - "SparseVectorFlexAllCol", - ] - for vector in [dense_vector, sparse_vector]: - for column_name in column_names: - with self.subTest(vector=vector, column_name=column_name): - self.__fetch_with_vector( - vector, column_name, 16, "INT8", "b" - ) - self.__fetch_with_vector( - vector, column_name, 16, "FLOAT32", "f" - ) - self.__fetch_with_vector( - vector, column_name, 16, "FLOAT64", "d" - ) - - # fixed dimension columns - dense_vector = array.array("f", [1, 2]) - sparse_vector = oracledb.SparseVector(2, [1], array.array("f", [1])) - for vector in [dense_vector, sparse_vector]: - for column_name in column_names[3:]: - with self.subTest(vector=vector, column_name=column_name): - self.__fetch_with_vector( - vector, column_name, 2, "INT8", "b" - ) - self.__fetch_with_vector( - vector, column_name, 2, "FLOAT32", "f" - ) - self.__fetch_with_vector( - vector, column_name, 2, "FLOAT64", "d" - ) - - def test_7736(self): - "7736 - test from_vector() with returning and vector storage format" - self.cursor.execute("delete from TestSparseVectors") - values = [16, [1, 2, 15], [2, 45.5, 73.25]] - vector = oracledb.SparseVector(*values) - dense_vector = [0, 2, 45.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73.25] - column_name = "SparseVector64Col" - self.cursor.execute( - f""" - insert into TestSparseVectors (IntCol, {column_name}) - values(1, :vector) - """, - vector=vector, - ) - self.cursor.execute( - f""" - 
select from_vector({column_name} returning clob format sparse) - from TestSparseVectors - """ - ) - (lob,) = self.cursor.fetchone() - self.assertEqual(json.loads(lob.read()), values) - self.cursor.execute( - f""" - select from_vector({column_name} returning clob format dense) - from TestSparseVectors - """ - ) - (lob,) = self.cursor.fetchone() - self.assertEqual(json.loads(lob.read()), dense_vector) - self.cursor.execute( - f""" - select from_vector({column_name} returning clob) - from TestSparseVectors - """ - ) - (lob,) = self.cursor.fetchone() - self.assertEqual(json.loads(lob.read()), values) - self.cursor.execute( - f""" - select from_vector({column_name} returning varchar2 format sparse) - from TestSparseVectors - """ - ) - self.assertEqual(json.loads(self.cursor.fetchone()[0]), values) - self.cursor.execute( - f""" - select from_vector({column_name} returning varchar2 format dense) - from TestSparseVectors - """ - ) - self.assertEqual(json.loads(self.cursor.fetchone()[0]), dense_vector) - self.cursor.execute( - f""" - select from_vector({column_name} returning varchar2) - from TestSparseVectors - """ - ) - self.assertEqual(json.loads(self.cursor.fetchone()[0]), values) + cursor.outputtypehandler = type_handler + + cursor.execute("select SparseVectorFlexAllCol from TestSparseVectors") + (fetched_value,) = cursor.fetchone() + assert json.loads(fetched_value) == values + + +def test_7724(cursor): + "7724 - insert vectors with flexible dimensions and conversion" + for dim in [30, 255, 256, 257, 32768, 65535]: + for source_type in ["f", "d", "b"]: + for target_type in ["f", "d", "b"]: + if target_type == "f": + target_col = "VectorFlex32Col" + elif target_type == "d": + target_col = "VectorFlex64Col" + else: + target_col = "VectorFlex8Col" + element_value = 4 if source_type == "b" else 2.25 + value = oracledb.SparseVector( + dim, + [1, 3, 7, 9], + array.array(source_type, [element_value] * 4), + ) + _test_insert_and_fetch(cursor, value, target_col, target_type) + _test_insert_and_fetch_sparse( + cursor, value, f"Sparse{target_col}", target_type + ) -if __name__ == "__main__": - test_env.run_test_cases() +def test_7725(cursor, test_env): + "7725 - test binding a vector with inf values (negative)" + value = oracledb.SparseVector( + 16, + [1, 3, 5], + array.array("d", [float("inf"), float("-inf"), float("-inf")]), + ) + with test_env.assert_raises_full_code("ORA-51805", "ORA-51831"): + cursor.execute("select :1 from dual", [value]) + + +def test_7726(cursor): + "7726 - test setting a sparse vector to a vector variable" + value = oracledb.SparseVector(16, [1, 3, 5], array.array("f", [1, 0, 5])) + var = cursor.var(oracledb.DB_TYPE_VECTOR) + var.setvalue(0, value) + vector = var.getvalue() + assert vector.values == value.values + assert vector.indices == value.indices + assert vector.num_dimensions == value.num_dimensions + + +def test_7727(cursor): + "7727 - fetch JSON value with an embedded vector" + cursor.execute("delete from TestSparseVectors") + vector = oracledb.SparseVector( + 16, [1, 3, 5], array.array("d", [1.5, 0.25, 0.5]) + ) + cursor.execute( + """ + insert into TestSparseVectors (IntCol, SparseVector64Col) + values (1, :1) + """, + [vector], + ) + cursor.execute( + """ + select json_object( + 'id': 7732, + 'vector' : vector(SparseVector64Col, 16, float64, sparse) + returning json + ) from TestSparseVectors + """ + ) + (result,) = cursor.fetchone() + fetched_vector = result["vector"] + assert isinstance(fetched_vector, oracledb.SparseVector) + assert fetched_vector.indices == 
vector.indices + assert fetched_vector.values == vector.values + assert fetched_vector.num_dimensions == vector.num_dimensions + + +def test_7728(cursor): + "7728 - executemany() without setinputsizes()" + cursor.execute("delete from TestSparseVectors") + vector = oracledb.SparseVector(16, [1, 3, 5], array.array("f", [1, 0, 5])) + values = [vector, [0, 0, 0, 2, 0, 0, 0, 0, 0, 1, 0, 4, 0, 0, 0, 0]] + cursor.executemany( + """ + insert into TestSparseVectors (IntCol, SparseVector32Col) + values (:1, :2) + """, + list(enumerate(values)), + ) + cursor.execute( + "select SparseVector32Col from TestSparseVectors order by IntCol" + ) + (fetched_vector1,), (fetched_vector2,) = cursor.fetchall() + assert fetched_vector1.values == vector.values + assert fetched_vector1.indices == vector.indices + assert fetched_vector1.num_dimensions == vector.num_dimensions + assert fetched_vector2.values == array.array("f", [2.0, 1.0, 4.0]) + assert fetched_vector2.indices == array.array("I", [3, 9, 11]) + assert fetched_vector2.num_dimensions == 16 + + +def test_7729(cursor): + "7729 - executemany() with setinputsizes()" + cursor.execute("delete from TestSparseVectors") + vector = oracledb.SparseVector(16, [1, 3, 5], array.array("d", [1, 0, 5])) + values = [[144, 0, 1000], vector] + cursor.setinputsizes(None, oracledb.DB_TYPE_VECTOR) + cursor.executemany( + """ + insert into TestSparseVectors (IntCol, SparseVectorFlex64Col) + values (:1, :2) + """, + list(enumerate(values)), + ) + cursor.execute( + """ + select SparseVectorFlex64Col + from TestSparseVectors order by IntCol + """ + ) + (fetched_vector1,), (fetched_vector2,) = cursor.fetchall() + assert fetched_vector1.values == array.array("d", [144.0, 1000.0]) + assert fetched_vector1.indices == array.array("I", [0, 2]) + assert fetched_vector1.num_dimensions == 3 + assert fetched_vector2.values == vector.values + assert fetched_vector2.indices == vector.indices + assert fetched_vector2.num_dimensions == vector.num_dimensions + + +def test_7730(cursor, test_env): + "7730 - vector with zero dimensions" + cursor.setinputsizes(oracledb.DB_TYPE_VECTOR) + vector = oracledb.SparseVector(4, [], []) + with test_env.assert_raises_full_code("ORA-51803", "ORA-21560"): + cursor.execute("select :1", [vector]) + + +def test_7731(cursor): + "7731 - test inserting a vector as a string and fetching it" + cursor.execute("delete from TestSparseVectors") + cursor.execute( + """ + insert into TestSparseVectors (IntCol, SparseVectorFlexAllCol) + values (1, '[4, [1, 3], [1.0, 2.0]]') + """ + ) + cursor.execute("select SparseVectorFlexAllCol from TestSparseVectors") + vector = cursor.fetchone()[0] + assert vector.values == array.array("f", [1, 2]) + assert vector.num_dimensions == 4 + assert vector.indices == array.array("I", [1, 3]) + + +def test_7732(): + "7732 - SparseVector() with invalid values" + # pass strings instead of number or list/array.array + with pytest.raises(TypeError): + oracledb.SparseVector("10", [1, 2], [1.5, 3.5]) + with pytest.raises(TypeError): + oracledb.SparseVector(10, "[1, 2]", [1.5, 3.5]) + with pytest.raises(TypeError): + oracledb.SparseVector(10, [1, 2], "[1.5, 3.5]") + + # insert matrix + with pytest.raises(TypeError): + oracledb.SparseVector(10, [[1, 2]], [1.5, 3.5]) + with pytest.raises(TypeError): + oracledb.SparseVector(10, [1, 2], [[1.5, 3.5]]) + # use num_dimensions as a list + with pytest.raises(TypeError): + oracledb.SparseVector([10], [1, 2], [1.5, 3.5]) + # use num_dimensions as a float + value = oracledb.SparseVector(10.4, [1, 2], [1.5, 3.5]) + 
assert value.num_dimensions == 10 + + # negative index + with pytest.raises(OverflowError): + oracledb.SparseVector(10, [-1], [1.5]) + # negative num_dimensions + with pytest.raises(OverflowError): + oracledb.SparseVector(-10, [1], [3.5]) + # use float index + with pytest.raises(TypeError): + oracledb.SparseVector(10, [2.4], [3.5]) + + +def test_7733(): + "7733 - SparseVector() with indices and values of different length" + with pytest.raises(TypeError): + oracledb.SparseVector(10, [1], [1.5, 3.5]) + with pytest.raises(TypeError): + oracledb.SparseVector(10, [1, 2, 3, 4], [6.75]) + + +def test_7734(cursor, test_env): + "7734 - declare and insert an empty SparseVector" + value = oracledb.SparseVector(0, [], []) + assert value.values == array.array("d") + assert value.indices == array.array("I") + assert value.num_dimensions == 0 + with test_env.assert_raises_full_code( + "ORA-51803", "ORA-21560", "ORA-51862" + ): + _test_insert_and_fetch(cursor, value, "VectorFlexAllCol", "d") + with test_env.assert_raises_full_code( + "ORA-51803", "ORA-21560", "ORA-51862" + ): + _test_insert_and_fetch_sparse( + cursor, value, "SparseVectorFlexAllCol", "d" + ) + + +def test_7735(cursor): + "7735 - select with vector()" + dense_vector = array.array( + "f", [1, 2, 3, 4, 0, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0, 0] + ) + sparse_vector = oracledb.SparseVector(16, [1], array.array("f", [9])) + column_names = [ + "SparseVector8Col", + "SparseVector32Col", + "SparseVector64Col", + "SparseVectorFlex8Col", + "SparseVectorFlex32Col", + "SparseVectorFlex64Col", + "SparseVectorFlexAllCol", + ] + for vector in [dense_vector, sparse_vector]: + for column_name in column_names: + _fetch_with_vector(cursor, vector, column_name, 16, "INT8", "b") + _fetch_with_vector(cursor, vector, column_name, 16, "FLOAT32", "f") + _fetch_with_vector(cursor, vector, column_name, 16, "FLOAT64", "d") + + # fixed dimension columns + dense_vector = array.array("f", [1, 2]) + sparse_vector = oracledb.SparseVector(2, [1], array.array("f", [1])) + for vector in [dense_vector, sparse_vector]: + for column_name in column_names[3:]: + _fetch_with_vector(cursor, vector, column_name, 2, "INT8", "b") + _fetch_with_vector(cursor, vector, column_name, 2, "FLOAT32", "f") + _fetch_with_vector(cursor, vector, column_name, 2, "FLOAT64", "d") + + +def test_7736(cursor): + "7736 - test from_vector() with returning and vector storage format" + cursor.execute("delete from TestSparseVectors") + values = [16, [1, 2, 15], [2, 45.5, 73.25]] + vector = oracledb.SparseVector(*values) + dense_vector = [0, 2, 45.5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 73.25] + column_name = "SparseVector64Col" + cursor.execute( + f""" + insert into TestSparseVectors (IntCol, {column_name}) + values(1, :vector) + """, + vector=vector, + ) + cursor.execute( + f""" + select from_vector({column_name} returning clob format sparse) + from TestSparseVectors + """ + ) + (lob,) = cursor.fetchone() + assert json.loads(lob.read()) == values + cursor.execute( + f""" + select from_vector({column_name} returning clob format dense) + from TestSparseVectors + """ + ) + (lob,) = cursor.fetchone() + assert json.loads(lob.read()) == dense_vector + cursor.execute( + f""" + select from_vector({column_name} returning clob) + from TestSparseVectors + """ + ) + (lob,) = cursor.fetchone() + assert json.loads(lob.read()) == values + cursor.execute( + f""" + select from_vector({column_name} returning varchar2 format sparse) + from TestSparseVectors + """ + ) + assert json.loads(cursor.fetchone()[0]) == values + 
cursor.execute( + f""" + select from_vector({column_name} returning varchar2 format dense) + from TestSparseVectors + """ + ) + assert json.loads(cursor.fetchone()[0]) == dense_vector + cursor.execute( + f""" + select from_vector({column_name} returning varchar2) + from TestSparseVectors + """ + ) + assert json.loads(cursor.fetchone()[0]) == values diff --git a/tests/test_7800_aq_raw.py b/tests/test_7800_aq_raw.py index be18d1bf..dded8fe4 100644 --- a/tests/test_7800_aq_raw.py +++ b/tests/test_7800_aq_raw.py @@ -26,478 +26,479 @@ 7800 - Module for testing AQ with raw queues """ -import oracledb -import test_env import threading +import oracledb +import pytest -class TestCase(test_env.BaseTestCase): - raw_data = [ - b"sample raw data 1", - b"sample raw data 2", - b"sample raw data 3", - b"sample raw data 4", - b"sample raw data 5", - b"sample raw data 6", - ] - def __deq_in_thread(self, results): - with test_env.get_connection() as conn: - queue = conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.wait = 10 - props = queue.deqone() - if props is not None: - results.append(props.payload) - conn.commit() - - def __verify_attr(self, obj, attrName, value): - setattr(obj, attrName, value) - self.assertEqual(getattr(obj, attrName), value) - - def test_7800(self): - "7800 - test dequeuing an empty RAW queue" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertIsNone(props) - - def test_7801(self): - "7801 - test enqueuing and dequeuing multiple RAW messages" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - props = self.conn.msgproperties() - for value in self.raw_data: - props.payload = value - queue.enqone(props) - self.conn.commit() - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - results = [] - while True: - props = queue.deqone() - if props is None: - break - value = props.payload - results.append(value) - self.conn.commit() - self.assertEqual(results, self.raw_data) - - def test_7802(self): - "7802 - test dequeuing with DEQ_REMOVE_NODATA in RAW queue" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[1] - props = self.conn.msgproperties(payload=value) - queue.enqone(props) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA - props = queue.deqone() - self.assertIsNotNone(props) - self.assertEqual(props.payload, b"") - - def test_7803(self): - "7803 - test getting/setting dequeue options attributes" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - options = queue.deqoptions - self.__verify_attr(options, "condition", "TEST_CONDITION") - self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") - self.__verify_attr(options, "correlation", "TEST_CORRELATION") - self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) - self.__verify_attr( - options, "navigation", oracledb.DEQ_NEXT_TRANSACTION - ) - self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - self.__verify_attr(options, "wait", 1287) - self.__verify_attr(options, "msgid", b"mID") - - def test_7804(self): - "7804 - test enqueue options attributes RAW queue" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - options = queue.enqoptions - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - - def test_7805(self): - "7805 - test waiting for dequeue" - queue = 
self.get_and_clear_queue("TEST_RAW_QUEUE") - results = [] - thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) - thread.start() - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value) - queue.enqone(props) - self.conn.commit() - thread.join() - self.assertEqual(results, [value]) - - def test_7806(self): - "7806 - test getting/setting message properties attributes" - props = self.conn.msgproperties() - self.__verify_attr(props, "correlation", "TEST_CORRELATION") - self.__verify_attr(props, "delay", 60) - self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") - self.__verify_attr(props, "expiration", 30) - self.assertEqual(props.attempts, 0) - self.__verify_attr(props, "priority", 1) - self.assertEqual(props.state, oracledb.MSG_READY) - self.assertEqual(props.deliverymode, 0) - self.assertIsNone(props.enqtime) - - def test_7807(self): - "7807 - test enqueue visibility option - ENQ_ON_COMMIT" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT - props = self.conn.msgproperties(payload=value) - queue.enqone(props) +RAW_DATA = [ + b"sample raw data 1", + b"sample raw data 2", + b"sample raw data 3", + b"sample raw data 4", + b"sample raw data 5", + b"sample raw data 6", +] - other_conn = test_env.get_connection() - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertIsNone(props) - self.conn.commit() - props = queue.deqone() - self.assertIsNotNone(props) - - def test_7808(self): - "7808 - test enqueue visibility option - ENQ_IMMEDIATE" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) - queue.enqone(props) - other_conn = test_env.get_connection() - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - value = props.payload - results = value - other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - - def test_7809(self): - "7809 - test enqueue/dequeue delivery modes identical - buffered" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) - queue.enqone(props) +@pytest.fixture +def queue(conn, test_env): + """ + Creates the queue used by the tests in this file. 
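+    The helper empties the queue of any leftover messages before the test
+    runs, so each test starts from a known-empty TEST_RAW_QUEUE.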
+ """ + return test_env.get_and_clear_queue(conn, "TEST_RAW_QUEUE") - other_conn = test_env.get_connection() - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - value = props.payload - results = value - other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) - - def test_7810(self): - "7810 - test enqueue/dequeue delivery modes identical - persistent" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) - queue.enqone(props) - other_conn = test_env.get_connection() - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT +def _deq_in_thread(test_env, results): + with test_env.get_connection() as conn: + queue = conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.wait = 10 props = queue.deqone() - value = props.payload - results = value - other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) - - def test_7811(self): - "7811 - test enqueue/dequeue delivery modes the same" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) - queue.enqone(props) + if props is not None: + results.append(props.payload) + conn.commit() + + +def _verify_attr(obj, attrName, value): + setattr(obj, attrName, value) + assert getattr(obj, attrName) == value + + +def test_7800(queue): + "7800 - test dequeuing an empty RAW queue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None - other_conn = test_env.get_connection() - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + +def test_7801(queue, conn): + "7801 - test enqueuing and dequeuing multiple RAW messages" + props = conn.msgproperties() + for value in RAW_DATA: + props.payload = value + queue.enqone(props) + conn.commit() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: props = queue.deqone() + if props is None: + break value = props.payload - results = value - other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - - def test_7812(self): - "7812 - test enqueue/dequeue delivery modes different" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) + results.append(value) + conn.commit() + assert results == RAW_DATA + + +def test_7802(queue, conn): + "7802 - test dequeuing with 
DEQ_REMOVE_NODATA in RAW queue" + value = RAW_DATA[1] + props = conn.msgproperties(payload=value) + queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = queue.deqone() + assert props is not None + assert props.payload == b"" + + +def test_7803(queue): + "7803 - test getting/setting dequeue options attributes" + options = queue.deqoptions + _verify_attr(options, "condition", "TEST_CONDITION") + _verify_attr(options, "consumername", "TEST_CONSUMERNAME") + _verify_attr(options, "correlation", "TEST_CORRELATION") + _verify_attr(options, "mode", oracledb.DEQ_LOCKED) + _verify_attr(options, "navigation", oracledb.DEQ_NEXT_TRANSACTION) + _verify_attr(options, "transformation", "TEST_TRANSFORMATION") + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + _verify_attr(options, "wait", 1287) + _verify_attr(options, "msgid", b"mID") + + +def test_7804(queue): + "7804 - test enqueue options attributes RAW queue" + options = queue.enqoptions + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + +def test_7805(queue, conn, test_env): + "7805 - test waiting for dequeue" + results = [] + thread = threading.Thread(target=_deq_in_thread, args=(test_env, results)) + thread.start() + value = RAW_DATA[0] + props = conn.msgproperties(payload=value) + queue.enqone(props) + conn.commit() + thread.join() + assert results == [value] + + +def test_7806(conn): + "7806 - test getting/setting message properties attributes" + props = conn.msgproperties() + _verify_attr(props, "correlation", "TEST_CORRELATION") + _verify_attr(props, "delay", 60) + _verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + _verify_attr(props, "expiration", 30) + assert props.attempts == 0 + _verify_attr(props, "priority", 1) + assert props.state == oracledb.MSG_READY + assert props.deliverymode == 0 + assert props.enqtime is None + + +def test_7807(queue, conn, test_env): + "7807 - test enqueue visibility option - ENQ_ON_COMMIT" + value = RAW_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = conn.msgproperties(payload=value) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None + conn.commit() + props = queue.deqone() + assert props is not None + + +def test_7808(queue, conn, test_env): + "7808 - test enqueue visibility option - ENQ_IMMEDIATE" + value = RAW_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=value) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + value = props.payload + results = value + other_conn.commit() + assert results == RAW_DATA[0] + + +def test_7809(queue, conn, test_env): + "7809 - test enqueue/dequeue delivery modes identical - buffered" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=value) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = 
oracledb.MSG_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + value = props.payload + results = value + other_conn.commit() + assert results == RAW_DATA[0] + assert props.deliverymode == oracledb.MSG_BUFFERED + + +def test_7810(queue, conn, test_env): + "7810 - test enqueue/dequeue delivery modes identical - persistent" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=value) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + value = props.payload + results = value + other_conn.commit() + assert results == RAW_DATA[0] + assert props.deliverymode == oracledb.MSG_PERSISTENT + + +def test_7811(queue, conn, test_env): + "7811 - test enqueue/dequeue delivery modes the same" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=value) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + value = props.payload + results = value + other_conn.commit() + assert results == RAW_DATA[0] + + +def test_7812(queue, conn, test_env): + "7812 - test enqueue/dequeue delivery modes different" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=value) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None + + +def test_7813(queue, conn, test_env): + "7813 - test error for message with no payload" + props = conn.msgproperties() + with test_env.assert_raises_full_code("DPY-2000"): queue.enqone(props) - other_conn = test_env.get_connection() - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertIsNone(props) - - def test_7813(self): - "7813 - test error for message with no payload" - queue = self.conn.queue("TEST_RAW_QUEUE") - props = self.conn.msgproperties() - with self.assertRaisesFullCode("DPY-2000"): - queue.enqone(props) - - def test_7814(self): - "7814 - verify that the msgid property is returned correctly" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value) - 
self.assertIsNone(props.msgid) + +def test_7814(queue, conn, cursor): + "7814 - verify that the msgid property is returned correctly" + value = RAW_DATA[0] + props = conn.msgproperties(payload=value) + assert props.msgid is None + queue.enqone(props) + cursor.execute("select msgid from RAW_QUEUE_TAB") + (actual_msgid,) = cursor.fetchone() + assert props.msgid == actual_msgid + props = queue.deqone() + assert props.msgid == actual_msgid + + +def test_7815(queue, conn, cursor): + "7815 - test message props enqtime" + value = RAW_DATA[0] + cursor.execute("select current_timestamp from dual") + (start_date,) = cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = conn.msgproperties(payload=value) + queue.enqone(props) + props = queue.deqone() + cursor.execute("select current_timestamp from dual") + (end_date,) = cursor.fetchone() + end_date = end_date.replace(microsecond=0) + assert start_date <= props.enqtime <= end_date + + +def test_7816(queue, conn): + "7816 - test message props declared attributes" + value = RAW_DATA[0] + values = dict( + payload=value, + correlation="TEST_CORRELATION", + delay=0, + exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", + expiration=15, + priority=1, + ) + props = conn.msgproperties(**values) + for attr_name in values: + assert getattr(props, attr_name) == values[attr_name] + queue.enqone(props) + conn.commit() + prop = queue.deqone() + for attr_name in values: + assert getattr(prop, attr_name) == values[attr_name] + + +def test_7817(queue, conn): + "7817 - test getting queue attributes" + assert queue.name == "TEST_RAW_QUEUE" + assert queue.connection is conn + + +def test_7818(queue): + "7818 - test getting write-only attributes" + for options in (queue.enqoptions, queue.deqoptions): + with pytest.raises(AttributeError): + options.deliverymode + + +def test_7819(queue, conn): + "7819 - test deqoption condition with priority" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] + for priority in priorities: + value = RAW_DATA[0] + props = conn.msgproperties(payload=value, priority=priority) queue.enqone(props) - self.cursor.execute("select msgid from RAW_QUEUE_TAB") - (actual_msgid,) = self.cursor.fetchone() - self.assertEqual(props.msgid, actual_msgid) + + queue.deqoptions.condition = "priority = 9" + results = [] + while True: props = queue.deqone() - self.assertEqual(props.msgid, actual_msgid) - - def test_7815(self): - "7815 - test message props enqtime" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - self.cursor.execute("select current_timestamp from dual") - (start_date,) = self.cursor.fetchone() - start_date = start_date.replace(microsecond=0) - props = self.conn.msgproperties(payload=value) + if props is None: + break + results.append(props.payload) + conn.commit() + assert len(results) == 3 + + +def test_7820(queue, conn): + "7820 - test deqoption correlation" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + correlations = [ + "sample", + "sample correlation", + "sample", + "sample", + "sample correlation", + ] + for correlation in correlations: + value = RAW_DATA[0] + props = conn.msgproperties(payload=value, correlation=correlation) queue.enqone(props) + conn.commit() + queue.deqoptions.correlation = "sample correlation" + results = [] + while True: props = queue.deqone() - self.cursor.execute("select current_timestamp from dual") - (end_date,) = self.cursor.fetchone() - end_date = end_date.replace(microsecond=0) - self.assertTrue(start_date <= props.enqtime <= 
end_date) - - def test_7816(self): - "7816 - test message props declared attributes" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - values = dict( - payload=value, - correlation="TEST_CORRELATION", - delay=0, - exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", - expiration=15, - priority=1, - ) - props = self.conn.msgproperties(**values) - for attr_name in values: - self.assertEqual(getattr(props, attr_name), values[attr_name]) - queue.enqone(props) - self.conn.commit() - prop = queue.deqone() - for attr_name in values: - self.assertEqual(getattr(prop, attr_name), values[attr_name]) - - def test_7817(self): - "7817 - test getting queue attributes" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - self.assertEqual(queue.name, "TEST_RAW_QUEUE") - self.assertEqual(queue.connection, self.conn) - - def test_7818(self): - "7818 - test getting write-only attributes" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - for options in (queue.enqoptions, queue.deqoptions): - with self.assertRaises(AttributeError): - options.deliverymode - - def test_7819(self): - "7819 - test deqoption condition with priority" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] - for priority in priorities: - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value, priority=priority) - queue.enqone(props) - - queue.deqoptions.condition = "priority = 9" - results = [] - while True: - props = queue.deqone() - if props is None: - break - results.append(props.payload) - self.conn.commit() - self.assertEqual(len(results), 3) - - def test_7820(self): - "7820 - test deqoption correlation" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - correlations = [ - "sample", - "sample correlation", - "sample", - "sample", - "sample correlation", - ] - for correlation in correlations: - value = self.raw_data[0] - props = self.conn.msgproperties( - payload=value, correlation=correlation - ) - queue.enqone(props) - self.conn.commit() - queue.deqoptions.correlation = "sample correlation" - results = [] - while True: - props = queue.deqone() - if props is None: - break - results.append(props.payload) - self.conn.commit() - self.assertEqual(len(results), 2) - - def test_7821(self): - "7821 - test deqoption msgid" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value) - queue.enqone(props) + if props is None: + break + results.append(props.payload) + conn.commit() + assert len(results) == 2 + + +def test_7821(queue, conn): + "7821 - test deqoption msgid" + value = RAW_DATA[0] + props = conn.msgproperties(payload=value) + queue.enqone(props) + queue.enqone(props) + conn.commit() + msgid = props.msgid + queue.enqone(props) + conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.msgid = msgid + prop = queue.deqone() + conn.commit() + assert prop.msgid == msgid + + +def test_7822(queue): + "7822 - test payload_type returns the correct value" + assert queue.payload_type is None + + +def test_7823(queue): + "7823 - test deprecated attributes (enqOptions, deqOptions)" + assert queue.enqOptions is queue.enqoptions + assert queue.deqOptions is queue.deqoptions + + +def test_7824(queue, conn): + "7824 - test deprecated AQ methods (enqOne, deqOne)" + value = b"Test 7823" + queue.enqOne(conn.msgproperties(value)) + props = queue.deqOne() + assert props.payload 
== value + + +def test_7825(queue, conn, test_env): + "7825 - test wrong payload type" + typ = conn.gettype("UDT_BOOK") + obj = typ.newobject() + props = conn.msgproperties(payload=obj) + with test_env.assert_raises_full_code("DPY-2062"): queue.enqone(props) - self.conn.commit() - msgid = props.msgid - queue.enqone(props) - self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.msgid = msgid - prop = queue.deqone() - self.conn.commit() - self.assertEqual(prop.msgid, msgid) - - def test_7822(self): - "7822 - test payload_type returns the correct value" - queue = self.conn.queue("TEST_RAW_QUEUE") - self.assertIsNone(queue.payload_type) - - def test_7823(self): - "7823 - test deprecated attributes (enqOptions, deqOptions)" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - self.assertEqual(queue.enqOptions, queue.enqoptions) - self.assertEqual(queue.deqOptions, queue.deqoptions) - - def test_7824(self): - "7824 - test deprecated AQ methods (enqOne, deqOne)" - value = b"Test 7823" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.enqOne(self.conn.msgproperties(value)) - props = queue.deqOne() - self.assertEqual(props.payload, value) - - def test_7825(self): - "7825 - test wrong payload type" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - typ = self.conn.gettype("UDT_BOOK") - obj = typ.newobject() - props = self.conn.msgproperties(payload=obj) - with self.assertRaisesFullCode("DPY-2062"): - queue.enqone(props) - - def test_7826(self): - "7826 - test providing null values on queue dequeue options" - queue = self.conn.queue("TEST_RAW_QUEUE") - str_value = "test - 7826" - bytes_value = str_value.encode() - for name in [ - "condition", - "consumername", - "correlation", - "msgid", - "transformation", - ]: - value = bytes_value if name == "msgid" else str_value - with self.subTest(name=name): - setattr(queue.deqoptions, name, value) - self.assertEqual(getattr(queue.deqoptions, name), value) - setattr(queue.deqoptions, name, None) - self.assertIsNone(getattr(queue.deqoptions, name)) - - def test_7827(self): - "7827 - test providing null values on queue enqueue options" - queue = self.conn.queue("TEST_RAW_QUEUE") - value = "test - 7827" - for name in ["transformation"]: - with self.subTest(name=name): - setattr(queue.enqoptions, name, value) - self.assertEqual(getattr(queue.enqoptions, name), value) - setattr(queue.enqoptions, name, None) - self.assertIsNone(getattr(queue.enqoptions, name)) - - def test_7828(self): - "7828 - test providing null correlation on message properties" - props = self.conn.msgproperties() - value = "test - 7828" - for name in ["correlation", "exceptionq"]: - with self.subTest(name=name): - setattr(props, name, value) - self.assertEqual(getattr(props, name), value) - setattr(props, name, None) - self.assertIsNone(getattr(props, name)) - - def test_7829(self): - "7829 - test deq options correlation with buffered messages" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value, correlation="sample") - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqone(props) - self.conn.commit() - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.correlation = "sample" - msg = queue.deqone() - self.conn.commit() - self.assertEqual(msg.payload, value) - - def test_7830(self): - 
"7830 - test deq options with msgid > 16 bytes" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.msgid = b"invalid_msgid_123456789" - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - with self.assertRaisesFullCode("ORA-25263"): - queue.deqone() - - def test_7831(self): - "7831 - test deq options with msgid < 16 bytes" - queue = self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.msgid = b"short_msgid" - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - with self.assertRaisesFullCode("ORA-25263"): - queue.deqone() - - -if __name__ == "__main__": - test_env.run_test_cases() + + +def test_7826(queue): + "7826 - test providing null values on queue dequeue options" + str_value = "test - 7826" + bytes_value = str_value.encode() + for name in [ + "condition", + "consumername", + "correlation", + "msgid", + "transformation", + ]: + value = bytes_value if name == "msgid" else str_value + setattr(queue.deqoptions, name, value) + assert getattr(queue.deqoptions, name) == value + setattr(queue.deqoptions, name, None) + assert getattr(queue.deqoptions, name) is None + + +def test_7827(queue): + "7827 - test providing null values on queue enqueue options" + value = "test - 7827" + for name in ["transformation"]: + setattr(queue.enqoptions, name, value) + assert getattr(queue.enqoptions, name) == value + setattr(queue.enqoptions, name, None) + assert getattr(queue.enqoptions, name) is None + + +def test_7828(conn): + "7828 - test providing null correlation on message properties" + props = conn.msgproperties() + value = "test - 7828" + for name in ["correlation", "exceptionq"]: + setattr(props, name, value) + assert getattr(props, name) == value + setattr(props, name, None) + assert getattr(props, name) is None + + +def test_7829(queue, conn): + "7829 - test deq options correlation with buffered messages" + value = RAW_DATA[0] + props = conn.msgproperties(payload=value, correlation="sample") + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqone(props) + conn.commit() + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = "sample" + msg = queue.deqone() + conn.commit() + assert msg.payload == value + + +def test_7830(queue, test_env): + "7830 - test deq options with msgid > 16 bytes" + queue.deqoptions.msgid = b"invalid_msgid_123456789" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with test_env.assert_raises_full_code("ORA-25263"): + queue.deqone() + + +def test_7831(queue, test_env): + "7831 - test deq options with msgid < 16 bytes" + queue.deqoptions.msgid = b"short_msgid" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with test_env.assert_raises_full_code("ORA-25263"): + queue.deqone() diff --git a/tests/test_7900_aq_raw_async.py b/tests/test_7900_aq_raw_async.py index 0772a80c..0e4be60b 100644 --- a/tests/test_7900_aq_raw_async.py +++ b/tests/test_7900_aq_raw_async.py @@ -27,412 +27,422 @@ """ import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - raw_data = [ - b"sample raw data 1", - b"sample raw data 2", - b"sample raw data 3", - b"sample raw data 4", - b"sample raw data 5", - b"sample raw data 6", - ] +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture +async def queue(async_conn, test_env): + """ + Creates the 
queue used by the tests in this file. + """ + return await test_env.get_and_clear_queue_async( + async_conn, "TEST_RAW_QUEUE" + ) + + +RAW_DATA = [ + b"sample raw data 1", + b"sample raw data 2", + b"sample raw data 3", + b"sample raw data 4", + b"sample raw data 5", + b"sample raw data 6", +] + - def __verify_attr(self, obj, attrName, value): - setattr(obj, attrName, value) - self.assertEqual(getattr(obj, attrName), value) +def _verify_attr(obj, attrName, value): + setattr(obj, attrName, value) + assert getattr(obj, attrName) == value - async def test_7900(self): - "7900 - test dequeuing an empty RAW queue" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") + +async def test_7900(queue): + "7900 - test dequeuing an empty RAW queue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + assert props is None + + +async def test_7901(async_conn, queue): + "7901 - test enqueuing and dequeuing multiple RAW messages" + props = async_conn.msgproperties() + for value in RAW_DATA: + props.payload = value + await queue.enqone(props) + await async_conn.commit() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: + props = await queue.deqone() + if props is None: + break + value = props.payload + results.append(value) + await async_conn.commit() + assert results == RAW_DATA + + +async def test_7902(async_conn, queue): + "7902 - test dequeuing with DEQ_REMOVE_NODATA in RAW queue" + value = RAW_DATA[1] + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = await queue.deqone() + assert props is not None + assert props.payload == b"" + + +async def test_7903(queue): + "7903 - test getting/setting dequeue options attributes" + options = queue.deqoptions + _verify_attr(options, "condition", "TEST_CONDITION") + _verify_attr(options, "consumername", "TEST_CONSUMERNAME") + _verify_attr(options, "correlation", "TEST_CORRELATION") + _verify_attr(options, "mode", oracledb.DEQ_LOCKED) + _verify_attr(options, "navigation", oracledb.DEQ_NEXT_TRANSACTION) + _verify_attr(options, "transformation", "TEST_TRANSFORMATION") + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + _verify_attr(options, "wait", 1287) + _verify_attr(options, "msgid", b"mID") + + +async def test_7904(queue): + "7904 - test enqueue options attributes RAW queue" + options = queue.enqoptions + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + +async def test_7905(queue): + "7905 - test errors for invalid values for enqueue" + value = RAW_DATA[0] + with pytest.raises(TypeError): + await queue.enqone(value) + + +async def test_7906(async_conn): + "7906 - test getting/setting message properties attributes" + props = async_conn.msgproperties() + _verify_attr(props, "correlation", "TEST_CORRELATION") + _verify_attr(props, "delay", 60) + _verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + _verify_attr(props, "expiration", 30) + assert props.attempts == 0 + _verify_attr(props, "priority", 1) + assert props.state == oracledb.MSG_READY + assert props.deliverymode == 0 + + +async def test_7907(async_conn, queue, test_env): + "7907 - test enqueue visibility option - ENQ_ON_COMMIT" + value = RAW_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + + 
async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = await queue.deqone() - self.assertIsNone(props) - - async def test_7901(self): - "7901 - test enqueuing and dequeuing multiple RAW messages" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - props = self.conn.msgproperties() - for value in self.raw_data: - props.payload = value - await queue.enqone(props) - await self.conn.commit() + assert props is None + await async_conn.commit() + props = await queue.deqone() + assert props is not None + + +async def test_7908(queue, async_conn, test_env): + "7908 - test enqueue visibility option - ENQ_IMMEDIATE" + value = RAW_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - results = [] - while True: - props = await queue.deqone() - if props is None: - break - value = props.payload - results.append(value) - await self.conn.commit() - self.assertEqual(results, self.raw_data) - - async def test_7902(self): - "7902 - test dequeuing with DEQ_REMOVE_NODATA in RAW queue" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[1] - props = self.conn.msgproperties(payload=value) - await queue.enqone(props) + props = await queue.deqone() + value = props.payload + results = value + await other_conn.commit() + assert results == RAW_DATA[0] + + +async def test_7909(queue, async_conn, test_env): + "7909 - test enqueue/dequeue delivery modes identical - buffered" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA props = await queue.deqone() - self.assertIsNotNone(props) - self.assertEqual(props.payload, b"") - - async def test_7903(self): - "7903 - test getting/setting dequeue options attributes" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - options = queue.deqoptions - self.__verify_attr(options, "condition", "TEST_CONDITION") - self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") - self.__verify_attr(options, "correlation", "TEST_CORRELATION") - self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) - self.__verify_attr( - options, "navigation", oracledb.DEQ_NEXT_TRANSACTION - ) - self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - self.__verify_attr(options, "wait", 1287) - self.__verify_attr(options, "msgid", b"mID") - - async def test_7904(self): - "7904 - test enqueue options attributes RAW queue" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - options = queue.enqoptions - self.__verify_attr(options, "visibility", 
oracledb.ENQ_IMMEDIATE) - - async def test_7905(self): - "7905 - test errors for invalid values for enqueue" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - with self.assertRaises(TypeError): - await queue.enqone(value) - - async def test_7906(self): - "7906 - test getting/setting message properties attributes" - props = self.conn.msgproperties() - self.__verify_attr(props, "correlation", "TEST_CORRELATION") - self.__verify_attr(props, "delay", 60) - self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") - self.__verify_attr(props, "expiration", 30) - self.assertEqual(props.attempts, 0) - self.__verify_attr(props, "priority", 1) - self.assertEqual(props.state, oracledb.MSG_READY) - self.assertEqual(props.deliverymode, 0) - - async def test_7907(self): - "7907 - test enqueue visibility option - ENQ_ON_COMMIT" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT - props = self.conn.msgproperties(payload=value) - await queue.enqone(props) - - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertIsNone(props) - await self.conn.commit() - props = await queue.deqone() - self.assertIsNotNone(props) - - async def test_7908(self): - "7908 - test enqueue visibility option - ENQ_IMMEDIATE" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) - await queue.enqone(props) + value = props.payload + results = value + await other_conn.commit() + assert results == RAW_DATA[0] + assert props.deliverymode == oracledb.MSG_BUFFERED + + +async def test_7910(queue, async_conn, test_env): + "7910 - test enqueue/dequeue delivery modes identical - persistent" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + value = props.payload + results = value + await other_conn.commit() + assert results == RAW_DATA[0] + assert props.deliverymode == oracledb.MSG_PERSISTENT + + +async def test_7911(queue, async_conn, test_env): + "7911 - test enqueue/dequeue delivery modes the same" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + value = props.payload + results = value + await other_conn.commit() + assert results == RAW_DATA[0] + + 
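+# The next test pairs mismatched delivery modes: the message is enqueued as
+# buffered, but the second connection dequeues with persistent delivery, so
+# the dequeue is expected to return no message.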
+async def test_7912(queue, async_conn, test_env): + "7912 - test enqueue/dequeue delivery modes different" + value = RAW_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue("TEST_RAW_QUEUE") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + assert props is None - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - value = props.payload - results = value - await other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - - async def test_7909(self): - "7909 - test enqueue/dequeue delivery modes identical - buffered" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) - await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - value = props.payload - results = value - await other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) - - async def test_7910(self): - "7910 - test enqueue/dequeue delivery modes identical - persistent" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) +async def test_7913(async_conn, test_env): + "7913 - test error for message with no payload" + queue = async_conn.queue("TEST_RAW_QUEUE") + props = async_conn.msgproperties() + with test_env.assert_raises_full_code("DPY-2000"): await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - value = props.payload - results = value - await other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) - - async def test_7911(self): - "7911 - test enqueue/dequeue delivery modes the same" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) - await queue.enqone(props) - async with 
test_env.get_connection_async() as other_conn: - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - value = props.payload - results = value - await other_conn.commit() - self.assertEqual(results, self.raw_data[0]) - - async def test_7912(self): - "7912 - test enqueue/dequeue delivery modes different" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=value) +async def test_7914(async_conn, async_cursor, queue): + "7914 - verify that the msgid property is returned correctly" + value = RAW_DATA[0] + props = async_conn.msgproperties(payload=value) + assert props.msgid is None + await queue.enqone(props) + await async_cursor.execute("select msgid from RAW_QUEUE_TAB") + (actual_msgid,) = await async_cursor.fetchone() + assert props.msgid == actual_msgid + props = await queue.deqone() + assert props.msgid == actual_msgid + + +async def test_7915(async_conn, async_cursor, queue): + "7915 - test message props enqtime" + value = RAW_DATA[0] + await async_cursor.execute("select current_timestamp from dual") + (start_date,) = await async_cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + props = await queue.deqone() + await async_cursor.execute("select current_timestamp from dual") + (end_date,) = await async_cursor.fetchone() + end_date = end_date.replace(microsecond=0) + assert start_date <= props.enqtime <= end_date + + +async def test_7916(async_conn, queue): + "7916 - test message props declared attributes" + value = RAW_DATA[0] + values = dict( + payload=value, + correlation="TEST_CORRELATION", + delay=0, + exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", + expiration=15, + priority=1, + ) + props = async_conn.msgproperties(**values) + for attr_name in values: + assert getattr(props, attr_name) == values[attr_name] + await queue.enqone(props) + await async_conn.commit() + prop = await queue.deqone() + for attr_name in values: + assert getattr(prop, attr_name) == values[attr_name] + + +async def test_7917(async_conn, queue): + "7917 - test getting queue attributes" + assert queue.name == "TEST_RAW_QUEUE" + assert queue.connection is async_conn + + +async def test_7918(queue): + "7918 - test getting write-only attributes" + for options in (queue.enqoptions, queue.deqoptions): + with pytest.raises(AttributeError): + options.deliverymode + + +async def test_7919(async_conn, queue): + "7919 - test deqoption condition with priority" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] + for priority in priorities: + value = RAW_DATA[0] + props = async_conn.msgproperties(payload=value, priority=priority) await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue("TEST_RAW_QUEUE") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertIsNone(props) - - async def test_7913(self): - "7913 - test 
error for message with no payload" - queue = self.conn.queue("TEST_RAW_QUEUE") - props = self.conn.msgproperties() - with self.assertRaisesFullCode("DPY-2000"): - await queue.enqone(props) - - async def test_7914(self): - "7914 - verify that the msgid property is returned correctly" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value) - self.assertIsNone(props.msgid) - await queue.enqone(props) - await self.cursor.execute("select msgid from RAW_QUEUE_TAB") - (actual_msgid,) = await self.cursor.fetchone() - self.assertEqual(props.msgid, actual_msgid) - props = await queue.deqone() - self.assertEqual(props.msgid, actual_msgid) - - async def test_7915(self): - "7915 - test message props enqtime" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - await self.cursor.execute("select current_timestamp from dual") - (start_date,) = await self.cursor.fetchone() - start_date = start_date.replace(microsecond=0) - props = self.conn.msgproperties(payload=value) - await queue.enqone(props) + queue.deqoptions.condition = "priority = 9" + results = [] + while True: props = await queue.deqone() - await self.cursor.execute("select current_timestamp from dual") - (end_date,) = await self.cursor.fetchone() - end_date = end_date.replace(microsecond=0) - self.assertTrue(start_date <= props.enqtime <= end_date) - - async def test_7916(self): - "7916 - test message props declared attributes" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - values = dict( - payload=value, - correlation="TEST_CORRELATION", - delay=0, - exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", - expiration=15, - priority=1, + if props is None: + break + results.append(props.payload) + await async_conn.commit() + assert len(results) == 3 + + +async def test_7920(async_conn, queue): + "7920 - test deqoption correlation" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + correlations = [ + "sample", + "sample correlation", + "sample", + "sample", + "sample correlation", + ] + for correlation in correlations: + value = RAW_DATA[0] + props = async_conn.msgproperties( + payload=value, correlation=correlation ) - props = self.conn.msgproperties(**values) - for attr_name in values: - self.assertEqual(getattr(props, attr_name), values[attr_name]) - await queue.enqone(props) - await self.conn.commit() - prop = await queue.deqone() - for attr_name in values: - self.assertEqual(getattr(prop, attr_name), values[attr_name]) - - async def test_7917(self): - "7917 - test getting queue attributes" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - self.assertEqual(queue.name, "TEST_RAW_QUEUE") - self.assertEqual(queue.connection, self.conn) - - async def test_7918(self): - "7918 - test getting write-only attributes" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - for options in (queue.enqoptions, queue.deqoptions): - with self.assertRaises(AttributeError): - options.deliverymode - - async def test_7919(self): - "7919 - test deqoption condition with priority" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] - for priority in priorities: - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value, priority=priority) - await queue.enqone(props) - - queue.deqoptions.condition = "priority = 9" - results = [] - while True: - props = await queue.deqone() - if props is None: - break - 
results.append(props.payload) - await self.conn.commit() - self.assertEqual(len(results), 3) - - async def test_7920(self): - "7920 - test deqoption correlation" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - correlations = [ - "sample", - "sample correlation", - "sample", - "sample", - "sample correlation", - ] - for correlation in correlations: - value = self.raw_data[0] - props = self.conn.msgproperties( - payload=value, correlation=correlation - ) - await queue.enqone(props) - await self.conn.commit() - queue.deqoptions.correlation = "sample correlation" - results = [] - while True: - props = await queue.deqone() - if props is None: - break - results.append(props.payload) - await self.conn.commit() - self.assertEqual(len(results), 2) - - async def test_7921(self): - "7921 - test deqoption msgid" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = self.raw_data[0] - props = self.conn.msgproperties(payload=value) await queue.enqone(props) + await async_conn.commit() + queue.deqoptions.correlation = "sample correlation" + results = [] + while True: + props = await queue.deqone() + if props is None: + break + results.append(props.payload) + await async_conn.commit() + assert len(results) == 2 + + +async def test_7921(async_conn, queue): + "7921 - test deqoption msgid" + value = RAW_DATA[0] + props = async_conn.msgproperties(payload=value) + await queue.enqone(props) + await queue.enqone(props) + await async_conn.commit() + msgid = props.msgid + await queue.enqone(props) + await async_conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.msgid = msgid + prop = await queue.deqone() + await async_conn.commit() + assert prop.msgid == msgid + + +async def test_7922(queue): + "7922 - test payload_type returns the correct value" + assert queue.payload_type is None + + +async def test_7923(queue): + "7923 - test deprecated attributes (enqOptions, deqOptions)" + assert queue.enqOptions is queue.enqoptions + assert queue.deqOptions is queue.deqoptions + + +async def test_7924(async_conn, queue, test_env): + "7924 - test wrong payload type" + typ = await async_conn.gettype("UDT_BOOK") + obj = typ.newobject() + props = async_conn.msgproperties(payload=obj) + with test_env.assert_raises_full_code("DPY-2062"): await queue.enqone(props) - await self.conn.commit() - msgid = props.msgid - await queue.enqone(props) - await self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.msgid = msgid - prop = await queue.deqone() - await self.conn.commit() - self.assertEqual(prop.msgid, msgid) - - async def test_7922(self): - "7922 - test payload_type returns the correct value" - queue = self.conn.queue("TEST_RAW_QUEUE") - self.assertIsNone(queue.payload_type) - - async def test_7923(self): - "7923 - test deprecated attributes (enqOptions, deqOptions)" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - self.assertEqual(queue.enqOptions, queue.enqoptions) - self.assertEqual(queue.deqOptions, queue.deqoptions) - - async def test_7924(self): - "7924 - test wrong payload type" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - typ = await self.conn.gettype("UDT_BOOK") - obj = typ.newobject() - props = self.conn.msgproperties(payload=obj) - with self.assertRaisesFullCode("DPY-2062"): - await queue.enqone(props) - - async def test_7925(self): - "7925 - test deq options correlation with buffered messages" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - value = 
self.raw_data[0] - props = self.conn.msgproperties(payload=value, correlation="sample") - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - await queue.enqone(props) - await self.conn.commit() - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.correlation = "sample" - msg = await queue.deqone() - await self.conn.commit() - self.assertEqual(msg.payload, value) - - async def test_7926(self): - "7926 - test deq options with msgid > 16 bytes" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.msgid = b"invalid_msgid_123456789" - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - with self.assertRaisesFullCode("ORA-25263"): - await queue.deqone() - - async def test_7927(self): - "7927 - test deq options with msgid < 16 bytes" - queue = await self.get_and_clear_queue("TEST_RAW_QUEUE") - queue.deqoptions.msgid = b"short_msgid" - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - with self.assertRaisesFullCode("ORA-25263"): - await queue.deqone() -if __name__ == "__main__": - test_env.run_test_cases() +async def test_7925(async_conn, queue): + "7925 - test deq options correlation with buffered messages" + value = RAW_DATA[0] + props = async_conn.msgproperties(payload=value, correlation="sample") + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + await queue.enqone(props) + await async_conn.commit() + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = "sample" + msg = await queue.deqone() + await async_conn.commit() + assert msg.payload == value + + +async def test_7926(queue, test_env): + "7926 - test deq options with msgid > 16 bytes" + queue.deqoptions.msgid = b"invalid_msgid_123456789" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with test_env.assert_raises_full_code("ORA-25263"): + await queue.deqone() + + +async def test_7927(queue, test_env): + "7927 - test deq options with msgid < 16 bytes" + queue.deqoptions.msgid = b"short_msgid" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + with test_env.assert_raises_full_code("ORA-25263"): + await queue.deqone() diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 4837c67e..7193c33e 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -31,10 +31,8 @@ import decimal import oracledb -import numpy -import pandas import pyarrow -import test_env +import pytest # basic DATASET_1 = [ @@ -247,579 +245,617 @@ QUERY_SQL = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause="") -class TestCase(test_env.BaseTestCase): +def _convert_date(typ, value): + """ + Converts a date to the format required by Arrow. + """ + if value is not None: + if typ.unit == "s": + value = datetime.datetime(value.year, value.month, value.day) + ts = (value - datetime.datetime(1970, 1, 1)).total_seconds() + if typ.unit != "s": + ts *= 1_000_000 + return ts + + +def _convert_to_array(data, typ): + """ + Convert raw data to an Arrow array using pyarrow. 
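+
+    Decimal columns are converted to decimal.Decimal values and timestamp
+    columns to epoch-based values first; a null mask is built from the None
+    entries in the data.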
+ """ + if isinstance(typ, pyarrow.Decimal128Type): + data = [ + decimal.Decimal(str(value)) if value is not None else value + for value in data + ] + elif isinstance(typ, pyarrow.TimestampType): + data = [_convert_date(typ, v) for v in data] + mask = [value is None for value in data] + return pyarrow.array(data, typ, mask=mask) + + +def _convert_to_df(data): + """ + Converts the data set to a Pandas data frame for comparison to what is + returned from the database. + """ + data_by_col = [[row[i] for row in data] for i in range(len(data[0]))] + fetch_decimals = oracledb.defaults.fetch_decimals + types = [ + pyarrow.decimal128(9) if fetch_decimals else pyarrow.int64(), + pyarrow.string(), + pyarrow.string(), + pyarrow.string(), + pyarrow.string(), + pyarrow.timestamp("s"), + pyarrow.decimal128(9, 2) if fetch_decimals else pyarrow.float64(), + pyarrow.decimal128(3) if fetch_decimals else pyarrow.int64(), + pyarrow.timestamp("us"), + ] + arrays = [_convert_to_array(d, t) for d, t in zip(data_by_col, types)] + names = [ + "ID", + "FIRSTNAME", + "LASTNAME", + "CITY", + "COUNTRY", + "DATEOFBIRTH", + "SALARY", + "CREDITSCORE", + "LASTUPDATED", + ] + pa_tab = pyarrow.Table.from_arrays(arrays, names=names) + pa_tab.validate(full=True) + return pa_tab.to_pandas() + + +def _populate_table(cursor, data): + """ + Populate the test table with the given data. + """ + cursor.execute("delete from TestDataframe") + types = [None] * len(data[0]) + types[8] = oracledb.DB_TYPE_TIMESTAMP + cursor.setinputsizes(*types) + cursor.executemany( + """ + insert into TestDataframe ( + Id, FirstName, LastName, City, Country, + DateOfBirth, Salary, CreditScore, LastUpdated + ) values ( + :id, :first_name, :last_name, :city, :country, + :dob, :salary, :credit_score, :last_updated + ) + """, + data, + ) + cursor.connection.commit() + + +def _test_df_interop(test_env, cursor, data): + """ + Tests interoperability with external data frames using the data set + provided. + """ + _populate_table(cursor, data) + ora_df = cursor.connection.fetch_df_all(QUERY_SQL) + _validate_df(ora_df, data, test_env) + + +def _test_df_batches_interop(test_env, cursor, data, batch_size, num_batches): + """ + Tests interoperability with external data frames using the data set + provided. + """ + _populate_table(cursor, data) + conn = cursor.connection + batches = list(conn.fetch_df_batches(QUERY_SQL, size=batch_size)) + assert len(batches) == num_batches + if num_batches == 1: + _validate_df(batches[0], data, test_env) + else: + offset = 0 + for batch in batches: + _validate_df(batch, data[offset : offset + batch_size], test_env) + offset += batch_size + + +def _validate_df(ora_df, data, test_env): + """ + Validates the data frame by converting it to Pandas and comparing it + with the original data set that was used. 
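+
+    Both the expected data set and the fetched frame are normalized with
+    test_env.get_data_from_df() before comparison so that values which
+    cannot be compared directly (such as NaN) do not cause false mismatches.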
+ """ + raw_df = _convert_to_df(data) + raw_data = test_env.get_data_from_df(raw_df) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == raw_data + + +def test_8000(conn, cursor): + "8000 - test basic fetch of data frame" + _populate_table(cursor, DATASET_1) + ora_df = conn.fetch_df_all(QUERY_SQL) + assert ora_df.num_rows() == len(DATASET_1) + assert ora_df.num_columns() == len(DATASET_1[0]) + + +def test_8001(cursor, test_env): + "8001 - test conversion to external dataframe" + _test_df_interop(test_env, cursor, DATASET_1) + + +def test_8002(cursor, test_env): + "8001 - test null and negative values" + _test_df_interop(test_env, cursor, DATASET_2) + + +def test_8003(cursor, test_env): + "8002 - test with fetch_decimals" + with test_env.defaults_context_manager("fetch_decimals", True): + _test_df_interop(test_env, cursor, DATASET_1) + + +def test_8004(cursor, test_env): + "8003 - test null and negative values with fetch_decimals" + with test_env.defaults_context_manager("fetch_decimals", True): + _test_df_interop(test_env, cursor, DATASET_2) + + +def test_8005(cursor, test_env): + "8005 - test null and values with leading zeros" + _test_df_interop(test_env, cursor, DATASET_3) + + +def test_8006(cursor, test_env): + "8005 - test null and values with leading zeros with fetch_decimals" + with test_env.defaults_context_manager("fetch_decimals", True): + _test_df_interop(test_env, cursor, DATASET_3) + + +def test_8007(cursor, test_env): + "8007 - duplicate values in the rows" + _test_df_interop(test_env, cursor, DATASET_4) + + +def test_8008(cursor, test_env): + "8008 - batches without specification of size" + _test_df_batches_interop( + test_env, cursor, DATASET_4, batch_size=None, num_batches=1 + ) + + +def test_8009(cursor, test_env): + "8009 - batches with specification of size" + _test_df_batches_interop( + test_env, cursor, DATASET_4, batch_size=5, num_batches=2 + ) + + +def test_8010(conn, cursor, test_env): + "8010 - verify passing Arrow arrays twice works" + _populate_table(cursor, DATASET_1) + ora_df = conn.fetch_df_all(QUERY_SQL) + _validate_df(ora_df, DATASET_1, test_env) + _validate_df(ora_df, DATASET_1, test_env) + + +def test_8011(conn, cursor): + "8011 - verify empty data set" + _populate_table(cursor, DATASET_1) + statement = "select * from TestDataFrame where Id = 4" + ora_df = conn.fetch_df_all(statement) + assert ora_df.num_rows() == 0 + + +def test_8012(conn, cursor): + "8012 - verify empty data set with batches" + _populate_table(cursor, DATASET_1) + statement = "select * from TestDataFrame where Id = 4" + for ora_df in conn.fetch_df_batches(statement): + assert ora_df.num_rows() == 0 + - def __convert_date(self, typ, value): +def test_8013(conn, cursor): + "8013 - negative checks on attributes" + _populate_table(cursor, DATASET_1) + ora_df = conn.fetch_df_all(QUERY_SQL) + with pytest.raises(IndexError): + ora_df.get_column(121) + with pytest.raises(IndexError): + ora_df.get_column(-1) + with pytest.raises(KeyError): + ora_df.get_column_by_name("missing_column") + + +def test_8014(conn, test_env): + "8014 - check unsupported error" + statement = "select cursor(select user from dual) from dual" + with test_env.assert_raises_full_code("DPY-3030"): + conn.fetch_df_all(statement) + + +def test_8015(cursor, test_env): + "8015 - batches with specification of size matching number of rows" + _test_df_batches_interop( + test_env, cursor, DATASET_2, batch_size=len(DATASET_2), num_batches=1 + ) + + +def 
test_8016(conn, cursor): + "8016 - verify get_column() returns the correct value" + _populate_table(cursor, DATASET_1) + ora_df = conn.fetch_df_all(QUERY_SQL) + array = pyarrow.array(ora_df.get_column(1)) + assert array.to_pylist() == ["John", "Big"] + + +def test_8017(cursor, test_env): + "8017 - batches with size that has duplicate rows across batches" + _test_df_batches_interop( + test_env, cursor, DATASET_4, batch_size=3, num_batches=2 + ) + + +def test_8018(conn, test_env): + "8018 - fetch_decimals without precision and scale specified" + data = [(1.0,)] + with test_env.defaults_context_manager("fetch_decimals", True): + ora_df = conn.fetch_df_all("select 1.0 from dual") + fetched_tab = pyarrow.Table.from_arrays( + ora_df.column_arrays(), names=ora_df.column_names() + ) + fetched_df = fetched_tab.to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +def test_8019(conn, test_env): + "8019 - fetch clob" + data = [("test_8023",)] + ora_df = conn.fetch_df_all("select to_clob('test_8023') from dual") + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +def test_8020(conn, test_env): + "8020 - fetch blob" + data = [(b"test_8024",)] + ora_df = conn.fetch_df_all( + "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +def test_8021(conn, test_env): + "8021 - fetch raw" + data = [(b"test_8025",)] + ora_df = conn.fetch_df_all( + "select utl_raw.cast_to_raw('test_8025') from dual" + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +def test_8022(skip_unless_native_boolean_supported, conn, test_env): + "8022 - fetch boolean" + data = [(True,), (False,), (False,), (True,), (True,)] + ora_df = conn.fetch_df_all( """ - Converts a date to the format required by Arrow. + select true + union all + select false + union all + select false + union all + select true + union all + select true """ - if value is not None: - if typ.unit == "s": - value = datetime.datetime(value.year, value.month, value.day) - ts = (value - datetime.datetime(1970, 1, 1)).total_seconds() - if typ.unit != "s": - ts *= 1_000_000 - return ts - - def __convert_to_array(self, data, typ): + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +def test_8023(conn, test_env): + "8023 - fetch data with multiple rows containing null values" + ora_df = conn.fetch_df_all( """ - Convert raw data to an Arrow array using pyarrow. 
+ select to_date('2025-06-12', 'YYYY-MM-DD') as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date('2025-06-11', 'YYYY-MM-DD') as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual """ - if isinstance(typ, pyarrow.Decimal128Type): - data = [ - decimal.Decimal(str(value)) if value is not None else value - for value in data - ] - elif isinstance(typ, pyarrow.TimestampType): - data = [self.__convert_date(typ, v) for v in data] - mask = [value is None for value in data] - return pyarrow.array(data, typ, mask=mask) - - def __convert_to_df(self, data): + ) + data = [ + (datetime.datetime(2025, 6, 12),), + (None,), + (None,), + (None,), + (datetime.datetime(2025, 6, 11),), + (None,), + (None,), + (None,), + (None,), + ] + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +def test_8024(skip_unless_vectors_supported, conn, test_env): + "8024 - fetch float32 vector" + + # float32 is a special case while comparing dataframe values + # Converting Dataframe cell value of type numpy.ndarray[float32] + # using .tolist() converts each value to Python float. Python + # float uses 64-bit precision causing mismatches in assertEqual. + # As a workaround we use array.array('f', src).tolist() on the + # source data + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + ora_df = conn.fetch_df_all( """ - Converts the data set to a Pandas data frame for comparison to what is - returned from the database. + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) """ - data_by_col = [[row[i] for row in data] for i in range(len(data[0]))] - fetch_decimals = oracledb.defaults.fetch_decimals - types = [ - pyarrow.decimal128(9) if fetch_decimals else pyarrow.int64(), - pyarrow.string(), - pyarrow.string(), - pyarrow.string(), - pyarrow.string(), - pyarrow.timestamp("s"), - pyarrow.decimal128(9, 2) if fetch_decimals else pyarrow.float64(), - pyarrow.decimal128(3) if fetch_decimals else pyarrow.int64(), - pyarrow.timestamp("us"), - ] - arrays = [ - self.__convert_to_array(d, t) for d, t in zip(data_by_col, types) - ] - names = [ - "ID", - "FIRSTNAME", - "LASTNAME", - "CITY", - "COUNTRY", - "DATEOFBIRTH", - "SALARY", - "CREDITSCORE", - "LASTUPDATED", - ] - pa_tab = pyarrow.Table.from_arrays(arrays, names=names) - pa_tab.validate(full=True) - return pa_tab.to_pandas() - - def __convert_df_value(self, df_val): + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8025(skip_unless_vectors_supported, conn, test_env): + "8025 - fetch float64 vector" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8, 55.9],), + ] + ora_df = conn.fetch_df_all( """ - This method converts a dataframe cell value to use with assertEqual() - For e.g. NaN and np.array cannot be compared directly. 
Values are - converted according to the following rules: - - NaN -> None - - np.array -> np.array.tolist() (Python list) + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT64) """ - if isinstance(df_val, numpy.ndarray): - return df_val.tolist() - elif pandas.isna(df_val): - return None - elif isinstance(df_val, dict): - return {k: self.__convert_df_value(v) for k, v in df_val.items()} - else: - return df_val - - def __get_data_from_df(self, df): + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8026(skip_unless_vectors_supported, conn, test_env): + "8026 - fetch int8 vector" + data = [ + ([34, -77],), + ([34, 77, 55],), + ] + ora_df = conn.fetch_df_all( """ - Returns data from the data frame in a normalized fashion suitable for - comparison. In particular, NaN values cannot be compared to one another - so they are converted to the value None for comparison purposes. + SELECT TO_VECTOR('[34, -77]', 2, INT8) + union all + SELECT TO_VECTOR('[34, 77, 55]', 3, INT8) """ - return [ - tuple(self.__convert_df_value(v) for v in row) - for row in df.itertuples(index=False, name=None) - ] - - def __populate_table(self, data): + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8027(skip_unless_vectors_supported, conn, test_env): + "8027 - fetch binary vector" + data = [ + ([3, 2, 3],), + ([3, 2],), + ] + ora_df = conn.fetch_df_all( """ - Populate the test table with the given data. + SELECT TO_VECTOR('[3, 2, 3]', 24, BINARY) + union all + SELECT TO_VECTOR('[3, 2]', 16, BINARY) """ - self.cursor.execute("delete from TestDataframe") - types = [None] * len(data[0]) - types[8] = oracledb.DB_TYPE_TIMESTAMP - self.cursor.setinputsizes(*types) - self.cursor.executemany( - """ - insert into TestDataframe ( - Id, FirstName, LastName, City, Country, - DateOfBirth, Salary, CreditScore, LastUpdated - ) values ( - :id, :first_name, :last_name, :city, :country, - :dob, :salary, :credit_score, :last_updated - ) - """, - data, - ) - self.conn.commit() - - def __test_df_interop(self, data): + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8028(skip_unless_vectors_supported, conn, test_env): + "8028 - fetch float32 vectors with None" + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + (None,), + ] + ora_df = conn.fetch_df_all( """ - Tests interoperability with external data frames using the data set - provided. 
+ SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) + union all + select NULL """ - self.__populate_table(data) - ora_df = self.conn.fetch_df_all(QUERY_SQL) - self.__validate_df(ora_df, data) - - def __test_df_batches_interop(self, data, batch_size, num_batches): + ) + assert ora_df.num_rows() == 3 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8029(skip_unless_vectors_supported, conn, test_env): + "8029 - fetch duplicate float64 vectors" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ] + ora_df = conn.fetch_df_all( """ - Tests interoperability with external data frames using the data set - provided. + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) + union all + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) """ - self.__populate_table(data) - batches = list(self.conn.fetch_df_batches(QUERY_SQL, size=batch_size)) - self.assertEqual(len(batches), num_batches) - if num_batches == 1: - self.__validate_df(batches[0], data) - else: - offset = 0 - for batch in batches: - self.__validate_df(batch, data[offset : offset + batch_size]) - offset += batch_size - - def __validate_df(self, ora_df, data): + ) + assert ora_df.num_rows() == 12 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8030(skip_unless_sparse_vectors_supported, conn, test_env): + "8030 - fetch float32 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 77.8]).tolist(), + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 9.1]).tolist(), + }, + ), + ] + ora_df = conn.fetch_df_all( """ - Validates the data frame by converting it to Pandas and comparing it - with the original data set that was used. 
+ SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT32), + 8, + FLOAT32, + SPARSE + ) + union all + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT32), + 8, + FLOAT32, + SPARSE + ) """ - raw_df = self.__convert_to_df(data) - raw_data = self.__get_data_from_df(raw_df) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, raw_data) - - def test_8000(self): - "8000 - test basic fetch of data frame" - self.__populate_table(DATASET_1) - ora_df = self.conn.fetch_df_all(QUERY_SQL) - self.assertEqual(ora_df.num_rows(), len(DATASET_1)) - self.assertEqual(ora_df.num_columns(), len(DATASET_1[0])) - - def test_8001(self): - "8001 - test conversion to external dataframe" - self.__test_df_interop(DATASET_1) - - def test_8002(self): - "8001 - test null and negative values" - self.__test_df_interop(DATASET_2) - - def test_8003(self): - "8002 - test with fetch_decimals" - with test_env.DefaultsContextManager("fetch_decimals", True): - self.__test_df_interop(DATASET_1) - - def test_8004(self): - "8003 - test null and negative values with fetch_decimals" - with test_env.DefaultsContextManager("fetch_decimals", True): - self.__test_df_interop(DATASET_2) - - def test_8005(self): - "8005 - test null and values with leading zeros" - self.__test_df_interop(DATASET_3) - - def test_8006(self): - "8005 - test null and values with leading zeros with fetch_decimals" - with test_env.DefaultsContextManager("fetch_decimals", True): - self.__test_df_interop(DATASET_3) - - def test_8007(self): - "8007 - duplicate values in the rows" - self.__test_df_interop(DATASET_4) - - def test_8008(self): - "8008 - batches without specification of size" - self.__test_df_batches_interop( - DATASET_4, batch_size=None, num_batches=1 - ) + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8031(skip_unless_sparse_vectors_supported, conn, test_env): + "8031 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + ora_df = conn.fetch_df_all( + """ + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + union all + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + """ + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) - def test_8009(self): - "8009 - batches with specification of size" - self.__test_df_batches_interop(DATASET_4, batch_size=5, num_batches=2) - - def test_8010(self): - "8010 - verify passing Arrow arrays twice works" - self.__populate_table(DATASET_1) - ora_df = self.conn.fetch_df_all(QUERY_SQL) - self.__validate_df(ora_df, DATASET_1) - self.__validate_df(ora_df, DATASET_1) - - def test_8011(self): - "8011 - verify empty data set" - self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame where Id = 4" - ora_df = self.conn.fetch_df_all(statement) - self.assertEqual(ora_df.num_rows(), 0) - - def test_8012(self): - "8012 - verify empty data set with batches" - self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame where Id = 4" - for 
ora_df in self.conn.fetch_df_batches(statement): - self.assertEqual(ora_df.num_rows(), 0) - - def test_8013(self): - "8013 - negative checks on attributes" - self.__populate_table(DATASET_1) - ora_df = self.conn.fetch_df_all(QUERY_SQL) - with self.assertRaises(IndexError): - ora_df.get_column(121) - with self.assertRaises(IndexError): - ora_df.get_column(-1) - with self.assertRaises(KeyError): - ora_df.get_column_by_name("missing_column") - - def test_8014(self): - "8014 - check unsupported error" - statement = "select cursor(select user from dual) from dual" - with self.assertRaisesFullCode("DPY-3030"): - self.conn.fetch_df_all(statement) - - def test_8015(self): - "8015 - batches with specification of size matching number of rows" - self.__test_df_batches_interop( - DATASET_2, batch_size=len(DATASET_2), num_batches=1 - ) - def test_8016(self): - "8016 - verify get_column() returns the correct value" - self.__populate_table(DATASET_1) - ora_df = self.conn.fetch_df_all(QUERY_SQL) - array = pyarrow.array(ora_df.get_column(1)) - self.assertEqual(array.to_pylist(), ["John", "Big"]) - - def test_8017(self): - "8017 - batches with size that has duplicate rows across batches" - self.__test_df_batches_interop(DATASET_4, batch_size=3, num_batches=2) - - def test_8018(self): - "8018 - fetch_decimals without precision and scale specified" - data = [(1.0,)] - with test_env.DefaultsContextManager("fetch_decimals", True): - ora_df = self.conn.fetch_df_all("select 1.0 from dual") - fetched_tab = pyarrow.Table.from_arrays( - ora_df.column_arrays(), names=ora_df.column_names() - ) - fetched_df = fetched_tab.to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - def test_8019(self): - "8019 - fetch clob" - data = [("test_8023",)] - ora_df = self.conn.fetch_df_all( - "select to_clob('test_8023') from dual" - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - def test_8020(self): - "8020 - fetch blob" - data = [(b"test_8024",)] - ora_df = self.conn.fetch_df_all( - "select to_blob(utl_raw.cast_to_raw('test_8024')) from dual" - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - def test_8021(self): - "8021 - fetch raw" - data = [(b"test_8025",)] - ora_df = self.conn.fetch_df_all( - "select utl_raw.cast_to_raw('test_8025') from dual" - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - @test_env.skip_unless_native_boolean_supported() - def test_8022(self): - "8022 - fetch boolean" - data = [(True,), (False,), (False,), (True,), (True,)] - ora_df = self.conn.fetch_df_all( +def test_8032(skip_unless_vectors_supported, conn, test_env): + "8032 - DPY-3031 - Unsupported flexible vector formats" + with test_env.assert_raises_full_code("DPY-3031"): + conn.fetch_df_all( """ - select true + SELECT TO_VECTOR('[44, 55, 89]', 3, INT8) as flex_col union all - select false - union all - select false - union all - select true - union all - select true + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) as flex_col """ ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - def test_8023(self): - "8023 - fetch data with multiple rows containing null values" - ora_df = self.conn.fetch_df_all( - """ - 
select to_date('2025-06-12', 'YYYY-MM-DD') as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date('2025-06-11', 'YYYY-MM-DD') as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - """ - ) - data = [ - (datetime.datetime(2025, 6, 12),), - (None,), - (None,), - (None,), - (datetime.datetime(2025, 6, 11),), - (None,), - (None,), - (None,), - (None,), - ] - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - @test_env.skip_unless_vectors_supported() - def test_8024(self): - "8024 - fetch float32 vector" - - # float32 is a special case while comparing dataframe values - # Converting Dataframe cell value of type numpy.ndarray[float32] - # using .tolist() converts each value to Python float. Python - # float uses 64-bit precision causing mismatches in assertEqual. - # As a workaround we use array.array('f', src).tolist() on the - # source data - data = [ - (array.array("f", [34.6, 77.8]).tolist(),), - (array.array("f", [34.6, 77.8, 55.9]).tolist(),), - ] - ora_df = self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) - union all - SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) - """ - ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_vectors_supported() - def test_8025(self): - "8025 - fetch float64 vector" - data = [ - ([34.6, 77.8],), - ([34.6, 77.8, 55.9],), - ] - ora_df = self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT64) - """ - ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_vectors_supported() - def test_8026(self): - "8026 - fetch int8 vector" - data = [ - ([34, -77],), - ([34, 77, 55],), - ] - ora_df = self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[34, -77]', 2, INT8) - union all - SELECT TO_VECTOR('[34, 77, 55]', 3, INT8) - """ - ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_vectors_supported() - def test_8027(self): - "8027 - fetch binary vector" - data = [ - ([3, 2, 3],), - ([3, 2],), - ] - ora_df = self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[3, 2, 3]', 24, BINARY) - union all - SELECT TO_VECTOR('[3, 2]', 16, BINARY) - """ - ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_vectors_supported() - def test_8028(self): - "8028 - fetch float32 vectors with None" - data = [ - (array.array("f", [34.6, 77.8]).tolist(),), - (array.array("f", [34.6, 77.8, 55.9]).tolist(),), - (None,), - ] - ora_df = self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[34.6, 77.8]', 2, 
FLOAT32) - union all - SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) - union all - select NULL - """ - ) - self.assertEqual(ora_df.num_rows(), 3) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_vectors_supported() - def test_8029(self): - "8029 - fetch duplicate float64 vectors" - data = [ - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ] - ora_df = self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - union all - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT64) - """ - ) - self.assertEqual(ora_df.num_rows(), 12) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - def test_8030(self): - "8030 - fetch float32 sparse vectors" - data = [ - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": array.array("f", [34.6, 77.8]).tolist(), - }, - ), - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": array.array("f", [34.6, 9.1]).tolist(), - }, - ), - ] - ora_df = self.conn.fetch_df_all( - """ - SELECT TO_VECTOR( - TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT32), - 8, - FLOAT32, - SPARSE - ) - union all - SELECT TO_VECTOR( - TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT32), - 8, - FLOAT32, - SPARSE - ) - """ - ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - def test_8031(self): - "8031 - fetch float64 sparse vectors" - data = [ - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 77.8], - }, - ), - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 9.1], - }, - ), - ] - ora_df = self.conn.fetch_df_all( + +def test_8033(skip_unless_sparse_vectors_supported, conn, test_env): + "8033 - DPY-4007 -fetch sparse vectors with flexible dimensions" + with test_env.assert_raises_full_code("DPY-2065"): + conn.fetch_df_all( """ SELECT TO_VECTOR( - TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT64), - 8, + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 77.8]', 7, FLOAT64), + 7, FLOAT64, SPARSE ) @@ -832,1057 +868,1056 @@ def test_8031(self): ) """ ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_vectors_supported() - def test_8032(self): - "8032 - DPY-3031 - Unsupported flexible vector formats" - with 
self.assertRaisesFullCode("DPY-3031"): - self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[44, 55, 89]', 3, INT8) as flex_col - union all - SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) as flex_col - """ - ) - @test_env.skip_unless_sparse_vectors_supported() - def test_8033(self): - "8033 - DPY-4007 -fetch sparse vectors with flexible dimensions" - with self.assertRaisesFullCode("DPY-2065"): - self.conn.fetch_df_all( - """ - SELECT TO_VECTOR( - TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 77.8]', 7, FLOAT64), - 7, - FLOAT64, - SPARSE - ) - union all - SELECT TO_VECTOR( - TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT64), - 8, - FLOAT64, - SPARSE - ) - """ - ) - def test_8034(self): - "8034 - test expressions on numeric columns" - # fill only the numeric column - credit score - dataset = [ - (1, None, None, None, None, None, None, 225, None), - (2, None, None, None, None, None, None, 365, None), - ] - - data = [ - (56.25,), - (91.25,), - ] - self.__populate_table(dataset) - - # Use numeric expression involving a column - statement = "select CreditScore/4 from TestDataFrame order by Id" - ora_df = self.conn.fetch_df_all(statement) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - def test_8035(self): - "8035 - test metadata of all data types" - now = datetime.datetime.now() - data = [ - ("NUMBERVALUE", 5, pyarrow.float64()), - ("STRINGVALUE", "String Val", pyarrow.string()), - ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), - ("NSTRINGVALUE", "NString Val", pyarrow.string()), - ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), - ("RAWVALUE", b"Raw Data", pyarrow.binary()), - ("INTVALUE", 25_387_923, pyarrow.float64()), - ("SMALLINTVALUE", 127, pyarrow.float64()), - ("REALVALUE", 125.25, pyarrow.float64()), - ("DECIMALVALUE", 91.1025, pyarrow.float64()), - ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), - ("FLOATVALUE", 125.375, pyarrow.float64()), - ("BINARYFLOATVALUE", -25, pyarrow.float32()), - ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), - ("DATEVALUE", now, pyarrow.timestamp("s")), - ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), - ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), - ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), - ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), - ] - self.cursor.execute("delete from TestAllTypes") - column_names = ",".join(n for n, v, t in data) - bind_values = ",".join(f":{i + 1}" for i in range(len(data))) - data_to_insert = tuple(v for n, v, t in data) - self.cursor.execute( - f""" - insert into TestAllTypes ({column_names}) - values ({bind_values}) - """, - data_to_insert, - ) - self.conn.commit() +def test_8034(conn, cursor, test_env): + "8034 - test expressions on numeric columns" + # fill only the numeric column - credit score + dataset = [ + (1, None, None, None, None, None, None, 225, None), + (2, None, None, None, None, None, None, 365, None), + ] + + data = [ + (56.25,), + (91.25,), + ] + _populate_table(cursor, dataset) + + # Use numeric expression involving a column + statement = "select CreditScore/4 from TestDataFrame order by Id" + ora_df = conn.fetch_df_all(statement) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_8035(conn, cursor): + "8035 - test metadata of all data types" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + 
("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.float64()), + ("SMALLINTVALUE", 127, pyarrow.float64()), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.float64()), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + conn.commit() + sql = f"select {column_names} from TestAllTypes" + ora_df = conn.fetch_df_all(sql) + expected_types = [t for n, v, t in data] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + assert actual_types == expected_types + + +def test_8036(conn, cursor, test_env): + "8036 - test metadata of all data types with fetch_decimals = True" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + ("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.decimal128(38, 0)), + ("SMALLINTVALUE", 127, pyarrow.decimal128(38, 0)), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.decimal128(20, 6)), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + conn.commit() + with test_env.defaults_context_manager("fetch_decimals", True): sql = f"select {column_names} from TestAllTypes" - ora_df = self.conn.fetch_df_all(sql) + ora_df = conn.fetch_df_all(sql) expected_types = [t for n, v, t in data] actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] - self.assertEqual(actual_types, expected_types) + 
assert actual_types == expected_types - def test_8036(self): - "8036 - test metadata of all data types with fetch_decimals = True" - now = datetime.datetime.now() - data = [ - ("NUMBERVALUE", 5, pyarrow.float64()), - ("STRINGVALUE", "String Val", pyarrow.string()), - ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), - ("NSTRINGVALUE", "NString Val", pyarrow.string()), - ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), - ("RAWVALUE", b"Raw Data", pyarrow.binary()), - ("INTVALUE", 25_387_923, pyarrow.decimal128(38, 0)), - ("SMALLINTVALUE", 127, pyarrow.decimal128(38, 0)), - ("REALVALUE", 125.25, pyarrow.float64()), - ("DECIMALVALUE", 91.1025, pyarrow.decimal128(20, 6)), - ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), - ("FLOATVALUE", 125.375, pyarrow.float64()), - ("BINARYFLOATVALUE", -25, pyarrow.float32()), - ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), - ("DATEVALUE", now, pyarrow.timestamp("s")), - ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), - ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), - ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), - ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), - ] - self.cursor.execute("delete from TestAllTypes") - column_names = ",".join(n for n, v, t in data) - bind_values = ",".join(f":{i + 1}" for i in range(len(data))) - data_to_insert = tuple(v for n, v, t in data) - self.cursor.execute( - f""" - insert into TestAllTypes ({column_names}) - values ({bind_values}) - """, - data_to_insert, + +def test_8037(skip_unless_native_boolean_supported, conn, cursor): + "8037 - test metadata with boolean type" + cursor.execute("delete from TestBooleans") + data = [(1, True, False, None), (2, False, True, True)] + cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2, BooleanCol3) + values (:1, :2, :3, :4) + """, + data, + ) + conn.commit() + + sql = "select * from TestBooleans order by IntCol" + ora_df = conn.fetch_df_all(sql) + expected_types = [ + pyarrow.int64(), + pyarrow.bool_(), + pyarrow.bool_(), + pyarrow.bool_(), + ] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + assert actual_types == expected_types + + +def test_8038(cursor, test_env): + "8038 - test NULL rows with all null values" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8039(conn, cursor): + "8039 - test repeated pyarrow table construction" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000.50, + 100, + datetime.datetime.now(), ) - self.conn.commit() - with test_env.DefaultsContextManager("fetch_decimals", True): - sql = f"select {column_names} from TestAllTypes" - ora_df = self.conn.fetch_df_all(sql) - expected_types = [t for n, v, t in data] - actual_types = [ - pyarrow.array(a).type for a in ora_df.column_arrays() - ] - self.assertEqual(actual_types, expected_types) - - @test_env.skip_unless_native_boolean_supported() - def test_8037(self): - "8037 - test metadata with boolean type" - self.cursor.execute("delete from TestBooleans") - data = [(1, True, False, None), (2, False, True, True)] - self.cursor.executemany( - """ - insert into TestBooleans - (IntCol, BooleanCol1, BooleanCol2, BooleanCol3) - values (:1, :2, :3, :4) - """, - data, + ] + _populate_table(cursor, data) + ora_df = conn.fetch_df_all(QUERY_SQL) 
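+    # Build two Arrow tables from the same OracleDataFrame; both the schema
+    # and the materialized data must be identical.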
+ table1 = pyarrow.table(ora_df) + table2 = pyarrow.table(ora_df) + assert table1.schema == table2.schema + assert table1.to_pydict() == table2.to_pydict() + + +def test_8040(conn, cursor, test_env): + "8040 - test dataframe query with multiple bind variables" + _populate_table(cursor, DATASET_2) + statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( + where_clause="where Id between :min_id and :max_id" + ) + ora_df = conn.fetch_df_all(statement, {"min_id": 2, "max_id": 3}) + assert ora_df.num_rows() == 2 + + expected_data = [row for row in DATASET_2 if row[0] in (2, 3)] + raw_df = _convert_to_df(expected_data) + raw_data = test_env.get_data_from_df(raw_df) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == raw_data + + +def test_8041(conn, test_env): + "8041 - test error handling with invalid SQL in fetch_df_batches()" + with test_env.assert_raises_full_code("ORA-00942"): + for batch in conn.fetch_df_batches("select * from NonExistentTable"): + pass + + +def test_8042(cursor, test_env): + "8042 - test partial batch (last batch smaller than batch size)" + test_data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + datetime.date(2000, 1, 1), + i * 100, + i % 800, + datetime.datetime.now(), ) - self.conn.commit() - - sql = "select * from TestBooleans order by IntCol" - ora_df = self.conn.fetch_df_all(sql) - expected_types = [ - pyarrow.int64(), - pyarrow.bool_(), - pyarrow.bool_(), - pyarrow.bool_(), - ] - actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] - self.assertEqual(actual_types, expected_types) + for i in range(1, 8) # 7 rows + ] + _test_df_batches_interop( + test_env, cursor, test_data, batch_size=3, num_batches=3 + ) - def test_8038(self): - "8038 - test NULL rows with all null values" - data = [ - (1, None, None, None, None, None, None, None, None), - (2, None, None, None, None, None, None, None, None), - ] - self.__test_df_interop(data) - def test_8039(self): - "8039 - test repeated pyarrow table construction" - data = [ - ( - 1, - "John", - "Doe", - "SF", - "USA", - datetime.date(1990, 1, 1), - 5000.50, - 100, - datetime.datetime.now(), - ) - ] - self.__populate_table(data) - ora_df = self.conn.fetch_df_all(QUERY_SQL) - table1 = pyarrow.table(ora_df) - table2 = pyarrow.table(ora_df) - self.assertEqual(table1.schema, table2.schema) - self.assertEqual(table1.to_pydict(), table2.to_pydict()) - - def test_8040(self): - "8040 - test dataframe query with multiple bind variables" - self.__populate_table(DATASET_2) - statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( - where_clause="where Id between :min_id and :max_id" +def test_8043(conn, cursor): + "8043 - test with date functions" + _populate_table(cursor, DATASET_1) + ora_df = conn.fetch_df_all( + """ + select + Id, + extract(year from DateOfBirth) as birth_year, + to_char(DateOfBirth, 'YYYY-MM') as birth_month + from TestDataFrame + order by Id + """ + ) + assert ora_df.num_rows() == len(DATASET_1) + year_col = ora_df.get_column_by_name("BIRTH_YEAR") + array = pyarrow.array(year_col) + assert array.to_pylist() == [1955, 1955] + + +def test_8044(conn, cursor): + "8044 - test column access by index bounds" + _populate_table(cursor, DATASET_1) + ora_df = conn.fetch_df_all(QUERY_SQL) + with pytest.raises(IndexError): + ora_df.get_column(ora_df.num_columns()) + + +def test_8045(cursor, test_env): + "8045 - test with different batch sizes" + _test_df_batches_interop( + test_env, cursor, DATASET_4, batch_size=1, num_batches=6 + ) + 
_test_df_batches_interop( + test_env, cursor, DATASET_4, batch_size=2, num_batches=3 + ) + + +def test_8046(cursor, test_env): + "8046 - test with very large batch size" + _test_df_batches_interop( + test_env, cursor, DATASET_1, batch_size=1000, num_batches=1 + ) + + +def test_8047(conn, test_env): + "8047 - test error handling with invalid SQL" + with test_env.assert_raises_full_code("ORA-00942"): + conn.fetch_df_all("select * from NonExistentTable") + + +def test_8048(conn, cursor, test_env): + "8048 - test error handling with invalid bind variable" + _populate_table(cursor, DATASET_1) + with test_env.assert_raises_full_code("DPY-4010", "ORA-01008"): + conn.fetch_df_all( + "select * from TestDataFrame where Id = :missing_bind" ) - ora_df = self.conn.fetch_df_all(statement, {"min_id": 2, "max_id": 3}) - self.assertEqual(ora_df.num_rows(), 2) - - expected_data = [row for row in DATASET_2 if row[0] in (2, 3)] - raw_df = self.__convert_to_df(expected_data) - raw_data = self.__get_data_from_df(raw_df) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, raw_data) - - def test_8041(self): - "8041 - test error handling with invalid SQL in fetch_df_batches()" - with self.assertRaisesFullCode("ORA-00942"): - for batch in self.conn.fetch_df_batches( - "select * from NonExistentTable" - ): - pass - - def test_8042(self): - "8042 - test partial batch (last batch smaller than batch size)" - test_data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - datetime.date(2000, 1, 1), - i * 100, - i % 800, - datetime.datetime.now(), - ) - for i in range(1, 8) # 7 rows - ] - self.__test_df_batches_interop(test_data, batch_size=3, num_batches=3) - def test_8043(self): - "8043 - test with date functions" - self.__populate_table(DATASET_1) - ora_df = self.conn.fetch_df_all( - """ - select - Id, - extract(year from DateOfBirth) as birth_year, - to_char(DateOfBirth, 'YYYY-MM') as birth_month - from TestDataFrame - order by Id - """ - ) - self.assertEqual(ora_df.num_rows(), len(DATASET_1)) - year_col = ora_df.get_column_by_name("BIRTH_YEAR") - array = pyarrow.array(year_col) - self.assertEqual(array.to_pylist(), [1955, 1955]) - - def test_8044(self): - "8044 - test column access by index bounds" - self.__populate_table(DATASET_1) - ora_df = self.conn.fetch_df_all(QUERY_SQL) - with self.assertRaises(IndexError): - ora_df.get_column(ora_df.num_columns()) - - def test_8045(self): - "8045 - test with different batch sizes" - self.__test_df_batches_interop(DATASET_4, batch_size=1, num_batches=6) - self.__test_df_batches_interop(DATASET_4, batch_size=2, num_batches=3) - - def test_8046(self): - "8046 - test with very large batch size" - self.__test_df_batches_interop( - DATASET_1, batch_size=1000, num_batches=1 - ) - def test_8047(self): - "8047 - test error handling with invalid SQL" - with self.assertRaisesFullCode("ORA-00942"): - self.conn.fetch_df_all("select * from NonExistentTable") - - def test_8048(self): - "8048 - test error handling with invalid bind variable" - self.__populate_table(DATASET_1) - with self.assertRaisesFullCode("DPY-4010", "ORA-01008"): - self.conn.fetch_df_all( - "select * from TestDataFrame where Id = :missing_bind" - ) +def test_8049(conn, cursor, test_env): + "8049 - test with single row result" + _populate_table(cursor, DATASET_1) + statement = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause="where Id = 1") + ora_df = conn.fetch_df_all(statement) + assert ora_df.num_rows() == 1 + 
_validate_df(ora_df, [DATASET_1[0]], test_env) - def test_8049(self): - "8049 - test with single row result" - self.__populate_table(DATASET_1) - statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( - where_clause="where Id = 1" - ) - ora_df = self.conn.fetch_df_all(statement) - self.assertEqual(ora_df.num_rows(), 1) - self.__validate_df(ora_df, [DATASET_1[0]]) - - def test_8050(self): - "8050 - test with calculated columns" - self.__populate_table(DATASET_1) - now = datetime.datetime.now().replace(microsecond=0) - ora_df = self.conn.fetch_df_all( - """ - select - Id, - FirstName || ' ' || LastName as full_name, - Salary * 12 as annual_salary, - :now as current_date - from TestDataFrame - order by Id - """, - [now], + +def test_8050(conn, cursor, test_env): + "8050 - test with calculated columns" + _populate_table(cursor, DATASET_1) + now = datetime.datetime.now().replace(microsecond=0) + ora_df = conn.fetch_df_all( + """ + select + Id, + FirstName || ' ' || LastName as full_name, + Salary * 12 as annual_salary, + :now as current_date + from TestDataFrame + order by Id + """, + [now], + ) + assert ora_df.num_rows() == len(DATASET_1) + assert ora_df.num_columns() == 4 + + expected_data = [] + for row in DATASET_1: + expected_row = ( + row[0], # Id + f"{row[1]} {row[2]}", # full_name + float(str(row[6] * 12)), # annual_salary + now, ) - self.assertEqual(ora_df.num_rows(), len(DATASET_1)) - self.assertEqual(ora_df.num_columns(), 4) - - expected_data = [] - for row in DATASET_1: - expected_row = ( - row[0], # Id - f"{row[1]} {row[2]}", # full_name - float(str(row[6] * 12)), # annual_salary - now, - ) - expected_data.append(expected_row) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, expected_data) - - def test_8051(self): - "8051 - test fetch_df_batches with bind variables" - batch_size = 2 - self.__populate_table(DATASET_4) - where_clause = "where Id >= :min_id" - sql = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause=where_clause) - batches = self.conn.fetch_df_batches( - sql, {"min_id": 3}, size=batch_size + expected_data.append(expected_row) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == expected_data + + +def test_8051(conn, cursor, test_env): + "8051 - test fetch_df_batches with bind variables" + batch_size = 2 + _populate_table(cursor, DATASET_4) + where_clause = "where Id >= :min_id" + sql = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause=where_clause) + batches = conn.fetch_df_batches(sql, {"min_id": 3}, size=batch_size) + expected_data = [row for row in DATASET_4 if row[0] >= 3] + offset = 0 + for batch in batches: + _validate_df( + batch, expected_data[offset : offset + batch_size], test_env ) - expected_data = [row for row in DATASET_4 if row[0] >= 3] - offset = 0 - for batch in batches: - self.__validate_df( - batch, expected_data[offset : offset + batch_size] - ) - offset += batch_size + offset += batch_size - def test_8052(self): - "8052 - test with large data" - data = [ - (1, "A" * 41_000, b"Very long description " * 5_000), - (2, "B" * 35_000, b"Another long text " * 10_000), - (3, "C" * 72_000, b"Even longer content " * 20_000), - ] - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame - (Id, LongData, LongRawData) - values (:1, :2, :3) - """, - data, - ) - self.conn.commit() +def test_8052(conn, cursor, test_env): + "8052 - test with large data" + data = [ 
+ (1, "A" * 41_000, b"Very long description " * 5_000), + (2, "B" * 35_000, b"Another long text " * 10_000), + (3, "C" * 72_000, b"Even longer content " * 20_000), + ] - ora_df = self.conn.fetch_df_all( - """ - select Id, LongData, LongRawData - from TestDataFrame - order by Id - """ + cursor.execute("delete from TestDataFrame") + cursor.executemany( + """ + insert into TestDataFrame + (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + data, + ) + conn.commit() + + ora_df = conn.fetch_df_all( + """ + select Id, LongData, LongRawData + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +def test_8053(conn, cursor): + "8053 - test fetching from an empty table with fetch_df_batches" + cursor.execute("delete from TestDataFrame") + batches = list(conn.fetch_df_batches(QUERY_SQL, size=10)) + assert len(batches) == 1 + assert batches[0].num_rows() == 0 + + +def test_8054(conn, cursor, test_env): + "8054 - fetch clob in batches" + cursor.execute("delete from TestDataFrame") + test_string = "A" * 10000 + data = [(test_string,)] * 3 + cursor.executemany( + """ + insert into TestDataFrame (LongData) + values (:1) + """, + data, + ) + conn.commit() + + offset = 0 + batch_size = 2 + sql = "select LongData from TestDataFrame" + for batch in conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data[offset : offset + batch_size] + offset += batch_size + + +def test_8055(conn, cursor, test_env): + "8055 - fetch blob in batches" + cursor.execute("delete from TestDataFrame") + test_string = b"B" * 10000 + data = [(test_string,)] * 4 + cursor.executemany( + """ + insert into TestDataFrame (LongRawData) + values (:1) + """, + data, + ) + conn.commit() + + offset = 0 + batch_size = 3 + sql = "select LongRawData from TestDataFrame" + for batch in conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data[offset : offset + batch_size] + offset += batch_size + + +def test_8056(conn, cursor, test_env): + "8056 - test with empty strings" + data = [ + ( + 1, + "", + "", + "City", + "Country", + datetime.datetime(2000, 1, 1), + 1000.0, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First", + "Last", + "", + "", + datetime.datetime(2000, 1, 1), + 2000.0, + 200, + datetime.datetime.now(), + ), + ] + _populate_table(cursor, data) + expected_data = [ + tuple(None if v == "" else v for v in row) for row in data + ] + ora_df = conn.fetch_df_all(QUERY_SQL) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == expected_data + + +def test_8057(cursor, test_env): + "8057 - test with unicode characters" + data = [ + ( + 1, + "Jöhn", + "Döe", + "München", + "Deutschland", + datetime.date(1980, 5, 15), + 5000, + 300, + datetime.datetime.now(), + ), + ( + 2, + "?", + "?", + "??", + "??", + datetime.date(1990, 8, 20), + 8000, + 400, + datetime.datetime.now(), + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8058(cursor, test_env): + "8072 - test with very old dates" + data = [ + ( + 1, + "Ancient", + "One", + "Babylon", + "Mesopotamia", + datetime.date(1, 1, 1), + 0, + 0, + datetime.datetime.now(), + ), + ( + 2, + "Medieval", + "Person", + "London", + "England", + 
datetime.date(1200, 6, 15), + 10, + 50, + datetime.datetime.now(), + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8059(cursor, test_env): + "8059 - test with future dates" + data = [ + ( + 1, + "Future", + "Person", + "Mars", + "Solar System", + datetime.date(3000, 1, 1), + 100000, + 900, + datetime.datetime.now(), + ), + ( + 2, + "Distant", + "Future", + "Andromeda", + "Galaxy", + datetime.date(9999, 12, 31), + 999999, + 999, + datetime.datetime.now(), + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8060(cursor, test_env): + "8060 - test with exactly arraysize rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - def test_8053(self): - "8053 - test fetching from an empty table with fetch_df_batches" - self.cursor.execute("delete from TestDataFrame") - batches = list(self.conn.fetch_df_batches(QUERY_SQL, size=10)) - self.assertEqual(len(batches), 1) - self.assertEqual(batches[0].num_rows(), 0) - - def test_8054(self): - "8054 - fetch clob in batches" - self.cursor.execute("delete from TestDataFrame") - test_string = "A" * 10000 - data = [(test_string,)] * 3 - self.cursor.executemany( - """ - insert into TestDataFrame (LongData) - values (:1) - """, - data, + for i in range(1, cursor.arraysize + 1) + ] + _test_df_interop(test_env, cursor, data) + + +def test_8061(cursor, test_env): + "8061 - test with arraysize+1 rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, ) - self.conn.commit() - - offset = 0 - batch_size = 2 - sql = "select LongData from TestDataFrame" - for batch in self.conn.fetch_df_batches(sql, size=batch_size): - fetched_df = pyarrow.table(batch).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data[offset : offset + batch_size]) - offset += batch_size - - def test_8055(self): - "8055 - fetch blob in batches" - self.cursor.execute("delete from TestDataFrame") - test_string = b"B" * 10000 - data = [(test_string,)] * 4 - self.cursor.executemany( - """ - insert into TestDataFrame (LongRawData) - values (:1) - """, - data, + for i in range(1, cursor.arraysize + 2) + ] + _test_df_interop(test_env, cursor, data) + + +def test_8062(cursor, test_env): + "8062 - test with odd arraysize" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, ) - self.conn.commit() - - offset = 0 - batch_size = 3 - sql = "select LongRawData from TestDataFrame" - for batch in self.conn.fetch_df_batches(sql, size=batch_size): - fetched_df = pyarrow.table(batch).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data[offset : offset + batch_size]) - offset += batch_size - - def test_8056(self): - "8056 - test with empty strings" - data = [ - ( - 1, - "", - "", - "City", - "Country", - datetime.datetime(2000, 1, 1), - 1000.0, - 100, - datetime.datetime.now(), - ), - ( - 2, - "First", - "Last", - "", - "", - datetime.datetime(2000, 1, 1), - 2000.0, - 200, - datetime.datetime.now(), - ), - ] - self.__populate_table(data) - expected_data 
= [ - tuple(None if v == "" else v for v in row) for row in data - ] - ora_df = self.conn.fetch_df_all(QUERY_SQL) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, expected_data) - - def test_8057(self): - "8057 - test with unicode characters" - data = [ - ( - 1, - "Jöhn", - "Döe", - "München", - "Deutschland", - datetime.date(1980, 5, 15), - 5000, - 300, - datetime.datetime.now(), - ), - ( - 2, - "?", - "?", - "??", - "??", - datetime.date(1990, 8, 20), - 8000, - 400, - datetime.datetime.now(), - ), - ] - self.__test_df_interop(data) - - def test_8058(self): - "8072 - test with very old dates" - data = [ - ( - 1, - "Ancient", - "One", - "Babylon", - "Mesopotamia", - datetime.date(1, 1, 1), - 0, - 0, - datetime.datetime.now(), - ), - ( - 2, - "Medieval", - "Person", - "London", - "England", - datetime.date(1200, 6, 15), - 10, - 50, - datetime.datetime.now(), - ), - ] - self.__test_df_interop(data) - - def test_8059(self): - "8059 - test with future dates" - data = [ - ( - 1, - "Future", - "Person", - "Mars", - "Solar System", - datetime.date(3000, 1, 1), - 100000, - 900, - datetime.datetime.now(), - ), - ( - 2, - "Distant", - "Future", - "Andromeda", - "Galaxy", - datetime.date(9999, 12, 31), - 999999, - 999, - datetime.datetime.now(), - ), - ] - self.__test_df_interop(data) - - def test_8060(self): - "8060 - test with exactly arraysize rows" - test_date = datetime.date(2000, 1, 1) - now = datetime.datetime.now() - data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - test_date, - i * 100, - i % 800, - now, - ) - for i in range(1, self.cursor.arraysize + 1) - ] - self.__test_df_interop(data) - - def test_8061(self): - "8061 - test with arraysize+1 rows" - test_date = datetime.date(2000, 1, 1) - now = datetime.datetime.now() - data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - test_date, - i * 100, - i % 800, - now, - ) - for i in range(1, self.cursor.arraysize + 2) - ] - self.__test_df_interop(data) - - def test_8062(self): - "8062 - test with odd arraysize" - test_date = datetime.date(2000, 1, 1) - now = datetime.datetime.now() - data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - test_date, - i * 100, - i % 800, - now, - ) - for i in range(1, 48) - ] - self.__test_df_interop(data) - - def test_8063(self): - "8063 - test with single row" - data = [ - ( - 1, - "John", - "Doe", - "SF", - "USA", - datetime.date(1990, 1, 1), - 5000, - 100, - datetime.datetime.now(), - ) - ] - self.__test_df_interop(data) - - def test_8064(self): - "8064 - test multiple rows with NULL values in different columns" - now = datetime.datetime.now() - test_date = datetime.datetime(2000, 1, 1) - data = [ - (1, None, "Last1", "City1", "Country1", None, None, 100, None), - (2, "First2", None, None, "Country2", test_date, 2000, None, None), - (3, "First3", "Last3", None, None, None, 3000, 300, now), - (4, None, None, None, None, None, None, None, None), - ] - self.__test_df_interop(data) - - def test_8065(self): - "8065 - test single column with all NULL values" - data = [ - ( - 1, - None, - "Last1", - "City1", - "Country1", - datetime.date(2000, 1, 1), - 1000, - 100, - datetime.datetime.now(), - ), - ( - 2, - None, - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - 200, - datetime.datetime.now(), - ), - ( - 3, - None, - "Last3", - "City3", - "Country3", - datetime.date(2002, 1, 1), - 3000, - 300, - datetime.datetime.now(), - ), - ] - 
self.__test_df_interop(data) - - def test_8066(self): - "8066 - test last column NULL in each row" - data = [ - ( - 1, - "First1", - "Last1", - "City1", - "Country1", - datetime.date(2000, 1, 1), - 1000, - 100, - None, - ), - ( - 2, - "First2", - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - 200, - None, - ), - ( - 3, - "First3", - "Last3", - "City3", - "Country3", - datetime.date(2002, 1, 1), - 3000, - 300, - None, - ), - ] - self.__test_df_interop(data) - - def test_8067(self): - "8067 - test alternating NULL/non-NULL values in a column" - data = [ - ( - 1, - "First1", - None, - "City1", - None, - datetime.date(2000, 1, 1), - None, - 100, - datetime.datetime.now(), - ), - (2, "First2", "Last2", None, "Country2", None, 2000, None, None), - ( - 3, - "First3", - None, - "City3", - None, - datetime.date(2002, 1, 1), - None, - 300, - datetime.datetime.now(), - ), - (4, "First4", "Last4", None, "Country4", None, 4000, None, None), - ] - self.__test_df_interop(data) - - def test_8068(self): - "8068 - test all columns NULL except one" - now = datetime.datetime.now() - test_date = datetime.date(2001, 1, 1) - data = [ - (1, None, None, None, None, None, None, None, now), - (2, None, None, None, None, test_date, None, None, None), - (3, "First3", None, None, None, None, None, None, None), - (4, None, None, None, "Country4", None, None, None, None), - ] - self.__test_df_interop(data) - - def test_8069(self): - "8069 - test all date columns with all NULL values" - data = [ - (1, "First1", "Last1", "City1", "Country1", None, 1000, 100, None), - (2, "First2", "Last2", "City2", "Country2", None, 2000, 200, None), - (3, "First3", "Last3", "City3", "Country3", None, 3000, 300, None), - ] - self.__test_df_interop(data) - - def test_8070(self): - "8070 - test NULL values in numeric columns" - data = [ - ( - 1, - "First1", - "Last1", - "City1", - "Country1", - datetime.date(2000, 1, 1), - None, - 100, - datetime.datetime.now(), - ), - ( - 2, - "First2", - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - None, - datetime.datetime.now(), - ), - ( - 3, - "First3", - "Last3", - "City3", - "Country3", - datetime.date(2002, 1, 1), - None, - None, - datetime.datetime.now(), - ), - ] - self.__test_df_interop(data) - - def test_8071(self): - "8071 - test multiple consecutive NULL rows" - data = [ - (1, None, None, None, None, None, None, None, None), - (2, None, None, None, None, None, None, None, None), - (3, None, None, None, None, None, None, None, None), - ( - 4, - "First4", - "Last4", - "City4", - "Country4", - datetime.date(2000, 1, 1), - 4000, - 400, - datetime.datetime.now(), - ), - ] - self.__test_df_interop(data) - - def test_8072(self): - "8072 - test NULL rows interspersed with data rows" - data = [ - (1, None, None, None, None, None, None, None, None), - ( - 2, - "First2", - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - 200, - datetime.datetime.now(), - ), - (3, None, None, None, None, None, None, None, None), - ( - 4, - "First4", - "Last4", - "City4", - "Country4", - datetime.date(2003, 1, 1), - 4000, - 400, - datetime.datetime.now(), - ), - (5, None, None, None, None, None, None, None, None), - ] - self.__test_df_interop(data) - - def test_8073(self): - "8073 - test multiple NULL rows with different NULL columns" - data = [ - (1, None, "Last1", "City1", "Country1", None, 1000, 100, None), - ( - 2, - "First2", - None, - "City2", - "Country2", - datetime.date(2001, 1, 1), - None, - 200, - None, - ), - ( - 3, - None, - None, - 
"City3", - "Country3", - None, - None, - 300, - datetime.datetime.now(), - ), - ( - 4, - "First4", - "Last4", - None, - None, - datetime.date(2003, 1, 1), - 4000, - None, - None, - ), - ] - self.__test_df_interop(data) - - def test_8074(self): - "8074 - test NULL rows with alternating NULL patterns" - data = [ - ( - 1, - None, - "Last1", - None, - "Country1", - None, - 1000, - None, - datetime.datetime.now(), - ), - ( - 2, - "First2", - None, - "City2", - None, - datetime.date(2001, 1, 1), - None, - 200, - None, - ), - ( - 3, - None, - "Last3", - None, - "Country3", - None, - 3000, - None, - datetime.datetime.now(), - ), - ( - 4, - "First4", - None, - "City4", - None, - datetime.date(2003, 1, 1), - None, - 400, - None, - ), - ] - self.__test_df_interop(data) - - def test_8075(self): - "8075 - test multiple NULL rows with partial NULL groups" - data = [ - ( - 1, - None, - None, - "City1", - "Country1", - None, - None, - 100, - datetime.datetime.now(), - ), - ( - 2, - None, - None, - "City2", - "Country2", - None, - None, - 200, - datetime.datetime.now(), - ), - ( - 3, - "First3", - "Last3", - None, - None, - datetime.date(2002, 1, 1), - 3000, - None, - None, - ), - ( - 4, - "First4", - "Last4", - None, - None, - datetime.date(2003, 1, 1), - 4000, - None, - None, - ), - ] - self.__test_df_interop(data) - - def test_8076(self): - "8076 - test multiple NULL rows with varying NULL counts" - data = [ - (1, None, None, None, None, None, None, None, None), - (2, "First2", None, "City2", None, None, 2000, None, None), - ( - 3, - None, - "Last3", - None, - "Country3", - datetime.date(2002, 1, 1), - None, - 300, - None, - ), - ( - 4, - "First4", - "Last4", - "City4", - "Country4", - None, - 4000, - 400, - datetime.datetime.now(), - ), - ] - self.__test_df_interop(data) - - def test_8077(self): - "8077 - test fetching large integers" - data = (-(2**40), 2**41) - ora_df = self.conn.fetch_df_all( - """ - select - cast(:1 as number(15)), - cast(:2 as number(15)) - from dual - """, - data, + for i in range(1, 48) + ] + _test_df_interop(test_env, cursor, data) + + +def test_8063(cursor, test_env): + "8063 - test with single row" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000, + 100, + datetime.datetime.now(), ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual([data], self.__get_data_from_df(fetched_df)) - - -if __name__ == "__main__": - test_env.run_test_cases() + ] + _test_df_interop(test_env, cursor, data) + + +def test_8064(cursor, test_env): + "8064 - test multiple rows with NULL values in different columns" + now = datetime.datetime.now() + test_date = datetime.datetime(2000, 1, 1) + data = [ + (1, None, "Last1", "City1", "Country1", None, None, 100, None), + (2, "First2", None, None, "Country2", test_date, 2000, None, None), + (3, "First3", "Last3", None, None, None, 3000, 300, now), + (4, None, None, None, None, None, None, None, None), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8065(cursor, test_env): + "8065 - test single column with all NULL values" + data = [ + ( + 1, + None, + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + ( + 3, + None, + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + datetime.datetime.now(), + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8066(cursor, 
test_env): + "8066 - test last column NULL in each row" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + None, + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + None, + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + None, + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8067(cursor, test_env): + "8067 - test alternating NULL/non-NULL values in a column" + data = [ + ( + 1, + "First1", + None, + "City1", + None, + datetime.date(2000, 1, 1), + None, + 100, + datetime.datetime.now(), + ), + (2, "First2", "Last2", None, "Country2", None, 2000, None, None), + ( + 3, + "First3", + None, + "City3", + None, + datetime.date(2002, 1, 1), + None, + 300, + datetime.datetime.now(), + ), + (4, "First4", "Last4", None, "Country4", None, 4000, None, None), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8068(cursor, test_env): + "8068 - test all columns NULL except one" + now = datetime.datetime.now() + test_date = datetime.date(2001, 1, 1) + data = [ + (1, None, None, None, None, None, None, None, now), + (2, None, None, None, None, test_date, None, None, None), + (3, "First3", None, None, None, None, None, None, None), + (4, None, None, None, "Country4", None, None, None, None), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8069(cursor, test_env): + "8069 - test all date columns with all NULL values" + data = [ + (1, "First1", "Last1", "City1", "Country1", None, 1000, 100, None), + (2, "First2", "Last2", "City2", "Country2", None, 2000, 200, None), + (3, "First3", "Last3", "City3", "Country3", None, 3000, 300, None), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8070(cursor, test_env): + "8070 - test NULL values in numeric columns" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + None, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + None, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + None, + None, + datetime.datetime.now(), + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8071(cursor, test_env): + "8071 - test multiple consecutive NULL rows" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2000, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8072(cursor, test_env): + "8072 - test NULL rows interspersed with data rows" + data = [ + (1, None, None, None, None, None, None, None, None), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2003, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + (5, None, None, None, None, None, None, None, None), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8073(cursor, test_env): + "8073 - test multiple NULL rows with different NULL columns" + data = [ + (1, None, "Last1", "City1", "Country1", None, 1000, 100, None), + ( + 
2, + "First2", + None, + "City2", + "Country2", + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + None, + "City3", + "Country3", + None, + None, + 300, + datetime.datetime.now(), + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8074(cursor, test_env): + "8074 - test NULL rows with alternating NULL patterns" + data = [ + ( + 1, + None, + "Last1", + None, + "Country1", + None, + 1000, + None, + datetime.datetime.now(), + ), + ( + 2, + "First2", + None, + "City2", + None, + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + "Last3", + None, + "Country3", + None, + 3000, + None, + datetime.datetime.now(), + ), + ( + 4, + "First4", + None, + "City4", + None, + datetime.date(2003, 1, 1), + None, + 400, + None, + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8075(cursor, test_env): + "8075 - test multiple NULL rows with partial NULL groups" + data = [ + ( + 1, + None, + None, + "City1", + "Country1", + None, + None, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + None, + "City2", + "Country2", + None, + None, + 200, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + None, + None, + datetime.date(2002, 1, 1), + 3000, + None, + None, + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8076(cursor, test_env): + "8076 - test multiple NULL rows with varying NULL counts" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, "First2", None, "City2", None, None, 2000, None, None), + ( + 3, + None, + "Last3", + None, + "Country3", + datetime.date(2002, 1, 1), + None, + 300, + None, + ), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + None, + 4000, + 400, + datetime.datetime.now(), + ), + ] + _test_df_interop(test_env, cursor, data) + + +def test_8077(conn, test_env): + "8077 - test fetching large integers" + data = (-(2**40), 2**41) + ora_df = conn.fetch_df_all( + """ + select + cast(:1 as number(15)), + cast(:2 as number(15)) + from dual + """, + data, + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert [data] == test_env.get_data_from_df(fetched_df) diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 51d416a2..0b6762aa 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -31,11 +31,8 @@ import decimal import oracledb -import numpy -import pandas import pyarrow - -import test_env +import pytest # basic DATASET_1 = [ @@ -248,1411 +245,1454 @@ QUERY_SQL = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause="") -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - - def __convert_date(self, typ, value): - """ - Converts a date to the format required by Arrow. - """ - if value is not None: - if typ.unit == "s": - value = datetime.datetime(value.year, value.month, value.day) - ts = (value - datetime.datetime(1970, 1, 1)).total_seconds() - if typ.unit != "s": - ts *= 1_000_000 - return ts - - def __convert_to_array(self, data, typ): - """ - Convert raw data to an Arrow array using pyarrow. 
- """ - if isinstance(typ, pyarrow.Decimal128Type): - data = [ - decimal.Decimal(str(value)) if value is not None else value - for value in data - ] - elif isinstance(typ, pyarrow.TimestampType): - data = [self.__convert_date(typ, v) for v in data] - mask = [value is None for value in data] - return pyarrow.array(data, typ, mask=mask) - - def __convert_to_df(self, data): - """ - Converts the data set to a Pandas data frame for comparison to what is - returned from the database. - """ - data_by_col = [[row[i] for row in data] for i in range(len(data[0]))] - fetch_decimals = oracledb.defaults.fetch_decimals - types = [ - pyarrow.decimal128(9) if fetch_decimals else pyarrow.int64(), - pyarrow.string(), - pyarrow.string(), - pyarrow.string(), - pyarrow.string(), - pyarrow.timestamp("s"), - pyarrow.decimal128(9, 2) if fetch_decimals else pyarrow.float64(), - pyarrow.decimal128(3) if fetch_decimals else pyarrow.int64(), - pyarrow.timestamp("us"), - ] - arrays = [ - self.__convert_to_array(d, t) for d, t in zip(data_by_col, types) - ] - names = [ - "ID", - "FIRSTNAME", - "LASTNAME", - "CITY", - "COUNTRY", - "DATEOFBIRTH", - "SALARY", - "CREDITSCORE", - "LASTUPDATED", - ] - pa_tab = pyarrow.Table.from_arrays(arrays, names=names) - pa_tab.validate(full=True) - return pa_tab.to_pandas() +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass - def __convert_df_value(self, df_val): - """ - This method converts a dataframe cell value to use with assertEqual() - For e.g. NaN and np.array cannot be compared directly. Values are - converted according to the following rules: - - NaN -> None - - np.array -> np.array.tolist() (Python list) - """ - if isinstance(df_val, numpy.ndarray): - return df_val.tolist() - elif pandas.isna(df_val): - return None - elif isinstance(df_val, dict): - return {k: self.__convert_df_value(v) for k, v in df_val.items()} - else: - return df_val - - def __get_data_from_df(self, df): - """ - Returns data from the data frame in a normalized fashion suitable for - comparison. In particular, NaN values cannot be compared to one another - so they are converted to the value None for comparison purposes. - """ - return [ - tuple(self.__convert_df_value(v) for v in row) - for row in df.itertuples(index=False, name=None) - ] - async def __populate_table(self, data): - """ - Populate the test table with the given data. - """ - await self.cursor.execute("delete from TestDataframe") - types = [None] * len(data[0]) - types[8] = oracledb.DB_TYPE_TIMESTAMP - self.cursor.setinputsizes(*types) - await self.cursor.executemany( - """ - insert into TestDataframe ( - Id, FirstName, LastName, City, Country, - DateOfBirth, Salary, CreditScore, LastUpdated - ) values ( - :id, :first_name, :last_name, :city, :country, - :dob, :salary, :credit_score, :last_updated - ) - """, - data, - ) - await self.conn.commit() +def _convert_date(typ, value): + """ + Converts a date to the format required by Arrow. + """ + if value is not None: + if typ.unit == "s": + value = datetime.datetime(value.year, value.month, value.day) + ts = (value - datetime.datetime(1970, 1, 1)).total_seconds() + if typ.unit != "s": + ts *= 1_000_000 + return ts - async def __test_df_interop(self, data): - """ - Tests interoperability with external data frames using the data set - provided. 
- """ - await self.__populate_table(data) - ora_df = await self.conn.fetch_df_all(QUERY_SQL) - self.__validate_df(ora_df, data) - async def __test_df_batches_interop(self, data, batch_size, num_batches): - """ - Tests interoperability with external data frames using the data set - provided. - """ - await self.__populate_table(data) - batches = [ - df - async for df in self.conn.fetch_df_batches( - QUERY_SQL, size=batch_size - ) +def _convert_to_array(data, typ): + """ + Convert raw data to an Arrow array using pyarrow. + """ + if isinstance(typ, pyarrow.Decimal128Type): + data = [ + decimal.Decimal(str(value)) if value is not None else value + for value in data ] - self.assertEqual(len(batches), num_batches) - if num_batches == 1: - self.__validate_df(batches[0], data) - else: - offset = 0 - for batch in batches: - self.__validate_df(batch, data[offset : offset + batch_size]) - offset += batch_size - - def __validate_df(self, ora_df, data): - """ - Validates the data frame by converting it to Pandas and comparing it - with the original data set that was used. + elif isinstance(typ, pyarrow.TimestampType): + data = [_convert_date(typ, v) for v in data] + mask = [value is None for value in data] + return pyarrow.array(data, typ, mask=mask) + + +def _convert_to_df(data): + """ + Converts the data set to a Pandas data frame for comparison to what is + returned from the database. + """ + data_by_col = [[row[i] for row in data] for i in range(len(data[0]))] + fetch_decimals = oracledb.defaults.fetch_decimals + types = [ + pyarrow.decimal128(9) if fetch_decimals else pyarrow.int64(), + pyarrow.string(), + pyarrow.string(), + pyarrow.string(), + pyarrow.string(), + pyarrow.timestamp("s"), + pyarrow.decimal128(9, 2) if fetch_decimals else pyarrow.float64(), + pyarrow.decimal128(3) if fetch_decimals else pyarrow.int64(), + pyarrow.timestamp("us"), + ] + arrays = [_convert_to_array(d, t) for d, t in zip(data_by_col, types)] + names = [ + "ID", + "FIRSTNAME", + "LASTNAME", + "CITY", + "COUNTRY", + "DATEOFBIRTH", + "SALARY", + "CREDITSCORE", + "LASTUPDATED", + ] + pa_tab = pyarrow.Table.from_arrays(arrays, names=names) + pa_tab.validate(full=True) + return pa_tab.to_pandas() + + +async def _populate_table(cursor, data): + """ + Populate the test table with the given data. 
+ """ + await cursor.execute("delete from TestDataframe") + types = [None] * len(data[0]) + types[8] = oracledb.DB_TYPE_TIMESTAMP + cursor.setinputsizes(*types) + await cursor.executemany( """ - raw_df = self.__convert_to_df(data) - raw_data = self.__get_data_from_df(raw_df) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, raw_data) - - async def test_8100(self): - "8100 - test basic fetch of data frame" - await self.__populate_table(DATASET_1) - ora_df = await self.conn.fetch_df_all(QUERY_SQL) - self.assertEqual(ora_df.num_rows(), len(DATASET_1)) - self.assertEqual(ora_df.num_columns(), len(DATASET_1[0])) - - async def test_8101(self): - "8101 - test conversion to external dataframe" - await self.__test_df_interop(DATASET_1) - - async def test_8102(self): - "8101 - test null and negative values" - await self.__test_df_interop(DATASET_2) - - async def test_8103(self): - "8102 - test with fetch_decimals" - with test_env.DefaultsContextManager("fetch_decimals", True): - await self.__test_df_interop(DATASET_1) - - async def test_8104(self): - "8103 - test null and negative values with fetch_decimals" - with test_env.DefaultsContextManager("fetch_decimals", True): - await self.__test_df_interop(DATASET_2) - - async def test_8105(self): - "8105 - test null and values with leading zeros" - await self.__test_df_interop(DATASET_3) - - async def test_8106(self): - "8105 - test null and values with leading zeros with fetch_decimals" - with test_env.DefaultsContextManager("fetch_decimals", True): - await self.__test_df_interop(DATASET_3) - - async def test_8107(self): - "8107 - duplicate values in the rows" - await self.__test_df_interop(DATASET_4) - - async def test_8108(self): - "8108 - batches without specification of size" - await self.__test_df_batches_interop( - DATASET_4, batch_size=None, num_batches=1 + insert into TestDataframe ( + Id, FirstName, LastName, City, Country, + DateOfBirth, Salary, CreditScore, LastUpdated + ) values ( + :id, :first_name, :last_name, :city, :country, + :dob, :salary, :credit_score, :last_updated ) - - async def test_8109(self): - "8109 - batches with specification of size" - await self.__test_df_batches_interop( - DATASET_4, batch_size=5, num_batches=2 + """, + data, + ) + await cursor.connection.commit() + + +async def _test_df_interop(test_env, cursor, data): + """ + Tests interoperability with external data frames using the data set + provided. + """ + await _populate_table(cursor, data) + ora_df = await cursor.connection.fetch_df_all(QUERY_SQL) + _validate_df(ora_df, data, test_env) + + +async def _test_df_batches_interop( + test_env, cursor, data, batch_size, num_batches +): + """ + Tests interoperability with external data frames using the data set + provided. 
+ """ + await _populate_table(cursor, data) + batches = [ + df + async for df in cursor.connection.fetch_df_batches( + QUERY_SQL, size=batch_size ) + ] + assert len(batches) == num_batches + if num_batches == 1: + _validate_df(batches[0], data, test_env) + else: + offset = 0 + for batch in batches: + _validate_df(batch, data[offset : offset + batch_size], test_env) + offset += batch_size - async def test_8110(self): - "8110 - verify passing Arrow arrays twice works" - await self.__populate_table(DATASET_1) - ora_df = await self.conn.fetch_df_all(QUERY_SQL) - self.__validate_df(ora_df, DATASET_1) - self.__validate_df(ora_df, DATASET_1) - - async def test_8111(self): - "8111 - verify empty data set" - await self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame where Id = 4" - ora_df = await self.conn.fetch_df_all(statement) - self.assertEqual(ora_df.num_rows(), 0) - - async def test_8112(self): - "8112 - verify empty data set with batches" - await self.__populate_table(DATASET_1) - statement = "select * from TestDataFrame where Id = 4" - async for ora_df in self.conn.fetch_df_batches(statement): - self.assertEqual(ora_df.num_rows(), 0) - - async def test_8113(self): - "8113 - negative checks on attributes" - await self.__populate_table(DATASET_1) - ora_df = await self.conn.fetch_df_all(QUERY_SQL) - with self.assertRaises(IndexError): - ora_df.get_column(121) - with self.assertRaises(IndexError): - ora_df.get_column(-1) - with self.assertRaises(KeyError): - ora_df.get_column_by_name("missing_column") - - async def test_8114(self): - "8114 - check unsupported error" - statement = "select cursor(select user from dual) from dual" - with self.assertRaisesFullCode("DPY-3030"): - await self.conn.fetch_df_all(statement) - - async def test_8115(self): - "8115 - batches with specification of size matching number of rows" - await self.__test_df_batches_interop( - DATASET_2, batch_size=len(DATASET_2), num_batches=1 - ) - async def test_8116(self): - "8116 - batches with size that has duplicate rows across batches" - await self.__test_df_batches_interop( - DATASET_4, batch_size=3, num_batches=2 - ) +def _validate_df(ora_df, data, test_env): + """ + Validates the data frame by converting it to Pandas and comparing it + with the original data set that was used. 
+ """ + raw_df = _convert_to_df(data) + raw_data = test_env.get_data_from_df(raw_df) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == raw_data - async def test_8117(self): - "8117 - fetch_decimals without precision and scale specified" - data = [(1.0,)] - with test_env.DefaultsContextManager("fetch_decimals", True): - ora_df = await self.conn.fetch_df_all("select 1.0 from dual") - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - async def test_8118(self): - "8118 - fetch clob" - data = [("test_8123",)] - ora_df = await self.conn.fetch_df_all( - "select to_clob('test_8123') from dual" - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - async def test_8119(self): - "8119 - fetch blob" - data = [(b"test_8124",)] - ora_df = await self.conn.fetch_df_all( - "select to_blob(utl_raw.cast_to_raw('test_8124')) from dual" - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - @test_env.skip_unless_native_boolean_supported() - async def test_8120(self): - "8120 - fetch boolean" - data = [(True,), (False,), (False,), (True,), (True,)] - ora_df = await self.conn.fetch_df_all( - """ - select true - union all - select false - union all - select false - union all - select true - union all - select true - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - @test_env.skip_unless_vectors_supported() - async def test_8121(self): - "8121 - fetch float32 vector" - data = [ - (array.array("f", [34.6, 77.8]).tolist(),), - (array.array("f", [34.6, 77.8, 55.9]).tolist(),), - ] - ora_df = await self.conn.fetch_df_all( - """ - SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) - union all - SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) - """ - ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) +async def test_8100(async_conn, async_cursor): + "8100 - test basic fetch of data frame" + await _populate_table(async_cursor, DATASET_1) + ora_df = await async_conn.fetch_df_all(QUERY_SQL) + assert ora_df.num_rows() == len(DATASET_1) + assert ora_df.num_columns() == len(DATASET_1[0]) - @test_env.skip_unless_sparse_vectors_supported() - async def test_8122(self): - "8122 - fetch float64 sparse vectors" - data = [ - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 77.8], - }, - ), - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 9.1], - }, - ), - ] - ora_df = await self.conn.fetch_df_all( - """ - SELECT TO_VECTOR( - TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT64), - 8, - FLOAT64, - SPARSE - ) - union all - SELECT TO_VECTOR( - TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT64), - 8, - FLOAT64, - SPARSE - ) - """ - ) - self.assertEqual(ora_df.num_rows(), 2) - self.assertEqual(ora_df.num_columns(), 1) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - async def test_8123(self): - "8123 - fetch data with multiple rows containing null values" - ora_df = await self.conn.fetch_df_all( - """ - select to_date('2025-06-12', 
'YYYY-MM-DD') as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date('2025-06-11', 'YYYY-MM-DD') as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - union all - select to_date(null) as data from dual - """ - ) - data = [ - (datetime.datetime(2025, 6, 12),), - (None,), - (None,), - (None,), - (datetime.datetime(2025, 6, 11),), - (None,), - (None,), - (None,), - (None,), - ] - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - async def test_8124(self): - "8124 - test metadata of all data types" - now = datetime.datetime.now() - data = [ - ("NUMBERVALUE", 5, pyarrow.float64()), - ("STRINGVALUE", "String Val", pyarrow.string()), - ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), - ("NSTRINGVALUE", "NString Val", pyarrow.string()), - ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), - ("RAWVALUE", b"Raw Data", pyarrow.binary()), - ("INTVALUE", 25_387_923, pyarrow.float64()), - ("SMALLINTVALUE", 127, pyarrow.float64()), - ("REALVALUE", 125.25, pyarrow.float64()), - ("DECIMALVALUE", 91.1025, pyarrow.float64()), - ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), - ("FLOATVALUE", 125.375, pyarrow.float64()), - ("BINARYFLOATVALUE", -25, pyarrow.float32()), - ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), - ("DATEVALUE", now, pyarrow.timestamp("s")), - ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), - ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), - ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), - ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), - ] - await self.cursor.execute("delete from TestAllTypes") - column_names = ",".join(n for n, v, t in data) - bind_values = ",".join(f":{i + 1}" for i in range(len(data))) - data_to_insert = tuple(v for n, v, t in data) - await self.cursor.execute( - f""" - insert into TestAllTypes ({column_names}) - values ({bind_values}) - """, - data_to_insert, - ) - await self.conn.commit() - sql = f"select {column_names} from TestAllTypes" - ora_df = await self.conn.fetch_df_all(sql) - expected_types = [t for n, v, t in data] - actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] - self.assertEqual(actual_types, expected_types) +async def test_8101(async_cursor, test_env): + "8101 - test conversion to external dataframe" + await _test_df_interop(test_env, async_cursor, DATASET_1) - async def test_8125(self): - "8125 - test metadata of all data types with fetch_decimals = True" - now = datetime.datetime.now() - data = [ - ("NUMBERVALUE", 5, pyarrow.float64()), - ("STRINGVALUE", "String Val", pyarrow.string()), - ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), - ("NSTRINGVALUE", "NString Val", pyarrow.string()), - ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), - ("RAWVALUE", b"Raw Data", pyarrow.binary()), - ("INTVALUE", 25_387_923, pyarrow.decimal128(38, 0)), - ("SMALLINTVALUE", 127, pyarrow.decimal128(38, 0)), - ("REALVALUE", 125.25, pyarrow.float64()), - ("DECIMALVALUE", 91.1025, pyarrow.decimal128(20, 6)), - ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), - ("FLOATVALUE", 125.375, pyarrow.float64()), - ("BINARYFLOATVALUE", -25, 
pyarrow.float32()), - ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), - ("DATEVALUE", now, pyarrow.timestamp("s")), - ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), - ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), - ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), - ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), - ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), - ] - await self.cursor.execute("delete from TestAllTypes") - column_names = ",".join(n for n, v, t in data) - bind_values = ",".join(f":{i + 1}" for i in range(len(data))) - data_to_insert = tuple(v for n, v, t in data) - await self.cursor.execute( - f""" - insert into TestAllTypes ({column_names}) - values ({bind_values}) - """, - data_to_insert, - ) - await self.conn.commit() - with test_env.DefaultsContextManager("fetch_decimals", True): - sql = f"select {column_names} from TestAllTypes" - ora_df = await self.conn.fetch_df_all(sql) - expected_types = [t for n, v, t in data] - actual_types = [ - pyarrow.array(a).type for a in ora_df.column_arrays() - ] - self.assertEqual(actual_types, expected_types) - - @test_env.skip_unless_native_boolean_supported() - async def test_8126(self): - "8126 - test metadata with boolean type" - await self.cursor.execute("delete from TestBooleans") - data = [(1, True, False, None), (2, False, True, True)] - await self.cursor.executemany( - """ - insert into TestBooleans - (IntCol, BooleanCol1, BooleanCol2, BooleanCol3) - values (:1, :2, :3, :4) - """, - data, - ) - await self.conn.commit() - - sql = "select * from TestBooleans order by IntCol" - ora_df = await self.conn.fetch_df_all(sql) - expected_types = [ - pyarrow.int64(), - pyarrow.bool_(), - pyarrow.bool_(), - pyarrow.bool_(), - ] - actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] - self.assertEqual(actual_types, expected_types) - async def test_8127(self): - "8127 - test NULL rows with all null values" - data = [ - (1, None, None, None, None, None, None, None, None), - (2, None, None, None, None, None, None, None, None), - ] - await self.__test_df_interop(data) +async def test_8102(async_cursor, test_env): + "8101 - test null and negative values" + await _test_df_interop(test_env, async_cursor, DATASET_2) - async def test_8128(self): - "8128 - test repeated pyarrow table construction" - data = [ - ( - 1, - "John", - "Doe", - "SF", - "USA", - datetime.date(1990, 1, 1), - 5000.50, - 100, - datetime.datetime.now(), - ) - ] - await self.__populate_table(data) - ora_df = await self.conn.fetch_df_all(QUERY_SQL) - table1 = pyarrow.table(ora_df) - table2 = pyarrow.table(ora_df) - self.assertEqual(table1.schema, table2.schema) - self.assertEqual(table1.to_pydict(), table2.to_pydict()) - - async def test_8129(self): - "8129 - test dataframe query with multiple bind variables" - await self.__populate_table(DATASET_2) - statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( - where_clause="where Id between :min_id and :max_id" - ) - ora_df = await self.conn.fetch_df_all( - statement, {"min_id": 2, "max_id": 3} - ) - self.assertEqual(ora_df.num_rows(), 2) - expected_data = [row for row in DATASET_2 if row[0] in (2, 3)] - raw_df = self.__convert_to_df(expected_data) - raw_data = self.__get_data_from_df(raw_df) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, raw_data) - - async def test_8130(self): - "8130 - test error handling with invalid SQL in fetch_df_batches()" - with 
self.assertRaisesFullCode("ORA-00942"): - async for batch in self.conn.fetch_df_batches( - "select * from NonExistentTable" - ): - pass - - async def test_8131(self): - "8131 - test partial batch (last batch smaller than batch size)" - test_data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - datetime.date(2000, 1, 1), - i * 100, - i % 800, - datetime.datetime.now(), - ) - for i in range(1, 8) # 7 rows - ] - await self.__test_df_batches_interop( - test_data, batch_size=3, num_batches=3 - ) +async def test_8103(async_cursor, test_env): + "8102 - test with fetch_decimals" + with test_env.defaults_context_manager("fetch_decimals", True): + await _test_df_interop(test_env, async_cursor, DATASET_1) - async def test_8132(self): - "8132 - test with date functions" - await self.__populate_table(DATASET_1) - ora_df = await self.conn.fetch_df_all( - """ - select - Id, - extract(year from DateOfBirth) as birth_year, - to_char(DateOfBirth, 'YYYY-MM') as birth_month - from TestDataFrame - order by Id - """ - ) - self.assertEqual(ora_df.num_rows(), len(DATASET_1)) - year_col = ora_df.get_column_by_name("BIRTH_YEAR") - array = pyarrow.array(year_col) - self.assertEqual(array.to_pylist(), [1955, 1955]) - - async def test_8133(self): - "8133 - test column access by index bounds" - await self.__populate_table(DATASET_1) - ora_df = await self.conn.fetch_df_all(QUERY_SQL) - with self.assertRaises(IndexError): - ora_df.get_column(ora_df.num_columns()) - - async def test_8134(self): - "8134 - test with different batch sizes" - await self.__test_df_batches_interop( - DATASET_4, batch_size=1, num_batches=6 - ) - await self.__test_df_batches_interop( - DATASET_4, batch_size=2, num_batches=3 - ) - async def test_8135(self): - "8135 - test with very large batch size" - await self.__test_df_batches_interop( - DATASET_1, batch_size=1000, num_batches=1 - ) +async def test_8104(async_cursor, test_env): + "8103 - test null and negative values with fetch_decimals" + with test_env.defaults_context_manager("fetch_decimals", True): + await _test_df_interop(test_env, async_cursor, DATASET_2) - async def test_8136(self): - "8136 - test error handling with invalid SQL" - with self.assertRaisesFullCode("ORA-00942"): - await self.conn.fetch_df_all("select * from NonExistentTable") - - async def test_8137(self): - "8137 - test error handling with invalid bind variable" - await self.__populate_table(DATASET_1) - with self.assertRaisesFullCode("DPY-4010", "ORA-01008"): - await self.conn.fetch_df_all( - "select * from TestDataFrame where Id = :missing_bind" - ) - async def test_8138(self): - "8138 - test with single row result" - await self.__populate_table(DATASET_1) - statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( - where_clause="where Id = 1" - ) - ora_df = await self.conn.fetch_df_all(statement) - self.assertEqual(ora_df.num_rows(), 1) - self.__validate_df(ora_df, [DATASET_1[0]]) - - async def test_8139(self): - "8139 - test with calculated columns" - await self.__populate_table(DATASET_1) - now = datetime.datetime.now().replace(microsecond=0) - ora_df = await self.conn.fetch_df_all( - """ - select - Id, - FirstName || ' ' || LastName as full_name, - Salary * 12 as annual_salary, - :now as current_date - from TestDataFrame - order by Id - """, - [now], - ) - self.assertEqual(ora_df.num_rows(), len(DATASET_1)) - self.assertEqual(ora_df.num_columns(), 4) - - expected_data = [] - for row in DATASET_1: - expected_row = ( - row[0], # Id - f"{row[1]} {row[2]}", # full_name - float(str(row[6] * 12)), # annual_salary - 
now, - ) - expected_data.append(expected_row) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, expected_data) - - async def test_8140(self): - "8140 - test fetch_df_batches with bind variables" - batch_size = 2 - await self.__populate_table(DATASET_4) - where_clause = "where Id >= :min_id" - sql = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause=where_clause) - expected_data = [row for row in DATASET_4 if row[0] >= 3] - offset = 0 - async for batch in self.conn.fetch_df_batches( - sql, {"min_id": 3}, size=batch_size - ): - self.__validate_df( - batch, expected_data[offset : offset + batch_size] - ) - offset += batch_size +async def test_8105(async_cursor, test_env): + "8105 - test null and values with leading zeros" + await _test_df_interop(test_env, async_cursor, DATASET_3) - async def test_8141(self): - "8141 - test with large data" - data = [ - (1, "A" * 41_000, b"Very long description " * 5_000), - (2, "B" * 35_000, b"Another long text " * 10_000), - (3, "C" * 72_000, b"Even longer content " * 20_000), - ] - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame - (Id, LongData, LongRawData) - values (:1, :2, :3) - """, - data, - ) - await self.conn.commit() - - ora_df = await self.conn.fetch_df_all( - """ - select Id, LongData, LongRawData - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data) - - async def test_8142(self): - "8142 - test fetching from an empty table with fetch_df_batches" - await self.cursor.execute("delete from TestDataFrame") - batches = [ - b async for b in self.conn.fetch_df_batches(QUERY_SQL, size=10) - ] - self.assertEqual(len(batches), 1) - self.assertEqual(batches[0].num_rows(), 0) - - async def test_8143(self): - "8143 - fetch clob in batches" - await self.cursor.execute("delete from TestDataFrame") - test_string = "A" * 10000 - data = [(test_string,)] * 3 - await self.cursor.executemany( - """ - insert into TestDataFrame (LongData) - values (:1) - """, - data, - ) - await self.conn.commit() +async def test_8106(async_cursor, test_env): + "8105 - test null and values with leading zeros with fetch_decimals" + with test_env.defaults_context_manager("fetch_decimals", True): + await _test_df_interop(test_env, async_cursor, DATASET_3) - offset = 0 - batch_size = 2 - sql = "select LongData from TestDataFrame" - async for batch in self.conn.fetch_df_batches(sql, size=batch_size): - fetched_df = pyarrow.table(batch).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, data[offset : offset + batch_size]) - offset += batch_size - async def test_8144(self): - "8144 - fetch blob in batches" - await self.cursor.execute("delete from TestDataFrame") - test_string = b"B" * 10000 - data = [(test_string,)] * 4 - await self.cursor.executemany( - """ - insert into TestDataFrame (LongRawData) - values (:1) - """, - data, - ) - await self.conn.commit() +async def test_8107(async_cursor, test_env): + "8107 - duplicate values in the rows" + await _test_df_interop(test_env, async_cursor, DATASET_4) - offset = 0 - batch_size = 3 - sql = "select LongRawData from TestDataFrame" - async for batch in self.conn.fetch_df_batches(sql, size=batch_size): - fetched_df = pyarrow.table(batch).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - 
self.assertEqual(fetched_data, data[offset : offset + batch_size]) - offset += batch_size - async def test_8145(self): - "8145 - test with empty strings" - data = [ - ( - 1, - "", - "", - "City", - "Country", - datetime.datetime(2000, 1, 1), - 1000.0, - 100, - datetime.datetime.now(), - ), - ( - 2, - "First", - "Last", - "", - "", - datetime.datetime(2000, 1, 1), - 2000.0, - 200, - datetime.datetime.now(), - ), - ] - await self.__populate_table(data) - expected_data = [ - tuple(None if v == "" else v for v in row) for row in data - ] - ora_df = await self.conn.fetch_df_all(QUERY_SQL) - fetched_df = pyarrow.table(ora_df).to_pandas() - fetched_data = self.__get_data_from_df(fetched_df) - self.assertEqual(fetched_data, expected_data) +async def test_8108(async_cursor, test_env): + "8108 - batches without specification of size" + await _test_df_batches_interop( + test_env, async_cursor, DATASET_4, batch_size=None, num_batches=1 + ) - async def test_8146(self): - "8146 - test with unicode characters" - data = [ - ( - 1, - "Jöhn", - "Döe", - "München", - "Deutschland", - datetime.date(1980, 5, 15), - 5000, - 300, - datetime.datetime.now(), - ), - ( - 2, - "?", - "?", - "??", - "??", - datetime.date(1990, 8, 20), - 8000, - 400, - datetime.datetime.now(), - ), - ] - await self.__test_df_interop(data) - async def test_8147(self): - "8147 - test with very old dates" - data = [ - ( - 1, - "Ancient", - "One", - "Babylon", - "Mesopotamia", - datetime.date(1, 1, 1), - 0, - 0, - datetime.datetime.now(), - ), - ( - 2, - "Medieval", - "Person", - "London", - "England", - datetime.date(1200, 6, 15), - 10, - 50, - datetime.datetime.now(), - ), - ] - await self.__test_df_interop(data) +async def test_8109(async_cursor, test_env): + "8109 - batches with specification of size" + await _test_df_batches_interop( + test_env, async_cursor, DATASET_4, batch_size=5, num_batches=2 + ) - async def test_8148(self): - "8148 - test with future dates" - data = [ - ( - 1, - "Future", - "Person", - "Mars", - "Solar System", - datetime.date(3000, 1, 1), - 100000, - 900, - datetime.datetime.now(), - ), - ( - 2, - "Distant", - "Future", - "Andromeda", - "Galaxy", - datetime.date(9999, 12, 31), - 999999, - 999, - datetime.datetime.now(), - ), - ] - await self.__test_df_interop(data) - async def test_8149(self): - "8149 - test with exactly arraysize rows" - test_date = datetime.date(2000, 1, 1) - now = datetime.datetime.now() - data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - test_date, - i * 100, - i % 800, - now, - ) - for i in range(1, self.cursor.arraysize + 1) - ] - await self.__test_df_interop(data) +async def test_8110(async_conn, async_cursor, test_env): + "8110 - verify passing Arrow arrays twice works" + await _populate_table(async_cursor, DATASET_1) + ora_df = await async_conn.fetch_df_all(QUERY_SQL) + _validate_df(ora_df, DATASET_1, test_env) + _validate_df(ora_df, DATASET_1, test_env) - async def test_8150(self): - "8150 - test with arraysize+1 rows" - test_date = datetime.date(2000, 1, 1) - now = datetime.datetime.now() - data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - test_date, - i * 100, - i % 800, - now, - ) - for i in range(1, self.cursor.arraysize + 2) - ] - await self.__test_df_interop(data) - async def test_8151(self): - "8151 - test with odd arraysize" - test_date = datetime.date(2000, 1, 1) - now = datetime.datetime.now() - data = [ - ( - i, - f"Name{i}", - f"Last{i}", - "City", - "Country", - test_date, - i * 100, - i % 800, - now, - ) - for i in range(1, 48) - 
] - await self.__test_df_interop(data) +async def test_8111(async_conn, async_cursor): + "8111 - verify empty data set" + await _populate_table(async_cursor, DATASET_1) + statement = "select * from TestDataFrame where Id = 4" + ora_df = await async_conn.fetch_df_all(statement) + assert ora_df.num_rows() == 0 - async def test_8152(self): - "8152 - test with single row" - data = [ - ( - 1, - "John", - "Doe", - "SF", - "USA", - datetime.date(1990, 1, 1), - 5000, - 100, - datetime.datetime.now(), - ) - ] - await self.__test_df_interop(data) - async def test_8153(self): - "8153 - test multiple rows with NULL values in different columns" - now = datetime.datetime.now() - test_date = datetime.datetime(2000, 1, 1) - data = [ - (1, None, "Last1", "City1", "Country1", None, None, 100, None), - (2, "First2", None, None, "Country2", test_date, 2000, None, None), - (3, "First3", "Last3", None, None, None, 3000, 300, now), - (4, None, None, None, None, None, None, None, None), - ] - await self.__test_df_interop(data) +async def test_8112(async_conn, async_cursor): + "8112 - verify empty data set with batches" + await _populate_table(async_cursor, DATASET_1) + statement = "select * from TestDataFrame where Id = 4" + async for ora_df in async_conn.fetch_df_batches(statement): + assert ora_df.num_rows() == 0 - async def test_8154(self): - "8154 - test single column with all NULL values" - data = [ - ( - 1, - None, - "Last1", - "City1", - "Country1", - datetime.date(2000, 1, 1), - 1000, - 100, - datetime.datetime.now(), - ), - ( - 2, - None, - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - 200, - datetime.datetime.now(), - ), - ( - 3, - None, - "Last3", - "City3", - "Country3", - datetime.date(2002, 1, 1), - 3000, - 300, - datetime.datetime.now(), - ), - ] - await self.__test_df_interop(data) - async def test_8155(self): - "8155 - test last column NULL in each row" - data = [ - ( - 1, - "First1", - "Last1", - "City1", - "Country1", - datetime.date(2000, 1, 1), - 1000, - 100, - None, - ), - ( - 2, - "First2", - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - 200, - None, - ), - ( - 3, - "First3", - "Last3", - "City3", - "Country3", - datetime.date(2002, 1, 1), - 3000, - 300, - None, - ), - ] - await self.__test_df_interop(data) +async def test_8113(async_conn, async_cursor): + "8113 - negative checks on attributes" + await _populate_table(async_cursor, DATASET_1) + ora_df = await async_conn.fetch_df_all(QUERY_SQL) + with pytest.raises(IndexError): + ora_df.get_column(121) + with pytest.raises(IndexError): + ora_df.get_column(-1) + with pytest.raises(KeyError): + ora_df.get_column_by_name("missing_column") - async def test_8156(self): - "8156 - test alternating NULL/non-NULL values in a column" - data = [ - ( - 1, - "First1", - None, - "City1", - None, - datetime.date(2000, 1, 1), - None, - 100, - datetime.datetime.now(), - ), - (2, "First2", "Last2", None, "Country2", None, 2000, None, None), - ( - 3, - "First3", - None, - "City3", - None, - datetime.date(2002, 1, 1), - None, - 300, - datetime.datetime.now(), - ), - (4, "First4", "Last4", None, "Country4", None, 4000, None, None), - ] - await self.__test_df_interop(data) - async def test_8157(self): - "8157 - test all columns NULL except one" - now = datetime.datetime.now() - test_date = datetime.date(2001, 1, 1) - data = [ - (1, None, None, None, None, None, None, None, now), - (2, None, None, None, None, test_date, None, None, None), - (3, "First3", None, None, None, None, None, None, None), - (4, None, 
None, None, "Country4", None, None, None, None), - ] - await self.__test_df_interop(data) +async def test_8114(async_conn, test_env): + "8114 - check unsupported error" + statement = "select cursor(select user from dual) from dual" + with test_env.assert_raises_full_code("DPY-3030"): + await async_conn.fetch_df_all(statement) - async def test_8158(self): - "8158 - test all date columns with all NULL values" - data = [ - (1, "First1", "Last1", "City1", "Country1", None, 1000, 100, None), - (2, "First2", "Last2", "City2", "Country2", None, 2000, 200, None), - (3, "First3", "Last3", "City3", "Country3", None, 3000, 300, None), - ] - await self.__test_df_interop(data) - async def test_8159(self): - "8159 - test NULL values in numeric columns" - data = [ - ( - 1, - "First1", - "Last1", - "City1", - "Country1", - datetime.date(2000, 1, 1), - None, - 100, - datetime.datetime.now(), - ), - ( - 2, - "First2", - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - None, - datetime.datetime.now(), - ), - ( - 3, - "First3", - "Last3", - "City3", - "Country3", - datetime.date(2002, 1, 1), - None, - None, - datetime.datetime.now(), - ), - ] - await self.__test_df_interop(data) +async def test_8115(async_cursor, test_env): + "8115 - batches with specification of size matching number of rows" + await _test_df_batches_interop( + test_env, + async_cursor, + DATASET_2, + batch_size=len(DATASET_2), + num_batches=1, + ) - async def test_8160(self): - "8160 - test multiple consecutive NULL rows" - data = [ - (1, None, None, None, None, None, None, None, None), - (2, None, None, None, None, None, None, None, None), - (3, None, None, None, None, None, None, None, None), - ( - 4, - "First4", - "Last4", - "City4", - "Country4", - datetime.date(2000, 1, 1), - 4000, - 400, - datetime.datetime.now(), - ), - ] - await self.__test_df_interop(data) - async def test_8161(self): - "8161 - test NULL rows interspersed with data rows" - data = [ - (1, None, None, None, None, None, None, None, None), - ( - 2, - "First2", - "Last2", - "City2", - "Country2", - datetime.date(2001, 1, 1), - 2000, - 200, - datetime.datetime.now(), - ), - (3, None, None, None, None, None, None, None, None), - ( - 4, - "First4", - "Last4", - "City4", - "Country4", - datetime.date(2003, 1, 1), - 4000, - 400, - datetime.datetime.now(), - ), - (5, None, None, None, None, None, None, None, None), - ] - await self.__test_df_interop(data) +async def test_8116(async_cursor, test_env): + "8116 - batches with size that has duplicate rows across batches" + await _test_df_batches_interop( + test_env, async_cursor, DATASET_4, batch_size=3, num_batches=2 + ) - async def test_8162(self): - "8162 - test multiple NULL rows with different NULL columns" - data = [ - (1, None, "Last1", "City1", "Country1", None, 1000, 100, None), - ( - 2, - "First2", - None, - "City2", - "Country2", - datetime.date(2001, 1, 1), - None, - 200, - None, - ), - ( - 3, - None, - None, - "City3", - "Country3", - None, - None, - 300, - datetime.datetime.now(), - ), - ( - 4, - "First4", - "Last4", - None, - None, - datetime.date(2003, 1, 1), - 4000, - None, - None, - ), - ] - await self.__test_df_interop(data) - async def test_8163(self): - "8163 - test NULL rows with alternating NULL patterns" - data = [ - ( - 1, - None, - "Last1", - None, - "Country1", - None, - 1000, - None, - datetime.datetime.now(), - ), - ( - 2, - "First2", - None, - "City2", - None, - datetime.date(2001, 1, 1), - None, - 200, - None, - ), - ( - 3, - None, - "Last3", - None, - "Country3", - None, - 
3000, - None, - datetime.datetime.now(), - ), - ( - 4, - "First4", - None, - "City4", - None, - datetime.date(2003, 1, 1), - None, - 400, - None, - ), - ] - await self.__test_df_interop(data) +async def test_8117(async_conn, test_env): + "8117 - fetch_decimals without precision and scale specified" + data = [(1.0,)] + with test_env.defaults_context_manager("fetch_decimals", True): + ora_df = await async_conn.fetch_df_all("select 1.0 from dual") + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +async def test_8118(async_conn, test_env): + "8118 - fetch clob" + data = [("test_8123",)] + ora_df = await async_conn.fetch_df_all( + "select to_clob('test_8123') from dual" + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +async def test_8119(async_conn, test_env): + "8119 - fetch blob" + data = [(b"test_8124",)] + ora_df = await async_conn.fetch_df_all( + "select to_blob(utl_raw.cast_to_raw('test_8124')) from dual" + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +async def test_8120( + skip_unless_native_boolean_supported, async_conn, test_env +): + "8120 - fetch boolean" + data = [(True,), (False,), (False,), (True,), (True,)] + ora_df = await async_conn.fetch_df_all( + """ + select true + union all + select false + union all + select false + union all + select true + union all + select true + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +async def test_8121(skip_unless_vectors_supported, async_conn, test_env): + "8121 - fetch float32 vector" + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + ora_df = await async_conn.fetch_df_all( + """ + SELECT TO_VECTOR('[34.6, 77.8]', 2, FLOAT32) + union all + SELECT TO_VECTOR('[34.6, 77.8, 55.9]', 3, FLOAT32) + """ + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_8122( + skip_unless_sparse_vectors_supported, async_conn, test_env +): + "8122 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + ora_df = await async_conn.fetch_df_all( + """ + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 77.8]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + union all + SELECT TO_VECTOR( + TO_VECTOR('[34.6, 0, 0, 0, 0, 0, 0, 9.1]', 8, FLOAT64), + 8, + FLOAT64, + SPARSE + ) + """ + ) + assert ora_df.num_rows() == 2 + assert ora_df.num_columns() == 1 + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) - async def test_8164(self): - "8164 - test multiple NULL rows with partial NULL groups" - data = [ - ( - 1, - None, - None, - "City1", - "Country1", - None, - None, - 100, - datetime.datetime.now(), - ), - ( - 2, - None, - None, - "City2", - "Country2", - None, - None, - 200, - datetime.datetime.now(), - ), - ( - 3, - "First3", - "Last3", - None, - None, - datetime.date(2002, 1, 1), - 3000, - None, - None, - ), - ( - 4, - "First4", - "Last4", - None, - None, - datetime.date(2003, 
1, 1), - 4000, - None, - None, - ), - ] - await self.__test_df_interop(data) - async def test_8165(self): - "8165 - test multiple NULL rows with varying NULL counts" - data = [ - (1, None, None, None, None, None, None, None, None), - (2, "First2", None, "City2", None, None, 2000, None, None), - ( - 3, - None, - "Last3", - None, - "Country3", - datetime.date(2002, 1, 1), - None, - 300, - None, - ), - ( - 4, - "First4", - "Last4", - "City4", - "Country4", - None, - 4000, - 400, - datetime.datetime.now(), - ), - ] - await self.__test_df_interop(data) - - async def test_8166(self): - "8166 - test fetching large integers" - data = (-(2**40), 2**41) - ora_df = await self.conn.fetch_df_all( - """ - select - cast(:1 as number(15)), - cast(:2 as number(15)) - from dual - """, - data, +async def test_8123(async_conn, test_env): + "8123 - fetch data with multiple rows containing null values" + ora_df = await async_conn.fetch_df_all( + """ + select to_date('2025-06-12', 'YYYY-MM-DD') as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date('2025-06-11', 'YYYY-MM-DD') as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + union all + select to_date(null) as data from dual + """ + ) + data = [ + (datetime.datetime(2025, 6, 12),), + (None,), + (None,), + (None,), + (datetime.datetime(2025, 6, 11),), + (None,), + (None,), + (None,), + (None,), + ] + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +async def test_8124(async_conn, async_cursor): + "8124 - test metadata of all data types" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + ("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.float64()), + ("SMALLINTVALUE", 127, pyarrow.float64()), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.float64()), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + await async_cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + await async_cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + await async_conn.commit() + sql = f"select {column_names} from TestAllTypes" + ora_df = await async_conn.fetch_df_all(sql) + expected_types = [t for n, v, t in data] + actual_types = [pyarrow.array(a).type for a in 
ora_df.column_arrays()] + assert actual_types == expected_types + + +async def test_8125(async_conn, async_cursor, test_env): + "8125 - test metadata of all data types with fetch_decimals = True" + now = datetime.datetime.now() + data = [ + ("NUMBERVALUE", 5, pyarrow.float64()), + ("STRINGVALUE", "String Val", pyarrow.string()), + ("FIXEDCHARVALUE", "Fixed Char", pyarrow.string()), + ("NSTRINGVALUE", "NString Val", pyarrow.string()), + ("NFIXEDCHARVALUE", "NFixedChar", pyarrow.string()), + ("RAWVALUE", b"Raw Data", pyarrow.binary()), + ("INTVALUE", 25_387_923, pyarrow.decimal128(38, 0)), + ("SMALLINTVALUE", 127, pyarrow.decimal128(38, 0)), + ("REALVALUE", 125.25, pyarrow.float64()), + ("DECIMALVALUE", 91.1025, pyarrow.decimal128(20, 6)), + ("DOUBLEPRECISIONVALUE", 87.625, pyarrow.float64()), + ("FLOATVALUE", 125.375, pyarrow.float64()), + ("BINARYFLOATVALUE", -25, pyarrow.float32()), + ("BINARYDOUBLEVALUE", -175.5, pyarrow.float64()), + ("DATEVALUE", now, pyarrow.timestamp("s")), + ("TIMESTAMPVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPTZVALUE", now, pyarrow.timestamp("us")), + ("TIMESTAMPLTZVALUE", now, pyarrow.timestamp("us")), + ("CLOBVALUE", "CLOB Value", pyarrow.large_string()), + ("NCLOBVALUE", "NCLOB Value", pyarrow.large_string()), + ("BLOBVALUE", b"BLOB Value", pyarrow.large_binary()), + ] + await async_cursor.execute("delete from TestAllTypes") + column_names = ",".join(n for n, v, t in data) + bind_values = ",".join(f":{i + 1}" for i in range(len(data))) + data_to_insert = tuple(v for n, v, t in data) + await async_cursor.execute( + f""" + insert into TestAllTypes ({column_names}) + values ({bind_values}) + """, + data_to_insert, + ) + await async_conn.commit() + with test_env.defaults_context_manager("fetch_decimals", True): + sql = f"select {column_names} from TestAllTypes" + ora_df = await async_conn.fetch_df_all(sql) + expected_types = [t for n, v, t in data] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + assert actual_types == expected_types + + +async def test_8126(skip_unless_native_boolean_supported, async_cursor): + "8126 - test metadata with boolean type" + await async_cursor.execute("delete from TestBooleans") + data = [(1, True, False, None), (2, False, True, True)] + await async_cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2, BooleanCol3) + values (:1, :2, :3, :4) + """, + data, + ) + await async_cursor.connection.commit() + + sql = "select * from TestBooleans order by IntCol" + ora_df = await async_cursor.connection.fetch_df_all(sql) + expected_types = [ + pyarrow.int64(), + pyarrow.bool_(), + pyarrow.bool_(), + pyarrow.bool_(), + ] + actual_types = [pyarrow.array(a).type for a in ora_df.column_arrays()] + assert actual_types == expected_types + + +async def test_8127(async_conn, async_cursor, test_env): + "8127 - test NULL rows with all null values" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8128(async_conn, async_cursor): + "8128 - test repeated pyarrow table construction" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000.50, + 100, + datetime.datetime.now(), ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual([data], self.__get_data_from_df(fetched_df)) + ] + await _populate_table(async_cursor, data) + ora_df = await async_conn.fetch_df_all(QUERY_SQL) + table1 = 
pyarrow.table(ora_df) + table2 = pyarrow.table(ora_df) + assert table1.schema == table2.schema + assert table1.to_pydict() == table2.to_pydict() + + +async def test_8129(async_conn, async_cursor, test_env): + "8129 - test dataframe query with multiple bind variables" + await _populate_table(async_cursor, DATASET_2) + statement = QUERY_SQL_WITH_WHERE_CLAUSE.format( + where_clause="where Id between :min_id and :max_id" + ) + ora_df = await async_conn.fetch_df_all( + statement, {"min_id": 2, "max_id": 3} + ) + assert ora_df.num_rows() == 2 + + expected_data = [row for row in DATASET_2 if row[0] in (2, 3)] + raw_df = _convert_to_df(expected_data) + raw_data = test_env.get_data_from_df(raw_df) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == raw_data + + +async def test_8130(async_conn, test_env): + "8130 - test error handling with invalid SQL in fetch_df_batches()" + with test_env.assert_raises_full_code("ORA-00942"): + async for batch in async_conn.fetch_df_batches( + "select * from NonExistentTable" + ): + pass + + +async def test_8131(async_cursor, test_env): + "8131 - test partial batch (last batch smaller than batch size)" + test_data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + datetime.date(2000, 1, 1), + i * 100, + i % 800, + datetime.datetime.now(), + ) + for i in range(1, 8) # 7 rows + ] + await _test_df_batches_interop( + test_env, async_cursor, test_data, batch_size=3, num_batches=3 + ) -if __name__ == "__main__": - test_env.run_test_cases() +async def test_8132(async_conn, async_cursor): + "8132 - test with date functions" + await _populate_table(async_cursor, DATASET_1) + ora_df = await async_conn.fetch_df_all( + """ + select + Id, + extract(year from DateOfBirth) as birth_year, + to_char(DateOfBirth, 'YYYY-MM') as birth_month + from TestDataFrame + order by Id + """ + ) + assert ora_df.num_rows() == len(DATASET_1) + year_col = ora_df.get_column_by_name("BIRTH_YEAR") + array = pyarrow.array(year_col) + assert array.to_pylist() == [1955, 1955] + + +async def test_8133(async_conn, async_cursor): + "8133 - test column access by index bounds" + await _populate_table(async_cursor, DATASET_1) + ora_df = await async_conn.fetch_df_all(QUERY_SQL) + with pytest.raises(IndexError): + ora_df.get_column(ora_df.num_columns()) + + +async def test_8134(async_cursor, test_env): + "8134 - test with different batch sizes" + await _test_df_batches_interop( + test_env, async_cursor, DATASET_4, batch_size=1, num_batches=6 + ) + await _test_df_batches_interop( + test_env, async_cursor, DATASET_4, batch_size=2, num_batches=3 + ) + + +async def test_8135(async_cursor, test_env): + "8135 - test with very large batch size" + await _test_df_batches_interop( + test_env, async_cursor, DATASET_1, batch_size=1000, num_batches=1 + ) + + +async def test_8136(async_conn, test_env): + "8136 - test error handling with invalid SQL" + with test_env.assert_raises_full_code("ORA-00942"): + await async_conn.fetch_df_all("select * from NonExistentTable") + + +async def test_8137(async_conn, async_cursor, test_env): + "8137 - test error handling with invalid bind variable" + await _populate_table(async_cursor, DATASET_1) + with test_env.assert_raises_full_code("DPY-4010", "ORA-01008"): + await async_conn.fetch_df_all( + "select * from TestDataFrame where Id = :missing_bind" + ) + + +async def test_8138(async_conn, async_cursor, test_env): + "8138 - test with single row result" + await _populate_table(async_cursor, 
DATASET_1) + statement = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause="where Id = 1") + ora_df = await async_conn.fetch_df_all(statement) + assert ora_df.num_rows() == 1 + _validate_df(ora_df, [DATASET_1[0]], test_env) + + +async def test_8139(async_conn, async_cursor, test_env): + "8139 - test with calculated columns" + await _populate_table(async_cursor, DATASET_1) + now = datetime.datetime.now().replace(microsecond=0) + ora_df = await async_conn.fetch_df_all( + """ + select + Id, + FirstName || ' ' || LastName as full_name, + Salary * 12 as annual_salary, + :now as current_date + from TestDataFrame + order by Id + """, + [now], + ) + assert ora_df.num_rows() == len(DATASET_1) + assert ora_df.num_columns() == 4 + + expected_data = [] + for row in DATASET_1: + expected_row = ( + row[0], # Id + f"{row[1]} {row[2]}", # full_name + float(str(row[6] * 12)), # annual_salary + now, + ) + expected_data.append(expected_row) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == expected_data + + +async def test_8140(async_conn, async_cursor, test_env): + "8140 - test fetch_df_batches with bind variables" + batch_size = 2 + await _populate_table(async_cursor, DATASET_4) + where_clause = "where Id >= :min_id" + sql = QUERY_SQL_WITH_WHERE_CLAUSE.format(where_clause=where_clause) + expected_data = [row for row in DATASET_4 if row[0] >= 3] + offset = 0 + async for batch in async_conn.fetch_df_batches( + sql, {"min_id": 3}, size=batch_size + ): + _validate_df( + batch, expected_data[offset : offset + batch_size], test_env + ) + offset += batch_size + + +async def test_8141(async_conn, async_cursor, test_env): + "8141 - test with large data" + data = [ + (1, "A" * 41_000, b"Very long description " * 5_000), + (2, "B" * 35_000, b"Another long text " * 10_000), + (3, "C" * 72_000, b"Even longer content " * 20_000), + ] + + await async_cursor.execute("delete from TestDataFrame") + await async_cursor.executemany( + """ + insert into TestDataFrame + (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + data, + ) + await async_conn.commit() + + ora_df = await async_conn.fetch_df_all( + """ + select Id, LongData, LongRawData + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data + + +async def test_8142(async_conn, async_cursor): + "8142 - test fetching from an empty table with fetch_df_batches" + await async_cursor.execute("delete from TestDataFrame") + batches = [ + b async for b in async_conn.fetch_df_batches(QUERY_SQL, size=10) + ] + assert len(batches) == 1 + assert batches[0].num_rows() == 0 + + +async def test_8143(async_conn, async_cursor, test_env): + "8143 - fetch clob in batches" + await async_cursor.execute("delete from TestDataFrame") + test_string = "A" * 10000 + data = [(test_string,)] * 3 + await async_cursor.executemany( + """ + insert into TestDataFrame (LongData) + values (:1) + """, + data, + ) + await async_conn.commit() + + offset = 0 + batch_size = 2 + sql = "select LongData from TestDataFrame" + async for batch in async_conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data[offset : offset + batch_size] + offset += batch_size + + +async def test_8144(async_conn, async_cursor, test_env): + "8144 - fetch blob in batches" + await async_cursor.execute("delete from 
TestDataFrame") + test_string = b"B" * 10000 + data = [(test_string,)] * 4 + await async_cursor.executemany( + """ + insert into TestDataFrame (LongRawData) + values (:1) + """, + data, + ) + await async_conn.commit() + + offset = 0 + batch_size = 3 + sql = "select LongRawData from TestDataFrame" + async for batch in async_conn.fetch_df_batches(sql, size=batch_size): + fetched_df = pyarrow.table(batch).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == data[offset : offset + batch_size] + offset += batch_size + + +async def test_8145(async_conn, async_cursor, test_env): + "8145 - test with empty strings" + data = [ + ( + 1, + "", + "", + "City", + "Country", + datetime.datetime(2000, 1, 1), + 1000.0, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First", + "Last", + "", + "", + datetime.datetime(2000, 1, 1), + 2000.0, + 200, + datetime.datetime.now(), + ), + ] + await _populate_table(async_cursor, data) + expected_data = [ + tuple(None if v == "" else v for v in row) for row in data + ] + ora_df = await async_conn.fetch_df_all(QUERY_SQL) + fetched_df = pyarrow.table(ora_df).to_pandas() + fetched_data = test_env.get_data_from_df(fetched_df) + assert fetched_data == expected_data + + +async def test_8146(async_cursor, test_env): + "8146 - test with unicode characters" + data = [ + ( + 1, + "Jöhn", + "Döe", + "München", + "Deutschland", + datetime.date(1980, 5, 15), + 5000, + 300, + datetime.datetime.now(), + ), + ( + 2, + "?", + "?", + "??", + "??", + datetime.date(1990, 8, 20), + 8000, + 400, + datetime.datetime.now(), + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8147(async_cursor, test_env): + "8147 - test with very old dates" + data = [ + ( + 1, + "Ancient", + "One", + "Babylon", + "Mesopotamia", + datetime.date(1, 1, 1), + 0, + 0, + datetime.datetime.now(), + ), + ( + 2, + "Medieval", + "Person", + "London", + "England", + datetime.date(1200, 6, 15), + 10, + 50, + datetime.datetime.now(), + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8148(async_cursor, test_env): + "8148 - test with future dates" + data = [ + ( + 1, + "Future", + "Person", + "Mars", + "Solar System", + datetime.date(3000, 1, 1), + 100000, + 900, + datetime.datetime.now(), + ), + ( + 2, + "Distant", + "Future", + "Andromeda", + "Galaxy", + datetime.date(9999, 12, 31), + 999999, + 999, + datetime.datetime.now(), + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8149(async_cursor, test_env): + "8149 - test with exactly arraysize rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, async_cursor.arraysize + 1) + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8150(async_cursor, test_env): + "8150 - test with arraysize+1 rows" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) + for i in range(1, async_cursor.arraysize + 2) + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8151(async_cursor, test_env): + "8151 - test with odd arraysize" + test_date = datetime.date(2000, 1, 1) + now = datetime.datetime.now() + data = [ + ( + i, + f"Name{i}", + f"Last{i}", + "City", + "Country", + test_date, + i * 100, + i % 800, + now, + ) 
+ for i in range(1, 48) + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8152(async_cursor, test_env): + "8152 - test with single row" + data = [ + ( + 1, + "John", + "Doe", + "SF", + "USA", + datetime.date(1990, 1, 1), + 5000, + 100, + datetime.datetime.now(), + ) + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8153(async_cursor, test_env): + "8153 - test multiple rows with NULL values in different columns" + now = datetime.datetime.now() + test_date = datetime.datetime(2000, 1, 1) + data = [ + (1, None, "Last1", "City1", "Country1", None, None, 100, None), + (2, "First2", None, None, "Country2", test_date, 2000, None, None), + (3, "First3", "Last3", None, None, None, 3000, 300, now), + (4, None, None, None, None, None, None, None, None), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8154(async_cursor, test_env): + "8154 - test single column with all NULL values" + data = [ + ( + 1, + None, + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + ( + 3, + None, + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + datetime.datetime.now(), + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8155(async_cursor, test_env): + "8155 - test last column NULL in each row" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 1000, + 100, + None, + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + None, + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + 3000, + 300, + None, + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8156(async_cursor, test_env): + "8156 - test alternating NULL/non-NULL values in a column" + data = [ + ( + 1, + "First1", + None, + "City1", + None, + datetime.date(2000, 1, 1), + None, + 100, + datetime.datetime.now(), + ), + (2, "First2", "Last2", None, "Country2", None, 2000, None, None), + ( + 3, + "First3", + None, + "City3", + None, + datetime.date(2002, 1, 1), + None, + 300, + datetime.datetime.now(), + ), + (4, "First4", "Last4", None, "Country4", None, 4000, None, None), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8157(async_cursor, test_env): + "8157 - test all columns NULL except one" + now = datetime.datetime.now() + test_date = datetime.date(2001, 1, 1) + data = [ + (1, None, None, None, None, None, None, None, now), + (2, None, None, None, None, test_date, None, None, None), + (3, "First3", None, None, None, None, None, None, None), + (4, None, None, None, "Country4", None, None, None, None), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8158(async_cursor, test_env): + "8158 - test all date columns with all NULL values" + data = [ + (1, "First1", "Last1", "City1", "Country1", None, 1000, 100, None), + (2, "First2", "Last2", "City2", "Country2", None, 2000, 200, None), + (3, "First3", "Last3", "City3", "Country3", None, 3000, 300, None), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8159(async_cursor, test_env): + "8159 - test NULL values in numeric columns" + data = [ + ( + 1, + "First1", + "Last1", + "City1", + "Country1", + datetime.date(2000, 1, 1), + 
None, + 100, + datetime.datetime.now(), + ), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + None, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + "City3", + "Country3", + datetime.date(2002, 1, 1), + None, + None, + datetime.datetime.now(), + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8160(async_cursor, test_env): + "8160 - test multiple consecutive NULL rows" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, None, None, None, None, None, None, None, None), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2000, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8161(async_cursor, test_env): + "8161 - test NULL rows interspersed with data rows" + data = [ + (1, None, None, None, None, None, None, None, None), + ( + 2, + "First2", + "Last2", + "City2", + "Country2", + datetime.date(2001, 1, 1), + 2000, + 200, + datetime.datetime.now(), + ), + (3, None, None, None, None, None, None, None, None), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + datetime.date(2003, 1, 1), + 4000, + 400, + datetime.datetime.now(), + ), + (5, None, None, None, None, None, None, None, None), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8162(async_cursor, test_env): + "8162 - test multiple NULL rows with different NULL columns" + data = [ + (1, None, "Last1", "City1", "Country1", None, 1000, 100, None), + ( + 2, + "First2", + None, + "City2", + "Country2", + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + None, + "City3", + "Country3", + None, + None, + 300, + datetime.datetime.now(), + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8163(async_cursor, test_env): + "8163 - test NULL rows with alternating NULL patterns" + data = [ + ( + 1, + None, + "Last1", + None, + "Country1", + None, + 1000, + None, + datetime.datetime.now(), + ), + ( + 2, + "First2", + None, + "City2", + None, + datetime.date(2001, 1, 1), + None, + 200, + None, + ), + ( + 3, + None, + "Last3", + None, + "Country3", + None, + 3000, + None, + datetime.datetime.now(), + ), + ( + 4, + "First4", + None, + "City4", + None, + datetime.date(2003, 1, 1), + None, + 400, + None, + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8164(async_cursor, test_env): + "8164 - test multiple NULL rows with partial NULL groups" + data = [ + ( + 1, + None, + None, + "City1", + "Country1", + None, + None, + 100, + datetime.datetime.now(), + ), + ( + 2, + None, + None, + "City2", + "Country2", + None, + None, + 200, + datetime.datetime.now(), + ), + ( + 3, + "First3", + "Last3", + None, + None, + datetime.date(2002, 1, 1), + 3000, + None, + None, + ), + ( + 4, + "First4", + "Last4", + None, + None, + datetime.date(2003, 1, 1), + 4000, + None, + None, + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8165(async_cursor, test_env): + "8165 - test multiple NULL rows with varying NULL counts" + data = [ + (1, None, None, None, None, None, None, None, None), + (2, "First2", None, "City2", None, None, 2000, None, None), + ( + 3, + None, + "Last3", + None, + "Country3", + datetime.date(2002, 1, 1), + None, + 300, + None, 
+ ), + ( + 4, + "First4", + "Last4", + "City4", + "Country4", + None, + 4000, + 400, + datetime.datetime.now(), + ), + ] + await _test_df_interop(test_env, async_cursor, data) + + +async def test_8166(async_conn, test_env): + "8166 - test fetching large integers" + data = (-(2**40), 2**41) + ora_df = await async_conn.fetch_df_all( + """ + select + cast(:1 as number(15)), + cast(:2 as number(15)) + from dual + """, + data, + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert [data] == test_env.get_data_from_df(fetched_df) diff --git a/tests/test_8200_aq_bulk_async.py b/tests/test_8200_aq_bulk_async.py index 14b8f78d..86d6c098 100644 --- a/tests/test_8200_aq_bulk_async.py +++ b/tests/test_8200_aq_bulk_async.py @@ -29,7 +29,7 @@ import datetime import oracledb -import test_env +import pytest RAW_QUEUE_NAME = "TEST_RAW_QUEUE" JSON_QUEUE_NAME = "TEST_JSON_QUEUE" @@ -61,113 +61,130 @@ ] -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def __deq_in_thread(self, results): - async with test_env.get_connection_async() as conn: - queue = conn.queue(RAW_QUEUE_NAME) - queue.deqoptions.wait = 10 - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - while len(results) < len(RAW_PAYLOAD_DATA): - messages = await queue.deqmany(5) - if not messages: - break - for message in messages: - results.append(message.payload.decode()) - await conn.commit() - - async def test_8200(self): - "8200 - test bulk enqueue and dequeue" - queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture +async def queue(async_conn, test_env): + """ + Creates the queue used by the tests in this file. + """ + return await test_env.get_and_clear_queue_async(async_conn, RAW_QUEUE_NAME) + + +@pytest.fixture +async def json_queue(async_conn, test_env): + """ + Creates the queue used by the tests in this file. 
+ """ + return await test_env.get_and_clear_queue_async( + async_conn, JSON_QUEUE_NAME, "JSON" + ) + + +async def _deq_in_thread(test_env, results): + async with test_env.get_connection_async() as conn: + queue = conn.queue(RAW_QUEUE_NAME) + queue.deqoptions.wait = 10 + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + while len(results) < len(RAW_PAYLOAD_DATA): + messages = await queue.deqmany(5) + if not messages: + break + for message in messages: + results.append(message.payload.decode()) + await conn.commit() + + +async def test_8200(queue, async_conn): + "8200 - test bulk enqueue and dequeue" + messages = [ + async_conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA + ] + await queue.enqmany(messages) + messages = await queue.deqmany(len(RAW_PAYLOAD_DATA)) + data = [message.payload.decode() for message in messages] + await async_conn.commit() + assert data == RAW_PAYLOAD_DATA + + +async def test_8201(queue, async_conn): + "8201 - test empty bulk dequeue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + messages = await queue.deqmany(5) + await async_conn.commit() + assert messages == [] + + +async def test_8202(queue, async_conn): + "8202 - test enqueue and dequeue multiple times" + data_to_enqueue = RAW_PAYLOAD_DATA + for num in (2, 6, 4): messages = [ - self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA + async_conn.msgproperties(payload=data) + for data in data_to_enqueue[:num] ] + data_to_enqueue = data_to_enqueue[num:] await queue.enqmany(messages) - messages = await queue.deqmany(len(RAW_PAYLOAD_DATA)) - data = [message.payload.decode() for message in messages] - await self.conn.commit() - self.assertEqual(data, RAW_PAYLOAD_DATA) - - async def test_8201(self): - "8201 - test empty bulk dequeue" - queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - messages = await queue.deqmany(5) - await self.conn.commit() - self.assertEqual(messages, []) - - async def test_8202(self): - "8202 - test enqueue and dequeue multiple times" - queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) - data_to_enqueue = RAW_PAYLOAD_DATA - for num in (2, 6, 4): - messages = [ - self.conn.msgproperties(payload=data) - for data in data_to_enqueue[:num] - ] - data_to_enqueue = data_to_enqueue[num:] - await queue.enqmany(messages) - await self.conn.commit() - all_data = [] - for num in (3, 5, 10): - messages = await queue.deqmany(num) - all_data.extend(message.payload.decode() for message in messages) - await self.conn.commit() - self.assertEqual(all_data, RAW_PAYLOAD_DATA) - - async def test_8203(self): - "8203 - test error for messages with no payload" - queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) - messages = [self.conn.msgproperties() for _ in RAW_PAYLOAD_DATA] - with self.assertRaisesFullCode("DPY-2000"): - await queue.enqmany(messages) - - async def test_8204(self): - "8204 - verify that the msgid property is returned correctly" - queue = await self.get_and_clear_queue(RAW_QUEUE_NAME) - messages = [ - self.conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA - ] + await async_conn.commit() + all_data = [] + for num in (3, 5, 10): + messages = await queue.deqmany(num) + all_data.extend(message.payload.decode() for message in messages) + await async_conn.commit() + assert all_data == RAW_PAYLOAD_DATA + + +async def test_8203(queue, async_conn, test_env): + "8203 - test error for messages with no payload" + messages = [async_conn.msgproperties() for _ in RAW_PAYLOAD_DATA] + with 
test_env.assert_raises_full_code("DPY-2000"): await queue.enqmany(messages) - await self.cursor.execute("select msgid from raw_queue_tab") - actual_msgids = set(m for m, in await self.cursor.fetchall()) - msgids = set(message.msgid for message in messages) - self.assertEqual(msgids, actual_msgids) - messages = await queue.deqmany(len(RAW_PAYLOAD_DATA)) - msgids = set(message.msgid for message in messages) - self.assertEqual(msgids, actual_msgids) - - async def test_8205(self): - "8205 - test enqueuing and dequeuing JSON message" - queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") - props = [ - self.conn.msgproperties(payload=data) for data in JSON_DATA_PAYLOAD - ] - await queue.enqmany(props) - await self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - messages = await queue.deqmany(5) - actual_data = [message.payload for message in messages] - self.assertEqual(actual_data, JSON_DATA_PAYLOAD) - - async def test_8206(self): - "8206 - test enqueuing to a JSON queue without a JSON payload" - queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") - props = self.conn.msgproperties(payload="string message") - with self.assertRaisesFullCode("DPY-2062"): - await queue.enqmany([props, props]) - - async def test_8207(self): - "8207 - test errors for invalid values for enqmany and deqmany" - queue = await self.get_and_clear_queue(JSON_QUEUE_NAME, "JSON") - props = self.conn.msgproperties(payload="string message") - with self.assertRaises(TypeError): - await queue.enqmany(props) - with self.assertRaises(TypeError): - await queue.enqmany(["Not", "msgproperties"]) - with self.assertRaises(TypeError): - await queue.deqmany("5") - - -if __name__ == "__main__": - test_env.run_test_cases() + + +async def test_8204(queue, async_conn, async_cursor): + "8204 - verify that the msgid property is returned correctly" + messages = [ + async_conn.msgproperties(payload=data) for data in RAW_PAYLOAD_DATA + ] + await queue.enqmany(messages) + await async_cursor.execute("select msgid from raw_queue_tab") + actual_msgids = set(m for m, in await async_cursor.fetchall()) + msgids = set(message.msgid for message in messages) + assert msgids == actual_msgids + messages = await queue.deqmany(len(RAW_PAYLOAD_DATA)) + msgids = set(message.msgid for message in messages) + assert msgids == actual_msgids + + +async def test_8205(json_queue, async_conn): + "8205 - test enqueuing and dequeuing JSON message" + props = [ + async_conn.msgproperties(payload=data) for data in JSON_DATA_PAYLOAD + ] + await json_queue.enqmany(props) + await async_conn.commit() + json_queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + messages = await json_queue.deqmany(5) + actual_data = [message.payload for message in messages] + assert actual_data == JSON_DATA_PAYLOAD + + +async def test_8206(json_queue, async_conn, test_env): + "8206 - test enqueuing to a JSON queue without a JSON payload" + props = async_conn.msgproperties(payload="string message") + with test_env.assert_raises_full_code("DPY-2062"): + await json_queue.enqmany([props, props]) + + +async def test_8207(json_queue, async_conn): + "8207 - test errors for invalid values for enqmany and deqmany" + props = async_conn.msgproperties(payload="string message") + with pytest.raises(TypeError): + await json_queue.enqmany(props) + with pytest.raises(TypeError): + await json_queue.enqmany(["Not", "msgproperties"]) + with pytest.raises(TypeError): + await json_queue.deqmany("5") diff --git a/tests/test_8300_aq_json.py b/tests/test_8300_aq_json.py index 
de725bf5..a66ec107 100644 --- a/tests/test_8300_aq_json.py +++ b/tests/test_8300_aq_json.py @@ -29,390 +29,391 @@ import datetime import decimal import threading -import unittest import oracledb -import test_env - - -class TestCase(test_env.BaseTestCase): - json_queue_name = "TEST_JSON_QUEUE" - json_data = [ - [ - 2.75, - True, - "Ocean Beach", - b"Some bytes", - {"keyA": 1.0, "KeyB": "Melbourne"}, - datetime.datetime(2022, 8, 1, 0, 0), - ], - [ - True, - False, - "String", - b"Some Bytes", - {}, - {"name": None}, - {"name": "John"}, - {"age": 30}, - {"Permanent": True}, - { - "employee": { - "name": "John", - "age": 30, - "city": "Delhi", - "Parmanent": True, - } - }, - {"employees": ["John", "Matthew", "James"]}, - { - "employees": [ - {"employee1": {"name": "John", "city": "Delhi"}}, - {"employee2": {"name": "Matthew", "city": "Mumbai"}}, - {"employee3": {"name": "James", "city": "Bangalore"}}, - ] - }, - ], - [ - datetime.datetime.today(), - datetime.datetime(2004, 2, 1, 3, 4, 5), - datetime.datetime(2020, 12, 2, 13, 29, 14), - datetime.timedelta(8.5), - datetime.datetime(2002, 12, 13, 9, 36, 0), - oracledb.Timestamp(2002, 12, 13, 9, 36, 0), - datetime.datetime(2002, 12, 13), - ], - dict(name="John", age=30, city="New York"), - [ - 0, - 1, - 25.25, - 6088343244, - -9999999999999999999, - decimal.Decimal("0.25"), - decimal.Decimal("10.25"), - decimal.Decimal("319438950232418390.273596"), - ], - ] - - def __deq_in_thread(self, results): - with test_env.get_connection() as conn: - queue = conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.wait = 10 - props = queue.deqone() - if props is not None: - results.append(props.payload) - conn.commit() - - def __verify_attr(self, obj, attrName, value): - setattr(obj, attrName, value) - self.assertEqual(getattr(obj, attrName), value) - - def test_8300(self): - "8300 - test dequeuing an empty JSON queue" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT +import pytest + + +JSON_QUEUE_NAME = "TEST_JSON_QUEUE" +JSON_DATA = [ + [ + 2.75, + True, + "Ocean Beach", + b"Some bytes", + {"keyA": 1.0, "KeyB": "Melbourne"}, + datetime.datetime(2022, 8, 1, 0, 0), + ], + [ + True, + False, + "String", + b"Some Bytes", + {}, + {"name": None}, + {"name": "John"}, + {"age": 30}, + {"Permanent": True}, + { + "employee": { + "name": "John", + "age": 30, + "city": "Delhi", + "Parmanent": True, + } + }, + {"employees": ["John", "Matthew", "James"]}, + { + "employees": [ + {"employee1": {"name": "John", "city": "Delhi"}}, + {"employee2": {"name": "Matthew", "city": "Mumbai"}}, + {"employee3": {"name": "James", "city": "Bangalore"}}, + ] + }, + ], + [ + datetime.datetime.today(), + datetime.datetime(2004, 2, 1, 3, 4, 5), + datetime.datetime(2020, 12, 2, 13, 29, 14), + datetime.timedelta(8.5), + datetime.datetime(2002, 12, 13, 9, 36, 0), + oracledb.Timestamp(2002, 12, 13, 9, 36, 0), + datetime.datetime(2002, 12, 13), + ], + dict(name="John", age=30, city="New York"), + [ + 0, + 1, + 25.25, + 6088343244, + -9999999999999999999, + decimal.Decimal("0.25"), + decimal.Decimal("10.25"), + decimal.Decimal("319438950232418390.273596"), + ], +] + + +@pytest.fixture +def queue(conn, test_env): + """ + Creates the queue used by the tests in this file. 
+ """ + return test_env.get_and_clear_queue(conn, JSON_QUEUE_NAME, "JSON") + + +def _deq_in_thread(test_env, results): + with test_env.get_connection() as conn: + queue = conn.queue(JSON_QUEUE_NAME, "JSON") + queue.deqoptions.wait = 10 props = queue.deqone() - self.assertIsNone(props) - - def test_8301(self): - "8301 - test enqueuing and dequeuing multiple JSON messages" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - props = self.conn.msgproperties() - for data in self.json_data: - props.payload = data - queue.enqone(props) - self.conn.commit() - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - results = [] - while True: - props = queue.deqone() - if props is None: - break + if props is not None: results.append(props.payload) - self.conn.commit() - self.assertEqual(results, self.json_data) - - @unittest.skip("awaiting fix for bug 37746852") - def test_8302(self): - "8302 - test dequeuing with DEQ_REMOVE_NODATA option" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[1] - props = self.conn.msgproperties(payload=data) + conn.commit() + + +def _verify_attr(obj, attrName, value): + setattr(obj, attrName, value) + assert getattr(obj, attrName) == value + + +def test_8300(queue): + "8300 - test dequeuing an empty JSON queue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None + + +def test_8301(queue, conn): + "8301 - test enqueuing and dequeuing multiple JSON messages" + props = conn.msgproperties() + for data in JSON_DATA: + props.payload = data queue.enqone(props) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + conn.commit() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: props = queue.deqone() - self.assertIsNotNone(props) - self.assertIsNone(props.payload) - - def test_8303(self): - "8303 - test getting/setting dequeue options attributes" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - options = queue.deqoptions - self.__verify_attr(options, "condition", "TEST_CONDITION") - self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") - self.__verify_attr(options, "correlation", "TEST_CORRELATION") - self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) - self.__verify_attr( - options, "navigation", oracledb.DEQ_NEXT_TRANSACTION - ) - self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - self.__verify_attr(options, "wait", 1287) - self.__verify_attr(options, "msgid", b"mID") - - def test_8304(self): - "8304 - test waiting for dequeue" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - results = [] - thread = threading.Thread(target=self.__deq_in_thread, args=(results,)) - thread.start() - data = self.json_data[0] - props = self.conn.msgproperties(payload=data) - queue.enqone(props) - self.conn.commit() - thread.join() - self.assertEqual(results, [data]) - - def test_8305(self): - "8305 - test getting/setting enqueue options attributes" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - options = queue.enqoptions - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - - def test_8306(self): - "8306 - test getting/setting message properties attributes" - props = self.conn.msgproperties() - 
self.__verify_attr(props, "correlation", "TEST_CORRELATION") - self.__verify_attr(props, "delay", 60) - self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") - self.__verify_attr(props, "expiration", 30) - self.assertEqual(props.attempts, 0) - self.__verify_attr(props, "priority", 1) - self.assertEqual(props.state, oracledb.MSG_READY) - self.assertEqual(props.deliverymode, 0) - - def test_8307(self): - "8307 - test enqueue visibility options - ENQ_ON_COMMIT" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT - props = self.conn.msgproperties(payload=data) + if props is None: + break + results.append(props.payload) + conn.commit() + assert results == JSON_DATA + + +@pytest.mark.skip("awaiting fix for bug 37746852") +def test_8302(queue, conn): + "8302 - test dequeuing with DEQ_REMOVE_NODATA option" + data = JSON_DATA[1] + props = conn.msgproperties(payload=data) + queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = queue.deqone() + assert props is not None + assert props.payload is None + + +def test_8303(queue): + "8303 - test getting/setting dequeue options attributes" + options = queue.deqoptions + _verify_attr(options, "condition", "TEST_CONDITION") + _verify_attr(options, "consumername", "TEST_CONSUMERNAME") + _verify_attr(options, "correlation", "TEST_CORRELATION") + _verify_attr(options, "mode", oracledb.DEQ_LOCKED) + _verify_attr(options, "navigation", oracledb.DEQ_NEXT_TRANSACTION) + _verify_attr(options, "transformation", "TEST_TRANSFORMATION") + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + _verify_attr(options, "wait", 1287) + _verify_attr(options, "msgid", b"mID") + + +def test_8304(queue, conn, test_env): + "8304 - test waiting for dequeue" + results = [] + thread = threading.Thread(target=_deq_in_thread, args=(test_env, results)) + thread.start() + data = JSON_DATA[0] + props = conn.msgproperties(payload=data) + queue.enqone(props) + conn.commit() + thread.join() + assert results == [data] + + +def test_8305(queue): + "8305 - test getting/setting enqueue options attributes" + options = queue.enqoptions + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + +def test_8306(conn): + "8306 - test getting/setting message properties attributes" + props = conn.msgproperties() + _verify_attr(props, "correlation", "TEST_CORRELATION") + _verify_attr(props, "delay", 60) + _verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + _verify_attr(props, "expiration", 30) + assert props.attempts == 0 + _verify_attr(props, "priority", 1) + assert props.state == oracledb.MSG_READY + assert props.deliverymode == 0 + + +def test_8307(queue, conn, test_env): + "8307 - test enqueue visibility options - ENQ_ON_COMMIT" + data = JSON_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = conn.msgproperties(payload=data) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue(JSON_QUEUE_NAME, "JSON") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + assert props is None + conn.commit() + props = queue.deqone() + assert props is not None + + +def test_8308(queue, conn, test_env): + "8308 - test enqueue visibility option - ENQ_IMMEDIATE" + data = JSON_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props 
= conn.msgproperties(payload=data) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue(JSON_QUEUE_NAME, "JSON") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + data = props.payload + results = data + other_conn.commit() + assert results == JSON_DATA[0] + + +def test_8309(queue, conn, test_env): + "8309 - test enqueue/dequeue delivery modes identical - persistent" + data = JSON_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = conn.msgproperties(payload=data) + queue.enqone(props) + + other_conn = test_env.get_connection() + queue = other_conn.queue(JSON_QUEUE_NAME, "JSON") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = queue.deqone() + data = props.payload + results = data + other_conn.commit() + assert results == JSON_DATA[0] + + +def test_8310(queue, conn, test_env): + "8310 - test error for message with no payload" + props = conn.msgproperties() + with test_env.assert_raises_full_code("DPY-2000"): queue.enqone(props) - other_conn = test_env.get_connection() - queue = other_conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - self.assertIsNone(props) - self.conn.commit() - props = queue.deqone() - self.assertIsNotNone(props) - - def test_8308(self): - "8308 - test enqueue visibility option - ENQ_IMMEDIATE" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=data) - queue.enqone(props) - other_conn = test_env.get_connection() - queue = other_conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = queue.deqone() - data = props.payload - results = data - other_conn.commit() - self.assertEqual(results, self.json_data[0]) - - def test_8309(self): - "8309 - test enqueue/dequeue delivery modes identical - persistent" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=data) +def test_8311(queue, conn, cursor): + "8311 - verify that the msgid property is returned correctly" + data = JSON_DATA[0] + props = conn.msgproperties(payload=data) + assert props.msgid is None + queue.enqone(props) + cursor.execute("select msgid from JSON_QUEUE_TAB") + (actual_msgid,) = cursor.fetchone() + assert props.msgid == actual_msgid + props = queue.deqone() + assert props.msgid == actual_msgid + + +def test_8312(queue, conn, cursor): + "8312 - test message props enqtime" + data = JSON_DATA[0] + cursor.execute("select current_timestamp from dual") + (start_date,) = cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = conn.msgproperties(payload=data) + queue.enqone(props) + props = queue.deqone() + cursor.execute("select current_timestamp from dual") + (end_date,) = 
cursor.fetchone() + end_date = end_date.replace(microsecond=0) + assert start_date <= props.enqtime <= end_date + + +def test_8313(queue, conn): + "8313 - test message props declared attributes" + data = JSON_DATA[0] + values = dict( + payload=data, + correlation="TEST_CORRELATION", + delay=0, + exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", + expiration=15, + priority=1, + ) + props = conn.msgproperties(**values) + for attr_name in values: + assert getattr(props, attr_name) == values[attr_name] + queue.enqone(props) + conn.commit() + prop = queue.deqone() + for attr_name in values: + assert getattr(prop, attr_name) == values[attr_name] + + +def test_8314(queue, conn): + "8314 - test getting queue attributes" + assert queue.name == "TEST_JSON_QUEUE" + assert queue.connection is conn + + +def test_8315(queue): + "8315 - test getting write-only attributes" + for options in (queue.enqoptions, queue.deqoptions): + with pytest.raises(AttributeError): + options.deliverymode + + +def test_8316(queue, conn): + "8316 - test deqoption condition with priority" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] + for priority in priorities: + data = JSON_DATA[0] + props = conn.msgproperties(payload=data, priority=priority) queue.enqone(props) - other_conn = test_env.get_connection() - queue = other_conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.condition = "priority = 9" + results = [] + while True: props = queue.deqone() - data = props.payload - results = data - other_conn.commit() - self.assertEqual(results, self.json_data[0]) - - def test_8310(self): - "8310 - test error for message with no payload" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - props = self.conn.msgproperties() - with self.assertRaisesFullCode("DPY-2000"): - queue.enqone(props) - - def test_8311(self): - "8311 - verify that the msgid property is returned correctly" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - props = self.conn.msgproperties(payload=data) - self.assertIsNone(props.msgid) - queue.enqone(props) - self.cursor.execute("select msgid from JSON_QUEUE_TAB") - (actual_msgid,) = self.cursor.fetchone() - self.assertEqual(props.msgid, actual_msgid) - props = queue.deqone() - self.assertEqual(props.msgid, actual_msgid) - - def test_8312(self): - "8312 - test message props enqtime" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - self.cursor.execute("select current_timestamp from dual") - (start_date,) = self.cursor.fetchone() - start_date = start_date.replace(microsecond=0) - props = self.conn.msgproperties(payload=data) + if props is None: + break + results.append(props.payload) + conn.commit() + assert len(results) == 3 + + +def test_8317(queue, conn): + "8317 - test deqoption correlation" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + correlations = [ + "sample", + "sample correlation", + "sample", + "sample", + "sample correlation", + ] + for correlation in correlations: + data = JSON_DATA[0] + props = conn.msgproperties(payload=data, correlation=correlation) queue.enqone(props) + conn.commit() + queue.deqoptions.correlation = "sample correlation" + results = [] + while True: props = queue.deqone() - self.cursor.execute("select current_timestamp from 
dual") - (end_date,) = self.cursor.fetchone() - end_date = end_date.replace(microsecond=0) - self.assertTrue(start_date <= props.enqtime <= end_date) - - def test_8313(self): - "8313 - test message props declared attributes" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - values = dict( - payload=data, - correlation="TEST_CORRELATION", - delay=0, - exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", - expiration=15, - priority=1, - ) - props = self.conn.msgproperties(**values) - for attr_name in values: - self.assertEqual(getattr(props, attr_name), values[attr_name]) - queue.enqone(props) - self.conn.commit() - prop = queue.deqone() - for attr_name in values: - self.assertEqual(getattr(prop, attr_name), values[attr_name]) - - def test_8314(self): - "8314 - test getting queue attributes" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - self.assertEqual(queue.name, "TEST_JSON_QUEUE") - self.assertEqual(queue.connection, self.conn) - - def test_8315(self): - "8315 - test getting write-only attributes" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - for options in (queue.enqoptions, queue.deqoptions): - with self.assertRaises(AttributeError): - options.deliverymode - - def test_8316(self): - "8316 - test deqoption condition with priority" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] - for priority in priorities: - data = self.json_data[0] - props = self.conn.msgproperties(payload=data, priority=priority) - queue.enqone(props) - - queue.deqoptions.condition = "priority = 9" - results = [] - while True: - props = queue.deqone() - if props is None: - break - results.append(props.payload) - self.conn.commit() - self.assertEqual(len(results), 3) - - def test_8317(self): - "8317 - test deqoption correlation" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - correlations = [ - "sample", - "sample correlation", - "sample", - "sample", - "sample correlation", - ] - for correlation in correlations: - data = self.json_data[0] - props = self.conn.msgproperties( - payload=data, correlation=correlation - ) - queue.enqone(props) - self.conn.commit() - queue.deqoptions.correlation = "sample correlation" - results = [] - while True: - props = queue.deqone() - if props is None: - break - results.append(props.payload) - self.conn.commit() - self.assertEqual(len(results), 2) - - def test_8318(self): - "8318 - test deqoption msgid" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - props = self.conn.msgproperties(payload=data) - queue.enqone(props) - queue.enqone(props) - self.conn.commit() - msgid = props.msgid + if props is None: + break + results.append(props.payload) + conn.commit() + assert len(results) == 2 + + +def test_8318(queue, conn): + "8318 - test deqoption msgid" + data = JSON_DATA[0] + props = conn.msgproperties(payload=data) + queue.enqone(props) + queue.enqone(props) + conn.commit() + msgid = props.msgid + queue.enqone(props) + conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.msgid = msgid + prop = queue.deqone() + conn.commit() + assert prop.msgid == msgid + + +def test_8319(queue): + "8319 - test payload_type returns the correct value" + assert queue.payload_type == "JSON" + + +def test_8320(queue): + "8320 - test deprecated attributes (enqOptions, deqOptions)" + assert 
queue.enqOptions is queue.enqoptions + assert queue.deqOptions is queue.deqoptions + + +def test_8321(queue, conn): + "8321 - test deprecated AQ methods (enqOne, deqOne)" + data = JSON_DATA[0] + queue.enqOne(conn.msgproperties(payload=data)) + props = queue.deqOne() + assert props.payload == data + + +def test_8322(queue, conn, test_env): + "8322 - test wrong payload type" + props = conn.msgproperties(payload="A string") + with test_env.assert_raises_full_code("DPY-2062"): queue.enqone(props) - self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.msgid = msgid - prop = queue.deqone() - self.conn.commit() - self.assertEqual(prop.msgid, msgid) - - def test_8319(self): - "8319 - test payload_type returns the correct value" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - self.assertEqual(queue.payload_type, "JSON") - - def test_8320(self): - "8320 - test deprecated attributes (enqOptions, deqOptions)" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - self.assertEqual(queue.enqOptions, queue.enqoptions) - self.assertEqual(queue.deqOptions, queue.deqoptions) - - def test_8321(self): - "8321 - test deprecated AQ methods (enqOne, deqOne)" - data = self.json_data[0] - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - queue.enqOne(self.conn.msgproperties(payload=data)) - props = queue.deqOne() - self.assertEqual(props.payload, data) - - def test_8322(self): - "8322 - test wrong payload type" - queue = self.get_and_clear_queue(self.json_queue_name, "JSON") - props = self.conn.msgproperties(payload="A string") - with self.assertRaisesFullCode("DPY-2062"): - queue.enqone(props) - - -if __name__ == "__main__": - test_env.run_test_cases() diff --git a/tests/test_8400_aq_dbobject_async.py b/tests/test_8400_aq_dbobject_async.py index 4a338b1c..3a280ca3 100644 --- a/tests/test_8400_aq_dbobject_async.py +++ b/tests/test_8400_aq_dbobject_async.py @@ -29,456 +29,417 @@ import decimal import oracledb -import test_env +import pytest +BOOK_TYPE_NAME = "UDT_BOOK" +BOOK_QUEUE_NAME = "TEST_BOOK_QUEUE" +BOOK_DATA = [ + ("Wings of Fire", "A.P.J. Abdul Kalam", decimal.Decimal("15.75")), + ("The Story of My Life", "Hellen Keller", decimal.Decimal("10.50")), + ("The Chronicles of Narnia", "C.S. 
Lewis", decimal.Decimal("25.25")), +] + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture +async def queue(async_conn, test_env): + return await test_env.get_and_clear_queue_async( + async_conn, BOOK_QUEUE_NAME, BOOK_TYPE_NAME + ) + + +def _verify_attr(obj, attrName, value): + setattr(obj, attrName, value) + assert getattr(obj, attrName) == value + + +async def test_8400(queue): + "8400 - test dequeuing an empty queue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + assert props is None + + +async def test_8401(queue, async_conn): + "8401 - test enqueuing and dequeuing multiple messages" + props = async_conn.msgproperties() + for title, authors, price in BOOK_DATA: + props.payload = book = queue.payload_type.newobject() + book.TITLE = title + book.AUTHORS = authors + book.PRICE = price + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: + props = await queue.deqone() + if props is None: + break + book = props.payload + row = (book.TITLE, book.AUTHORS, book.PRICE) + results.append(row) + await async_conn.commit() + assert results == BOOK_DATA + + +async def test_8402(queue, async_conn): + "8402 - test dequeuing with DEQ_REMOVE_NODATA option" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[1] + props = async_conn.msgproperties(payload=book) + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = await queue.deqone() + assert props is not None + assert props.payload.TITLE is None + + +async def test_8403(queue): + "8403 - test getting/setting dequeue options attributes" + options = queue.deqoptions + _verify_attr(options, "condition", "TEST_CONDITION") + _verify_attr(options, "consumername", "TEST_CONSUMERNAME") + _verify_attr(options, "correlation", "TEST_CORRELATION") + _verify_attr(options, "mode", oracledb.DEQ_LOCKED) + _verify_attr(options, "navigation", oracledb.DEQ_NEXT_TRANSACTION) + _verify_attr(options, "transformation", "TEST_TRANSFORMATION") + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + _verify_attr(options, "wait", 1287) + _verify_attr(options, "msgid", b"mID") + + +async def test_8404(queue): + "8404 - test getting/setting enqueue options attributes" + options = queue.enqoptions + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + +async def test_8405(queue): + "8405 - test errors for invalid values for enqueue" + book = queue.payload_type.newobject() + with pytest.raises(TypeError): + await queue.enqone(book) + + +async def test_8406(async_conn): + "8406 - test getting/setting message properties attributes" + props = async_conn.msgproperties() + _verify_attr(props, "correlation", "TEST_CORRELATION") + _verify_attr(props, "delay", 60) + _verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + _verify_attr(props, "expiration", 30) + assert props.attempts == 0 + _verify_attr(props, "priority", 1) + assert props.state == oracledb.MSG_READY + assert props.deliverymode == 0 + + +async def test_8407(queue, async_conn, test_env): + "8407 - test enqueue visibility option - ENQ_ON_COMMIT" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = 
async_conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(BOOK_TYPE_NAME) + queue = other_conn.queue(BOOK_QUEUE_NAME, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + assert props is None + await async_conn.commit() + props = await queue.deqone() + await other_conn.commit() + assert props is not None -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - book_type_name = "UDT_BOOK" - book_queue_name = "TEST_BOOK_QUEUE" - book_data = [ - ("Wings of Fire", "A.P.J. Abdul Kalam", decimal.Decimal("15.75")), - ("The Story of My Life", "Hellen Keller", decimal.Decimal("10.50")), - ("The Chronicles of Narnia", "C.S. Lewis", decimal.Decimal("25.25")), - ] - def __verify_attr(self, obj, attrName, value): - setattr(obj, attrName, value) - self.assertEqual(getattr(obj, attrName), value) +async def test_8408(queue, async_conn, test_env): + "8408 - test enqueue visibility option - ENQ_IMMEDIATE" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=book) + await queue.enqone(props) - async def test_8400(self): - "8400 - test dequeuing an empty queue" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(BOOK_TYPE_NAME) + queue = other_conn.queue(BOOK_QUEUE_NAME, books_type) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = await queue.deqone() - self.assertIsNone(props) - - async def test_8401(self): - "8401 - test enqueuing and dequeuing multiple messages" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - props = self.conn.msgproperties() - for title, authors, price in self.book_data: - props.payload = book = queue.payload_type.newobject() - book.TITLE = title - book.AUTHORS = authors - book.PRICE = price - await queue.enqone(props) + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + assert results == BOOK_DATA[0] + + +async def test_8409(queue, async_conn, test_env): + "8409 - test enqueue/dequeue delivery modes identical - buffered" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(BOOK_TYPE_NAME) + queue = other_conn.queue(BOOK_QUEUE_NAME, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - results = [] - while True: - props = await queue.deqone() - if props is None: - break - book = props.payload - row = (book.TITLE, book.AUTHORS, book.PRICE) - results.append(row) - await self.conn.commit() - self.assertEqual(results, self.book_data) - - async def test_8402(self): - "8402 - test dequeuing with 
DEQ_REMOVE_NODATA option" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[1] - props = self.conn.msgproperties(payload=book) - await queue.enqone(props) + props = await queue.deqone() + assert props.deliverymode == oracledb.MSG_BUFFERED + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + assert results == BOOK_DATA[0] + + +async def test_8410(queue, async_conn, test_env): + "8410 - test enqueue/dequeue delivery modes identical - persistent" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(BOOK_TYPE_NAME) + queue = other_conn.queue(BOOK_QUEUE_NAME, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA props = await queue.deqone() - self.assertIsNotNone(props) - self.assertIsNone(props.payload.TITLE) - - async def test_8403(self): - "8403 - test getting/setting dequeue options attributes" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - options = queue.deqoptions - self.__verify_attr(options, "condition", "TEST_CONDITION") - self.__verify_attr(options, "consumername", "TEST_CONSUMERNAME") - self.__verify_attr(options, "correlation", "TEST_CORRELATION") - self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) - self.__verify_attr( - options, "navigation", oracledb.DEQ_NEXT_TRANSACTION - ) - self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - self.__verify_attr(options, "wait", 1287) - self.__verify_attr(options, "msgid", b"mID") - - async def test_8404(self): - "8404 - test getting/setting enqueue options attributes" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - options = queue.enqoptions - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - - async def test_8405(self): - "8405 - test errors for invalid values for enqueue" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - with self.assertRaises(TypeError): - await queue.enqone(book) - - async def test_8406(self): - "8406 - test getting/setting message properties attributes" - props = self.conn.msgproperties() - self.__verify_attr(props, "correlation", "TEST_CORRELATION") - self.__verify_attr(props, "delay", 60) - self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") - self.__verify_attr(props, "expiration", 30) - self.assertEqual(props.attempts, 0) - self.__verify_attr(props, "priority", 1) - self.assertEqual(props.state, oracledb.MSG_READY) - self.assertEqual(props.deliverymode, 0) - - async def test_8407(self): - "8407 - test enqueue visibility option - ENQ_ON_COMMIT" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, 
book.PRICE = self.book_data[0] - queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT - props = self.conn.msgproperties(payload=book) - await queue.enqone(props) + assert props.deliverymode == oracledb.MSG_PERSISTENT + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + assert results == BOOK_DATA[0] + + +async def test_8411(queue, async_conn, test_env): + "8411 - test enqueue/dequeue delivery modes the same" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(BOOK_TYPE_NAME) + queue = other_conn.queue(BOOK_QUEUE_NAME, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + assert props.deliverymode == oracledb.MSG_PERSISTENT + book = props.payload + results = (book.TITLE, book.AUTHORS, book.PRICE) + await other_conn.commit() + assert results == BOOK_DATA[0] + + +async def test_8412(queue, async_conn, test_env): + "8412 - test enqueue/dequeue delivery modes different" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=book) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + books_type = await other_conn.gettype(BOOK_TYPE_NAME) + queue = other_conn.queue(BOOK_QUEUE_NAME, books_type) + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + assert props is None - async with test_env.get_connection_async() as other_conn: - books_type = await other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertIsNone(props) - await self.conn.commit() - props = await queue.deqone() - await other_conn.commit() - self.assertIsNotNone(props) - - async def test_8408(self): - "8408 - test enqueue visibility option - ENQ_IMMEDIATE" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) - await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - books_type = await other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - await 
other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - async def test_8409(self): - "8409 - test enqueue/dequeue delivery modes identical - buffered" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) +async def test_8413(async_conn, test_env): + "8413 - test error for message with no payload" + books_type = await async_conn.gettype(BOOK_TYPE_NAME) + queue = async_conn.queue(BOOK_QUEUE_NAME, books_type) + props = async_conn.msgproperties() + with test_env.assert_raises_full_code("DPY-2000"): await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - books_type = await other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertEqual(props.deliverymode, oracledb.MSG_BUFFERED) - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - await other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - async def test_8410(self): - "8410 - test enqueue/dequeue delivery modes identical - persistent" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) - await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - books_type = await other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertEqual(props.deliverymode, oracledb.MSG_PERSISTENT) - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - await other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - async def test_8411(self): - "8411 - test enqueue/dequeue delivery modes the same" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) +async def test_8414(queue, async_conn, async_cursor): + "8414 - verify that the msgid property is returned correctly" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + props = async_conn.msgproperties(payload=book) + assert props.msgid is None + await queue.enqone(props) + await async_cursor.execute("select msgid from book_queue_tab") + (actual_msgid,) = await async_cursor.fetchone() + assert props.msgid == actual_msgid + props = await queue.deqone() + assert 
props.msgid == actual_msgid + + +async def test_8415(queue, async_conn, async_cursor): + "8415 - test message props enqtime" + book = queue.payload_type.newobject() + await async_cursor.execute("select current_timestamp from dual") + (start_date,) = await async_cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = async_conn.msgproperties(payload=book) + await queue.enqone(props) + props = await queue.deqone() + await async_cursor.execute("select current_timestamp from dual") + (end_date,) = await async_cursor.fetchone() + end_date = end_date.replace(microsecond=0) + assert start_date <= props.enqtime <= end_date + + +async def test_8416(queue, async_conn): + "8416 - test message props declared attributes" + book = queue.payload_type.newobject() + values = dict( + payload=book, + correlation="TEST_CORRELATION", + delay=7, + exceptionq="TEST_EXCEPTIONQ", + expiration=10, + priority=1, + ) + props = async_conn.msgproperties(**values) + for attr_name in values: + assert getattr(props, attr_name) == values[attr_name] + + +async def test_8417(async_conn): + "8417 - test error for invalid type for payload_type" + with pytest.raises(TypeError): + await async_conn.queue("THE QUEUE", payload_type=4) + + +async def test_8418(queue, async_conn): + "8418 - test getting queue attributes" + assert queue.name == BOOK_QUEUE_NAME + assert queue.connection is async_conn + + +async def test_8419(queue): + "8419 - test getting write-only attributes" + with pytest.raises(AttributeError): + queue.enqoptions.deliverymode + with pytest.raises(AttributeError): + queue.deqoptions.deliverymode + + +async def test_8420(queue, async_conn, test_env): + "8420 - test correlation deqoption" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + correlations = ["Math", "Programming"] + num_messages = 3 + messages = [ + async_conn.msgproperties(payload=book, correlation=c) + for c in correlations + for i in range(num_messages) + ] + await queue.enqmany(messages) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = correlations[0] + correlated_messages = await queue.deqmany(num_messages + 1) + assert len(correlated_messages) == num_messages + + queue.deqoptions.correlation = correlations[1] + with test_env.assert_raises_full_code("ORA-25241"): + await queue.deqone() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + correlated_messages = await queue.deqmany(num_messages + 1) + assert len(correlated_messages) == num_messages + + +async def test_8421(queue, async_conn): + "8421 - test correlation deqoption with pattern-matching characters" + book = queue.payload_type.newobject() + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[0] + for correlation in ("PreCalculus-math1", "Calculus-Math2"): + props = async_conn.msgproperties(payload=book, correlation=correlation) await queue.enqone(props) + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.correlation = "%Calculus-%ath_" + messages = await queue.deqmany(5) + assert len(messages) == 2 - async with test_env.get_connection_async() as other_conn: - books_type = await other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertEqual(props.deliverymode, 
oracledb.MSG_PERSISTENT) - book = props.payload - results = (book.TITLE, book.AUTHORS, book.PRICE) - await other_conn.commit() - self.assertEqual(results, self.book_data[0]) - - async def test_8412(self): - "8412 - test enqueue/dequeue delivery modes different" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_BUFFERED - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=book) - await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - books_type = await other_conn.gettype(self.book_type_name) - queue = other_conn.queue(self.book_queue_name, books_type) - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertIsNone(props) - - async def test_8413(self): - "8413 - test error for message with no payload" - books_type = await self.conn.gettype(self.book_type_name) - queue = self.conn.queue(self.book_queue_name, books_type) - props = self.conn.msgproperties() - with self.assertRaisesFullCode("DPY-2000"): - await queue.enqone(props) - - async def test_8414(self): - "8414 - verify that the msgid property is returned correctly" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) +async def test_8422(queue, async_conn): + "8422 - test condition deqoption with priority" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + + priorities = [5, 10] + indexes = [0, 1] + for priority, ix in zip(priorities, indexes): book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - props = self.conn.msgproperties(payload=book) - self.assertIsNone(props.msgid) + book.TITLE, book.AUTHORS, book.PRICE = BOOK_DATA[ix] + props = async_conn.msgproperties(payload=book, priority=priority) await queue.enqone(props) - await self.cursor.execute("select msgid from book_queue_tab") - (actual_msgid,) = await self.cursor.fetchone() - self.assertEqual(props.msgid, actual_msgid) - props = await queue.deqone() - self.assertEqual(props.msgid, actual_msgid) - async def test_8415(self): - "8415 - test message props enqtime" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - await self.cursor.execute("select current_timestamp from dual") - (start_date,) = await self.cursor.fetchone() - start_date = start_date.replace(microsecond=0) - props = self.conn.msgproperties(payload=book) + queue.deqoptions.condition = "priority = 9" + messages = await queue.deqmany(3) + assert len(messages) == 0 + + for priority, ix in zip(priorities, indexes): + queue.deqoptions.condition = f"priority = {priority}" + messages = await queue.deqmany(3) + assert len(messages) == 1 + book = messages[0].payload + data = book.TITLE, book.AUTHORS, book.PRICE + assert data == BOOK_DATA[ix] + + +async def test_8423(queue, async_conn): + "8423 - test mode deqoption with DEQ_REMOVE_NODATA" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + + book = queue.payload_type.newobject() + for data in BOOK_DATA: + book.TITLE, book.AUTHORS, book.PRICE = data + props = async_conn.msgproperties(payload=book) await 
queue.enqone(props) - props = await queue.deqone() - await self.cursor.execute("select current_timestamp from dual") - (end_date,) = await self.cursor.fetchone() - end_date = end_date.replace(microsecond=0) - self.assertTrue(start_date <= props.enqtime <= end_date) - - async def test_8416(self): - "8416 - test message props declared attributes" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - values = dict( - payload=book, - correlation="TEST_CORRELATION", - delay=7, - exceptionq="TEST_EXCEPTIONQ", - expiration=10, - priority=1, - ) - props = self.conn.msgproperties(**values) - for attr_name in values: - self.assertEqual(getattr(props, attr_name), values[attr_name]) - - async def test_8417(self): - "8417 - test error for invalid type for payload_type" - self.assertRaises( - TypeError, self.conn.queue, "THE QUEUE", payload_type=4 - ) - - async def test_8418(self): - "8418 - test getting queue attributes" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - self.assertEqual(queue.name, self.book_queue_name) - self.assertEqual(queue.connection, self.conn) - - async def test_8419(self): - "8419 - test getting write-only attributes" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - with self.assertRaises(AttributeError): - queue.enqoptions.deliverymode - with self.assertRaises(AttributeError): - queue.deqoptions.deliverymode - - async def test_8420(self): - "8420 - test correlation deqoption" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - correlations = ["Math", "Programming"] - num_messages = 3 - messages = [ - self.conn.msgproperties(payload=book, correlation=c) - for c in correlations - for i in range(num_messages) - ] - await queue.enqmany(messages) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.correlation = correlations[0] - correlated_messages = await queue.deqmany(num_messages + 1) - self.assertEqual(len(correlated_messages), num_messages) - queue.deqoptions.correlation = correlations[1] - with self.assertRaisesFullCode("ORA-25241"): - await queue.deqone() - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - correlated_messages = await queue.deqmany(num_messages + 1) - self.assertEqual(len(correlated_messages), num_messages) - - async def test_8421(self): - "8421 - test correlation deqoption with pattern-matching characters" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[0] - for correlation in ("PreCalculus-math1", "Calculus-Math2"): - props = self.conn.msgproperties( - payload=book, correlation=correlation - ) - await queue.enqone(props) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.correlation = "%Calculus-%ath_" - messages = await queue.deqmany(5) - self.assertEqual(len(messages), 2) - - async def test_8422(self): - "8422 - test condition deqoption with priority" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + messages = await queue.deqmany(5) + assert len(messages) == 3 + for message in messages: + assert message.payload.TITLE is None + assert message.payload.AUTHORS is None + assert message.payload.PRICE is 
None - priorities = [5, 10] - indexes = [0, 1] - for priority, ix in zip(priorities, indexes): - book = queue.payload_type.newobject() - book.TITLE, book.AUTHORS, book.PRICE = self.book_data[ix] - props = self.conn.msgproperties(payload=book, priority=priority) - await queue.enqone(props) - queue.deqoptions.condition = "priority = 9" - messages = await queue.deqmany(3) - self.assertEqual(len(messages), 0) - - for priority, ix in zip(priorities, indexes): - queue.deqoptions.condition = f"priority = {priority}" - messages = await queue.deqmany(3) - self.assertEqual(len(messages), 1) - book = messages[0].payload - data = book.TITLE, book.AUTHORS, book.PRICE - self.assertEqual(data, self.book_data[ix]) - - async def test_8423(self): - "8423 - test mode deqoption with DEQ_REMOVE_NODATA" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA +async def test_8424(async_conn): + "8424 - test payload_type returns the correct value" + books_type = await async_conn.gettype(BOOK_TYPE_NAME) + queue = async_conn.queue(BOOK_QUEUE_NAME, books_type) + assert queue.payload_type == books_type - book = queue.payload_type.newobject() - for data in self.book_data: - book.TITLE, book.AUTHORS, book.PRICE = data - props = self.conn.msgproperties(payload=book) - await queue.enqone(props) - - messages = await queue.deqmany(5) - self.assertEqual(len(messages), 3) - for message in messages: - self.assertIsNone(message.payload.TITLE) - self.assertIsNone(message.payload.AUTHORS) - self.assertIsNone(message.payload.PRICE) - - async def test_8424(self): - "8424 - test payload_type returns the correct value" - books_type = await self.conn.gettype(self.book_type_name) - queue = self.conn.queue(self.book_queue_name, books_type) - self.assertEqual(queue.payload_type, books_type) - - async def test_8425(self): - "8425 - test enqueuing to an object queue with the wrong payload" - queue = await self.get_and_clear_queue( - self.book_queue_name, self.book_type_name - ) - props = self.conn.msgproperties(payload="A string") - with self.assertRaisesFullCode("DPY-2062"): - await queue.enqone(props) - - -if __name__ == "__main__": - test_env.run_test_cases() + +async def test_8425(queue, async_conn, test_env): + "8425 - test enqueuing to an object queue with the wrong payload" + props = async_conn.msgproperties(payload="A string") + with test_env.assert_raises_full_code("DPY-2062"): + await queue.enqone(props) diff --git a/tests/test_8500_aq_json_async.py b/tests/test_8500_aq_json_async.py index 0d640be0..087c22bb 100644 --- a/tests/test_8500_aq_json_async.py +++ b/tests/test_8500_aq_json_async.py @@ -29,382 +29,388 @@ import asyncio import datetime import decimal -import unittest import oracledb -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - json_queue_name = "TEST_JSON_QUEUE" - json_data = [ - [ - 2.75, - True, - "Ocean Beach", - b"Some bytes", - {"keyA": 1.0, "KeyB": "Melbourne"}, - datetime.datetime(2022, 8, 1, 0, 0), - ], - [ - True, - False, - "String", - b"Some Bytes", - {}, - {"name": None}, - {"name": "John"}, - {"age": 30}, - {"Permanent": True}, - { - "employee": { - "name": "John", - "age": 30, - "city": "Delhi", - "Parmanent": True, - } - }, - {"employees": ["John", "Matthew", "James"]}, - { - "employees": [ - {"employee1": {"name": "John", "city": "Delhi"}}, - {"employee2": {"name": "Matthew", "city": "Mumbai"}}, - {"employee3": 
{"name": "James", "city": "Bangalore"}}, - ] - }, - ], - [ - datetime.datetime.today(), - datetime.datetime(2004, 2, 1, 3, 4, 5), - datetime.datetime(2020, 12, 2, 13, 29, 14), - datetime.timedelta(8.5), - datetime.datetime(2002, 12, 13, 9, 36, 0), - oracledb.Timestamp(2002, 12, 13, 9, 36, 0), - datetime.datetime(2002, 12, 13), - ], - dict(name="John", age=30, city="New York"), - [ - 0, - 1, - 25.25, - 6088343244, - -9999999999999999999, - decimal.Decimal("0.25"), - decimal.Decimal("10.25"), - decimal.Decimal("319438950232418390.273596"), - ], - ] +import pytest + +JSON_QUEUE_NAME = "TEST_JSON_QUEUE" +JSON_DATA = [ + [ + 2.75, + True, + "Ocean Beach", + b"Some bytes", + {"keyA": 1.0, "KeyB": "Melbourne"}, + datetime.datetime(2022, 8, 1, 0, 0), + ], + [ + True, + False, + "String", + b"Some Bytes", + {}, + {"name": None}, + {"name": "John"}, + {"age": 30}, + {"Permanent": True}, + { + "employee": { + "name": "John", + "age": 30, + "city": "Delhi", + "Parmanent": True, + } + }, + {"employees": ["John", "Matthew", "James"]}, + { + "employees": [ + {"employee1": {"name": "John", "city": "Delhi"}}, + {"employee2": {"name": "Matthew", "city": "Mumbai"}}, + {"employee3": {"name": "James", "city": "Bangalore"}}, + ] + }, + ], + [ + datetime.datetime.today(), + datetime.datetime(2004, 2, 1, 3, 4, 5), + datetime.datetime(2020, 12, 2, 13, 29, 14), + datetime.timedelta(8.5), + datetime.datetime(2002, 12, 13, 9, 36, 0), + oracledb.Timestamp(2002, 12, 13, 9, 36, 0), + datetime.datetime(2002, 12, 13), + ], + dict(name="John", age=30, city="New York"), + [ + 0, + 1, + 25.25, + 6088343244, + -9999999999999999999, + decimal.Decimal("0.25"), + decimal.Decimal("10.25"), + decimal.Decimal("319438950232418390.273596"), + ], +] + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture +async def queue(async_conn, test_env): + """ + Creates the queue used by the tests in this file. 
+ """ + return await test_env.get_and_clear_queue_async( + async_conn, JSON_QUEUE_NAME, "JSON" + ) + + +async def _deq_in_task(test_env, results): + async with test_env.get_connection_async() as conn: + queue = conn.queue(JSON_QUEUE_NAME, "JSON") + queue.deqoptions.wait = 10 + props = await queue.deqone() + if props is not None: + results.append(props.payload) + await conn.commit() + + +def _verify_attr(obj, attrName, value): + setattr(obj, attrName, value) + assert getattr(obj, attrName) == value - async def __deq_in_task(self, results): - async with test_env.get_connection_async() as conn: - queue = conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.wait = 10 - props = await queue.deqone() - if props is not None: - results.append(props.payload) - await conn.commit() - - def __verify_attr(self, obj, attrName, value): - setattr(obj, attrName, value) - self.assertEqual(getattr(obj, attrName), value) - - async def test_8500(self): - "8500 - test dequeuing an empty JSON queue" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") + +async def test_8500(queue): + "8500 - test dequeuing an empty JSON queue" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + props = await queue.deqone() + assert props is None + + +async def test_8501(queue, async_conn): + "8501 - test enqueuing and dequeuing multiple JSON messages" + props = async_conn.msgproperties() + for data in JSON_DATA: + props.payload = data + await queue.enqone(props) + await async_conn.commit() + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + results = [] + while True: + props = await queue.deqone() + if props is None: + break + results.append(props.payload) + await async_conn.commit() + assert results == JSON_DATA + + +@pytest.mark.skip("awaiting fix for bug 37746852") +async def test_8502(queue, async_conn): + "8502 - test dequeuing with DEQ_REMOVE_NODATA option" + data = JSON_DATA[1] + props = async_conn.msgproperties(payload=data) + await queue.enqone(props) + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA + props = await queue.deqone() + assert props is not None + assert props.payload is None + + +async def test_8503(queue): + "8503 - test getting/setting dequeue options attributes" + options = queue.deqoptions + _verify_attr(options, "condition", "TEST_CONDITION") + _verify_attr(options, "consumername", "TEST_CONSUMERNAME") + _verify_attr(options, "correlation", "TEST_CORRELATION") + _verify_attr(options, "mode", oracledb.DEQ_LOCKED) + _verify_attr(options, "navigation", oracledb.DEQ_NEXT_TRANSACTION) + _verify_attr(options, "transformation", "TEST_TRANSFORMATION") + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + _verify_attr(options, "wait", 1287) + _verify_attr(options, "msgid", b"mID") + + +async def test_8504(queue, async_conn, test_env): + "8504 - test waiting for dequeue" + results = [] + task = asyncio.create_task(_deq_in_task(test_env, results)) + data = JSON_DATA[0] + props = async_conn.msgproperties(payload=data) + await queue.enqone(props) + await async_conn.commit() + await task + assert results == [data] + + +async def test_8505(queue): + "8505 - test getting/setting enqueue options attributes" + options = queue.enqoptions + _verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) + + +async def test_8506(async_conn): + "8506 - test getting/setting message properties attributes" + props = async_conn.msgproperties() + 
_verify_attr(props, "correlation", "TEST_CORRELATION") + _verify_attr(props, "delay", 60) + _verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") + _verify_attr(props, "expiration", 30) + assert props.attempts == 0 + _verify_attr(props, "priority", 1) + assert props.state == oracledb.MSG_READY + assert props.deliverymode == 0 + + +async def test_8507(queue, async_conn, test_env): + "8507 - test enqueue visibility options - ENQ_ON_COMMIT" + data = JSON_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT + props = async_conn.msgproperties(payload=data) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue(JSON_QUEUE_NAME, "JSON") + queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG queue.deqoptions.wait = oracledb.DEQ_NO_WAIT props = await queue.deqone() - self.assertIsNone(props) - - async def test_8501(self): - "8501 - test enqueuing and dequeuing multiple JSON messages" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - props = self.conn.msgproperties() - for data in self.json_data: - props.payload = data - await queue.enqone(props) - await self.conn.commit() + assert props is None + await async_conn.commit() + props = await queue.deqone() + assert props is not None + + +async def test_8508(queue, async_conn, test_env): + "8508 - test enqueue visibility option - ENQ_IMMEDIATE" + data = JSON_DATA[0] + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=data) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue(JSON_QUEUE_NAME, "JSON") queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - results = [] - while True: - props = await queue.deqone() - if props is None: - break - results.append(props.payload) - await self.conn.commit() - self.assertEqual(results, self.json_data) - - @unittest.skip("awaiting fix for bug 37746852") - async def test_8502(self): - "8502 - test dequeuing with DEQ_REMOVE_NODATA option" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[1] - props = self.conn.msgproperties(payload=data) - await queue.enqone(props) + props = await queue.deqone() + data = props.payload + results = data + await other_conn.commit() + assert results == JSON_DATA[0] + + +async def test_8509(queue, async_conn, test_env): + "8509 - test enqueue/dequeue delivery modes identical - persistent" + data = JSON_DATA[0] + queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT + queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE + props = async_conn.msgproperties(payload=data) + await queue.enqone(props) + + async with test_env.get_connection_async() as other_conn: + queue = other_conn.queue(JSON_QUEUE_NAME, "JSON") + queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG + queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.mode = oracledb.DEQ_REMOVE_NODATA props = await queue.deqone() - self.assertIsNotNone(props) - self.assertIsNone(props.payload) - - async def test_8503(self): - "8503 - test getting/setting dequeue options attributes" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - options = queue.deqoptions - self.__verify_attr(options, "condition", "TEST_CONDITION") - self.__verify_attr(options, 
"consumername", "TEST_CONSUMERNAME") - self.__verify_attr(options, "correlation", "TEST_CORRELATION") - self.__verify_attr(options, "mode", oracledb.DEQ_LOCKED) - self.__verify_attr( - options, "navigation", oracledb.DEQ_NEXT_TRANSACTION - ) - self.__verify_attr(options, "transformation", "TEST_TRANSFORMATION") - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - self.__verify_attr(options, "wait", 1287) - self.__verify_attr(options, "msgid", b"mID") - - async def test_8504(self): - "8504 - test waiting for dequeue" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - results = [] - task = asyncio.create_task(self.__deq_in_task(results)) - data = self.json_data[0] - props = self.conn.msgproperties(payload=data) - await queue.enqone(props) - await self.conn.commit() - await task - self.assertEqual(results, [data]) - - async def test_8505(self): - "8505 - test getting/setting enqueue options attributes" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - options = queue.enqoptions - self.__verify_attr(options, "visibility", oracledb.ENQ_IMMEDIATE) - - async def test_8506(self): - "8506 - test getting/setting message properties attributes" - props = self.conn.msgproperties() - self.__verify_attr(props, "correlation", "TEST_CORRELATION") - self.__verify_attr(props, "delay", 60) - self.__verify_attr(props, "exceptionq", "TEST_EXCEPTIONQ") - self.__verify_attr(props, "expiration", 30) - self.assertEqual(props.attempts, 0) - self.__verify_attr(props, "priority", 1) - self.assertEqual(props.state, oracledb.MSG_READY) - self.assertEqual(props.deliverymode, 0) - - async def test_8507(self): - "8507 - test enqueue visibility options - ENQ_ON_COMMIT" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - queue.enqoptions.visibility = oracledb.ENQ_ON_COMMIT - props = self.conn.msgproperties(payload=data) - await queue.enqone(props) + data = props.payload + results = data + await other_conn.commit() + assert results == JSON_DATA[0] - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - self.assertIsNone(props) - await self.conn.commit() - props = await queue.deqone() - self.assertIsNotNone(props) - - async def test_8508(self): - "8508 - test enqueue visibility option - ENQ_IMMEDIATE" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=data) - await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_ON_COMMIT - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - data = props.payload - results = data - await other_conn.commit() - self.assertEqual(results, self.json_data[0]) - - async def test_8509(self): - "8509 - test enqueue/dequeue delivery modes identical - persistent" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - queue.enqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.enqoptions.visibility = oracledb.ENQ_IMMEDIATE - props = self.conn.msgproperties(payload=data) +async def test_8510(queue, 
async_conn, test_env): + "8510 - test error for message with no payload" + props = async_conn.msgproperties() + with test_env.assert_raises_full_code("DPY-2000"): await queue.enqone(props) - async with test_env.get_connection_async() as other_conn: - queue = other_conn.queue(self.json_queue_name, "JSON") - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT - queue.deqoptions.navigation = oracledb.DEQ_FIRST_MSG - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - props = await queue.deqone() - data = props.payload - results = data - await other_conn.commit() - self.assertEqual(results, self.json_data[0]) - - async def test_8510(self): - "8510 - test error for message with no payload" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - props = self.conn.msgproperties() - with self.assertRaisesFullCode("DPY-2000"): - await queue.enqone(props) - - async def test_8511(self): - "8511 - verify that the msgid property is returned correctly" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - props = self.conn.msgproperties(payload=data) - self.assertIsNone(props.msgid) + +async def test_8511(queue, async_conn, async_cursor): + "8511 - verify that the msgid property is returned correctly" + data = JSON_DATA[0] + props = async_conn.msgproperties(payload=data) + assert props.msgid is None + await queue.enqone(props) + await async_cursor.execute("select msgid from JSON_QUEUE_TAB") + (actual_msgid,) = await async_cursor.fetchone() + assert props.msgid == actual_msgid + props = await queue.deqone() + assert props.msgid == actual_msgid + + +async def test_8512(queue, async_conn, async_cursor): + "8512 - test message props enqtime" + data = JSON_DATA[0] + await async_cursor.execute("select current_timestamp from dual") + (start_date,) = await async_cursor.fetchone() + start_date = start_date.replace(microsecond=0) + props = async_conn.msgproperties(payload=data) + await queue.enqone(props) + props = await queue.deqone() + await async_cursor.execute("select current_timestamp from dual") + (end_date,) = await async_cursor.fetchone() + end_date = end_date.replace(microsecond=0) + assert start_date <= props.enqtime <= end_date + + +async def test_8513(queue, async_conn): + "8513 - test message props declared attributes" + data = JSON_DATA[0] + values = dict( + payload=data, + correlation="TEST_CORRELATION", + delay=0, + exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", + expiration=15, + priority=1, + ) + props = async_conn.msgproperties(**values) + for attr_name in values: + assert getattr(props, attr_name) == values[attr_name] + await queue.enqone(props) + await async_conn.commit() + prop = await queue.deqone() + for attr_name in values: + assert getattr(prop, attr_name) == values[attr_name] + + +async def test_8514(queue, async_conn): + "8514 - test getting queue attributes" + assert queue.name == "TEST_JSON_QUEUE" + assert queue.connection is async_conn + + +async def test_8515(queue): + "8515 - test getting write-only attributes" + for options in (queue.enqoptions, queue.deqoptions): + with pytest.raises(AttributeError): + options.deliverymode + + +async def test_8516(queue, async_conn): + "8516 - test deqoption condition with priority" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] + for priority in priorities: + data = JSON_DATA[0] + props = async_conn.msgproperties(payload=data, priority=priority) await queue.enqone(props) - await 
self.cursor.execute("select msgid from JSON_QUEUE_TAB") - (actual_msgid,) = await self.cursor.fetchone() - self.assertEqual(props.msgid, actual_msgid) + + queue.deqoptions.condition = "priority = 9" + results = [] + while True: props = await queue.deqone() - self.assertEqual(props.msgid, actual_msgid) - - async def test_8512(self): - "8512 - test message props enqtime" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - await self.cursor.execute("select current_timestamp from dual") - (start_date,) = await self.cursor.fetchone() - start_date = start_date.replace(microsecond=0) - props = self.conn.msgproperties(payload=data) + if props is None: + break + results.append(props.payload) + await async_conn.commit() + assert len(results) == 3 + + +async def test_8517(queue, async_conn): + "8517 - test deqoption correlation" + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + correlations = [ + "sample", + "sample correlation", + "sample", + "sample", + "sample correlation", + ] + for correlation in correlations: + data = JSON_DATA[0] + props = async_conn.msgproperties(payload=data, correlation=correlation) await queue.enqone(props) + await async_conn.commit() + queue.deqoptions.correlation = "sample correlation" + results = [] + while True: props = await queue.deqone() - await self.cursor.execute("select current_timestamp from dual") - (end_date,) = await self.cursor.fetchone() - end_date = end_date.replace(microsecond=0) - self.assertTrue(start_date <= props.enqtime <= end_date) - - async def test_8513(self): - "8513 - test message props declared attributes" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - values = dict( - payload=data, - correlation="TEST_CORRELATION", - delay=0, - exceptionq="PYTHONTEST.TEST_EXCEPTIONQ", - expiration=15, - priority=1, - ) - props = self.conn.msgproperties(**values) - for attr_name in values: - self.assertEqual(getattr(props, attr_name), values[attr_name]) - await queue.enqone(props) - await self.conn.commit() - prop = await queue.deqone() - for attr_name in values: - self.assertEqual(getattr(prop, attr_name), values[attr_name]) - - async def test_8514(self): - "8514 - test getting queue attributes" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - self.assertEqual(queue.name, "TEST_JSON_QUEUE") - self.assertEqual(queue.connection, self.conn) - - async def test_8515(self): - "8515 - test getting write-only attributes" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - for options in (queue.enqoptions, queue.deqoptions): - with self.assertRaises(AttributeError): - options.deliverymode - - async def test_8516(self): - "8516 - test deqoption condition with priority" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - priorities = [5, 5, 5, 5, 10, 9, 9, 10, 9] - for priority in priorities: - data = self.json_data[0] - props = self.conn.msgproperties(payload=data, priority=priority) - await queue.enqone(props) - - queue.deqoptions.condition = "priority = 9" - results = [] - while True: - props = await queue.deqone() - if props is None: - break - results.append(props.payload) - await self.conn.commit() - self.assertEqual(len(results), 3) - - async def test_8517(self): - "8517 - test deqoption correlation" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - correlations = [ - "sample", - "sample 
correlation", - "sample", - "sample", - "sample correlation", - ] - for correlation in correlations: - data = self.json_data[0] - props = self.conn.msgproperties( - payload=data, correlation=correlation - ) - await queue.enqone(props) - await self.conn.commit() - queue.deqoptions.correlation = "sample correlation" - results = [] - while True: - props = await queue.deqone() - if props is None: - break - results.append(props.payload) - await self.conn.commit() - self.assertEqual(len(results), 2) - - async def test_8518(self): - "8518 - test deqoption msgid" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - data = self.json_data[0] - props = self.conn.msgproperties(payload=data) - await queue.enqone(props) + if props is None: + break + results.append(props.payload) + await async_conn.commit() + assert len(results) == 2 + + +async def test_8518(queue, async_conn): + "8518 - test deqoption msgid" + data = JSON_DATA[0] + props = async_conn.msgproperties(payload=data) + await queue.enqone(props) + await queue.enqone(props) + await async_conn.commit() + msgid = props.msgid + await queue.enqone(props) + await async_conn.commit() + queue.deqoptions.wait = oracledb.DEQ_NO_WAIT + queue.deqoptions.msgid = msgid + prop = await queue.deqone() + await async_conn.commit() + assert prop.msgid == msgid + + +async def test_8519(queue): + "8519 - test payload_type returns the correct value" + assert queue.payload_type == "JSON" + + +async def test_8520(queue): + "8520 - test deprecated attributes (enqOptions, deqOptions)" + assert queue.enqOptions is queue.enqoptions + assert queue.deqOptions is queue.deqoptions + + +async def test_8521(queue, async_conn, test_env): + "8521 - test wrong payload type" + props = async_conn.msgproperties(payload="A string") + with test_env.assert_raises_full_code("DPY-2062"): await queue.enqone(props) - await self.conn.commit() - msgid = props.msgid - await queue.enqone(props) - await self.conn.commit() - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.msgid = msgid - prop = await queue.deqone() - await self.conn.commit() - self.assertEqual(prop.msgid, msgid) - - async def test_8519(self): - "8519 - test payload_type returns the correct value" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - self.assertEqual(queue.payload_type, "JSON") - - async def test_8520(self): - "8520 - test deprecated attributes (enqOptions, deqOptions)" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - self.assertEqual(queue.enqOptions, queue.enqoptions) - self.assertEqual(queue.deqOptions, queue.deqoptions) - - async def test_8521(self): - "8521 - test wrong payload type" - queue = await self.get_and_clear_queue(self.json_queue_name, "JSON") - props = self.conn.msgproperties(payload="A string") - with self.assertRaisesFullCode("DPY-2062"): - await queue.enqone(props) - - -if __name__ == "__main__": - test_env.run_test_cases() diff --git a/tests/test_8600_cursor_scrollable_async.py b/tests/test_8600_cursor_scrollable_async.py index fbb9c592..02b734c3 100644 --- a/tests/test_8600_cursor_scrollable_async.py +++ b/tests/test_8600_cursor_scrollable_async.py @@ -26,228 +26,203 @@ 8600 - Module for testing scrollable cursors with asyncio """ -import test_env - - -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - async def test_8600(self): - "8600 - test creating a scrollable cursor" - cursor = self.conn.cursor() - self.assertEqual(cursor.scrollable, False) - cursor = self.conn.cursor(True) - 
self.assertEqual(cursor.scrollable, True) - cursor = self.conn.cursor(scrollable=True) - self.assertEqual(cursor.scrollable, True) - cursor.scrollable = False - self.assertEqual(cursor.scrollable, False) - - async def test_8601(self): - "8601 - test scrolling absolute yields an exception (after result set)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - with self.assertRaisesFullCode("DPY-2063"): - await cursor.scroll(12, "absolute") - - async def test_8602(self): - "8602 - test scrolling absolute (when in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.prefetchrows = 0 - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - await cursor.fetchmany() - self.assertTrue( - cursor.arraysize > 1, - "array size must exceed 1 for this test to work correctly", - ) +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +async def test_8600(async_conn): + "8600 - test creating a scrollable cursor" + cursor = async_conn.cursor() + assert not cursor.scrollable + cursor = async_conn.cursor(True) + assert cursor.scrollable + cursor = async_conn.cursor(scrollable=True) + assert cursor.scrollable + cursor.scrollable = False + assert not cursor.scrollable + + +async def test_8601(async_conn, test_env): + "8601 - test scrolling absolute yields an exception (after result set)" + cursor = async_conn.cursor(scrollable=True) + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + with test_env.assert_raises_full_code("DPY-2063"): + await cursor.scroll(12, "absolute") + + +async def test_8602(async_conn): + "8602 - test scrolling absolute (when in buffers)" + cursor = async_conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.fetchmany() + assert ( + cursor.arraysize > 1 + ), "array size must exceed 1 for this test to work correctly" + await cursor.scroll(1, mode="absolute") + (value,) = await cursor.fetchone() + assert value == 1.25 + assert cursor.rowcount == 1 + + +async def test_8603(async_conn): + "8603 - test scrolling absolute (when not in buffers)" + cursor = async_conn.cursor(scrollable=True) + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.scroll(6, mode="absolute") + (value,) = await cursor.fetchone() + assert value == 7.5 + assert cursor.rowcount == 6 + + +async def test_8604(async_conn): + "8604 - test scrolling to first row in result set (in buffers)" + cursor = async_conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.fetchmany() + await cursor.scroll(mode="first") + (value,) = await cursor.fetchone() + assert value == 1.25 + assert cursor.rowcount == 1 + + +async def test_8605(async_conn): + "8605 - test scrolling to first row in result set (not in buffers)" + cursor = async_conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.fetchmany() + await cursor.fetchmany() + await cursor.scroll(mode="first") + (value,) = await cursor.fetchone() + assert value == 1.25 + assert cursor.rowcount == 1 + + +async def test_8606(async_conn): + "8606 - test scrolling to last row in result set" + cursor = 
async_conn.cursor(scrollable=True) + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.scroll(mode="last") + (value,) = await cursor.fetchone() + assert value == 12.5 + assert cursor.rowcount == 10 + + +async def test_8607(async_conn, test_env): + "8607 - test scrolling relative yields an exception (after result set)" + cursor = async_conn.cursor(scrollable=True) + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + with test_env.assert_raises_full_code("DPY-2063"): + await cursor.scroll(15) + + +async def test_8608(async_conn, test_env): + "8608 - test scrolling relative yields exception (before result set)" + cursor = async_conn.cursor(scrollable=True) + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + with test_env.assert_raises_full_code("DPY-2063"): + await cursor.scroll(-5) + + +async def test_8609(async_conn): + "8609 - test scrolling relative (when in buffers)" + cursor = async_conn.cursor(scrollable=True) + cursor.prefetchrows = 0 + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.fetchmany() + message = "array size must exceed 1 for this test to work correctly" + assert cursor.arraysize > 1, message + await cursor.scroll(2 - cursor.rowcount) + (value,) = await cursor.fetchone() + assert value == 2.5 + assert cursor.rowcount == 2 + + +async def test_8610(async_conn): + "8610 - test scrolling relative (when not in buffers)" + cursor = async_conn.cursor(scrollable=True) + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.fetchmany() + await cursor.fetchmany() + message = "array size must exceed 1 for this test to work correctly" + assert cursor.arraysize > 1, message + await cursor.scroll(3 - cursor.rowcount) + (value,) = await cursor.fetchone() + assert value == 3.75 + assert cursor.rowcount == 3 + + +async def test_8611(async_conn, async_cursor, test_env): + "8611 - test scrolling when there are no rows" + await async_cursor.execute("truncate table TestTempTable") + cursor = async_conn.cursor(scrollable=True) + await cursor.execute("select * from TestTempTable") + await cursor.scroll(mode="last") + assert await cursor.fetchall() == [] + await cursor.scroll(mode="first") + assert await cursor.fetchall() == [] + with test_env.assert_raises_full_code("DPY-2063"): await cursor.scroll(1, mode="absolute") - (value,) = await cursor.fetchone() - self.assertEqual(value, 1.25) - self.assertEqual(cursor.rowcount, 1) - - async def test_8603(self): - "8603 - test scrolling absolute (when not in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - await cursor.scroll(6, mode="absolute") - (value,) = await cursor.fetchone() - self.assertEqual(value, 7.5) - self.assertEqual(cursor.rowcount, 6) - - async def test_8604(self): - "8604 - test scrolling to first row in result set (in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.prefetchrows = 0 - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - await cursor.fetchmany() - await cursor.scroll(mode="first") - (value,) = await cursor.fetchone() - self.assertEqual(value, 1.25) - self.assertEqual(cursor.rowcount, 1) - - async def test_8605(self): - "8605 - test scrolling to first row in result set (not in buffers)" - cursor = self.conn.cursor(scrollable=True) - 
cursor.arraysize = self.cursor.arraysize - cursor.prefetchrows = 0 - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - await cursor.fetchmany() - await cursor.fetchmany() - await cursor.scroll(mode="first") - (value,) = await cursor.fetchone() - self.assertEqual(value, 1.25) - self.assertEqual(cursor.rowcount, 1) - - async def test_8606(self): - "8606 - test scrolling to last row in result set" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - await cursor.scroll(mode="last") - (value,) = await cursor.fetchone() - self.assertEqual(value, 12.5) - self.assertEqual(cursor.rowcount, 10) - - async def test_8607(self): - "8607 - test scrolling relative yields an exception (after result set)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - with self.assertRaisesFullCode("DPY-2063"): - await cursor.scroll(15) - async def test_8608(self): - "8608 - test scrolling relative yields exception (before result set)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - with self.assertRaisesFullCode("DPY-2063"): - await cursor.scroll(-5) - - async def test_8609(self): - "8609 - test scrolling relative (when in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - cursor.prefetchrows = 0 - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - await cursor.fetchmany() - message = "array size must exceed 1 for this test to work correctly" - self.assertTrue(cursor.arraysize > 1, message) - await cursor.scroll(2 - cursor.rowcount) - (value,) = await cursor.fetchone() - self.assertEqual(value, 2.5) - self.assertEqual(cursor.rowcount, 2) - - async def test_8610(self): - "8610 - test scrolling relative (when not in buffers)" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" - ) - await cursor.fetchmany() - await cursor.fetchmany() - message = "array size must exceed 1 for this test to work correctly" - self.assertTrue(cursor.arraysize > 1, message) - await cursor.scroll(3 - cursor.rowcount) - (value,) = await cursor.fetchone() - self.assertEqual(value, 3.75) - self.assertEqual(cursor.rowcount, 3) - - async def test_8611(self): - "8611 - test scrolling when there are no rows" - await self.cursor.execute("truncate table TestTempTable") - cursor = self.conn.cursor(scrollable=True) - await cursor.execute("select * from TestTempTable") - await cursor.scroll(mode="last") - self.assertEqual(await cursor.fetchall(), []) - await cursor.scroll(mode="first") - self.assertEqual(await cursor.fetchall(), []) - with self.assertRaisesFullCode("DPY-2063"): - await cursor.scroll(1, mode="absolute") - - async def test_8612(self): - "8612 - test scrolling with differing array and fetch array sizes" - await self.cursor.execute("truncate table TestTempTable") - for i in range(30): - await self.cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, null) - """, - [i + 1], - ) - for arraysize in range(1, 6): - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = arraysize - await cursor.execute( - "select IntCol from 
TestTempTable order by IntCol" - ) - for num_rows in range(1, arraysize + 1): - await cursor.scroll(15, "absolute") - rows = await cursor.fetchmany(num_rows) - self.assertEqual(rows[0][0], 15) - self.assertEqual(cursor.rowcount, 15 + num_rows - 1) - await cursor.scroll(9) - rows = await cursor.fetchmany(num_rows) - num_rows_fetched = len(rows) - self.assertEqual(rows[0][0], 15 + num_rows + 8) - self.assertEqual( - cursor.rowcount, 15 + num_rows + num_rows_fetched + 7 - ) - await cursor.scroll(-12) - rows = await cursor.fetchmany(num_rows) - count = 15 + num_rows + num_rows_fetched - 5 - self.assertEqual(rows[0][0], count) - count = 15 + num_rows + num_rows_fetched + num_rows - 6 - self.assertEqual(cursor.rowcount, count) - - async def test_8613(self): - "8613 - test calling scroll() with invalid mode" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = self.cursor.arraysize - await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" + +async def test_8612(async_conn, async_cursor): + "8612 - test scrolling with differing array and fetch array sizes" + await async_cursor.execute("truncate table TestTempTable") + for i in range(30): + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, null) + """, + [i + 1], ) - await cursor.fetchmany() - with self.assertRaisesFullCode("DPY-2009"): - await cursor.scroll(mode="middle") - - async def test_8614(self): - "8614 - test scroll after fetching all rows" - cursor = self.conn.cursor(scrollable=True) - cursor.arraysize = 5 - cursor.prefetchrows = 0 + for arraysize in range(1, 6): + cursor = async_conn.cursor(scrollable=True) + cursor.arraysize = arraysize await cursor.execute( - "select NumberCol from TestNumbers order by IntCol" + "select IntCol from TestTempTable order by IntCol" ) - await cursor.fetchall() - await cursor.scroll(5, mode="absolute") - (value,) = await cursor.fetchone() - self.assertEqual(value, 6.25) - self.assertEqual(cursor.rowcount, 5) - - -if __name__ == "__main__": - test_env.run_test_cases() + for num_rows in range(1, arraysize + 1): + await cursor.scroll(15, "absolute") + rows = await cursor.fetchmany(num_rows) + assert rows[0][0] == 15 + assert cursor.rowcount == 15 + num_rows - 1 + await cursor.scroll(9) + rows = await cursor.fetchmany(num_rows) + num_rows_fetched = len(rows) + assert rows[0][0] == 15 + num_rows + 8 + assert cursor.rowcount == 15 + num_rows + num_rows_fetched + 7 + await cursor.scroll(-12) + rows = await cursor.fetchmany(num_rows) + count = 15 + num_rows + num_rows_fetched - 5 + assert rows[0][0] == count + count = 15 + num_rows + num_rows_fetched + num_rows - 6 + assert cursor.rowcount == count + + +async def test_8613(async_conn, test_env): + "8613 - test calling scroll() with invalid mode" + cursor = async_conn.cursor(scrollable=True) + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.fetchmany() + with test_env.assert_raises_full_code("DPY-2009"): + await cursor.scroll(mode="middle") + + +async def test_8614(async_conn): + "8614 - test scroll after fetching all rows" + cursor = async_conn.cursor(scrollable=True) + cursor.arraysize = 5 + cursor.prefetchrows = 0 + await cursor.execute("select NumberCol from TestNumbers order by IntCol") + await cursor.fetchall() + await cursor.scroll(5, mode="absolute") + (value,) = await cursor.fetchone() + assert value == 6.25 + assert cursor.rowcount == 5 diff --git a/tests/test_8700_sessionless_transaction.py b/tests/test_8700_sessionless_transaction.py 
index 4759efa1..a0bd3b94 100644 --- a/tests/test_8700_sessionless_transaction.py +++ b/tests/test_8700_sessionless_transaction.py @@ -29,524 +29,492 @@ # procedures with DBMS_TRANSACTION package. # ----------------------------------------------------------------------------- -import test_env - - -@test_env.skip_unless_sessionless_transactions_supported() -class TestCase(test_env.BaseTestCase): - - transaction_id_client = b"test_8700_client" - transaction_id_server = b"test_8700_server" - - def _get_server_start_stmt(self, mode): - "Generate server-side transaction start statement" - return f""" - DECLARE - transaction_id RAW(128); - BEGIN - transaction_id := DBMS_TRANSACTION.START_TRANSACTION( - :transaction_id, - DBMS_TRANSACTION.TRANSACTION_TYPE_SESSIONLESS, - :timeout, - DBMS_TRANSACTION.TRANSACTION_{mode} - ); - END;""" - - def test_8700(self): - "8700 - test sessionless transaction using client API" - self.cursor.execute("truncate table TestTempTable") - - # create sessionless transaction in one connection - with test_env.get_connection() as conn: - - cursor = conn.cursor() - - # start sessionless transaction - conn.begin_sessionless_transaction( - transaction_id=self.transaction_id_client, - timeout=15, - defer_round_trip=True, - ) +import pytest - # insert data within transaction - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "row1"), - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "row2"), - ) +TRANSACTION_ID_CLIENT = b"test_8700_client" +TRANSACTION_ID_SERVER = b"test_8700_server" - # suspend the sessionless transaction - conn.suspend_sessionless_transaction() - # ensure data is not visible outside transaction - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), []) +@pytest.fixture(autouse=True) +def module_checks(skip_unless_sessionless_transactions_supported): + pass - # resume the transaction in another connection - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.resume_sessionless_transaction( - transaction_id=self.transaction_id_client, - timeout=5, - defer_round_trip=True, - ) +def _get_server_start_stmt(mode): + "Generate server-side transaction start statement" + return f""" + DECLARE + transaction_id RAW(128); + BEGIN + transaction_id := DBMS_TRANSACTION.START_TRANSACTION( + :transaction_id, + DBMS_TRANSACTION.TRANSACTION_TYPE_SESSIONLESS, + :timeout, + DBMS_TRANSACTION.TRANSACTION_{mode} + ); + END;""" - # suspend using suspend_on_success flag with executemany - cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [(3, "row3")], - suspend_on_success=True, - ) - # ensure data is not visible as the transaction is suspended - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), []) +def test_8700(cursor, test_env): + "8700 - test sessionless transaction using client API" + cursor.execute("truncate table TestTempTable") - # resume the transaction and commit the changes - conn.resume_sessionless_transaction( - transaction_id=self.transaction_id_client - ) - conn.commit() - - # verify data after commit - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(len(cursor.fetchall()), 3) - - def test_8701(self): - "8701 - test sessionless transaction using server-side procedures" - self.cursor.execute("truncate table TestTempTable") - - # create sessionless transaction 
in one connection - with test_env.get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - self._get_server_start_stmt("NEW"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) + # create sessionless transaction in one connection + with test_env.get_connection() as conn: - # insert data within transaction - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "row1"), - ) + cursor = conn.cursor() - # Suspend on server - cursor.callproc("dbms_transaction.suspend_transaction") + # start sessionless transaction + conn.begin_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=15, + defer_round_trip=True, + ) - # verify data is not visible after suspend - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), []) + # insert data within transaction + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "row1"), + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) - # resume the transaction in another connection - with test_env.get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - self._get_server_start_stmt("RESUME"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "row2"), - ) - conn.commit() - - # verify data after commit in original connection - self.cursor.execute("SELECT IntCol, StringCol1 FROM TestTempTable") - self.assertEqual(len(self.cursor.fetchall()), 2) - - def test_8702(self): - "8702 - test error conditions with server API sessionless transactions" - self.cursor.execute("truncate table TestTempTable") - - # start a transaction via the server; verify that suspension via the - # client fails but suspension via the server succeeds - with test_env.get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - self._get_server_start_stmt("NEW"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "server_row"), - ) - with self.assertRaisesFullCode("DPY-3034"): - conn.suspend_sessionless_transaction() - cursor.callproc("dbms_transaction.suspend_transaction") - - # resume on a second connection - with test_env.get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - self._get_server_start_stmt("RESUME"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "server_row2"), - ) + # suspend the sessionless transaction + conn.suspend_sessionless_transaction() - # resuming on a different session should fail - with test_env.get_connection() as other_conn: - other_cursor = other_conn.cursor() - with self.assertRaisesFullCode("ORA-25351"): - other_cursor.execute( - self._get_server_start_stmt("RESUME"), - { - "transaction_id": self.transaction_id_server, - "timeout": 2, - }, - ) - - def test_8703(self): - "8703 - test rollback of sessionless transaction" - self.cursor.execute("truncate table TestTempTable") - - # start and work with sessionless transaction - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.begin_sessionless_transaction( - transaction_id=b"rollback_test", timeout=15 - ) - cursor.execute( - """ - insert 
into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "rollback_row"), - suspend_on_success=True, - ) + # ensure data is not visible outside transaction + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [] - # resume in new connection and rollback - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.resume_sessionless_transaction( - transaction_id=b"rollback_test", timeout=5 - ) - conn.rollback() - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), []) - - def test_8704(self): - "8704 - test multiple operations within same sessionless transaction" - self.cursor.execute("truncate table TestTempTable") - - # start transaction and perform multiple operations - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.begin_sessionless_transaction( - transaction_id=b"multi_ops_test", timeout=15 - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "original"), - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "second"), - ) - cursor.execute( - """ - update TestTempTable set StringCol1 = :v1 where IntCol = 1 - """, - v1="updated", - ) - cursor.execute("delete from TestTempTable where IntCol = 2") - conn.suspend_sessionless_transaction() - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), []) - - # resume and commit - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.resume_sessionless_transaction( - transaction_id=b"multi_ops_test", timeout=5 - ) - conn.commit() - cursor.execute("select IntCol, StringCol1 from TestTempTable") - self.assertEqual(cursor.fetchall(), [(1, "updated")]) - - def test_8705(self): - "8705 - test concurrent sessionless transactions" - self.cursor.execute("truncate table TestTempTable") - - # start first sessionless transaction - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.begin_sessionless_transaction( - transaction_id=b"concurrent_1", timeout=15 - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "concurrent_1"), - suspend_on_success=True, - ) + # resume the transaction in another connection + with test_env.get_connection() as conn: + cursor = conn.cursor() - # start second sessionless transaction in another connection - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.begin_sessionless_transaction( - transaction_id=b"concurrent_2", timeout=15 - ) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "concurrent_2"), - suspend_on_success=True, - ) + conn.resume_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=5, + defer_round_trip=True, + ) - # resume and commit both transactions - with test_env.get_connection() as conn: - conn.resume_sessionless_transaction(transaction_id=b"concurrent_1") - conn.commit() - conn.resume_sessionless_transaction(transaction_id=b"concurrent_2") - conn.commit() - - # verify data from both transactions is present - with test_env.get_connection() as conn: - cursor = conn.cursor() - cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - expected_data = [(1, "concurrent_1"), (2, "concurrent_2")] - self.assertEqual(cursor.fetchall(), expected_data) - - def test_8706(self): - "8706 - test 
sessionless transaction with large data" - self.cursor.execute("delete from TestAllTypes") - self.conn.commit() - - # start sessionless transaction and insert large data - large_string = "X" * 250_000 - with test_env.get_connection() as conn: - cursor = conn.cursor() - transaction_id = conn.begin_sessionless_transaction() - cursor.execute( - """ - insert into TestAllTypes (IntValue, ClobValue) - values (:1, :2) - """, - (1, large_string), - suspend_on_success=True, - ) + # suspend using suspend_on_success flag with executemany + cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [(3, "row3")], + suspend_on_success=True, + ) - # resume transaction and commit - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.resume_sessionless_transaction(transaction_id) - conn.commit() - cursor.execute( - "select ClobValue from TestAllTypes", fetch_lobs=False - ) - (result,) = cursor.fetchone() - self.assertEqual(result, large_string) - - def test_8707(self): - "8707 - test sessionless transaction with multiple suspends/resumes" - self.cursor.execute("truncate table TestTempTable") - - # define data to insert - data = [ - (1, "first_insert"), - (2, "second_insert"), - (3, "third_insert"), - ] - - # start sessionless transaction and suspend - transaction_id = self.conn.begin_sessionless_transaction() - self.cursor.execute( + # ensure data is not visible as the transaction is suspended + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [] + + # resume the transaction and commit the changes + conn.resume_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT + ) + conn.commit() + + # verify data after commit + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert len(cursor.fetchall()) == 3 + + +def test_8701(conn, test_env): + "8701 - test sessionless transaction using server-side procedures" + base_cursor = conn.cursor() + base_cursor.execute("truncate table TestTempTable") + + # create sessionless transaction in one connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + _get_server_start_stmt("NEW"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) + + # insert data within transaction + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, - data[0], + (1, "row1"), + ) + + # Suspend on server + cursor.callproc("dbms_transaction.suspend_transaction") + + # verify data is not visible after suspend + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [] + + # resume the transaction in another connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + _get_server_start_stmt("RESUME"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) + conn.commit() + + # verify data after commit in original connection + base_cursor.execute("SELECT IntCol, StringCol1 FROM TestTempTable") + assert len(base_cursor.fetchall()) == 2 + + +def test_8702(cursor, test_env): + "8702 - test error conditions with server API sessionless transactions" + cursor.execute("truncate table TestTempTable") + + # start a transaction via the server; verify that suspension via the + # client fails but suspension via the server succeeds + with test_env.get_connection() as conn: + cursor = conn.cursor() + 
cursor.execute( + _get_server_start_stmt("NEW"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "server_row"), + ) + with test_env.assert_raises_full_code("DPY-3034"): + conn.suspend_sessionless_transaction() + cursor.callproc("dbms_transaction.suspend_transaction") + + # resume on a second connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( + _get_server_start_stmt("RESUME"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "server_row2"), + ) + + # resuming on a different session should fail + with test_env.get_connection() as other_conn: + other_cursor = other_conn.cursor() + with test_env.assert_raises_full_code("ORA-25351"): + other_cursor.execute( + _get_server_start_stmt("RESUME"), + { + "transaction_id": TRANSACTION_ID_SERVER, + "timeout": 2, + }, + ) + + +def test_8703(cursor, test_env): + "8703 - test rollback of sessionless transaction" + cursor.execute("truncate table TestTempTable") + + # start and work with sessionless transaction + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.begin_sessionless_transaction( + transaction_id=b"rollback_test", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "rollback_row"), suspend_on_success=True, ) - # resume and insert second row - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.resume_sessionless_transaction(transaction_id) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[1], - suspend_on_success=True, - ) + # resume in new connection and rollback + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction( + transaction_id=b"rollback_test", timeout=5 + ) + conn.rollback() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [] + + +def test_8704(cursor, test_env): + "8704 - test multiple operations within same sessionless transaction" + cursor.execute("truncate table TestTempTable") + + # start transaction and perform multiple operations + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.begin_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "original"), + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "second"), + ) + cursor.execute( + """ + update TestTempTable set StringCol1 = :v1 where IntCol = 1 + """, + v1="updated", + ) + cursor.execute("delete from TestTempTable where IntCol = 2") + conn.suspend_sessionless_transaction() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [] + + # resume and commit + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=5 + ) + conn.commit() + cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert cursor.fetchall() == [(1, "updated")] - # resume and insert third row, then commit - with test_env.get_connection() as conn: - cursor = conn.cursor() - conn.resume_sessionless_transaction(transaction_id) - 
cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[2], - ) - conn.commit() - # verify all data is present - self.cursor.execute( +def test_8705(cursor, test_env): + "8705 - test concurrent sessionless transactions" + cursor.execute("truncate table TestTempTable") + + # start first sessionless transaction + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.begin_sessionless_transaction( + transaction_id=b"concurrent_1", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "concurrent_1"), + suspend_on_success=True, + ) + + # start second sessionless transaction in another connection + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.begin_sessionless_transaction( + transaction_id=b"concurrent_2", timeout=15 + ) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "concurrent_2"), + suspend_on_success=True, + ) + + # resume and commit both transactions + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(transaction_id=b"concurrent_1") + conn.commit() + conn.resume_sessionless_transaction(transaction_id=b"concurrent_2") + conn.commit() + + # verify data from both transactions is present + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute( """ select IntCol, StringCol1 from TestTempTable order by IntCol """ ) - self.assertEqual(self.cursor.fetchall(), data) + expected_data = [(1, "concurrent_1"), (2, "concurrent_2")] + assert cursor.fetchall() == expected_data - def test_8708(self): - "8708 - Test sessionless transaction with invalid resume attempts" - self.cursor.execute("truncate table TestTempTable") - # start a sessionless transaction - transaction_id = self.conn.begin_sessionless_transaction() +def test_8706(conn, cursor, test_env): + "8706 - test sessionless transaction with large data" + cursor.execute("delete from TestAllTypes") + conn.commit() - # try to resume with the wrong transaction id - if self.conn.thin: - with self.assertRaisesFullCode("DPY-3035"): - self.conn.resume_sessionless_transaction("wrong_id") + # start sessionless transaction and insert large data + large_string = "X" * 250_000 + with test_env.get_connection() as conn: + cursor = conn.cursor() + transaction_id = conn.begin_sessionless_transaction() + cursor.execute( + """ + insert into TestAllTypes (IntValue, ClobValue) + values (:1, :2) + """, + (1, large_string), + suspend_on_success=True, + ) - # try to resume before suspend - if self.conn.thin: - with self.assertRaisesFullCode("DPY-3035"): - self.conn.resume_sessionless_transaction(transaction_id) + # resume transaction and commit + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + conn.commit() + cursor.execute("select ClobValue from TestAllTypes", fetch_lobs=False) + (result,) = cursor.fetchone() + assert result == large_string + + +def test_8707(conn, test_env): + "8707 - test sessionless transaction with multiple suspends/resumes" + base_cursor = conn.cursor() + base_cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [ + (1, "first_insert"), + (2, "second_insert"), + (3, "third_insert"), + ] + + # start sessionless transaction and suspend + transaction_id = conn.begin_sessionless_transaction() + base_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) 
+ """, + data[0], + suspend_on_success=True, + ) + + # resume and insert second row + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) - # suspend and resume correctly - self.conn.suspend_sessionless_transaction() - with test_env.get_connection() as conn: + # resume and insert third row, then commit + with test_env.get_connection() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[2], + ) + conn.commit() + + # verify all data is present + base_cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + assert base_cursor.fetchall() == data + + +def test_8708(conn, cursor, test_env): + "8708 - Test sessionless transaction with invalid resume attempts" + cursor.execute("truncate table TestTempTable") + + # start a sessionless transaction + transaction_id = conn.begin_sessionless_transaction() + + # try to resume with the wrong transaction id + if conn.thin: + with test_env.assert_raises_full_code("DPY-3035"): + conn.resume_sessionless_transaction("wrong_id") + + # try to resume before suspend + if conn.thin: + with test_env.assert_raises_full_code("DPY-3035"): conn.resume_sessionless_transaction(transaction_id) - def test_8709(self): - "8709 - test getting transaction ID of active sessionless transaction" - transaction_id = self.conn.begin_sessionless_transaction() - self.cursor.execute("select dbms_transaction.get_transaction_id()") - (server_transaction_id,) = self.cursor.fetchone() - self.assertEqual(server_transaction_id, transaction_id.hex().upper()) - self.conn.commit() + # suspend and resume correctly + conn.suspend_sessionless_transaction() + with test_env.get_connection() as other_conn: + other_conn.resume_sessionless_transaction(transaction_id) - def test_8710(self): - "8710 - test auto-generated transaction ID uniqueness" - # start first transaction - transaction_id_1 = self.conn.begin_sessionless_transaction() - self.conn.suspend_sessionless_transaction() +def test_8709(conn, cursor): + "8709 - test getting transaction ID of active sessionless transaction" + transaction_id = conn.begin_sessionless_transaction() + cursor.execute("select dbms_transaction.get_transaction_id()") + (server_transaction_id,) = cursor.fetchone() + assert server_transaction_id == transaction_id.hex().upper() + conn.commit() - # start second transaction - with test_env.get_connection() as conn: - transaction_id_2 = conn.begin_sessionless_transaction() - conn.suspend_sessionless_transaction() - self.assertNotEqual(transaction_id_1, transaction_id_2) - conn.resume_sessionless_transaction(transaction_id_2) - conn.rollback() - - # cleanup - self.conn.resume_sessionless_transaction(transaction_id_1) - self.conn.rollback() - - def test_8711(self): - "8711 - test sessionless transactions with connection pool" - self.cursor.execute("truncate table TestTempTable") - - # initialization - data = [(1, "value 1"), (2, "value 2")] - pool = test_env.get_pool(min=2, max=5) - - # start transaction on first connection - with pool.acquire() as conn: - cursor = conn.cursor() - transaction_id = conn.begin_sessionless_transaction() - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[0], - 
suspend_on_success=True, - ) - # resume on second connection - with pool.acquire() as conn: - cursor = conn.cursor() - conn.resume_sessionless_transaction(transaction_id) - cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[1], - ) - conn.commit() - - # verify data - with pool.acquire() as conn: - cursor = conn.cursor() - cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - self.assertEqual(cursor.fetchall(), data) +def test_8710(conn, test_env): + "8710 - test auto-generated transaction ID uniqueness" + + # start first transaction + transaction_id_1 = conn.begin_sessionless_transaction() + conn.suspend_sessionless_transaction() + + # start second transaction + with test_env.get_connection() as other_conn: + transaction_id_2 = other_conn.begin_sessionless_transaction() + other_conn.suspend_sessionless_transaction() + assert transaction_id_1 != transaction_id_2 + other_conn.resume_sessionless_transaction(transaction_id_2) + other_conn.rollback() - pool.close() + # cleanup + conn.resume_sessionless_transaction(transaction_id_1) + conn.rollback() - def test_8712(self): - "8712 - Test sessionless transaction with special transaction ids" - self.cursor.execute("truncate table TestTempTable") - # define data to insert - data = [(1, "long_transaction_id"), (2, "special_chars")] +def test_8711(cursor, test_env): + "8711 - test sessionless transactions with connection pool" + cursor.execute("truncate table TestTempTable") - # test with long transaction id - long_transaction_id = b"X" * 64 - self.conn.begin_sessionless_transaction(long_transaction_id) - self.cursor.execute( + # initialization + data = [(1, "value 1"), (2, "value 2")] + pool = test_env.get_pool(min=2, max=5) + + # start transaction on first connection + with pool.acquire() as conn: + cursor = conn.cursor() + transaction_id = conn.begin_sessionless_transaction() + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) @@ -555,105 +523,151 @@ def test_8712(self): suspend_on_success=True, ) - # resume and commit in different connection - with test_env.get_connection() as conn: - conn.resume_sessionless_transaction(long_transaction_id) - conn.commit() - - # test with special characters in transaction id - special_transaction_id = b"SPECIAL@#$%^&*()_+" - self.conn.begin_sessionless_transaction(special_transaction_id) - self.cursor.execute( + # resume on second connection + with pool.acquire() as conn: + cursor = conn.cursor() + conn.resume_sessionless_transaction(transaction_id) + cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, data[1], - suspend_on_success=True, ) + conn.commit() - # resume and commit in different connection - with test_env.get_connection() as conn: - conn.resume_sessionless_transaction(special_transaction_id) - conn.commit() - - # verify both transactions committed - self.cursor.execute( + # verify data + with pool.acquire() as conn: + cursor = conn.cursor() + cursor.execute( """ select IntCol, StringCol1 from TestTempTable order by IntCol """ ) - self.assertEqual(self.cursor.fetchall(), data) - - def test_8713(self): - "8713 - duplicate transaction id across different connections" - transaction_id = "test_8713_transaction_id" - self.conn.begin_sessionless_transaction(transaction_id) - with test_env.get_connection() as conn: - with self.assertRaisesFullCode("ORA-26217"): - conn.begin_sessionless_transaction(transaction_id) - - def test_8714(self): - "8714 
- zero timeout behaviour in resume" - transaction_id = self.conn.begin_sessionless_transaction() - with test_env.get_connection() as conn: - with self.assertRaisesFullCode("ORA-25351"): - conn.resume_sessionless_transaction(transaction_id, timeout=0) - - # suspend transaction on first session, and resume will now succeed - self.conn.suspend_sessionless_transaction() - with test_env.get_connection() as conn: - conn.resume_sessionless_transaction(transaction_id, timeout=0) - conn.rollback() - - def test_8715(self): - "8715 - transaction behaviour with DDL operations" - - # create temp table - temp_table_name = "temp_test_8715" - self.cursor.execute(f"drop table if exists {temp_table_name}") - self.cursor.execute( + assert cursor.fetchall() == data + + pool.close() + + +def test_8712(conn, cursor, test_env): + "8712 - Test sessionless transaction with special transaction ids" + cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [(1, "long_transaction_id"), (2, "special_chars")] + + # test with long transaction id + long_transaction_id = b"X" * 64 + conn.begin_sessionless_transaction(long_transaction_id) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume and commit in different connection + with test_env.get_connection() as other_conn: + other_conn.resume_sessionless_transaction(long_transaction_id) + other_conn.commit() + + # test with special characters in transaction id + special_transaction_id = b"SPECIAL@#$%^&*()_+" + conn.begin_sessionless_transaction(special_transaction_id) + cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) + + # resume and commit in different connection + with test_env.get_connection() as other_conn: + other_conn.resume_sessionless_transaction(special_transaction_id) + other_conn.commit() + + # verify both transactions committed + cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + assert cursor.fetchall() == data + + +def test_8713(conn, test_env): + "8713 - duplicate transaction id across different connections" + transaction_id = "test_8713_transaction_id" + conn.begin_sessionless_transaction(transaction_id) + with test_env.get_connection() as conn: + with test_env.assert_raises_full_code("ORA-26217"): + conn.begin_sessionless_transaction(transaction_id) + + +def test_8714(conn, test_env): + "8714 - zero timeout behaviour in resume" + transaction_id = conn.begin_sessionless_transaction() + with test_env.get_connection() as other_conn: + with test_env.assert_raises_full_code("ORA-25351"): + other_conn.resume_sessionless_transaction( + transaction_id, timeout=0 + ) + + # suspend transaction on first session, and resume will now succeed + conn.suspend_sessionless_transaction() + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(transaction_id, timeout=0) + conn.rollback() + + +def test_8715(conn, cursor, test_env): + "8715 - transaction behaviour with DDL operations" + + # create temp table + temp_table_name = "temp_test_8715" + cursor.execute(f"drop table if exists {temp_table_name}") + cursor.execute( + f""" + create table {temp_table_name} ( + id number, + data varchar2(50) + )""" + ) + + # beging sessionless transaction and perform DDL which performs an + # implicit commit + conn.begin_sessionless_transaction() + cursor.execute(f"alter table {temp_table_name} add 
temp_col varchar2(20)") + + # further DML operations are part of a local transaction + local_data = (1, "LOCAL_TRANSACTION", "abc") + cursor.execute( + f"insert into {temp_table_name} values (:1, :2, :3)", + local_data, + ) + + # suspend will fail now as a local transaction is active and only + # sessionless transactions are suspendable + with test_env.assert_raises_full_code("DPY-3036"): + cursor.execute( f""" - create table {temp_table_name} ( - id number, - data varchar2(50) - )""" - ) - - # beging sessionless transaction and perform DDL which performs an - # implicit commit - self.conn.begin_sessionless_transaction() - self.cursor.execute( - f"alter table {temp_table_name} add temp_col varchar2(20)" - ) - - # further DML operations are part of a local transaction - local_data = (1, "LOCAL_TRANSACTION", "abc") - self.cursor.execute( - f"insert into {temp_table_name} values (:1, :2, :3)", - local_data, - ) - - # suspend will fail now as a local transaction is active and only - # sessionless transactions are suspendable - with self.assertRaisesFullCode("DPY-3036"): - self.cursor.execute( - f""" - insert into {temp_table_name} - values (2, 'LOCAL_TRANSACTION', 'def') - """, - suspend_on_success=True, - ) - - # verify data from local transaction is all that is present - self.cursor.execute(f"select * from {temp_table_name}") - self.assertEqual(self.cursor.fetchall(), [local_data]) - - # drop temp table - self.cursor.execute(f"drop table {temp_table_name} purge") + insert into {temp_table_name} + values (2, 'LOCAL_TRANSACTION', 'def') + """, + suspend_on_success=True, + ) + # verify data from local transaction is all that is present + cursor.execute(f"select * from {temp_table_name}") + assert cursor.fetchall() == [local_data] -if __name__ == "__main__": - test_env.run_test_cases() + # drop temp table + cursor.execute(f"drop table {temp_table_name} purge") diff --git a/tests/test_8800_sessionless_transaction_async.py b/tests/test_8800_sessionless_transaction_async.py index b88c4dec..7986fbd5 100644 --- a/tests/test_8800_sessionless_transaction_async.py +++ b/tests/test_8800_sessionless_transaction_async.py @@ -29,545 +29,498 @@ # server-side procedures with the DBMS_TRANSACTION package. 
# ----------------------------------------------------------------------------- -import test_env - - -@test_env.skip_unless_thin_mode() -@test_env.skip_unless_sessionless_transactions_supported() -class TestCase(test_env.BaseAsyncTestCase): - - transaction_id_client = b"test_8800_client" - transaction_id_server = b"test_8800_server" - - def _get_server_start_stmt(self, mode): - "Generate server-side transaction start statement" - return f""" - DECLARE - transaction_id RAW(128); - BEGIN - transaction_id := DBMS_TRANSACTION.START_TRANSACTION( - :transaction_id, - DBMS_TRANSACTION.TRANSACTION_TYPE_SESSIONLESS, - :timeout, - DBMS_TRANSACTION.TRANSACTION_{mode} - ); - END;""" - - async def test_8800(self): - "8800 - test sessionless transaction using client API" - await self.cursor.execute("truncate table TestTempTable") - - # create sessionless transaction in one connection - async with test_env.get_connection_async() as conn: - - cursor = conn.cursor() - - # start sessionless transaction - await conn.begin_sessionless_transaction( - transaction_id=self.transaction_id_client, - timeout=15, - defer_round_trip=True, - ) +import pytest + +TRANSACTION_ID_CLIENT = b"test_8800_client" +TRANSACTION_ID_SERVER = b"test_8800_server" + + +@pytest.fixture(autouse=True) +def module_checks( + anyio_backend, + skip_unless_thin_mode, + skip_unless_sessionless_transactions_supported, +): + pass + + +def _get_server_start_stmt(mode): + "Generate server-side transaction start statement" + return f""" + DECLARE + transaction_id RAW(128); + BEGIN + transaction_id := DBMS_TRANSACTION.START_TRANSACTION( + :transaction_id, + DBMS_TRANSACTION.TRANSACTION_TYPE_SESSIONLESS, + :timeout, + DBMS_TRANSACTION.TRANSACTION_{mode} + ); + END;""" + + +async def test_8800(async_cursor, test_env): + "8800 - test sessionless transaction using client API" + await async_cursor.execute("truncate table TestTempTable") + + # create sessionless transaction in one connection + async with test_env.get_connection_async() as conn: + + cursor = conn.cursor() + + # start sessionless transaction + await conn.begin_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=15, + defer_round_trip=True, + ) - # insert data within transaction - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "row1"), - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "row2"), - ) + # insert data within transaction + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "row1"), + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) - # suspend the sessionless transaction - await conn.suspend_sessionless_transaction() + # suspend the sessionless transaction + await conn.suspend_sessionless_transaction() - # ensure data is not visible outside transaction - await cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await cursor.fetchall(), []) + # ensure data is not visible outside transaction + await cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor.fetchall() == [] - # resume the transaction in another connection - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() + # resume the transaction in another connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() - await 
conn.resume_sessionless_transaction( - transaction_id=self.transaction_id_client, - timeout=5, - defer_round_trip=True, - ) + await conn.resume_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=5, + defer_round_trip=True, + ) - # suspend using suspend_on_success flag with executemany - await cursor.executemany( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - [(3, "row3")], - suspend_on_success=True, - ) + # suspend using suspend_on_success flag with executemany + await cursor.executemany( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + [(3, "row3")], + suspend_on_success=True, + ) - # ensure data is not visible as the transaction is suspended - await cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await cursor.fetchall(), []) + # ensure data is not visible as the transaction is suspended + await cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor.fetchall() == [] - # resume the transaction and commit the changes - await conn.resume_sessionless_transaction( - transaction_id=self.transaction_id_client - ) - await conn.commit() + # resume the transaction and commit the changes + await conn.resume_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT + ) + await conn.commit() - # verify data after commit - await cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(len(await cursor.fetchall()), 3) - - async def test_8801(self): - "8801 - test sessionless transaction using server-side procedures" - await self.cursor.execute("truncate table TestTempTable") - - # create sessionless transaction in one connection - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - self._get_server_start_stmt("NEW"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) + # verify data after commit + await cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert len(await cursor.fetchall()) == 3 - # insert data within transaction - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "row1"), - ) - # Suspend on server - await cursor.callproc("dbms_transaction.suspend_transaction") +async def test_8801(async_cursor, test_env): + "8801 - test sessionless transaction using server-side procedures" + await async_cursor.execute("truncate table TestTempTable") - # verify data is not visible after suspend - await cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await cursor.fetchall(), []) - - # resume the transaction in another connection - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - self._get_server_start_stmt("RESUME"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "row2"), - ) - await conn.commit() + # create sessionless transaction in one connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + _get_server_start_stmt("NEW"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) - # verify data after commit in original connection - await self.cursor.execute( - "SELECT IntCol, StringCol1 FROM TestTempTable" + # insert data within transaction + await cursor.execute( + """ + insert into 
TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "row1"), ) - self.assertEqual(len(await self.cursor.fetchall()), 2) - async def test_8802(self): - "8802 - test error conditions with server API sessionless transactions" - await self.cursor.execute("truncate table TestTempTable") + # Suspend on server + await cursor.callproc("dbms_transaction.suspend_transaction") - # start a transaction via the server; verify that suspension via the - # client fails but suspension via the server succeeds - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - self._get_server_start_stmt("NEW"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "server_row"), - ) - with self.assertRaisesFullCode("DPY-3034"): - await conn.suspend_sessionless_transaction() - await cursor.callproc("dbms_transaction.suspend_transaction") - - # resume on a second connection - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - self._get_server_start_stmt("RESUME"), - {"transaction_id": self.transaction_id_server, "timeout": 5}, - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "server_row2"), - ) - - # resuming on a different session should fail - async with test_env.get_connection_async() as other_conn: - other_cursor = other_conn.cursor() - with self.assertRaisesFullCode("ORA-25351"): - await other_cursor.execute( - self._get_server_start_stmt("RESUME"), - { - "transaction_id": self.transaction_id_server, - "timeout": 2, - }, - ) - - async def test_8803(self): - "8803 - test rollback of sessionless transaction" - await self.cursor.execute("truncate table TestTempTable") - - # start and work with sessionless transaction - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.begin_sessionless_transaction( - transaction_id=b"rollback_test", timeout=15 - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "rollback_row"), - suspend_on_success=True, - ) + # verify data is not visible after suspend + await cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor.fetchall() == [] - # resume in new connection and rollback - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.resume_sessionless_transaction( - transaction_id=b"rollback_test", timeout=5 - ) - await conn.rollback() - await cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await cursor.fetchall(), []) + # resume the transaction in another connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + _get_server_start_stmt("RESUME"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "row2"), + ) + await conn.commit() - async def test_8804(self): - "8804 - test multiple operations within same sessionless transaction" - await self.cursor.execute("truncate table TestTempTable") + # verify data after commit in original connection + await async_cursor.execute("SELECT IntCol, StringCol1 FROM TestTempTable") + assert len(await async_cursor.fetchall()) == 2 - # start transaction and perform multiple 
operations - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.begin_sessionless_transaction( - transaction_id=b"multi_ops_test", timeout=15 - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "original"), - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "second"), - ) - await cursor.execute( - """ - update TestTempTable set StringCol1 = :v1 where IntCol = 1 - """, - v1="updated", - ) - await cursor.execute("delete from TestTempTable where IntCol = 2") - await conn.suspend_sessionless_transaction() - await cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await cursor.fetchall(), []) - # resume and commit - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.resume_sessionless_transaction( - transaction_id=b"multi_ops_test", timeout=5 - ) - await conn.commit() - await cursor.execute( - "select IntCol, StringCol1 from TestTempTable" - ) - self.assertEqual(await cursor.fetchall(), [(1, "updated")]) +async def test_8802(async_cursor, test_env): + "8802 - test error conditions with server API sessionless transactions" + await async_cursor.execute("truncate table TestTempTable") - async def test_8805(self): - "8805 - test concurrent sessionless transactions" - await self.cursor.execute("truncate table TestTempTable") + # start a transaction via the server; verify that suspension via the + # client fails but suspension via the server succeeds + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + _get_server_start_stmt("NEW"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "server_row"), + ) + with test_env.assert_raises_full_code("DPY-3034"): + await conn.suspend_sessionless_transaction() + await cursor.callproc("dbms_transaction.suspend_transaction") + + # resume on a second connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + _get_server_start_stmt("RESUME"), + {"transaction_id": TRANSACTION_ID_SERVER, "timeout": 5}, + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "server_row2"), + ) - # start first sessionless transaction - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.begin_sessionless_transaction( - transaction_id=b"concurrent_1", timeout=15 - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (1, "concurrent_1"), - suspend_on_success=True, - ) + # resuming on a different session should fail + async with test_env.get_connection_async() as other_conn: + other_cursor = other_conn.cursor() + with test_env.assert_raises_full_code("ORA-25351"): + await other_cursor.execute( + _get_server_start_stmt("RESUME"), + { + "transaction_id": TRANSACTION_ID_SERVER, + "timeout": 2, + }, + ) - # start second sessionless transaction in another connection - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.begin_sessionless_transaction( - transaction_id=b"concurrent_2", timeout=15 - ) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - (2, "concurrent_2"), - 
suspend_on_success=True, - ) - # resume and commit both transactions - async with test_env.get_connection_async() as conn: - await conn.resume_sessionless_transaction( - transaction_id=b"concurrent_1" - ) - await conn.commit() - await conn.resume_sessionless_transaction( - transaction_id=b"concurrent_2" - ) - await conn.commit() - - # verify data from both transactions is present - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - expected_data = [(1, "concurrent_1"), (2, "concurrent_2")] - self.assertEqual(await cursor.fetchall(), expected_data) - - async def test_8806(self): - "8806 - test sessionless transaction with large data" - await self.cursor.execute("delete from TestAllTypes") - await self.conn.commit() - - # start sessionless transaction and insert large data - large_string = "X" * 250_000 - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - transaction_id = await conn.begin_sessionless_transaction() - await cursor.execute( - """ - insert into TestAllTypes (IntValue, ClobValue) - values (:1, :2) - """, - (1, large_string), - suspend_on_success=True, - ) +async def test_8803(async_cursor, test_env): + "8803 - test rollback of sessionless transaction" + await async_cursor.execute("truncate table TestTempTable") - # resume transaction and commit - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.resume_sessionless_transaction(transaction_id) - await conn.commit() - await cursor.execute( - "select ClobValue from TestAllTypes", fetch_lobs=False - ) - (result,) = await cursor.fetchone() - self.assertEqual(result, large_string) - - async def test_8807(self): - "8807 - test sessionless transaction with multiple suspends/resumes" - await self.cursor.execute("truncate table TestTempTable") - - # define data to insert - data = [ - (1, "first_insert"), - (2, "second_insert"), - (3, "third_insert"), - ] - - # start sessionless transaction and suspend - transaction_id = await self.conn.begin_sessionless_transaction() - await self.cursor.execute( + # start and work with sessionless transaction + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"rollback_test", timeout=15 + ) + await cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, - data[0], + (1, "rollback_row"), suspend_on_success=True, ) - # resume and insert second row - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.resume_sessionless_transaction(transaction_id) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[1], - suspend_on_success=True, - ) + # resume in new connection and rollback + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction( + transaction_id=b"rollback_test", timeout=5 + ) + await conn.rollback() + await cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor.fetchall() == [] - # resume and insert third row, then commit - async with test_env.get_connection_async() as conn: - cursor = conn.cursor() - await conn.resume_sessionless_transaction(transaction_id) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[2], - ) - await conn.commit() - 
# verify all data is present - await self.cursor.execute( +async def test_8804(async_cursor, test_env): + "8804 - test multiple operations within same sessionless transaction" + await async_cursor.execute("truncate table TestTempTable") + + # start transaction and perform multiple operations + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=15 + ) + await cursor.execute( """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "original"), + ) + await cursor.execute( """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "second"), ) - self.assertEqual(await self.cursor.fetchall(), data) - - async def test_8808(self): - "8808 - Test sessionless transaction with invalid resume attempts" - await self.cursor.execute("truncate table TestTempTable") - - # start a sessionless transaction - transaction_id = await self.conn.begin_sessionless_transaction() - - # try to resume with the wrong transaction id - with self.assertRaisesFullCode("DPY-3035"): - await self.conn.resume_sessionless_transaction("wrong_id") + await cursor.execute( + """ + update TestTempTable set StringCol1 = :v1 where IntCol = 1 + """, + v1="updated", + ) + await cursor.execute("delete from TestTempTable where IntCol = 2") + await conn.suspend_sessionless_transaction() + await cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor.fetchall() == [] + + # resume and commit + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction( + transaction_id=b"multi_ops_test", timeout=5 + ) + await conn.commit() + await cursor.execute("select IntCol, StringCol1 from TestTempTable") + assert await cursor.fetchall() == [(1, "updated")] - # try to resume before suspend - with self.assertRaisesFullCode("DPY-3035"): - await self.conn.resume_sessionless_transaction(transaction_id) - # suspend and resume correctly - await self.conn.suspend_sessionless_transaction() - async with test_env.get_connection_async() as conn: - await conn.resume_sessionless_transaction(transaction_id) +async def test_8805(async_cursor, test_env): + "8805 - test concurrent sessionless transactions" + await async_cursor.execute("truncate table TestTempTable") - async def test_8809(self): - "8809 - test getting transaction ID of active sessionless transaction" - transaction_id = await self.conn.begin_sessionless_transaction() - await self.cursor.execute( - "select dbms_transaction.get_transaction_id()" + # start first sessionless transaction + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"concurrent_1", timeout=15 + ) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (1, "concurrent_1"), + suspend_on_success=True, ) - (server_transaction_id,) = await self.cursor.fetchone() - self.assertEqual(server_transaction_id, transaction_id.hex().upper()) - await self.conn.commit() - async def test_8810(self): - "8810 - test auto-generated transaction ID uniqueness" + # start second sessionless transaction in another connection + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.begin_sessionless_transaction( + transaction_id=b"concurrent_2", timeout=15 + ) + await 
cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + (2, "concurrent_2"), + suspend_on_success=True, + ) - # start first transaction - transaction_id_1 = await self.conn.begin_sessionless_transaction() - await self.conn.suspend_sessionless_transaction() + # resume and commit both transactions + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction( + transaction_id=b"concurrent_1" + ) + await conn.commit() + await conn.resume_sessionless_transaction( + transaction_id=b"concurrent_2" + ) + await conn.commit() - # start second transaction - async with test_env.get_connection_async() as conn: - transaction_id_2 = await conn.begin_sessionless_transaction() - await conn.suspend_sessionless_transaction() - self.assertNotEqual(transaction_id_1, transaction_id_2) - await conn.resume_sessionless_transaction(transaction_id_2) - await conn.rollback() - - # cleanup - await self.conn.resume_sessionless_transaction(transaction_id_1) - await self.conn.rollback() - - async def test_8811(self): - "8811 - test sessionless transactions with connection pool" - await self.cursor.execute("truncate table TestTempTable") - - # initialization - data = [(1, "value 1"), (2, "value 2")] - pool = test_env.get_pool_async(min=2, max=5) - - # start transaction on first connection - async with pool.acquire() as conn: - cursor = conn.cursor() - transaction_id = await conn.begin_sessionless_transaction() - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[0], - suspend_on_success=True, - ) + # verify data from both transactions is present + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + expected_data = [(1, "concurrent_1"), (2, "concurrent_2")] + assert await cursor.fetchall() == expected_data - # resume on second connection - async with pool.acquire() as conn: - cursor = conn.cursor() - await conn.resume_sessionless_transaction(transaction_id) - await cursor.execute( - """ - insert into TestTempTable (IntCol, StringCol1) - values (:1, :2) - """, - data[1], - ) - await conn.commit() - - # verify data - async with pool.acquire() as conn: - cursor = conn.cursor() - await cursor.execute( - """ - select IntCol, StringCol1 - from TestTempTable - order by IntCol - """ - ) - self.assertEqual(await cursor.fetchall(), data) - await pool.close() +async def test_8806(async_conn, async_cursor, test_env): + "8806 - test sessionless transaction with large data" + await async_cursor.execute("delete from TestAllTypes") + await async_conn.commit() - async def test_8812(self): - "8812 - Test sessionless transaction with special transaction ids" - await self.cursor.execute("truncate table TestTempTable") + # start sessionless transaction and insert large data + large_string = "X" * 250_000 + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + transaction_id = await conn.begin_sessionless_transaction() + await cursor.execute( + """ + insert into TestAllTypes (IntValue, ClobValue) + values (:1, :2) + """, + (1, large_string), + suspend_on_success=True, + ) - # define data to insert - data = [(1, "long_transaction_id"), (2, "special_chars")] + # resume transaction and commit + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await conn.commit() + 
await cursor.execute( + "select ClobValue from TestAllTypes", fetch_lobs=False + ) + (result,) = await cursor.fetchone() + assert result == large_string + + +async def test_8807(async_conn, async_cursor, test_env): + "8807 - test sessionless transaction with multiple suspends/resumes" + await async_cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [ + (1, "first_insert"), + (2, "second_insert"), + (3, "third_insert"), + ] + + # start sessionless transaction and suspend + transaction_id = await async_conn.begin_sessionless_transaction() + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume and insert second row + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) - # test with long transaction id - long_transaction_id = b"X" * 64 - await self.conn.begin_sessionless_transaction(long_transaction_id) - await self.cursor.execute( + # resume and insert third row, then commit + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[2], + ) + await conn.commit() + + # verify all data is present + await async_cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + assert await async_cursor.fetchall() == data + + +async def test_8808(async_conn, async_cursor, test_env): + "8808 - Test sessionless transaction with invalid resume attempts" + await async_cursor.execute("truncate table TestTempTable") + + # start a sessionless transaction + transaction_id = await async_conn.begin_sessionless_transaction() + + # try to resume with the wrong transaction id + with test_env.assert_raises_full_code("DPY-3035"): + await async_conn.resume_sessionless_transaction("wrong_id") + + # try to resume before suspend + with test_env.assert_raises_full_code("DPY-3035"): + await async_conn.resume_sessionless_transaction(transaction_id) + + # suspend and resume correctly + await async_conn.suspend_sessionless_transaction() + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction(transaction_id) + + +async def test_8809(async_conn, async_cursor): + "8809 - test getting transaction ID of active sessionless transaction" + transaction_id = await async_conn.begin_sessionless_transaction() + await async_cursor.execute("select dbms_transaction.get_transaction_id()") + (server_transaction_id,) = await async_cursor.fetchone() + assert server_transaction_id == transaction_id.hex().upper() + await async_conn.commit() + + +async def test_8810(async_conn, test_env): + "8810 - test auto-generated transaction ID uniqueness" + + # start first transaction + transaction_id_1 = await async_conn.begin_sessionless_transaction() + await async_conn.suspend_sessionless_transaction() + + # start second transaction + async with test_env.get_connection_async() as conn: + transaction_id_2 = await conn.begin_sessionless_transaction() + await conn.suspend_sessionless_transaction() + assert transaction_id_1 != transaction_id_2 + await conn.resume_sessionless_transaction(transaction_id_2) + await 
conn.rollback() + + # cleanup + await async_conn.resume_sessionless_transaction(transaction_id_1) + await async_conn.rollback() + + +async def test_8811(async_cursor, test_env): + "8811 - test sessionless transactions with connection pool" + await async_cursor.execute("truncate table TestTempTable") + + # initialization + data = [(1, "value 1"), (2, "value 2")] + pool = test_env.get_pool_async(min=2, max=5) + + # start transaction on first connection + async with pool.acquire() as conn: + cursor = conn.cursor() + transaction_id = await conn.begin_sessionless_transaction() + await cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) @@ -576,109 +529,153 @@ async def test_8812(self): suspend_on_success=True, ) - # resume and commit in different connection - async with test_env.get_connection_async() as conn: - await conn.resume_sessionless_transaction(long_transaction_id) - await conn.commit() - - # test with special characters in transaction id - special_transaction_id = b"SPECIAL@#$%^&*()_+" - await self.conn.begin_sessionless_transaction(special_transaction_id) - await self.cursor.execute( + # resume on second connection + async with pool.acquire() as conn: + cursor = conn.cursor() + await conn.resume_sessionless_transaction(transaction_id) + await cursor.execute( """ insert into TestTempTable (IntCol, StringCol1) values (:1, :2) """, data[1], - suspend_on_success=True, ) + await conn.commit() - # resume and commit in different connection - async with test_env.get_connection_async() as conn: - await conn.resume_sessionless_transaction(special_transaction_id) - await conn.commit() - - # verify both transactions committed - await self.cursor.execute( + # verify data + async with pool.acquire() as conn: + cursor = conn.cursor() + await cursor.execute( """ select IntCol, StringCol1 from TestTempTable order by IntCol """ ) - self.assertEqual(await self.cursor.fetchall(), data) - - async def test_8813(self): - "8813 - duplicate transaction id across different connections" - transaction_id = "test_8813_transaction_id" - await self.conn.begin_sessionless_transaction(transaction_id) - async with test_env.get_connection_async() as conn: - with self.assertRaisesFullCode("ORA-26217"): - await conn.begin_sessionless_transaction(transaction_id) - - async def test_8814(self): - "8814 - zero timeout behaviour in resume" - transaction_id = await self.conn.begin_sessionless_transaction() - async with test_env.get_connection_async() as conn: - with self.assertRaisesFullCode("ORA-25351"): - await conn.resume_sessionless_transaction( - transaction_id, timeout=0 - ) - - # suspend transaction on first session, and resume will now succeed - await self.conn.suspend_sessionless_transaction() - async with test_env.get_connection_async() as conn: + assert await cursor.fetchall() == data + + await pool.close() + + +async def test_8812(async_conn, async_cursor, test_env): + "8812 - Test sessionless transaction with special transaction ids" + await async_cursor.execute("truncate table TestTempTable") + + # define data to insert + data = [(1, "long_transaction_id"), (2, "special_chars")] + + # test with long transaction id + long_transaction_id = b"X" * 64 + await async_conn.begin_sessionless_transaction(long_transaction_id) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[0], + suspend_on_success=True, + ) + + # resume and commit in different connection + async with test_env.get_connection_async() as conn: + await 
conn.resume_sessionless_transaction(long_transaction_id) + await conn.commit() + + # test with special characters in transaction id + special_transaction_id = b"SPECIAL@#$%^&*()_+" + await async_conn.begin_sessionless_transaction(special_transaction_id) + await async_cursor.execute( + """ + insert into TestTempTable (IntCol, StringCol1) + values (:1, :2) + """, + data[1], + suspend_on_success=True, + ) + + # resume and commit in different connection + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction(special_transaction_id) + await conn.commit() + + # verify both transactions committed + await async_cursor.execute( + """ + select IntCol, StringCol1 + from TestTempTable + order by IntCol + """ + ) + assert await async_cursor.fetchall() == data + + +async def test_8813(async_conn, test_env): + "8813 - duplicate transaction id across different connections" + transaction_id = "test_8813_transaction_id" + await async_conn.begin_sessionless_transaction(transaction_id) + async with test_env.get_connection_async() as conn: + with test_env.assert_raises_full_code("ORA-26217"): + await conn.begin_sessionless_transaction(transaction_id) + + +async def test_8814(async_conn, test_env): + "8814 - zero timeout behaviour in resume" + transaction_id = await async_conn.begin_sessionless_transaction() + async with test_env.get_connection_async() as conn: + with test_env.assert_raises_full_code("ORA-25351"): await conn.resume_sessionless_transaction( transaction_id, timeout=0 ) - await conn.rollback() - async def test_8815(self): - "8815 - transaction behaviour with DDL operations" - - # create temp table - temp_table_name = "temp_test_8815" - await self.cursor.execute(f"drop table if exists {temp_table_name}") - await self.cursor.execute( + # suspend transaction on first session, and resume will now succeed + await async_conn.suspend_sessionless_transaction() + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction(transaction_id, timeout=0) + await conn.rollback() + + +async def test_8815(async_conn, async_cursor, test_env): + "8815 - transaction behaviour with DDL operations" + + # create temp table + temp_table_name = "temp_test_8815" + await async_cursor.execute(f"drop table if exists {temp_table_name}") + await async_cursor.execute( + f""" + create table {temp_table_name} ( + id number, + data varchar2(50) + )""" + ) + + # beging sessionless transaction and perform DDL which performs an + # implicit commit + await async_conn.begin_sessionless_transaction() + await async_cursor.execute( + f"alter table {temp_table_name} add temp_col varchar2(20)" + ) + + # further DML operations are part of a local transaction + local_data = (1, "LOCAL_TRANSACTION", "abc") + await async_cursor.execute( + f"insert into {temp_table_name} values (:1, :2, :3)", + local_data, + ) + + # suspend will fail now as a local transaction is active and only + # sessionless transactions are suspendable + with test_env.assert_raises_full_code("DPY-3036"): + await async_cursor.execute( f""" - create table {temp_table_name} ( - id number, - data varchar2(50) - )""" - ) - - # beging sessionless transaction and perform DDL which performs an - # implicit commit - await self.conn.begin_sessionless_transaction() - await self.cursor.execute( - f"alter table {temp_table_name} add temp_col varchar2(20)" - ) - - # further DML operations are part of a local transaction - local_data = (1, "LOCAL_TRANSACTION", "abc") - await self.cursor.execute( - f"insert into 
{temp_table_name} values (:1, :2, :3)", - local_data, - ) - - # suspend will fail now as a local transaction is active and only - # sessionless transactions are suspendable - with self.assertRaisesFullCode("DPY-3036"): - await self.cursor.execute( - f""" - insert into {temp_table_name} - values (2, 'LOCAL_TRANSACTION', 'def') - """, - suspend_on_success=True, - ) - - # verify data from local transaction is all that is present - await self.cursor.execute(f"select * from {temp_table_name}") - self.assertEqual(await self.cursor.fetchall(), [local_data]) - - # drop temp table - await self.cursor.execute(f"drop table {temp_table_name} purge") + insert into {temp_table_name} + values (2, 'LOCAL_TRANSACTION', 'def') + """, + suspend_on_success=True, + ) + # verify data from local transaction is all that is present + await async_cursor.execute(f"select * from {temp_table_name}") + assert await async_cursor.fetchall() == [local_data] -if __name__ == "__main__": - test_env.run_test_cases() + # drop temp table + await async_cursor.execute(f"drop table {temp_table_name} purge") diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py index ddefbace..a838f658 100644 --- a/tests/test_8900_dataframe_ingestion.py +++ b/tests/test_8900_dataframe_ingestion.py @@ -30,8 +30,7 @@ import decimal import pyarrow - -import test_env +import pytest SPARSE_VECTOR_FIELDS_FLOAT32 = [ ("num_dimensions", pyarrow.int64()), @@ -52,573 +51,336 @@ ] -class TestCase(test_env.BaseTestCase): - - def test_8900(self): - "8900 - test basic ingestion of data frame" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), - pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), - pyarrow.array( - [ - datetime.datetime(2020, 1, 1), - datetime.datetime(2021, 2, 2), - datetime.datetime(2022, 3, 3), - ], - pyarrow.timestamp("s"), - ), - ] - names = ["Id", "FirstName", "Salary", "DateOfBirth"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( +@pytest.fixture +def empty_tab(cursor): + cursor.execute("delete from TestDataFrame") + + +def test_8900(conn, cursor, empty_tab): + "8900 - test basic ingestion of data frame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + datetime.datetime(2021, 2, 2), + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", "DateOfBirth"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8901(conn, cursor, empty_tab): + "8901 - test ingestion with null values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", None, "Bob"], pyarrow.string()), + pyarrow.array([None, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + None, + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", 
"DateOfBirth"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8902(conn, cursor, empty_tab): + "8902 - test ingestion with single column" + arrays = [pyarrow.array([1, 2, 3], pyarrow.int64())] + names = ["Id"] + df = pyarrow.table(arrays, names) + cursor.executemany("insert into TestDataFrame (Id) values (:1)", df) + conn.commit() + odf = conn.fetch_df_all( + """ + select Id as "Id" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8903(conn, cursor, empty_tab): + "8903 - test ingestion with large data types" + long_str = "X" * 32_768 + long_raw = b"Y" * 32_768 + arrays = [ + pyarrow.array([1], pyarrow.int64()), + pyarrow.array([long_str], pyarrow.large_string()), + pyarrow.array([long_raw], pyarrow.large_binary()), + ] + names = ["Id", "LongData", "LongRawData"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8904(conn, cursor, empty_tab): + "8904 - test ingestion with decimal values" + arrays = [ + pyarrow.array( + [ + decimal.Decimal("1"), + decimal.Decimal("2"), + decimal.Decimal("3"), + ], + pyarrow.decimal128(9, 0), + ), + pyarrow.array( + [ + decimal.Decimal("1234567890.1234"), + decimal.Decimal("-9876543210.9876"), + decimal.Decimal("0.0001"), + ], + pyarrow.decimal128(15, 4), + ), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame + (Id, DecimalData) + values (:1, :2) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8905(skip_unless_native_boolean_supported, conn, cursor): + "8905 - test ingestion with boolean values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array([True, False, True], pyarrow.bool_()), + pyarrow.array([False, True, None], pyarrow.bool_()), + ] + names = ["IntCol", "BooleanCol1", "BooleanCol2"] + df = pyarrow.table(arrays, names) + cursor.execute("truncate table TestBooleans") + cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + IntCol as "IntCol", + BooleanCol1 as "BooleanCol1", + BooleanCol2 as "BooleanCol2" + from TestBooleans + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8906(conn, cursor, empty_tab): + "8906 - test ingestion with timestamp values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1, 0, 0, 0), + datetime.datetime(2021, 2, 2, 12, 34, 56), + 
datetime.datetime(2022, 3, 3, 23, 59, 59), + ], + pyarrow.timestamp("us"), + ), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame + (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8907(cursor, test_env): + "8907 - test ingestion with mismatched column count" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + ] + names = ["ID", "NAME"] + df = pyarrow.table(arrays, names) + with test_env.assert_raises_full_code("DPY-4009", "ORA-01008"): + cursor.executemany( """ - insert into TestDataFrame - (Id, FirstName, Salary, DateOfBirth) - values (:1, :2, :3, :4) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName", - Salary as "Salary", - DateOfBirth as "DateOfBirth" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8901(self): - "8901 - test ingestion with null values" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", None, "Bob"], pyarrow.string()), - pyarrow.array([None, 2000.75, 3000.25], pyarrow.float64()), - pyarrow.array( - [ - datetime.datetime(2020, 1, 1), - None, - datetime.datetime(2022, 3, 3), - ], - pyarrow.timestamp("s"), - ), - ] - names = ["Id", "FirstName", "Salary", "DateOfBirth"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame - (Id, FirstName, Salary, DateOfBirth) - values (:1, :2, :3, :4) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName", - Salary as "Salary", - DateOfBirth as "DateOfBirth" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8902(self): - "8902 - test ingestion with single column" - arrays = [pyarrow.array([1, 2, 3], pyarrow.int64())] - names = ["Id"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - "insert into TestDataFrame (Id) values (:1)", df - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select Id as "Id" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8903(self): - "8903 - test ingestion with large data types" - long_str = "X" * 32_768 - long_raw = b"Y" * 32_768 - arrays = [ - pyarrow.array([1], pyarrow.int64()), - pyarrow.array([long_str], pyarrow.large_string()), - pyarrow.array([long_raw], pyarrow.large_binary()), - ] - names = ["Id", "LongData", "LongRawData"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame (Id, LongData, LongRawData) + insert into TestDataFrame (Id, FirstName, Salary) values (:1, :2, :3) """, df, ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - LongData as "LongData", - LongRawData as "LongRawData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - def 
test_8904(self): - "8904 - test ingestion with decimal values" - arrays = [ - pyarrow.array( - [ - decimal.Decimal("1"), - decimal.Decimal("2"), - decimal.Decimal("3"), - ], - pyarrow.decimal128(9, 0), - ), - pyarrow.array( - [ - decimal.Decimal("1234567890.1234"), - decimal.Decimal("-9876543210.9876"), - decimal.Decimal("0.0001"), - ], - pyarrow.decimal128(15, 4), - ), - ] - names = ["Id", "DecimalData"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame - (Id, DecimalData) - values (:1, :2) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """, - fetch_decimals=True, - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - @test_env.skip_unless_native_boolean_supported() - def test_8905(self): - "8905 - test ingestion with boolean values" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array([True, False, True], pyarrow.bool_()), - pyarrow.array([False, True, None], pyarrow.bool_()), - ] - names = ["IntCol", "BooleanCol1", "BooleanCol2"] - df = pyarrow.table(arrays, names) - self.cursor.execute("truncate table TestBooleans") - self.cursor.executemany( - """ - insert into TestBooleans - (IntCol, BooleanCol1, BooleanCol2) - values (:1, :2, :3) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - IntCol as "IntCol", - BooleanCol1 as "BooleanCol1", - BooleanCol2 as "BooleanCol2" - from TestBooleans - order by IntCol - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - def test_8906(self): - "8906 - test ingestion with timestamp values" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( - [ - datetime.datetime(2020, 1, 1, 0, 0, 0), - datetime.datetime(2021, 2, 2, 12, 34, 56), - datetime.datetime(2022, 3, 3, 23, 59, 59), - ], - pyarrow.timestamp("us"), - ), - ] - names = ["Id", "LastUpdated"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame - (Id, LastUpdated) - values (:1, :2) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - LastUpdated as "LastUpdated" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8907(self): - "8907 - test ingestion with mismatched column count" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), - ] - names = ["ID", "NAME"] - df = pyarrow.table(arrays, names) - with self.assertRaisesFullCode("DPY-4009", "ORA-01008"): - self.cursor.executemany( - """ - insert into TestDataFrame (Id, FirstName, Salary) - values (:1, :2, :3) - """, - df, - ) - - def test_8908(self): - "8908 - test ingestion with invalid data type" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( - [["a", "b"], ["c"], ["d", "e", "f"]], - pyarrow.list_(pyarrow.string()), - ), - ] - names = ["Id", "FirstName"] - df = pyarrow.table(arrays, names) - with self.assertRaisesFullCode("DPY-3033"): - self.cursor.executemany( - """ - insert into TestDataFrame (Id, FirstName) - values (:1, :2) - """, - df, - ) - - def test_8909(self): - "8909 - test execute() with DataFrame" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - 
pyarrow.array(["John", "Jane", "Sue"], pyarrow.string()), - ] - names = ["Id", "FirstName"] - df = pyarrow.table(arrays, names) - with self.assertRaisesFullCode("DPY-2003"): - self.cursor.execute( - """ - insert into TestDataFrame (Id, FirstName) - values (:1, :2) - """, - df, - ) - - def test_8910(self): - "8910 - test consecutive executemany() calls with same dataframe" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), - pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), - ] - names = ["Id", "FirstName", "Salary"] - df = pyarrow.table(arrays, names) - for i in range(3): - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame (Id, FirstName, Salary) - values (:1, :2, :3) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName", - Salary as "Salary" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8911(self): - "8911 - test nulls/None for all datatypes" - arrays = [ - pyarrow.array([1], pyarrow.int64()), - pyarrow.array([None], pyarrow.float32()), - pyarrow.array([None], pyarrow.float64()), - pyarrow.array([None], pyarrow.string()), - pyarrow.array([None], pyarrow.timestamp("s")), - pyarrow.array([None], pyarrow.binary()), - ] - names = [ - "Id", - "FloatData", - "DoubleData", - "FirstName", - "DateOfBirth", - "RawData", - ] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - bind_names = ",".join(names) - bind_values = ",".join(f":{i + 1}" for i in range(len(names))) - self.cursor.executemany( - f""" - insert into TestDataFrame ({bind_names}) - values ({bind_values}) - """, - df, - ) - self.conn.commit() - query_values = ",".join(f'{name} as "{name}"' for name in names) - odf = self.conn.fetch_df_all( - f""" - select {query_values} - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8912(self): - "8912 - test LOB sizes around 32K boundary using DataFrame ingestion" - test_sizes = [32766, 32767, 32768, 32769, 32770] - arrays = [ - pyarrow.array(range(1, len(test_sizes) + 1), pyarrow.int64()), - pyarrow.array( - ["X" * s for s in test_sizes], pyarrow.large_string() - ), - pyarrow.array( - [b"Y" * s for s in test_sizes], pyarrow.large_binary() - ), - ] - names = ["Id", "LongData", "LongRawData"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame (Id, LongData, LongRawData) - values (:1, :2, :3) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - LongData as "LongData", - LongRawData as "LongRawData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8913(self): - "8913 - test ingestion with mixed characters using DataFrame" - if test_env.get_charset() != "AL32UTF8": - self.skipTest("Database character set must be AL32UTF8") - - test_data = [ - "ASCII: Hello World", # Pure ASCII - "Latin: café España", # Latin-1 Supplement - "Cyrillic: русский текст", # Actual Cyrillic - "Chinese: 中文测试", # Actual Chinese - "Emoji: 👍😊❤️", # Emojis - "Special: ~!@#$%^&*()_+{}|:\"<>?`-=[]\\;',./", # ASCII symbols - "Mixed: 你好, world! café? 
123@# русский 👍", # Mixed characters - ] - arrays = [ - pyarrow.array(range(1, len(test_data) + 1), pyarrow.int64()), - pyarrow.array(test_data, pyarrow.string()), - ] - names = ["Id", "FirstName"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( +def test_8908(cursor, test_env): + "8908 - test ingestion with invalid data type" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [["a", "b"], ["c"], ["d", "e", "f"]], + pyarrow.list_(pyarrow.string()), + ), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with test_env.assert_raises_full_code("DPY-3033"): + cursor.executemany( """ insert into TestDataFrame (Id, FirstName) values (:1, :2) """, df, ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8914(self): - "8914 - test various numeric values" - test_data = [ - decimal.Decimal(0), - decimal.Decimal(1), - decimal.Decimal(-1), - decimal.Decimal("99999999999.9999"), - decimal.Decimal("-99999999999.9999"), - decimal.Decimal("10000000000.0001"), - decimal.Decimal("-10000000000.0001"), - decimal.Decimal(".0001"), - decimal.Decimal("-.0001"), - decimal.Decimal(".9"), - decimal.Decimal("-.9"), - decimal.Decimal(".09"), - decimal.Decimal("-.09"), - decimal.Decimal(".009"), - decimal.Decimal("-.009"), - ] - ids = [decimal.Decimal(i) for i in range(len(test_data))] - arrays = [ - pyarrow.array(ids, pyarrow.decimal128(9, 0)), - pyarrow.array(test_data, pyarrow.decimal128(15, 4)), - ] - names = ["Id", "DecimalData"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame (Id, DecimalData) - values (:1, :2) - """, - df, - ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """, - fetch_decimals=True, - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8915(self): - "8915 - test various timestamp values" - test_data = [ - datetime.datetime(2056, 2, 29), - datetime.datetime(2020, 2, 29), - datetime.datetime(1900, 1, 1), - datetime.datetime(2000, 1, 1), - datetime.datetime(1970, 1, 1), - datetime.datetime(2020, 2, 29, 23, 59, 59, 123456), - datetime.datetime(2023, 12, 31, 23, 59, 59, 567890), - datetime.datetime(2024, 1, 1, 0, 0, 0, 789012), - ] - ids = list(range(len(test_data))) - arrays = [ - pyarrow.array(ids, pyarrow.int64()), - pyarrow.array(test_data, pyarrow.timestamp("us")), - ] - names = ["Id", "LastUpdated"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( + + +def test_8909(cursor, test_env): + "8909 - test execute() with DataFrame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Sue"], pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with test_env.assert_raises_full_code("DPY-2003"): + cursor.execute( """ - insert into TestDataFrame (Id, LastUpdated) + insert into TestDataFrame (Id, FirstName) values (:1, :2) """, df, ) - self.conn.commit() - odf = self.conn.fetch_df_all( - """ - select - Id as "Id", - LastUpdated as "LastUpdated" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - 
self.assertTrue(fetched_df.equals(df)) - - def test_8916(self): - "8916 - test insertion with large data volumes" - num_rows = 10_000 - ids = list(range(1, num_rows + 1)) - names = [f"Employee-{i}" for i in ids] - salaries = [i * 100.25 for i in ids] - arrays = [ - pyarrow.array(ids, pyarrow.int64()), - pyarrow.array(names, pyarrow.string()), - pyarrow.array(salaries, pyarrow.float64()), - ] - names = ["Id", "FirstName", "Salary"] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( + + +def test_8910(conn, cursor): + "8910 - test consecutive executemany() calls with same dataframe" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + for i in range(3): + cursor.execute("delete from TestDataFrame") + cursor.executemany( """ insert into TestDataFrame (Id, FirstName, Salary) values (:1, :2, :3) """, df, ) - self.conn.commit() - odf = self.conn.fetch_df_all( + conn.commit() + odf = conn.fetch_df_all( """ select Id as "Id", @@ -629,322 +391,549 @@ def test_8916(self): """ ) fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - @test_env.skip_unless_sparse_vectors_supported() - def test_8917(self): - "8917 - test ingestion of sparse vectors" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( - [ - None, - dict(num_dimensions=16, indices=[1, 3], values=[1, -1]), - dict(num_dimensions=16, indices=[5, 10], values=[2, -2]), - ], - pyarrow.struct(SPARSE_VECTOR_FIELDS_INT8), - ), - pyarrow.array( - [ - dict( - num_dimensions=16, indices=[1, 3], values=[1.1, -1.1] - ), - None, - dict( - num_dimensions=16, indices=[5, 10], values=[2.2, -2.2] - ), - ], - pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT32), - ), - pyarrow.array( + assert fetched_df.equals(df) + + +def test_8911(conn, cursor, empty_tab): + "8911 - test nulls/None for all datatypes" + arrays = [ + pyarrow.array([1], pyarrow.int64()), + pyarrow.array([None], pyarrow.float32()), + pyarrow.array([None], pyarrow.float64()), + pyarrow.array([None], pyarrow.string()), + pyarrow.array([None], pyarrow.timestamp("s")), + pyarrow.array([None], pyarrow.binary()), + ] + names = [ + "Id", + "FloatData", + "DoubleData", + "FirstName", + "DateOfBirth", + "RawData", + ] + df = pyarrow.table(arrays, names) + bind_names = ",".join(names) + bind_values = ",".join(f":{i + 1}" for i in range(len(names))) + cursor.executemany( + f""" + insert into TestDataFrame ({bind_names}) + values ({bind_values}) + """, + df, + ) + conn.commit() + query_values = ",".join(f'{name} as "{name}"' for name in names) + odf = conn.fetch_df_all( + f""" + select {query_values} + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8912(conn, cursor, empty_tab): + "8912 - test LOB sizes around 32K boundary using DataFrame ingestion" + test_sizes = [32766, 32767, 32768, 32769, 32770] + arrays = [ + pyarrow.array(range(1, len(test_sizes) + 1), pyarrow.int64()), + pyarrow.array(["X" * s for s in test_sizes], pyarrow.large_string()), + pyarrow.array([b"Y" * s for s in test_sizes], pyarrow.large_binary()), + ] + names = ["Id", "LongData", "LongRawData"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + 
conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8913(conn, cursor, empty_tab, test_env): + "8913 - test ingestion with mixed characters using DataFrame" + if test_env.charset != "AL32UTF8": + pytest.skip("Database character set must be AL32UTF8") + + test_data = [ + "ASCII: Hello World", # Pure ASCII + "Latin: café España", # Latin-1 Supplement + "Cyrillic: русский текст", # Actual Cyrillic + "Chinese: 中文测试", # Actual Chinese + "Emoji: 👍😊❤️", # Emojis + "Special: ~!@#$%^&*()_+{}|:\"<>?`-=[]\\;',./", # ASCII symbols + "Mixed: 你好, world! café? 123@# русский 👍", # Mixed characters + ] + arrays = [ + pyarrow.array(range(1, len(test_data) + 1), pyarrow.int64()), + pyarrow.array(test_data, pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8914(conn, cursor, empty_tab): + "8914 - test various numeric values" + test_data = [ + decimal.Decimal(0), + decimal.Decimal(1), + decimal.Decimal(-1), + decimal.Decimal("99999999999.9999"), + decimal.Decimal("-99999999999.9999"), + decimal.Decimal("10000000000.0001"), + decimal.Decimal("-10000000000.0001"), + decimal.Decimal(".0001"), + decimal.Decimal("-.0001"), + decimal.Decimal(".9"), + decimal.Decimal("-.9"), + decimal.Decimal(".09"), + decimal.Decimal("-.09"), + decimal.Decimal(".009"), + decimal.Decimal("-.009"), + ] + ids = [decimal.Decimal(i) for i in range(len(test_data))] + arrays = [ + pyarrow.array(ids, pyarrow.decimal128(9, 0)), + pyarrow.array(test_data, pyarrow.decimal128(15, 4)), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, DecimalData) + values (:1, :2) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8915(conn, cursor, empty_tab): + "8915 - test various timestamp values" + test_data = [ + datetime.datetime(2056, 2, 29), + datetime.datetime(2020, 2, 29), + datetime.datetime(1900, 1, 1), + datetime.datetime(2000, 1, 1), + datetime.datetime(1970, 1, 1), + datetime.datetime(2020, 2, 29, 23, 59, 59, 123456), + datetime.datetime(2023, 12, 31, 23, 59, 59, 567890), + datetime.datetime(2024, 1, 1, 0, 0, 0, 789012), + ] + ids = list(range(len(test_data))) + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(test_data, pyarrow.timestamp("us")), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8916(conn, cursor, empty_tab): + "8916 - test insertion with large data volumes" + num_rows = 10_000 + ids = list(range(1, num_rows + 
1)) + names = [f"Employee-{i}" for i in ids] + salaries = [i * 100.25 for i in ids] + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(names, pyarrow.string()), + pyarrow.array(salaries, pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + conn.commit() + odf = conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8917(skip_unless_sparse_vectors_supported, conn, cursor): + "8917 - test ingestion of sparse vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, + dict(num_dimensions=16, indices=[1, 3], values=[1, -1]), + dict(num_dimensions=16, indices=[5, 10], values=[2, -2]), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_INT8), + ), + pyarrow.array( + [ + dict(num_dimensions=16, indices=[1, 3], values=[1.1, -1.1]), + None, + dict(num_dimensions=16, indices=[5, 10], values=[2.2, -2.2]), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT32), + ), + pyarrow.array( + [ + dict(num_dimensions=16, indices=[1, 3], values=[1.25, -1.25]), + dict(num_dimensions=16, indices=[5, 10], values=[2.5, -2.5]), + None, + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT64), + ), + ] + names = [ + "IntCol", + "SparseVector8Col", + "SparseVector32Col", + "SparseVector64Col", + ] + df = pyarrow.table(arrays, names) + cursor.execute("delete from TestSparseVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + cursor.executemany( + f""" + insert into TestSparseVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = conn.fetch_df_all( + f""" + select {query_names} + from TestSparseVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +def test_8918(skip_unless_vectors_supported, conn, cursor): + "8918 - test ingestion of dense vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, [ - dict( - num_dimensions=16, indices=[1, 3], values=[1.25, -1.25] - ), - dict( - num_dimensions=16, indices=[5, 10], values=[2.5, -2.5] - ), - None, + -127, + -100, + -5, + -1, + 0, + 0, + 0, + 0, + 1, + 5, + 7, + 25, + 13, + 0, + 10, + 127, ], - pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT64), - ), - ] - names = [ - "IntCol", - "SparseVector8Col", - "SparseVector32Col", - "SparseVector64Col", - ] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestSparseVectors") - column_names = ",".join(names) - bind_names = ",".join(f":{i + 1}" for i in range(len(names))) - self.cursor.executemany( - f""" - insert into TestSparseVectors ({column_names}) - values ({bind_names}) - """, - df, - ) - self.conn.commit() - query_names = ",".join(f'{name} as "{name}"' for name in names) - odf = self.conn.fetch_df_all( - f""" - select {query_names} - from TestSparseVectors - order by IntCol - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - @test_env.skip_unless_vectors_supported() - def test_8918(self): - "8918 - test ingestion of dense vectors" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( [ - None, - [ - -127, - -100, - -5, - -1, - 0, - 0, - 0, - 0, - 
1, - 5, - 7, - 25, - 13, - 0, - 10, - 127, - ], - [ - -25, - 25, - -15, - 15, - -5, - 5, - 0, - 0, - -127, - 127, - -25, - 25, - -105, - 105, - -1, - 1, - ], + -25, + 25, + -15, + 15, + -5, + 5, + 0, + 0, + -127, + 127, + -25, + 25, + -105, + 105, + -1, + 1, ], - pyarrow.list_(pyarrow.int8()), - ), - pyarrow.array( + ], + pyarrow.list_(pyarrow.int8()), + ), + pyarrow.array( + [ + None, [ - None, - [ - -12.5, - -578.625, - -100.25, - -87.5, - 0, - 25, - 0, - 0, - 1, - 1.25, - 1.75, - 2.5, - 1.75, - 0, - 5889.125, - 6500.375, - ], - [ - -25.5, - 25.5, - -15.25, - 15.25, - -5.3, - 5.3, - 0, - 0, - -127.8, - 127.8, - -15.222, - 15.222, - -105.333, - 105.333, - -1, - 1, - ], + -12.5, + -578.625, + -100.25, + -87.5, + 0, + 25, + 0, + 0, + 1, + 1.25, + 1.75, + 2.5, + 1.75, + 0, + 5889.125, + 6500.375, ], - pyarrow.list_(pyarrow.float32()), - ), - pyarrow.array( [ - None, - [ - -22.5, - -278.625, - -200.25, - -77.5, - 0, - 35, - 0, - 0, - 1, - 8.25, - 9.75, - 3.5, - 4.75, - 0, - 6889.125, - 7500.375, - ], - [ - -35.5, - 35.5, - -25.25, - 25.25, - -8.3, - 8.3, - 0, - 0, - -227.8, - 227.8, - -215.222, - 415.222, - -505.333, - 605.333, - -1, - 1, - ], + -25.5, + 25.5, + -15.25, + 15.25, + -5.3, + 5.3, + 0, + 0, + -127.8, + 127.8, + -15.222, + 15.222, + -105.333, + 105.333, + -1, + 1, ], - pyarrow.list_(pyarrow.float64()), - ), - ] - names = [ - "IntCol", - "Vector8Col", - "Vector32Col", - "Vector64Col", - ] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestVectors") - column_names = ",".join(names) - bind_names = ",".join(f":{i + 1}" for i in range(len(names))) - self.cursor.executemany( - f""" - insert into TestVectors ({column_names}) - values ({bind_names}) - """, - df, - ) - self.conn.commit() - query_names = ",".join(f'{name} as "{name}"' for name in names) - odf = self.conn.fetch_df_all( - f""" - select {query_names} - from TestVectors - order by IntCol - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - def test_8919(self): - "8919 - test ingestion with various integer data types" - scenarios = [ - ([-(2**7), 0, 2**7 - 1], pyarrow.int8()), - ([-(2**15), 0, 2**15 - 1], pyarrow.int16()), - ([-(2**31), 0, 2**31 - 1], pyarrow.int32()), - ([-(2**63), 0, 2**63 - 1], pyarrow.int64()), - ([0, 2**7, 2**8 - 1], pyarrow.uint8()), - ([0, 2**15, 2**16 - 1], pyarrow.uint16()), - ([0, 2**31, 2**32 - 1], pyarrow.uint32()), - ([0, 2**63, 2**64 - 1], pyarrow.uint64()), - ] - names = ["Id", "LongIntegerData"] - for values, dtype in scenarios: - with self.subTest(dtype=str(dtype)): - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int8()), - pyarrow.array(values, dtype), - ] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame (Id, LongIntegerData) - values (:1, :2) - """, - df, - ) - self.conn.commit() - self.cursor.execute( - """ - select to_char(LongIntegerData) - from TestDataFrame - order by Id - """ - ) - fetched_values = [int(s) for s, in self.cursor] - self.assertEqual(fetched_values, values) - - def test_8920(self): - "8920 - test ingestion with alternative date types" - scenarios = [ - ( + ], + pyarrow.list_(pyarrow.float32()), + ), + pyarrow.array( + [ + None, [ - datetime.datetime(1915, 9, 11), - None, - datetime.datetime(2045, 2, 28), + -22.5, + -278.625, + -200.25, + -77.5, + 0, + 35, + 0, + 0, + 1, + 8.25, + 9.75, + 3.5, + 4.75, + 0, + 6889.125, + 7500.375, ], - pyarrow.date32(), - ), - ( [ - datetime.datetime(1905, 3, 30), - None, - 
datetime.datetime(2060, 10, 5), + -35.5, + 35.5, + -25.25, + 25.25, + -8.3, + 8.3, + 0, + 0, + -227.8, + 227.8, + -215.222, + 415.222, + -505.333, + 605.333, + -1, + 1, ], - pyarrow.date64(), - ), - ] - names = ["Id", "DateOfBirth"] - for values, dtype in scenarios: - with self.subTest(dtype=str(dtype)): - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int8()), - pyarrow.array(values, dtype), - ] - df = pyarrow.table(arrays, names) - self.cursor.execute("delete from TestDataFrame") - self.cursor.executemany( - """ - insert into TestDataFrame (Id, DateOfBirth) - values (:1, :2) - """, - df, - ) - self.conn.commit() - self.cursor.execute( - """ - select DateOfBirth - from TestDataFrame - order by Id - """ - ) - fetched_values = [d for d, in self.cursor] - self.assertEqual(fetched_values, values) - - -if __name__ == "__main__": - test_env.run_test_cases() + ], + pyarrow.list_(pyarrow.float64()), + ), + ] + names = [ + "IntCol", + "Vector8Col", + "Vector32Col", + "Vector64Col", + ] + df = pyarrow.table(arrays, names) + cursor.execute("delete from TestVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + cursor.executemany( + f""" + insert into TestVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = conn.fetch_df_all( + f""" + select {query_names} + from TestVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +@pytest.mark.parametrize( + "values, dtype", + [ + ([-(2**7), 0, 2**7 - 1], pyarrow.int8()), + ([-(2**15), 0, 2**15 - 1], pyarrow.int16()), + ([-(2**31), 0, 2**31 - 1], pyarrow.int32()), + ([-(2**63), 0, 2**63 - 1], pyarrow.int64()), + ([0, 2**7, 2**8 - 1], pyarrow.uint8()), + ([0, 2**15, 2**16 - 1], pyarrow.uint16()), + ([0, 2**31, 2**32 - 1], pyarrow.uint32()), + ([0, 2**63, 2**64 - 1], pyarrow.uint64()), + ], +) +def test_8919(values, dtype, conn, cursor, empty_tab): + "8919 - test ingestion with various integer data types" + names = ["Id", "LongIntegerData"] + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, LongIntegerData) + values (:1, :2) + """, + df, + ) + conn.commit() + cursor.execute( + """ + select to_char(LongIntegerData) + from TestDataFrame + order by Id + """ + ) + fetched_values = [int(s) for s, in cursor] + assert fetched_values == values + + +@pytest.mark.parametrize( + "values, dtype", + [ + ( + [ + datetime.datetime(1915, 9, 11), + None, + datetime.datetime(2045, 2, 28), + ], + pyarrow.date32(), + ), + ( + [ + datetime.datetime(1905, 3, 30), + None, + datetime.datetime(2060, 10, 5), + ], + pyarrow.date64(), + ), + ], +) +def test_8920(values, dtype, conn, cursor, empty_tab): + "8920 - test ingestion with alternative date types" + names = ["Id", "DateOfBirth"] + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + cursor.executemany( + """ + insert into TestDataFrame (Id, DateOfBirth) + values (:1, :2) + """, + df, + ) + conn.commit() + cursor.execute( + """ + select DateOfBirth + from TestDataFrame + order by Id + """ + ) + fetched_values = [d for d, in cursor] + assert fetched_values == values diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py index 553e85de..66c6bdf4 100644 --- 
a/tests/test_9000_dataframe_ingestion_async.py +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -26,13 +26,12 @@ Module for testing DataFrame ingestion with asyncio """ +import pytest import datetime import decimal import pyarrow -import test_env - SPARSE_VECTOR_FIELDS_FLOAT32 = [ ("num_dimensions", pyarrow.int64()), ("indices", pyarrow.list_(pyarrow.uint32())), @@ -52,574 +51,344 @@ ] -@test_env.skip_unless_thin_mode() -class TestCase(test_env.BaseAsyncTestCase): - - async def test_9000(self): - "9000 - test basic ingestion of data frame" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), - pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), - pyarrow.array( - [ - datetime.datetime(2020, 1, 1), - datetime.datetime(2021, 2, 2), - datetime.datetime(2022, 3, 3), - ], - pyarrow.timestamp("s"), - ), - ] - names = ["Id", "FirstName", "Salary", "DateOfBirth"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture +async def empty_tab(async_cursor): + await async_cursor.execute("delete from TestDataFrame") + + +async def test_9000(async_conn, async_cursor, empty_tab): + "9000 - test basic ingestion of data frame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + datetime.datetime(2021, 2, 2), + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", "DateOfBirth"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9001(async_conn, async_cursor, empty_tab): + "9001 - test ingestion with null values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", None, "Bob"], pyarrow.string()), + pyarrow.array([None, 2000.75, 3000.25], pyarrow.float64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1), + None, + datetime.datetime(2022, 3, 3), + ], + pyarrow.timestamp("s"), + ), + ] + names = ["Id", "FirstName", "Salary", "DateOfBirth"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame + (Id, FirstName, Salary, DateOfBirth) + values (:1, :2, :3, :4) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary", + DateOfBirth as "DateOfBirth" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9002(async_conn, async_cursor, empty_tab): + "9002 - test ingestion with single column" + arrays = [pyarrow.array([1, 2, 3], pyarrow.int64())] + names = ["Id"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + "insert into TestDataFrame (Id) values (:1)", df + ) + await async_conn.commit() + odf = 
await async_conn.fetch_df_all( + """ + select Id as "Id" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9003(async_conn, async_cursor, empty_tab): + "9003 - test ingestion with large data types" + long_str = "X" * 32_768 + long_raw = b"Y" * 32_768 + arrays = [ + pyarrow.array([1], pyarrow.int64()), + pyarrow.array([long_str], pyarrow.large_string()), + pyarrow.array([long_raw], pyarrow.large_binary()), + ] + names = ["Id", "LongData", "LongRawData"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9004(async_conn, async_cursor, empty_tab): + "9004 - test ingestion with decimal values" + arrays = [ + pyarrow.array( + [ + decimal.Decimal("1"), + decimal.Decimal("2"), + decimal.Decimal("3"), + ], + pyarrow.decimal128(9, 0), + ), + pyarrow.array( + [ + decimal.Decimal("1234567890.1234"), + decimal.Decimal("-9876543210.9876"), + decimal.Decimal("0.0001"), + ], + pyarrow.decimal128(15, 4), + ), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame + (Id, DecimalData) + values (:1, :2) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9005(skip_unless_native_boolean_supported, async_conn): + "9005 - test ingestion with boolean values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array([True, False, True], pyarrow.bool_()), + pyarrow.array([False, True, None], pyarrow.bool_()), + ] + names = ["IntCol", "BooleanCol1", "BooleanCol2"] + cursor = async_conn.cursor() + df = pyarrow.table(arrays, names) + await cursor.execute("truncate table TestBooleans") + await cursor.executemany( + """ + insert into TestBooleans + (IntCol, BooleanCol1, BooleanCol2) + values (:1, :2, :3) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + IntCol as "IntCol", + BooleanCol1 as "BooleanCol1", + BooleanCol2 as "BooleanCol2" + from TestBooleans + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9006(async_conn, async_cursor, empty_tab): + "9006 - test ingestion with timestamp values" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + datetime.datetime(2020, 1, 1, 0, 0, 0), + datetime.datetime(2021, 2, 2, 12, 34, 56), + datetime.datetime(2022, 3, 3, 23, 59, 59), + ], + pyarrow.timestamp("us"), + ), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame + (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def 
test_9007(async_cursor, test_env): + "9007 - test ingestion with mismatched column count" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + ] + names = ["ID", "NAME"] + df = pyarrow.table(arrays, names) + with test_env.assert_raises_full_code("DPY-4009", "ORA-01008"): + await async_cursor.executemany( """ - insert into TestDataFrame - (Id, FirstName, Salary, DateOfBirth) - values (:1, :2, :3, :4) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName", - Salary as "Salary", - DateOfBirth as "DateOfBirth" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9001(self): - "9001 - test ingestion with null values" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", None, "Bob"], pyarrow.string()), - pyarrow.array([None, 2000.75, 3000.25], pyarrow.float64()), - pyarrow.array( - [ - datetime.datetime(2020, 1, 1), - None, - datetime.datetime(2022, 3, 3), - ], - pyarrow.timestamp("s"), - ), - ] - names = ["Id", "FirstName", "Salary", "DateOfBirth"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame - (Id, FirstName, Salary, DateOfBirth) - values (:1, :2, :3, :4) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName", - Salary as "Salary", - DateOfBirth as "DateOfBirth" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9002(self): - "9002 - test ingestion with single column" - arrays = [pyarrow.array([1, 2, 3], pyarrow.int64())] - names = ["Id"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - "insert into TestDataFrame (Id) values (:1)", df - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select Id as "Id" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9003(self): - "9003 - test ingestion with large data types" - long_str = "X" * 32_768 - long_raw = b"Y" * 32_768 - arrays = [ - pyarrow.array([1], pyarrow.int64()), - pyarrow.array([long_str], pyarrow.large_string()), - pyarrow.array([long_raw], pyarrow.large_binary()), - ] - names = ["Id", "LongData", "LongRawData"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, LongData, LongRawData) + insert into TestDataFrame (Id, FirstName, Salary) values (:1, :2, :3) """, df, ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - LongData as "LongData", - LongRawData as "LongRawData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - async def test_9004(self): - "9004 - test ingestion with decimal values" - arrays = [ - pyarrow.array( - [ - decimal.Decimal("1"), - decimal.Decimal("2"), - decimal.Decimal("3"), - ], - pyarrow.decimal128(9, 0), - ), - pyarrow.array( - [ - decimal.Decimal("1234567890.1234"), - decimal.Decimal("-9876543210.9876"), - decimal.Decimal("0.0001"), - ], - 
pyarrow.decimal128(15, 4), - ), - ] - names = ["Id", "DecimalData"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame - (Id, DecimalData) - values (:1, :2) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """, - fetch_decimals=True, - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - @test_env.skip_unless_native_boolean_supported() - async def test_9005(self): - "9005 - test ingestion with boolean values" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array([True, False, True], pyarrow.bool_()), - pyarrow.array([False, True, None], pyarrow.bool_()), - ] - names = ["IntCol", "BooleanCol1", "BooleanCol2"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("truncate table TestBooleans") - await self.cursor.executemany( - """ - insert into TestBooleans - (IntCol, BooleanCol1, BooleanCol2) - values (:1, :2, :3) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - IntCol as "IntCol", - BooleanCol1 as "BooleanCol1", - BooleanCol2 as "BooleanCol2" - from TestBooleans - order by IntCol - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - async def test_9006(self): - "9006 - test ingestion with timestamp values" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( - [ - datetime.datetime(2020, 1, 1, 0, 0, 0), - datetime.datetime(2021, 2, 2, 12, 34, 56), - datetime.datetime(2022, 3, 3, 23, 59, 59), - ], - pyarrow.timestamp("us"), - ), - ] - names = ["Id", "LastUpdated"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame - (Id, LastUpdated) - values (:1, :2) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - LastUpdated as "LastUpdated" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9007(self): - "9007 - test ingestion with mismatched column count" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), - ] - names = ["ID", "NAME"] - df = pyarrow.table(arrays, names) - with self.assertRaisesFullCode("DPY-4009", "ORA-01008"): - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, FirstName, Salary) - values (:1, :2, :3) - """, - df, - ) - - async def test_9008(self): - "9008 - test ingestion with invalid data type" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( - [["a", "b"], ["c"], ["d", "e", "f"]], - pyarrow.list_(pyarrow.string()), - ), - ] - names = ["Id", "FirstName"] - df = pyarrow.table(arrays, names) - with self.assertRaisesFullCode("DPY-3033"): - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, FirstName) - values (:1, :2) - """, - df, - ) - - async def test_9009(self): - "9009 - test execute() with DataFrame" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", "Jane", "Sue"], pyarrow.string()), - ] - names = ["Id", "FirstName"] - df = pyarrow.table(arrays, names) - with self.assertRaisesFullCode("DPY-2003"): - await self.cursor.execute( - """ - insert into TestDataFrame (Id, 
FirstName) - values (:1, :2) - """, - df, - ) - - async def test_9010(self): - "9010 - test consecutive executemany() calls with same dataframe" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), - pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), - ] - names = ["Id", "FirstName", "Salary"] - df = pyarrow.table(arrays, names) - for i in range(3): - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, FirstName, Salary) - values (:1, :2, :3) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName", - Salary as "Salary" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9011(self): - "9011 - test nulls/None for all datatypes" - arrays = [ - pyarrow.array([1], pyarrow.int64()), - pyarrow.array([None], pyarrow.float32()), - pyarrow.array([None], pyarrow.float64()), - pyarrow.array([None], pyarrow.string()), - pyarrow.array([None], pyarrow.timestamp("s")), - pyarrow.array([None], pyarrow.binary()), - ] - names = [ - "Id", - "FloatData", - "DoubleData", - "FirstName", - "DateOfBirth", - "RawData", - ] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - bind_names = ",".join(names) - bind_values = ",".join(f":{i + 1}" for i in range(len(names))) - await self.cursor.executemany( - f""" - insert into TestDataFrame ({bind_names}) - values ({bind_values}) - """, - df, - ) - await self.conn.commit() - query_values = ",".join(f'{name} as "{name}"' for name in names) - odf = await self.conn.fetch_df_all( - f""" - select {query_values} - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9012(self): - "9012 - test LOB sizes around 32K boundary using DataFrame ingestion" - test_sizes = [32766, 32767, 32768, 32769, 32770] - arrays = [ - pyarrow.array(range(1, len(test_sizes) + 1), pyarrow.int64()), - pyarrow.array( - ["X" * s for s in test_sizes], pyarrow.large_string() - ), - pyarrow.array( - [b"Y" * s for s in test_sizes], pyarrow.large_binary() - ), - ] - names = ["Id", "LongData", "LongRawData"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, LongData, LongRawData) - values (:1, :2, :3) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - LongData as "LongData", - LongRawData as "LongRawData" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9013(self): - "9013 - test ingestion with mixed characters using DataFrame" - if test_env.get_charset() != "AL32UTF8": - self.skipTest("Database character set must be AL32UTF8") - - test_data = [ - "ASCII: Hello World", # Pure ASCII - "Latin: café España", # Latin-1 Supplement - "Cyrillic: русский текст", # Actual Cyrillic - "Chinese: 中文测试", # Actual Chinese - "Emoji: 👍😊❤️", # Emojis - "Special: ~!@#$%^&*()_+{}|:\"<>?`-=[]\\;',./", # ASCII symbols - "Mixed: 你好, world! café? 
123@# русский 👍", # Mixed characters - ] - arrays = [ - pyarrow.array(range(1, len(test_data) + 1), pyarrow.int64()), - pyarrow.array(test_data, pyarrow.string()), - ] - names = ["Id", "FirstName"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( +async def test_9008(async_cursor, test_env): + "9008 - test ingestion with invalid data type" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [["a", "b"], ["c"], ["d", "e", "f"]], + pyarrow.list_(pyarrow.string()), + ), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with test_env.assert_raises_full_code("DPY-3033"): + await async_cursor.executemany( """ insert into TestDataFrame (Id, FirstName) values (:1, :2) """, df, ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - FirstName as "FirstName" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9014(self): - "9014 - test various numeric values" - test_data = [ - decimal.Decimal(0), - decimal.Decimal(1), - decimal.Decimal(-1), - decimal.Decimal("99999999999.9999"), - decimal.Decimal("-99999999999.9999"), - decimal.Decimal("10000000000.0001"), - decimal.Decimal("-10000000000.0001"), - decimal.Decimal(".0001"), - decimal.Decimal("-.0001"), - decimal.Decimal(".9"), - decimal.Decimal("-.9"), - decimal.Decimal(".09"), - decimal.Decimal("-.09"), - decimal.Decimal(".009"), - decimal.Decimal("-.009"), - ] - ids = [decimal.Decimal(i) for i in range(len(test_data))] - arrays = [ - pyarrow.array(ids, pyarrow.decimal128(9, 0)), - pyarrow.array(test_data, pyarrow.decimal128(15, 4)), - ] - names = ["Id", "DecimalData"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, DecimalData) - values (:1, :2) - """, - df, - ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select - Id as "Id", - DecimalData as "DecimalData" - from TestDataFrame - order by Id - """, - fetch_decimals=True, - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9015(self): - "9015 - test various timestamp values" - test_data = [ - datetime.datetime(2056, 2, 29), - datetime.datetime(2020, 2, 29), - datetime.datetime(1900, 1, 1), - datetime.datetime(2000, 1, 1), - datetime.datetime(1970, 1, 1), - datetime.datetime(2020, 2, 29, 23, 59, 59, 123456), - datetime.datetime(2023, 12, 31, 23, 59, 59, 567890), - datetime.datetime(2024, 1, 1, 0, 0, 0, 789012), - ] - ids = list(range(len(test_data))) - arrays = [ - pyarrow.array(ids, pyarrow.int64()), - pyarrow.array(test_data, pyarrow.timestamp("us")), - ] - names = ["Id", "LastUpdated"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( + + +async def test_9009(async_cursor, test_env): + "9009 - test execute() with DataFrame" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Sue"], pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + with test_env.assert_raises_full_code("DPY-2003"): + await async_cursor.execute( """ - insert into TestDataFrame (Id, LastUpdated) + insert into TestDataFrame (Id, FirstName) values (:1, :2) """, df, ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( - """ - select 
- Id as "Id", - LastUpdated as "LastUpdated" - from TestDataFrame - order by Id - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9016(self): - "9016 - test insertion with large data volumes" - num_rows = 10_000 - ids = list(range(1, num_rows + 1)) - names = [f"Employee-{i}" for i in ids] - salaries = [i * 100.25 for i in ids] - arrays = [ - pyarrow.array(ids, pyarrow.int64()), - pyarrow.array(names, pyarrow.string()), - pyarrow.array(salaries, pyarrow.float64()), - ] - names = ["Id", "FirstName", "Salary"] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( + + +async def test_9010(async_conn, async_cursor): + "9010 - test consecutive executemany() calls with same dataframe" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array(["John", "Jane", "Bob"], pyarrow.string()), + pyarrow.array([1000.50, 2000.75, 3000.25], pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + for i in range(3): + await async_cursor.execute("delete from TestDataFrame") + await async_cursor.executemany( """ insert into TestDataFrame (Id, FirstName, Salary) values (:1, :2, :3) """, df, ) - await self.conn.commit() - odf = await self.conn.fetch_df_all( + await async_conn.commit() + odf = await async_conn.fetch_df_all( """ select Id as "Id", @@ -630,322 +399,551 @@ async def test_9016(self): """ ) fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - @test_env.skip_unless_sparse_vectors_supported() - async def test_9017(self): - "9017 - test ingestion of sparse vectors" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( - [ - None, - dict(num_dimensions=16, indices=[1, 3], values=[1, -1]), - dict(num_dimensions=16, indices=[5, 10], values=[2, -2]), - ], - pyarrow.struct(SPARSE_VECTOR_FIELDS_INT8), - ), - pyarrow.array( - [ - dict( - num_dimensions=16, indices=[1, 3], values=[1.1, -1.1] - ), - None, - dict( - num_dimensions=16, indices=[5, 10], values=[2.2, -2.2] - ), - ], - pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT32), - ), - pyarrow.array( + assert fetched_df.equals(df) + + +async def test_9011(async_conn, async_cursor, empty_tab): + "9011 - test nulls/None for all datatypes" + arrays = [ + pyarrow.array([1], pyarrow.int64()), + pyarrow.array([None], pyarrow.float32()), + pyarrow.array([None], pyarrow.float64()), + pyarrow.array([None], pyarrow.string()), + pyarrow.array([None], pyarrow.timestamp("s")), + pyarrow.array([None], pyarrow.binary()), + ] + names = [ + "Id", + "FloatData", + "DoubleData", + "FirstName", + "DateOfBirth", + "RawData", + ] + df = pyarrow.table(arrays, names) + bind_names = ",".join(names) + bind_values = ",".join(f":{i + 1}" for i in range(len(names))) + await async_cursor.executemany( + f""" + insert into TestDataFrame ({bind_names}) + values ({bind_values}) + """, + df, + ) + await async_conn.commit() + query_values = ",".join(f'{name} as "{name}"' for name in names) + odf = await async_conn.fetch_df_all( + f""" + select {query_values} + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9012(async_conn, async_cursor, empty_tab): + "9012 - test LOB sizes around 32K boundary using DataFrame ingestion" + test_sizes = [32766, 32767, 32768, 32769, 32770] + arrays = [ + pyarrow.array(range(1, len(test_sizes) + 1), pyarrow.int64()), + pyarrow.array(["X" * s for s in test_sizes], 
pyarrow.large_string()), + pyarrow.array([b"Y" * s for s in test_sizes], pyarrow.large_binary()), + ] + names = ["Id", "LongData", "LongRawData"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame (Id, LongData, LongRawData) + values (:1, :2, :3) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + LongData as "LongData", + LongRawData as "LongRawData" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9013(async_conn, async_cursor, empty_tab, test_env): + "9013 - test ingestion with mixed characters using DataFrame" + if test_env.charset != "AL32UTF8": + pytest.skip("Database character set must be AL32UTF8") + + test_data = [ + "ASCII: Hello World", # Pure ASCII + "Latin: café España", # Latin-1 Supplement + "Cyrillic: русский текст", # Actual Cyrillic + "Chinese: 中文测试", # Actual Chinese + "Emoji: 👍😊❤️", # Emojis + "Special: ~!@#$%^&*()_+{}|:\"<>?`-=[]\\;',./", # ASCII symbols + "Mixed: 你好, world! café? 123@# русский 👍", # Mixed characters + ] + arrays = [ + pyarrow.array(range(1, len(test_data) + 1), pyarrow.int64()), + pyarrow.array(test_data, pyarrow.string()), + ] + names = ["Id", "FirstName"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName) + values (:1, :2) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9014(async_conn, async_cursor, empty_tab): + "9014 - test various numeric values" + test_data = [ + decimal.Decimal(0), + decimal.Decimal(1), + decimal.Decimal(-1), + decimal.Decimal("99999999999.9999"), + decimal.Decimal("-99999999999.9999"), + decimal.Decimal("10000000000.0001"), + decimal.Decimal("-10000000000.0001"), + decimal.Decimal(".0001"), + decimal.Decimal("-.0001"), + decimal.Decimal(".9"), + decimal.Decimal("-.9"), + decimal.Decimal(".09"), + decimal.Decimal("-.09"), + decimal.Decimal(".009"), + decimal.Decimal("-.009"), + ] + ids = [decimal.Decimal(i) for i in range(len(test_data))] + arrays = [ + pyarrow.array(ids, pyarrow.decimal128(9, 0)), + pyarrow.array(test_data, pyarrow.decimal128(15, 4)), + ] + names = ["Id", "DecimalData"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame (Id, DecimalData) + values (:1, :2) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + DecimalData as "DecimalData" + from TestDataFrame + order by Id + """, + fetch_decimals=True, + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9015(async_conn, async_cursor, empty_tab): + "9015 - test various timestamp values" + test_data = [ + datetime.datetime(2056, 2, 29), + datetime.datetime(2020, 2, 29), + datetime.datetime(1900, 1, 1), + datetime.datetime(2000, 1, 1), + datetime.datetime(1970, 1, 1), + datetime.datetime(2020, 2, 29, 23, 59, 59, 123456), + datetime.datetime(2023, 12, 31, 23, 59, 59, 567890), + datetime.datetime(2024, 1, 1, 0, 0, 0, 789012), + ] + ids = list(range(len(test_data))) + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(test_data, pyarrow.timestamp("us")), + ] + names = ["Id", "LastUpdated"] + df = pyarrow.table(arrays, names) 
+ await async_cursor.executemany( + """ + insert into TestDataFrame (Id, LastUpdated) + values (:1, :2) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + LastUpdated as "LastUpdated" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9016(async_conn, async_cursor, empty_tab): + "9016 - test insertion with large data volumes" + num_rows = 10_000 + ids = list(range(1, num_rows + 1)) + names = [f"Employee-{i}" for i in ids] + salaries = [i * 100.25 for i in ids] + arrays = [ + pyarrow.array(ids, pyarrow.int64()), + pyarrow.array(names, pyarrow.string()), + pyarrow.array(salaries, pyarrow.float64()), + ] + names = ["Id", "FirstName", "Salary"] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame (Id, FirstName, Salary) + values (:1, :2, :3) + """, + df, + ) + await async_conn.commit() + odf = await async_conn.fetch_df_all( + """ + select + Id as "Id", + FirstName as "FirstName", + Salary as "Salary" + from TestDataFrame + order by Id + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9017(skip_unless_sparse_vectors_supported, async_conn): + "9017 - test ingestion of sparse vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, + dict(num_dimensions=16, indices=[1, 3], values=[1, -1]), + dict(num_dimensions=16, indices=[5, 10], values=[2, -2]), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_INT8), + ), + pyarrow.array( + [ + dict(num_dimensions=16, indices=[1, 3], values=[1.1, -1.1]), + None, + dict(num_dimensions=16, indices=[5, 10], values=[2.2, -2.2]), + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT32), + ), + pyarrow.array( + [ + dict(num_dimensions=16, indices=[1, 3], values=[1.25, -1.25]), + dict(num_dimensions=16, indices=[5, 10], values=[2.5, -2.5]), + None, + ], + pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT64), + ), + ] + names = [ + "IntCol", + "SparseVector8Col", + "SparseVector32Col", + "SparseVector64Col", + ] + df = pyarrow.table(arrays, names) + cursor = async_conn.cursor() + await cursor.execute("delete from TestSparseVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + await cursor.executemany( + f""" + insert into TestSparseVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + await async_conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = await async_conn.fetch_df_all( + f""" + select {query_names} + from TestSparseVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +async def test_9018(skip_unless_vectors_supported, async_conn, async_cursor): + "9018 - test ingestion of dense vectors" + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int64()), + pyarrow.array( + [ + None, [ - dict( - num_dimensions=16, indices=[1, 3], values=[1.25, -1.25] - ), - dict( - num_dimensions=16, indices=[5, 10], values=[2.5, -2.5] - ), - None, + -127, + -100, + -5, + -1, + 0, + 0, + 0, + 0, + 1, + 5, + 7, + 25, + 13, + 0, + 10, + 127, ], - pyarrow.struct(SPARSE_VECTOR_FIELDS_FLOAT64), - ), - ] - names = [ - "IntCol", - "SparseVector8Col", - "SparseVector32Col", - "SparseVector64Col", - ] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestSparseVectors") - column_names = ",".join(names) - bind_names = ",".join(f":{i + 1}" for i in 
range(len(names))) - await self.cursor.executemany( - f""" - insert into TestSparseVectors ({column_names}) - values ({bind_names}) - """, - df, - ) - await self.conn.commit() - query_names = ",".join(f'{name} as "{name}"' for name in names) - odf = await self.conn.fetch_df_all( - f""" - select {query_names} - from TestSparseVectors - order by IntCol - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - @test_env.skip_unless_vectors_supported() - async def test_9018(self): - "9018 - test ingestion of dense vectors" - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int64()), - pyarrow.array( [ - None, - [ - -127, - -100, - -5, - -1, - 0, - 0, - 0, - 0, - 1, - 5, - 7, - 25, - 13, - 0, - 10, - 127, - ], - [ - -25, - 25, - -15, - 15, - -5, - 5, - 0, - 0, - -127, - 127, - -25, - 25, - -105, - 105, - -1, - 1, - ], + -25, + 25, + -15, + 15, + -5, + 5, + 0, + 0, + -127, + 127, + -25, + 25, + -105, + 105, + -1, + 1, ], - pyarrow.list_(pyarrow.int8()), - ), - pyarrow.array( + ], + pyarrow.list_(pyarrow.int8()), + ), + pyarrow.array( + [ + None, [ - None, - [ - -12.5, - -578.625, - -100.25, - -87.5, - 0, - 25, - 0, - 0, - 1, - 1.25, - 1.75, - 2.5, - 1.75, - 0, - 5889.125, - 6500.375, - ], - [ - -25.5, - 25.5, - -15.25, - 15.25, - -5.3, - 5.3, - 0, - 0, - -127.8, - 127.8, - -15.222, - 15.222, - -105.333, - 105.333, - -1, - 1, - ], + -12.5, + -578.625, + -100.25, + -87.5, + 0, + 25, + 0, + 0, + 1, + 1.25, + 1.75, + 2.5, + 1.75, + 0, + 5889.125, + 6500.375, ], - pyarrow.list_(pyarrow.float32()), - ), - pyarrow.array( [ - None, - [ - -22.5, - -278.625, - -200.25, - -77.5, - 0, - 35, - 0, - 0, - 1, - 8.25, - 9.75, - 3.5, - 4.75, - 0, - 6889.125, - 7500.375, - ], - [ - -35.5, - 35.5, - -25.25, - 25.25, - -8.3, - 8.3, - 0, - 0, - -227.8, - 227.8, - -215.222, - 415.222, - -505.333, - 605.333, - -1, - 1, - ], + -25.5, + 25.5, + -15.25, + 15.25, + -5.3, + 5.3, + 0, + 0, + -127.8, + 127.8, + -15.222, + 15.222, + -105.333, + 105.333, + -1, + 1, ], - pyarrow.list_(pyarrow.float64()), - ), - ] - names = [ - "IntCol", - "Vector8Col", - "Vector32Col", - "Vector64Col", - ] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestVectors") - column_names = ",".join(names) - bind_names = ",".join(f":{i + 1}" for i in range(len(names))) - await self.cursor.executemany( - f""" - insert into TestVectors ({column_names}) - values ({bind_names}) - """, - df, - ) - await self.conn.commit() - query_names = ",".join(f'{name} as "{name}"' for name in names) - odf = await self.conn.fetch_df_all( - f""" - select {query_names} - from TestVectors - order by IntCol - """ - ) - fetched_df = pyarrow.table(odf) - self.assertTrue(fetched_df.equals(df)) - - async def test_9019(self): - "9019 - test ingestion with various integer data types" - scenarios = [ - ([-(2**7), 0, 2**7 - 1], pyarrow.int8()), - ([-(2**15), 0, 2**15 - 1], pyarrow.int16()), - ([-(2**31), 0, 2**31 - 1], pyarrow.int32()), - ([-(2**63), 0, 2**63 - 1], pyarrow.int64()), - ([0, 2**7, 2**8 - 1], pyarrow.uint8()), - ([0, 2**15, 2**16 - 1], pyarrow.uint16()), - ([0, 2**31, 2**32 - 1], pyarrow.uint32()), - ([0, 2**63, 2**64 - 1], pyarrow.uint64()), - ] - names = ["Id", "LongIntegerData"] - for values, dtype in scenarios: - with self.subTest(dtype=str(dtype)): - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int8()), - pyarrow.array(values, dtype), - ] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, 
LongIntegerData) - values (:1, :2) - """, - df, - ) - await self.conn.commit() - await self.cursor.execute( - """ - select to_char(LongIntegerData) - from TestDataFrame - order by Id - """ - ) - fetched_values = [int(s) async for s, in self.cursor] - self.assertEqual(fetched_values, values) - - async def test_9020(self): - "9020 - test ingestion with alternative date types" - scenarios = [ - ( + ], + pyarrow.list_(pyarrow.float32()), + ), + pyarrow.array( + [ + None, [ - datetime.datetime(1915, 9, 11), - None, - datetime.datetime(2045, 2, 28), + -22.5, + -278.625, + -200.25, + -77.5, + 0, + 35, + 0, + 0, + 1, + 8.25, + 9.75, + 3.5, + 4.75, + 0, + 6889.125, + 7500.375, ], - pyarrow.date32(), - ), - ( [ - datetime.datetime(1905, 3, 30), - None, - datetime.datetime(2060, 10, 5), + -35.5, + 35.5, + -25.25, + 25.25, + -8.3, + 8.3, + 0, + 0, + -227.8, + 227.8, + -215.222, + 415.222, + -505.333, + 605.333, + -1, + 1, ], - pyarrow.date64(), - ), - ] - names = ["Id", "DateOfBirth"] - for values, dtype in scenarios: - with self.subTest(dtype=str(dtype)): - arrays = [ - pyarrow.array([1, 2, 3], pyarrow.int8()), - pyarrow.array(values, dtype), - ] - df = pyarrow.table(arrays, names) - await self.cursor.execute("delete from TestDataFrame") - await self.cursor.executemany( - """ - insert into TestDataFrame (Id, DateOfBirth) - values (:1, :2) - """, - df, - ) - await self.conn.commit() - await self.cursor.execute( - """ - select DateOfBirth - from TestDataFrame - order by Id - """ - ) - fetched_values = [d async for d, in self.cursor] - self.assertEqual(fetched_values, values) - - -if __name__ == "__main__": - test_env.run_test_cases() + ], + pyarrow.list_(pyarrow.float64()), + ), + ] + names = [ + "IntCol", + "Vector8Col", + "Vector32Col", + "Vector64Col", + ] + df = pyarrow.table(arrays, names) + await async_cursor.execute("delete from TestVectors") + column_names = ",".join(names) + bind_names = ",".join(f":{i + 1}" for i in range(len(names))) + await async_cursor.executemany( + f""" + insert into TestVectors ({column_names}) + values ({bind_names}) + """, + df, + ) + await async_conn.commit() + query_names = ",".join(f'{name} as "{name}"' for name in names) + odf = await async_conn.fetch_df_all( + f""" + select {query_names} + from TestVectors + order by IntCol + """ + ) + fetched_df = pyarrow.table(odf) + assert fetched_df.equals(df) + + +@pytest.mark.parametrize( + "values, dtype", + [ + ([-(2**7), 0, 2**7 - 1], pyarrow.int8()), + ([-(2**15), 0, 2**15 - 1], pyarrow.int16()), + ([-(2**31), 0, 2**31 - 1], pyarrow.int32()), + ([-(2**63), 0, 2**63 - 1], pyarrow.int64()), + ([0, 2**7, 2**8 - 1], pyarrow.uint8()), + ([0, 2**15, 2**16 - 1], pyarrow.uint16()), + ([0, 2**31, 2**32 - 1], pyarrow.uint32()), + ([0, 2**63, 2**64 - 1], pyarrow.uint64()), + ], +) +async def test_9019(values, dtype, async_conn, async_cursor, empty_tab): + "9019 - test ingestion with various integer data types" + names = ["Id", "LongIntegerData"] + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + await async_cursor.execute("delete from TestDataFrame") + await async_cursor.executemany( + """ + insert into TestDataFrame (Id, LongIntegerData) + values (:1, :2) + """, + df, + ) + await async_conn.commit() + await async_cursor.execute( + """ + select to_char(LongIntegerData) + from TestDataFrame + order by Id + """ + ) + fetched_values = [int(s) async for s, in async_cursor] + assert fetched_values == values + + +@pytest.mark.parametrize( + "values, dtype", 
+ [ + ( + [ + datetime.datetime(1915, 9, 11), + None, + datetime.datetime(2045, 2, 28), + ], + pyarrow.date32(), + ), + ( + [ + datetime.datetime(1905, 3, 30), + None, + datetime.datetime(2060, 10, 5), + ], + pyarrow.date64(), + ), + ], +) +async def test_9020(values, dtype, async_conn, async_cursor, empty_tab): + "9020 - test ingestion with alternative date types" + names = ["Id", "DateOfBirth"] + arrays = [ + pyarrow.array([1, 2, 3], pyarrow.int8()), + pyarrow.array(values, dtype), + ] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + """ + insert into TestDataFrame (Id, DateOfBirth) + values (:1, :2) + """, + df, + ) + await async_conn.commit() + await async_cursor.execute( + """ + select DateOfBirth + from TestDataFrame + order by Id + """ + ) + fetched_values = [d async for d, in async_cursor] + assert fetched_values == values diff --git a/tests/test_9100_dataframe_vector.py b/tests/test_9100_dataframe_vector.py index f63bbe2f..241c71e3 100644 --- a/tests/test_9100_dataframe_vector.py +++ b/tests/test_9100_dataframe_vector.py @@ -28,342 +28,317 @@ import array -import numpy -import pandas import pyarrow +import pytest -import test_env +@pytest.fixture(autouse=True) +def module_checks(skip_unless_vectors_supported): + pass -@test_env.skip_unless_vectors_supported() -class TestCase(test_env.BaseTestCase): - def __convert_df_value(self, df_val): +def test_9100(conn, test_env): + "9100 - fetch float32 vector" + + # float32 is a special case while comparing dataframe values + # Converting Dataframe cell value of type numpy.ndarray[float32] + # using .tolist() converts each value to Python float. Python + # float uses 64-bit precision causing mismatches in assertEqual. + # As a workaround we use array.array('f', src).tolist() on the + # source data + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (None,), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + ora_df = conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float32) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float32) """ - This method converts a dataframe cell value to use with assertEqual() - For e.g. NaN and np.array cannot be compared directly. Values are - converted according to the following rules: - - NaN -> None - - np.array -> np.array.tolist() (Python list) + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9101(conn, test_env): + "9101 - fetch float64 vector" + data = [ + ([34.6, 77.8],), + (None,), + ([34.6, 77.8, 55.9],), + ] + ora_df = conn.fetch_df_all( """ - if isinstance(df_val, numpy.ndarray): - return df_val.tolist() - elif pandas.isna(df_val): - return None - elif isinstance(df_val, dict): - return {k: self.__convert_df_value(v) for k, v in df_val.items()} - else: - return df_val - - def __get_data_from_df(self, df): + select to_vector('[34.6, 77.8]', 2, float64) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9102(conn, test_env): + "9102 - fetch int8 vector" + data = [ + ([34, -77],), + (None,), + ([34, 77, 55],), + ] + ora_df = conn.fetch_df_all( """ - Returns data from the data frame in a normalized fashion suitable for - comparison. In particular, NaN values cannot be compared to one another - so they are converted to the value None for comparison purposes. 
+ select to_vector('[34, -77]', 2, int8) + union all + select null + union all + select to_vector('[34, 77, 55]', 3, int8) """ - return [ - tuple(self.__convert_df_value(v) for v in row) - for row in df.itertuples(index=False, name=None) - ] - - def test_9100(self): - "9100 - fetch float32 vector" - - # float32 is a special case while comparing dataframe values - # Converting Dataframe cell value of type numpy.ndarray[float32] - # using .tolist() converts each value to Python float. Python - # float uses 64-bit precision causing mismatches in assertEqual. - # As a workaround we use array.array('f', src).tolist() on the - # source data - data = [ - (array.array("f", [34.6, 77.8]).tolist(),), - (None,), - (array.array("f", [34.6, 77.8, 55.9]).tolist(),), - ] - ora_df = self.conn.fetch_df_all( + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9103(skip_unless_binary_vectors_supported, conn, test_env): + "9103 - fetch binary vector" + data = [ + ([3, 2, 3],), + (None,), + ([3, 2],), + ] + ora_df = conn.fetch_df_all( + """ + select to_vector('[3, 2, 3]', 24, binary) + union all + select null + union all + select to_vector('[3, 2]', 16, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9104(conn, test_env): + "9104 - fetch duplicate float64 vectors" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ] + ora_df = conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9105(skip_unless_sparse_vectors_supported, conn, test_env): + "9105 - fetch float32 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 77.8]).tolist(), + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 9.1]).tolist(), + }, + ), + ] + ora_df = conn.fetch_df_all( + """ + select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float32, sparse) + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float32, sparse) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9106(skip_unless_sparse_vectors_supported, conn, test_env): + "9106 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + (None,), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + ora_df = conn.fetch_df_all( + """ + select to_vector('[8, [0, 7], 
[34.6, 77.8]]', 8, float64, sparse) + union all + select null + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float64, sparse) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9107(conn, test_env): + "9107 - DPY-3031 - Unsupported flexible vector formats" + with test_env.assert_raises_full_code("DPY-3031"): + conn.fetch_df_all( """ - select to_vector('[34.6, 77.8]', 2, float32) - union all - select null + select to_vector('[44, 55, 89]', 3, int8) as flex_col union all select to_vector('[34.6, 77.8, 55.9]', 3, float32) """ ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - def test_9101(self): - "9101 - fetch float64 vector" - data = [ - ([34.6, 77.8],), - (None,), - ([34.6, 77.8, 55.9],), - ] - ora_df = self.conn.fetch_df_all( - """ - select to_vector('[34.6, 77.8]', 2, float64) - union all - select null - union all - select to_vector('[34.6, 77.8, 55.9]', 3, float64) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - def test_9102(self): - "9102 - fetch int8 vector" - data = [ - ([34, -77],), - (None,), - ([34, 77, 55],), - ] - ora_df = self.conn.fetch_df_all( - """ - select to_vector('[34, -77]', 2, int8) - union all - select null - union all - select to_vector('[34, 77, 55]', 3, int8) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_binary_vectors_supported() - def test_9103(self): - "9103 - fetch binary vector" - data = [ - ([3, 2, 3],), - (None,), - ([3, 2],), - ] - ora_df = self.conn.fetch_df_all( - """ - select to_vector('[3, 2, 3]', 24, binary) - union all - select null - union all - select to_vector('[3, 2]', 16, binary) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - def test_9104(self): - "9104 - fetch duplicate float64 vectors" - data = [ - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ] - ora_df = self.conn.fetch_df_all( - """ - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - def test_9105(self): - "9105 - fetch float32 sparse vectors" - data = [ - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": array.array("f", [34.6, 77.8]).tolist(), - }, - ), - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": array.array("f", [34.6, 9.1]).tolist(), - }, - ), - ] - ora_df = 
self.conn.fetch_df_all( - """ - select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float32, sparse) - union all - select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float32, sparse) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - def test_9106(self): - "9106 - fetch float64 sparse vectors" - data = [ - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 77.8], - }, - ), - (None,), - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 9.1], - }, - ), - ] - ora_df = self.conn.fetch_df_all( - """ - select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float64, sparse) - union all - select null - union all - select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float64, sparse) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - def test_9107(self): - "9107 - DPY-3031 - Unsupported flexible vector formats" - with self.assertRaisesFullCode("DPY-3031"): - self.conn.fetch_df_all( - """ - select to_vector('[44, 55, 89]', 3, int8) as flex_col - union all - select to_vector('[34.6, 77.8, 55.9]', 3, float32) - """ - ) - - def test_9108(self): - "9108 - test vector operations with different dimensions" - data = [([1, 0, 3],), ([0, 5, -12.25, 0],), ([5.5, -6.25, 7, 8, 9],)] - - ora_df = self.conn.fetch_df_all( - """ - select to_vector('[1, 0, 3]', 3, float64) from dual - union all - select to_vector('[0, 5, -12.25, 0]', 4, float64) from dual - union all - select to_vector('[5.5, -6.25, 7, 8, 9]', 5, float64) from dual - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - def test_9109(self): - "9109 - test vector operations with large arrays" - large_array = list(range(1, 1001)) - data = [(large_array,), (large_array,)] - str_value = ",".join(str(i) for i in large_array) - ora_df = self.conn.fetch_df_all( - f""" - select to_vector('[{str_value}]', {len(large_array)}, float64) - union all - select to_vector('[{str_value}]', {len(large_array)}, float64) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - def test_9110(self): - "9110 - test sparse vector operations with different dimensions" - with self.assertRaisesFullCode("DPY-2065"): - self.conn.fetch_df_all( - """ - select to_vector('[10, [1, 3], [2, 4]]', 10, float64, sparse) - union all - select to_vector('[5, [1, 3], [2, 4]]', 5, float64, sparse) - """ - ) - - def test_9111(self): - "9111 - test mixed vector types in a single dataframe" - data = [ - ([1.5, 2.5, 3.5], [1, 2, 3]), - ([4.25, 5.25, 6.25], [4, 5, 6]), - ] - ora_df = self.conn.fetch_df_all( - """ - select - to_vector('[1.5, 2.5, 3.5]', 3, float64) as float_vec, - to_vector('[1, 2, 3]', 3, int8) as int_vec - union all - select - to_vector('[4.25, 5.25, 6.25]', 3, float64), - to_vector('[4, 5, 6]', 3, int8) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - def test_9112(self): - "9112 - test vectors with very large dimensions" - large_dim = 800 - large_vector = [2.25] * large_dim - large_vector[12] = 1.5 - large_vector[-25] = 8.5 - data = [(large_vector,)] - vector_str = ",".join(str(i) for i in large_vector) - ora_df = self.conn.fetch_df_all( - f""" - select to_vector('[{vector_str}]', {large_dim}, float64) - 
""" - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_binary_vectors_supported() - def test_9113(self): - "9113 - test binary vector edge case - max value" - data = [ - ([255, 255, 255],), - ([255, 0, 255],), - ] - ora_df = self.conn.fetch_df_all( + + +def test_9108(conn, test_env): + "9108 - test vector operations with different dimensions" + data = [([1, 0, 3],), ([0, 5, -12.25, 0],), ([5.5, -6.25, 7, 8, 9],)] + + ora_df = conn.fetch_df_all( + """ + select to_vector('[1, 0, 3]', 3, float64) from dual + union all + select to_vector('[0, 5, -12.25, 0]', 4, float64) from dual + union all + select to_vector('[5.5, -6.25, 7, 8, 9]', 5, float64) from dual + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9109(conn, test_env): + "9109 - test vector operations with large arrays" + large_array = list(range(1, 1001)) + data = [(large_array,), (large_array,)] + str_value = ",".join(str(i) for i in large_array) + ora_df = conn.fetch_df_all( + f""" + select to_vector('[{str_value}]', {len(large_array)}, float64) + union all + select to_vector('[{str_value}]', {len(large_array)}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9110(skip_unless_sparse_vectors_supported, conn, test_env): + "9110 - test sparse vector operations with different dimensions" + with test_env.assert_raises_full_code("DPY-2065"): + conn.fetch_df_all( """ - select to_vector('[255, 255, 255]', 24, binary) + select to_vector('[10, [1, 3], [2, 4]]', 10, float64, sparse) union all - select to_vector('[255, 0, 255]', 24, binary) + select to_vector('[5, [1, 3], [2, 4]]', 5, float64, sparse) """ ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) -if __name__ == "__main__": - test_env.run_test_cases() +def test_9111(conn, test_env): + "9111 - test mixed vector types in a single dataframe" + data = [ + ([1.5, 2.5, 3.5], [1, 2, 3]), + ([4.25, 5.25, 6.25], [4, 5, 6]), + ] + ora_df = conn.fetch_df_all( + """ + select + to_vector('[1.5, 2.5, 3.5]', 3, float64) as float_vec, + to_vector('[1, 2, 3]', 3, int8) as int_vec + union all + select + to_vector('[4.25, 5.25, 6.25]', 3, float64), + to_vector('[4, 5, 6]', 3, int8) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9112(conn, test_env): + "9112 - test vectors with very large dimensions" + large_dim = 800 + large_vector = [2.25] * large_dim + large_vector[12] = 1.5 + large_vector[-25] = 8.5 + data = [(large_vector,)] + vector_str = ",".join(str(i) for i in large_vector) + ora_df = conn.fetch_df_all( + f""" + select to_vector('[{vector_str}]', {large_dim}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +def test_9113(skip_unless_binary_vectors_supported, conn, test_env): + "9113 - test binary vector edge case - max value" + data = [ + ([255, 255, 255],), + ([255, 0, 255],), + ] + ora_df = conn.fetch_df_all( + """ + select to_vector('[255, 255, 255]', 24, binary) + union all + select to_vector('[255, 0, 255]', 24, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) diff --git a/tests/test_9200_dataframe_vector_async.py 
b/tests/test_9200_dataframe_vector_async.py index db012360..596585ec 100644 --- a/tests/test_9200_dataframe_vector_async.py +++ b/tests/test_9200_dataframe_vector_async.py @@ -28,342 +28,328 @@ import array -import numpy -import pandas import pyarrow +import pytest -import test_env +@pytest.fixture(autouse=True) +def module_checks( + anyio_backend, skip_unless_thin_mode, skip_unless_vectors_supported +): + pass -@test_env.skip_unless_thin_mode() -@test_env.skip_unless_vectors_supported() -class TestCase(test_env.BaseAsyncTestCase): - def __convert_df_value(self, df_val): +async def test_9200(async_conn, test_env): + "9200 - fetch float32 vector" + + # float32 is a special case while comparing dataframe values + # Converting Dataframe cell value of type numpy.ndarray[float32] + # using .tolist() converts each value to Python float. Python + # float uses 64-bit precision causing mismatches in assertEqual. + # As a workaround we use array.array('f', src).tolist() on the + # source data + data = [ + (array.array("f", [34.6, 77.8]).tolist(),), + (None,), + (array.array("f", [34.6, 77.8, 55.9]).tolist(),), + ] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float32) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float32) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9201(async_conn, test_env): + "9201 - fetch float64 vector" + data = [ + ([34.6, 77.8],), + (None,), + ([34.6, 77.8, 55.9],), + ] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float64) + union all + select null + union all + select to_vector('[34.6, 77.8, 55.9]', 3, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9202(async_conn, test_env): + "9202 - fetch int8 vector" + data = [ + ([34, -77],), + (None,), + ([34, 77, 55],), + ] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[34, -77]', 2, int8) + union all + select null + union all + select to_vector('[34, 77, 55]', 3, int8) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9203( + skip_unless_binary_vectors_supported, async_conn, test_env +): + "9203 - fetch binary vector" + data = [ + ([3, 2, 3],), + (None,), + ([3, 2],), + ] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[3, 2, 3]', 24, binary) + union all + select null + union all + select to_vector('[3, 2]', 16, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9204(async_conn, test_env): + "9204 - fetch duplicate float64 vectors" + data = [ + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ([34.6, 77.8],), + ] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + 
select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) + union all + select to_vector('[34.6, 77.8]', 2, float64) """ - This method converts a dataframe cell value to use with assertEqual() - For e.g. NaN and np.array cannot be compared directly. Values are - converted according to the following rules: - - NaN -> None - - np.array -> np.array.tolist() (Python list) + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9205( + skip_unless_sparse_vectors_supported, async_conn, test_env +): + "9205 - fetch float32 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 77.8]).tolist(), + }, + ), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": array.array("f", [34.6, 9.1]).tolist(), + }, + ), + ] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float32, sparse) + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float32, sparse) """ - if isinstance(df_val, numpy.ndarray): - return df_val.tolist() - elif pandas.isna(df_val): - return None - elif isinstance(df_val, dict): - return {k: self.__convert_df_value(v) for k, v in df_val.items()} - else: - return df_val - - def __get_data_from_df(self, df): + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9206( + skip_unless_sparse_vectors_supported, async_conn, test_env +): + "9206 - fetch float64 sparse vectors" + data = [ + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 77.8], + }, + ), + (None,), + ( + { + "num_dimensions": 8, + "indices": [0, 7], + "values": [34.6, 9.1], + }, + ), + ] + ora_df = await async_conn.fetch_df_all( """ - Returns data from the data frame in a normalized fashion suitable for - comparison. In particular, NaN values cannot be compared to one another - so they are converted to the value None for comparison purposes. + select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float64, sparse) + union all + select null + union all + select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float64, sparse) """ - return [ - tuple(self.__convert_df_value(v) for v in row) - for row in df.itertuples(index=False, name=None) - ] - - async def test_9200(self): - "9200 - fetch float32 vector" - - # float32 is a special case while comparing dataframe values - # Converting Dataframe cell value of type numpy.ndarray[float32] - # using .tolist() converts each value to Python float. Python - # float uses 64-bit precision causing mismatches in assertEqual. 
- # As a workaround we use array.array('f', src).tolist() on the - # source data - data = [ - (array.array("f", [34.6, 77.8]).tolist(),), - (None,), - (array.array("f", [34.6, 77.8, 55.9]).tolist(),), - ] - ora_df = await self.conn.fetch_df_all( + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9207(async_conn, test_env): + "9207 - DPY-3031 - Unsupported flexible vector formats" + with test_env.assert_raises_full_code("DPY-3031"): + await async_conn.fetch_df_all( """ - select to_vector('[34.6, 77.8]', 2, float32) - union all - select null + select to_vector('[44, 55, 89]', 3, int8) as flex_col union all select to_vector('[34.6, 77.8, 55.9]', 3, float32) """ ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - async def test_9201(self): - "9201 - fetch float64 vector" - data = [ - ([34.6, 77.8],), - (None,), - ([34.6, 77.8, 55.9],), - ] - ora_df = await self.conn.fetch_df_all( - """ - select to_vector('[34.6, 77.8]', 2, float64) - union all - select null - union all - select to_vector('[34.6, 77.8, 55.9]', 3, float64) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - async def test_9202(self): - "9202 - fetch int8 vector" - data = [ - ([34, -77],), - (None,), - ([34, 77, 55],), - ] - ora_df = await self.conn.fetch_df_all( - """ - select to_vector('[34, -77]', 2, int8) - union all - select null - union all - select to_vector('[34, 77, 55]', 3, int8) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_binary_vectors_supported() - async def test_9203(self): - "9203 - fetch binary vector" - data = [ - ([3, 2, 3],), - (None,), - ([3, 2],), - ] - ora_df = await self.conn.fetch_df_all( - """ - select to_vector('[3, 2, 3]', 24, binary) - union all - select null - union all - select to_vector('[3, 2]', 16, binary) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - async def test_9204(self): - "9204 - fetch duplicate float64 vectors" - data = [ - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ([34.6, 77.8],), - ] - ora_df = await self.conn.fetch_df_all( - """ - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - union all - select to_vector('[34.6, 77.8]', 2, float64) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - async def test_9205(self): - "9205 - fetch float32 sparse vectors" - data = [ - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": 
array.array("f", [34.6, 77.8]).tolist(), - }, - ), - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": array.array("f", [34.6, 9.1]).tolist(), - }, - ), - ] - ora_df = await self.conn.fetch_df_all( - """ - select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float32, sparse) - union all - select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float32, sparse) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - async def test_9206(self): - "9206 - fetch float64 sparse vectors" - data = [ - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 77.8], - }, - ), - (None,), - ( - { - "num_dimensions": 8, - "indices": [0, 7], - "values": [34.6, 9.1], - }, - ), - ] - ora_df = await self.conn.fetch_df_all( - """ - select to_vector('[8, [0, 7], [34.6, 77.8]]', 8, float64, sparse) - union all - select null - union all - select to_vector('[8, [0, 7], [34.6, 9.1]]', 8, float64, sparse) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - async def test_9207(self): - "9207 - DPY-3031 - Unsupported flexible vector formats" - with self.assertRaisesFullCode("DPY-3031"): - await self.conn.fetch_df_all( - """ - select to_vector('[44, 55, 89]', 3, int8) as flex_col - union all - select to_vector('[34.6, 77.8, 55.9]', 3, float32) - """ - ) - - async def test_9208(self): - "9208 - test vector operations with different dimensions" - data = [([1, 0, 3],), ([0, 5, -12.25, 0],), ([5.5, -6.25, 7, 8, 9],)] - ora_df = await self.conn.fetch_df_all( - """ - select to_vector('[1, 0, 3]', 3, float64) from dual - union all - select to_vector('[0, 5, -12.25, 0]', 4, float64) from dual - union all - select to_vector('[5.5, -6.25, 7, 8, 9]', 5, float64) from dual - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - async def test_9209(self): - "9209 - test vector operations with large arrays" - large_array = list(range(1, 1001)) - data = [(large_array,), (large_array,)] - str_value = ",".join(str(i) for i in large_array) - ora_df = await self.conn.fetch_df_all( - f""" - select to_vector('[{str_value}]', {len(large_array)}, float64) - union all - select to_vector('[{str_value}]', {len(large_array)}, float64) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_sparse_vectors_supported() - async def test_9210(self): - "9210 - test sparse vector operations with different dimensions" - with self.assertRaisesFullCode("DPY-2065"): - await self.conn.fetch_df_all( - """ - select to_vector('[10, [1, 3], [2, 4]]', 10, float64, sparse) - union all - select to_vector('[5, [1, 3], [2, 4]]', 5, float64, sparse) - """ - ) - - async def test_9211(self): - "9211 - test mixed vector types in a single dataframe" - data = [ - ([1.5, 2.5, 3.5], [1, 2, 3]), - ([4.25, 5.25, 6.25], [4, 5, 6]), - ] - ora_df = await self.conn.fetch_df_all( - """ - select - to_vector('[1.5, 2.5, 3.5]', 3, float64) as float_vec, - to_vector('[1, 2, 3]', 3, int8) as int_vec - union all - select - to_vector('[4.25, 5.25, 6.25]', 3, float64), - to_vector('[4, 5, 6]', 3, int8) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - async def test_9212(self): - "9212 - test vectors with very large dimensions" - large_dim = 800 - 
large_vector = [2.25] * large_dim - large_vector[12] = 1.5 - large_vector[-25] = 8.5 - data = [(large_vector,)] - vector_str = ",".join(str(i) for i in large_vector) - ora_df = await self.conn.fetch_df_all( - f""" - select to_vector('[{vector_str}]', {large_dim}, float64) - """ - ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) - - @test_env.skip_unless_binary_vectors_supported() - async def test_9213(self): - "9213 - test binary vector edge case - max value" - data = [ - ([255, 255, 255],), - ([255, 0, 255],), - ] - ora_df = await self.conn.fetch_df_all( + + +async def test_9208(async_conn, test_env): + "9208 - test vector operations with different dimensions" + data = [([1, 0, 3],), ([0, 5, -12.25, 0],), ([5.5, -6.25, 7, 8, 9],)] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[1, 0, 3]', 3, float64) from dual + union all + select to_vector('[0, 5, -12.25, 0]', 4, float64) from dual + union all + select to_vector('[5.5, -6.25, 7, 8, 9]', 5, float64) from dual + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9209(async_conn, test_env): + "9209 - test vector operations with large arrays" + large_array = list(range(1, 1001)) + data = [(large_array,), (large_array,)] + str_value = ",".join(str(i) for i in large_array) + ora_df = await async_conn.fetch_df_all( + f""" + select to_vector('[{str_value}]', {len(large_array)}, float64) + union all + select to_vector('[{str_value}]', {len(large_array)}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9210( + skip_unless_sparse_vectors_supported, async_conn, test_env +): + "9210 - test sparse vector operations with different dimensions" + with test_env.assert_raises_full_code("DPY-2065"): + await async_conn.fetch_df_all( """ - select to_vector('[255, 255, 255]', 24, binary) + select to_vector('[10, [1, 3], [2, 4]]', 10, float64, sparse) union all - select to_vector('[255, 0, 255]', 24, binary) + select to_vector('[5, [1, 3], [2, 4]]', 5, float64, sparse) """ ) - fetched_df = pyarrow.table(ora_df).to_pandas() - self.assertEqual(data, self.__get_data_from_df(fetched_df)) -if __name__ == "__main__": - test_env.run_test_cases() +async def test_9211(async_conn, test_env): + "9211 - test mixed vector types in a single dataframe" + data = [ + ([1.5, 2.5, 3.5], [1, 2, 3]), + ([4.25, 5.25, 6.25], [4, 5, 6]), + ] + ora_df = await async_conn.fetch_df_all( + """ + select + to_vector('[1.5, 2.5, 3.5]', 3, float64) as float_vec, + to_vector('[1, 2, 3]', 3, int8) as int_vec + union all + select + to_vector('[4.25, 5.25, 6.25]', 3, float64), + to_vector('[4, 5, 6]', 3, int8) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9212(async_conn, test_env): + "9212 - test vectors with very large dimensions" + large_dim = 800 + large_vector = [2.25] * large_dim + large_vector[12] = 1.5 + large_vector[-25] = 8.5 + data = [(large_vector,)] + vector_str = ",".join(str(i) for i in large_vector) + ora_df = await async_conn.fetch_df_all( + f""" + select to_vector('[{vector_str}]', {large_dim}, float64) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) + + +async def test_9213( + skip_unless_binary_vectors_supported, async_conn, test_env +): + "9213 - test binary vector edge 
case - max value" + data = [ + ([255, 255, 255],), + ([255, 0, 255],), + ] + ora_df = await async_conn.fetch_df_all( + """ + select to_vector('[255, 255, 255]', 24, binary) + union all + select to_vector('[255, 0, 255]', 24, binary) + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert data == test_env.get_data_from_df(fetched_df) diff --git a/tests/test_env.py b/tests/test_env.py deleted file mode 100644 index 51193f93..00000000 --- a/tests/test_env.py +++ /dev/null @@ -1,896 +0,0 @@ -# ----------------------------------------------------------------------------- -# Copyright (c) 2020, 2025, Oracle and/or its affiliates. -# -# This software is dual-licensed to you under the Universal Permissive License -# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License -# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose -# either license. -# -# If you elect to accept the software under the Apache License, Version 2.0, -# the following applies: -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# Sets the environment used by the python-oracledb test suite. Production -# applications should consider using External Authentication to avoid hard -# coded credentials. -# -# You can set values in environment variables to bypass having the test suite -# request the information it requires. -# -# PYO_TEST_MAIN_USER: user used for most test cases -# PYO_TEST_MAIN_PASSWORD: password of user used for most test cases -# PYO_TEST_PROXY_USER: user for testing proxying -# PYO_TEST_PROXY_PASSWORD: password of user for testing proxying -# PYO_TEST_CONNECT_STRING: connect string for test suite -# PYO_TEST_ADMIN_USER: administrative user for test suite -# PYO_TEST_ADMIN_PASSWORD: administrative password for test suite -# PYO_TEST_WALLET_LOCATION: location of wallet file (thin mode, mTLS) -# PYO_TEST_WALLET_PASSWORD: password for wallet file (thin mode, mTLS) -# PYO_TEST_DRIVER_MODE: python-oracledb mode (thick or thin) to use -# PYO_TEST_EXTERNAL_USER: user for testing external authentication -# PYO_TEST_EDITION_NAME: name of edition for editioning tests -# PYO_TEST_PLUGINS: list of plugins to import before running tests -# PYO_TEST_ORACLE_CLIENT_PATH: Oracle Client or Instant Client library dir -# -# PYO_TEST_CONNECT_STRING can be set to an Easy Connect string, or a -# Net Service Name from a tnsnames.ora file or external naming service, -# or it can be the name of a local Oracle database instance. -# -# On Windows set PYO_TEST_ORACLE_CLIENT_PATH if Oracle libraries are not in -# PATH. On macOS set the variable to the Instant Client directory. On Linux do -# not set the variable; instead set LD_LIBRARY_PATH or configure ldconfig -# before running Python. -# -# If oracledb is using Instant Client, then an Easy Connect string is generally -# appropriate. 
The syntax is: -# -# [//]host_name[:port][/service_name][:server_type][/instance_name] -# -# Commonly just the host_name and service_name are needed -# e.g. "localhost/orclpdb1" or "localhost/XEPDB1" -# -# If using a tnsnames.ora file, the file can be in a default -# location such as $ORACLE_HOME/network/admin/tnsnames.ora or -# /etc/tnsnames.ora. Alternatively set the TNS_ADMIN environment -# variable and put the file in $TNS_ADMIN/tnsnames.ora. -# -# The administrative user for cloud databases is ADMIN and the administrative -# user for on premises databases is SYSTEM. -# ----------------------------------------------------------------------------- - -import getpass -import importlib -import os -import platform -import secrets -import sys -import string -import unittest - -import oracledb - -# default values -DEFAULT_MAIN_USER = "pythontest" -DEFAULT_PROXY_USER = "pythontestproxy" -DEFAULT_CONNECT_STRING = "localhost/orclpdb1" -DEFAULT_EDITION_NAME = "pythonedition" - -# dictionary containing all parameters; these are acquired as needed by the -# methods below (which should be used instead of consulting this dictionary -# directly) and then stored so that a value is not requested more than once -PARAMETERS = {} - - -def _initialize(): - """ - Performs initialization of the test environment. This ensures the desired - mode is set and imports any required plugins. - """ - if PARAMETERS.get("INITIALIZED"): - return - if run_in_thick_mode() and oracledb.is_thin_mode(): - oracledb.init_oracle_client(lib_dir=get_oracle_client()) - oracledb.defaults.thick_mode_dsn_passthrough = False - plugin_names = os.environ.get("PYO_TEST_PLUGINS") - if plugin_names is not None: - for name in plugin_names.split(","): - module_name = f"oracledb.plugins.{name}" - print("importing module", module_name) - importlib.import_module(module_name) - PARAMETERS["INITIALIZED"] = True - - -def get_value(name, label, default_value=None, password=False): - try: - return PARAMETERS[name] - except KeyError: - pass - env_name = "PYO_TEST_" + name - value = os.environ.get(env_name) - if value is None: - if default_value is not None: - label += " [%s]" % default_value - label += ": " - if password: - value = getpass.getpass(label) - else: - value = input(label).strip() - if not value: - value = default_value - PARAMETERS[name] = value - return value - - -def get_admin_connection(use_async=False): - _initialize() - admin_user = get_value("ADMIN_USER", "Administrative user", "admin") - admin_password = get_value( - "ADMIN_PASSWORD", f"Password for {admin_user}", password=True - ) - params = get_connect_params() - if admin_user and admin_user.upper() == "SYS": - params = params.copy() - params.set(mode=oracledb.AUTH_MODE_SYSDBA) - method = oracledb.connect_async if use_async else oracledb.connect - return method( - dsn=get_connect_string(), - params=params, - user=admin_user, - password=admin_password, - ) - - -async def get_admin_connection_async(use_async=False): - return await get_admin_connection(use_async=True) - - -def get_charset(): - value = PARAMETERS.get("CHARSET") - if value is None: - with get_connection() as conn: - with conn.cursor() as cursor: - cursor.execute( - """ - select value - from nls_database_parameters - where parameter = 'NLS_CHARACTERSET' - """ - ) - (value,) = cursor.fetchone() - PARAMETERS["CHARSET"] = value - return value - - -async def get_charset_async(): - value = PARAMETERS.get("CHARSET") - if value is None: - async with get_connection_async() as conn: - with conn.cursor() as cursor: - await 
cursor.execute( - """ - select value - from nls_database_parameters - where parameter = 'NLS_CHARACTERSET' - """ - ) - (value,) = await cursor.fetchone() - PARAMETERS["CHARSET"] = value - return value - - -def get_charset_ratios(): - value = PARAMETERS.get("CS_RATIO") - if value is None: - connection = get_connection() - cursor = connection.cursor() - cursor.execute( - """ - select - cast('X' as varchar2(1)), - cast('Y' as nvarchar2(1)) - from dual - """ - ) - varchar_column_info, nvarchar_column_info = cursor.description - value = (varchar_column_info[3], nvarchar_column_info[3]) - PARAMETERS["CS_RATIO"] = value - return value - - -async def get_charset_ratios_async(): - value = PARAMETERS.get("CS_RATIO") - if value is None: - connection = await get_connection_async() - cursor = connection.cursor() - await cursor.execute( - """ - select - cast('X' as varchar2(1)), - cast('Y' as nvarchar2(1)) - from dual - """ - ) - varchar_column_info, nvarchar_column_info = cursor.description - value = (varchar_column_info[3], nvarchar_column_info[3]) - PARAMETERS["CS_RATIO"] = value - return value - - -def get_client_version(): - name = "CLIENT_VERSION" - value = PARAMETERS.get(name) - if value is None: - _initialize() - value = oracledb.clientversion()[:2] - PARAMETERS[name] = value - return value - - -def get_oracle_client(): - if platform.system() == "Darwin" or platform.system() == "Windows": - return get_value("ORACLE_CLIENT_PATH", "Oracle Instant Client Path") - - -def get_connect_params(): - wallet_location = get_wallet_location() - return oracledb.ConnectParams( - user=get_main_user(), - password=get_main_password(), - config_dir=wallet_location, - wallet_location=wallet_location, - wallet_password=get_wallet_password(), - disable_oob=True, - ) - - -def get_connection(dsn=None, use_async=False, **kwargs): - _initialize() - if dsn is None: - dsn = get_connect_string() - method = oracledb.connect_async if use_async else oracledb.connect - return method(dsn=dsn, params=get_connect_params(), **kwargs) - - -def get_connection_async(dsn=None, **kwargs): - return get_connection(dsn, use_async=True, **kwargs) - - -def get_connect_string(): - return get_value( - "CONNECT_STRING", "Connect String", DEFAULT_CONNECT_STRING - ) - - -def get_edition_name(): - return get_value("EDITION_NAME", "Edition Name", DEFAULT_EDITION_NAME) - - -def get_is_drcp(): - value = PARAMETERS.get("IS_DRCP") - if value is None: - params = oracledb.ConnectParams() - params.parse_connect_string(get_connect_string()) - server_type = params.server_type - value = ( - server_type == "pooled" - or isinstance(server_type, list) - and "pooled" in server_type - ) - PARAMETERS["IS_DRCP"] = value - return value - - -def get_is_implicit_pooling(): - value = PARAMETERS.get("IS_IMPLICIT_POOLING") - if value is None: - if not get_is_drcp(): - value = False - else: - params = oracledb.ConnectParams() - params.parse_connect_string(get_connect_string()) - pool_boundary = params.pool_boundary - value = ( - pool_boundary is not None - or isinstance(pool_boundary, list) - and [s for s in pool_boundary if s] - ) - PARAMETERS["IS_IMPLICIT_POOLING"] = value - return value - - -def get_main_password(): - return get_value( - "MAIN_PASSWORD", f"Password for {get_main_user()}", password=True - ) - - -def get_main_user(): - return get_value("MAIN_USER", "Main User Name", DEFAULT_MAIN_USER) - - -def get_pool(use_async=False, **kwargs): - _initialize() - method = oracledb.create_pool_async if use_async else oracledb.create_pool - return 
method(dsn=get_connect_string(), params=get_pool_params(), **kwargs) - - -def get_pool_async(**kwargs): - return get_pool(use_async=True, **kwargs) - - -def get_pool_params(): - wallet_location = get_wallet_location() - return oracledb.PoolParams( - user=get_main_user(), - password=get_main_password(), - config_dir=wallet_location, - wallet_location=wallet_location, - wallet_password=get_wallet_password(), - ) - - -def get_proxy_password(): - return get_value( - "PROXY_PASSWORD", f"Password for {get_proxy_user()}", password=True - ) - - -def get_proxy_user(): - return get_value("PROXY_USER", "Proxy User Name", DEFAULT_PROXY_USER) - - -def get_sleep_proc_name(): - if not has_server_version(18): - return "dbms_lock.sleep" - return "dbms_session.sleep" - - -def get_server_version(): - name = "SERVER_VERSION" - value = PARAMETERS.get(name) - if value is None: - conn = get_connection() - value = tuple(int(s) for s in conn.version.split("."))[:2] - PARAMETERS[name] = value - return value - - -async def get_server_version_async(): - name = "SERVER_VERSION" - value = PARAMETERS.get(name) - if value is None: - async with await get_connection_async() as conn: - value = tuple(int(s) for s in conn.version.split("."))[:2] - PARAMETERS[name] = value - return value - - -def get_wallet_location(): - if not run_in_thick_mode(): - return get_value("WALLET_LOCATION", "Wallet Location") - - -def get_wallet_password(): - if not run_in_thick_mode(): - return get_value("WALLET_PASSWORD", "Wallet Password", password=True) - - -def get_external_user(): - if run_in_thick_mode(): - return get_value("EXTERNAL_USER", "External User") - - -def get_random_string(length=10): - return "".join(secrets.choice(string.ascii_letters) for i in range(length)) - - -def has_client_version(major_version, minor_version=0): - if not run_in_thick_mode(): - return True - return get_client_version() >= (major_version, minor_version) - - -def has_server_version(major_version, minor_version=0): - return get_server_version() >= (major_version, minor_version) - - -async def has_server_version_async(major_version, minor_version=0): - await get_server_version_async() - return has_server_version(major_version, minor_version) - - -def is_on_oracle_cloud(connection): - if not has_server_version(18): - return False - cursor = connection.cursor() - cursor.execute( - """ - select sys_context('userenv', 'cloud_service') - from dual - """ - ) - (service_name,) = cursor.fetchone() - return service_name is not None - - -async def is_on_oracle_cloud_async(connection): - if not await has_server_version_async(18): - return False - cursor = connection.cursor() - await cursor.execute( - """ - select sys_context('userenv', 'cloud_service') - from dual - """ - ) - (service_name,) = await cursor.fetchone() - return service_name is not None - - -def run_in_thick_mode(): - driver_mode = get_value("DRIVER_MODE", "Driver mode (thin|thick)", "thin") - return driver_mode != "thin" - - -def run_sql_script(conn, script_name, **kwargs): - statement_parts = [] - cursor = conn.cursor() - replace_values = [("&" + k + ".", v) for k, v in kwargs.items()] + [ - ("&" + k, v) for k, v in kwargs.items() - ] - script_dir = os.path.dirname(os.path.abspath(sys.argv[0])) - file_name = os.path.join(script_dir, "sql", script_name + ".sql") - for line in open(file_name): - if line.strip() == "/": - statement = "".join(statement_parts).strip() - if statement: - for search_value, replace_value in replace_values: - statement = statement.replace(search_value, replace_value) - try: - 
cursor.execute(statement) - except: - print("Failed to execute SQL:", statement) - raise - statement_parts = [] - else: - statement_parts.append(line) - cursor.execute( - """ - select name, type, line, position, text - from dba_errors - where owner = upper(:owner) - order by name, type, line, position - """, - owner=get_main_user(), - ) - prev_name = prev_obj_type = None - for name, obj_type, line_num, position, text in cursor: - if name != prev_name or obj_type != prev_obj_type: - print("%s (%s)" % (name, obj_type)) - prev_name = name - prev_obj_type = obj_type - print(" %s/%s %s" % (line_num, position, text)) - - -def run_test_cases(): - run_in_thick_mode() - unittest.main(testRunner=unittest.TextTestRunner(verbosity=2)) - - -def skip_soda_tests(): - if not run_in_thick_mode(): - return True - if not has_client_version(18, 3): - return True - if not has_server_version(18): - return True - if has_server_version(20, 1) and not has_client_version(20, 1): - return True - if has_client_version(23, 3) and platform.system() == "Darwin": - return True - return False - - -def skip_if_drcp(): - return unittest.skipIf(get_is_drcp(), "not supported with DRCP") - - -def skip_if_implicit_pooling(): - return unittest.skipIf( - get_is_implicit_pooling(), "not supported with implicit pooling" - ) - - -def skip_unless_binary_vectors_supported(): - supported = has_client_version(23, 5) and has_server_version(23, 5) - return unittest.skipUnless(supported, "no binary vector support") - - -def skip_unless_call_timeout_supported(): - supported = has_client_version(18) - return unittest.skipUnless(supported, "no call timeout support") - - -def skip_unless_domains_supported(): - supported = has_server_version(23) - return unittest.skipUnless(supported, "no domain support") - - -def skip_unless_json_supported(): - supported = has_client_version(12, 2) and has_server_version(12, 2) - return unittest.skipUnless(supported, "no JSON support") - - -def skip_unless_long_passwords_supported(): - supported = has_server_version(23) - return unittest.skipUnless(supported, "no long password support") - - -def skip_unless_native_boolean_supported(): - supported = has_client_version(23) and has_server_version(23) - return unittest.skipUnless(supported, "no native boolean support") - - -def skip_unless_native_json_extensions_supported(): - supported = has_client_version(23) and has_server_version(23) - return unittest.skipUnless(supported, "no native JSON extensions support") - - -def skip_unless_native_json_supported(): - supported = has_client_version(21) and has_server_version(21) - return unittest.skipUnless(supported, "no native JSON support") - - -def skip_unless_plsql_boolean_supported(): - supported = has_client_version(12, 1) and has_server_version(12, 1) - return unittest.skipUnless(supported, "no PL/SQL boolean support") - - -def skip_unless_pool_timed_wait_supported(): - supported = has_client_version(12, 2) and has_server_version(12, 2) - return unittest.skipUnless(supported, "no pool timed wait support") - - -def skip_unless_sessionless_transactions_supported(): - return unittest.skipUnless( - has_client_version(23, 6) and has_server_version(23, 6), - "no sessionless transactions support", - ) - - -def skip_unless_sparse_vectors_supported(): - supported = has_client_version(23, 7) and has_server_version(23, 7) - return unittest.skipUnless(supported, "no sparse vector support") - - -def skip_unless_thick_mode(): - return unittest.skipUnless(run_in_thick_mode(), "requires thick mode") - - -def 
skip_unless_thin_mode(): - return unittest.skipIf(run_in_thick_mode(), "requires thin mode") - - -def skip_unless_vectors_supported(): - supported = has_client_version(23, 4) and has_server_version(23, 4) - return unittest.skipUnless(supported, "no vector support") - - -class DefaultsContextManager: - def __init__(self, attribute, desired_value): - self.attribute = attribute - self.desired_value = desired_value - - def __enter__(self): - self.original_value = getattr(oracledb.defaults, self.attribute) - setattr(oracledb.defaults, self.attribute, self.desired_value) - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - setattr(oracledb.defaults, self.attribute, self.original_value) - - -class SystemStatInfo: - get_sid_sql = "select sys_context('userenv', 'sid') from dual" - get_stat_sql = """ - select ss.value - from v$sesstat ss, v$statname sn - where ss.sid = :sid - and ss.statistic# = sn.statistic# - and sn.name = :stat_name - """ - stat_name = None - - def _initialize(self, connection): - self.prev_value = 0 - self.admin_conn = get_admin_connection() - with connection.cursor() as cursor: - cursor.execute(self.get_sid_sql) - (self.sid,) = cursor.fetchone() - self.get_value() - - async def _initialize_async(self, connection): - self.prev_value = 0 - self.admin_conn = await get_admin_connection_async() - with connection.cursor() as cursor: - await cursor.execute(self.get_sid_sql) - (self.sid,) = await cursor.fetchone() - await self.get_value_async() - - def get_value(self): - with self.admin_conn.cursor() as cursor: - cursor.execute( - self.get_stat_sql, sid=self.sid, stat_name=self.stat_name - ) - (current_value,) = cursor.fetchone() - diff_value = current_value - self.prev_value - self.prev_value = current_value - return diff_value - - async def get_value_async(self): - with self.admin_conn.cursor() as cursor: - await cursor.execute( - self.get_stat_sql, sid=self.sid, stat_name=self.stat_name - ) - (current_value,) = await cursor.fetchone() - diff_value = current_value - self.prev_value - self.prev_value = current_value - return diff_value - - -class RoundTripInfo(SystemStatInfo): - stat_name = "SQL*Net roundtrips to/from client" - - -class ParseCountInfo(SystemStatInfo): - stat_name = "parse count (total)" - - -class FullCodeErrorContextManager: - - def __init__(self, full_codes): - self.full_codes = full_codes - if len(full_codes) == 1: - self.message_fragment = f'Error "{full_codes[0]}"' - else: - message_fragment = ", ".join(f'"{s}"' for s in full_codes[:-1]) - message_fragment += f' or "{full_codes[-1]}"' - self.message_fragment = f"One of the errors {message_fragment}" - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, tb): - if exc_type is None: - raise AssertionError(f"{self.message_fragment} was not raised.") - if not issubclass(exc_type, oracledb.Error): - return False - if issubclass(exc_type, oracledb.Error): - self.error_obj = exc_value.args[0] - if self.error_obj.full_code not in self.full_codes: - message = ( - f"{self.message_fragment} should have been raised but " - f'"{self.error_obj.full_code}" was raised instead.' 
- ) - raise AssertionError(message) - return True - - -class BaseTestCase(unittest.TestCase): - requires_connection = True - - def assertParseCount(self, n): - self.assertEqual(self.parse_count_info.get_value(), n) - - def assertRaisesFullCode(self, *full_codes): - return FullCodeErrorContextManager(full_codes) - - def assertRoundTrips(self, n): - self.assertEqual(self.round_trip_info.get_value(), n) - - def get_and_clear_queue( - self, - queue_name, - payload_type=None, - message="not supported with this client/server combination", - ): - if payload_type == "JSON": - if not has_client_version(21) or not has_server_version(21): - self.skipTest(message) - elif isinstance(payload_type, str): - payload_type = self.conn.gettype(payload_type) - queue = self.conn.queue(queue_name, payload_type) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - while queue.deqone(): - pass - return self.conn.queue(queue_name, payload_type) - - def get_db_object_as_plain_object(self, obj): - if obj.type.iscollection: - element_values = [] - for value in obj.aslist(): - if isinstance(value, oracledb.DbObject): - value = self.get_db_object_as_plain_object(value) - elif isinstance(value, oracledb.LOB): - value = value.read() - element_values.append(value) - return element_values - attr_values = [] - for attribute in obj.type.attributes: - value = getattr(obj, attribute.name) - if isinstance(value, oracledb.DbObject): - value = self.get_db_object_as_plain_object(value) - elif isinstance(value, oracledb.LOB): - value = value.read() - attr_values.append(value) - return tuple(attr_values) - - def get_sid_serial(self, conn=None): - """ - Returns the sid and serial number of the connection as a 2-tuple. 
- """ - if conn is None: - conn = self.conn - if not run_in_thick_mode(): - return (conn.session_id, conn.serial_num) - else: - with conn.cursor() as cursor: - cursor.execute( - """ - select - dbms_debug_jdwp.current_session_id, - dbms_debug_jdwp.current_session_serial - from dual - """ - ) - return cursor.fetchone() - - def get_soda_database( - self, - minclient=None, - minserver=None, - message="not supported with this client/server combination", - drop_collections=True, - ): - if minclient is not None and not has_client_version(*minclient): - self.skipTest(message) - if minserver is not None and not has_server_version(*minserver): - self.skipTest(message) - soda_db = self.conn.getSodaDatabase() - if drop_collections: - for name in soda_db.getCollectionNames(): - soda_db.openCollection(name).drop() - return soda_db - - def is_on_oracle_cloud(self, connection=None): - if connection is None: - connection = self.conn - return is_on_oracle_cloud(connection) - - def setUp(self): - if self.requires_connection: - self.conn = get_connection() - self.cursor = self.conn.cursor() - self.cursor.execute("alter session set time_zone = '+00:00'") - - def setup_parse_count_checker(self, conn=None): - if get_is_implicit_pooling(): - self.skipTest("sessions can change with implicit pooling") - self.parse_count_info = ParseCountInfo() - self.parse_count_info._initialize(conn or self.conn) - - def setup_round_trip_checker(self, conn=None): - if get_is_implicit_pooling(): - self.skipTest("sessions can change with implicit pooling") - self.round_trip_info = RoundTripInfo() - self.round_trip_info._initialize(conn or self.conn) - - def tearDown(self): - if self.requires_connection: - self.conn.close() - del self.cursor - del self.conn - - -class BaseAsyncTestCase(unittest.IsolatedAsyncioTestCase): - requires_connection = True - - async def assertParseCount(self, n): - self.assertEqual(await self.parse_count_info.get_value_async(), n) - - def assertRaisesFullCode(self, *full_codes): - return FullCodeErrorContextManager(full_codes) - - async def assertRoundTrips(self, n): - self.assertEqual(await self.round_trip_info.get_value_async(), n) - - async def asyncSetUp(self): - if self.requires_connection: - self.conn = await get_connection_async() - self.cursor = self.conn.cursor() - await self.cursor.execute("alter session set time_zone = '+00:00'") - - async def asyncTearDown(self): - if self.requires_connection: - await self.conn.close() - del self.cursor - del self.conn - - async def get_and_clear_queue( - self, - queue_name, - payload_type=None, - message="not supported with this client/server combination", - ): - if payload_type == "JSON": - if not has_server_version(21): - self.skipTest(message) - elif isinstance(payload_type, str): - payload_type = await self.conn.gettype(payload_type) - queue = self.conn.queue(queue_name, payload_type) - queue.deqoptions.wait = oracledb.DEQ_NO_WAIT - queue.deqoptions.deliverymode = oracledb.MSG_PERSISTENT_OR_BUFFERED - queue.deqoptions.visibility = oracledb.DEQ_IMMEDIATE - while await queue.deqone(): - pass - return self.conn.queue(queue_name, payload_type) - - async def get_db_object_as_plain_object(self, obj): - if obj.type.iscollection: - element_values = [] - for value in obj.aslist(): - if isinstance(value, oracledb.DbObject): - value = await self.get_db_object_as_plain_object(value) - elif isinstance(value, oracledb.AsyncLOB): - value = await value.read() - element_values.append(value) - return element_values - attr_values = [] - for attribute in obj.type.attributes: - 
value = getattr(obj, attribute.name) - if isinstance(value, oracledb.DbObject): - value = await self.get_db_object_as_plain_object(value) - elif isinstance(value, oracledb.AsyncLOB): - value = await value.read() - attr_values.append(value) - return tuple(attr_values) - - async def get_sid_serial(self, conn=None): - """ - Returns the sid and serial number of the connection as a 2-tuple. - """ - if conn is None: - conn = self.conn - return (conn.session_id, conn.serial_num) - - async def is_on_oracle_cloud(self, connection=None): - if connection is None: - connection = self.conn - return await is_on_oracle_cloud_async(connection) - - async def setup_parse_count_checker(self, conn=None): - if get_is_implicit_pooling(): - self.skipTest("sessions can change with implicit pooling") - self.parse_count_info = ParseCountInfo() - await self.parse_count_info._initialize_async(conn or self.conn) - - async def setup_round_trip_checker(self, conn=None): - if get_is_implicit_pooling(): - self.skipTest("sessions can change with implicit pooling") - self.round_trip_info = RoundTripInfo() - await self.round_trip_info._initialize_async(conn or self.conn) diff --git a/tox.ini b/tox.ini index 363a61d6..cda48460 100644 --- a/tox.ini +++ b/tox.ini @@ -2,7 +2,6 @@ envlist = py{39,310,311,312,313,314}-{thin,thick} [testenv] -commands = {envpython} -m unittest discover -v -s tests extras = test passenv = PYO_TEST_MAIN_USER @@ -21,9 +20,7 @@ passenv = ORACLE_HOME [testenv:py{39,310,311,312,313,314}-thick] -setenv = - PYO_TEST_DRIVER_MODE=thick +commands = {envpython} -m pytest --use-thick-mode [testenv:py{39,310,311,312,313,314}-thin] -setenv = - PYO_TEST_DRIVER_MODE=thin +commands = {envpython} -m pytest From 9f59b228df7c5745ba392ca42f829063c6e46d3c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:43:16 -0600 Subject: [PATCH 207/239] Added a batch_size parameter to executemany() to let it operate on data in batches. --- doc/src/api_manual/async_cursor.rst | 4 + doc/src/api_manual/cursor.rst | 4 + doc/src/release_notes.rst | 4 + doc/src/user_guide/batch_statement.rst | 43 ++- src/oracledb/base_impl.pxd | 40 ++- src/oracledb/base_impl.pyx | 3 + src/oracledb/cursor.py | 45 ++- src/oracledb/impl/arrow/dataframe.pyx | 19 +- src/oracledb/impl/base/batch_load_manager.pyx | 284 ++++++++++++++++++ src/oracledb/impl/base/bind_var.pyx | 18 -- src/oracledb/impl/base/cursor.pyx | 98 ++---- src/oracledb/impl/base/var.pyx | 3 +- src/oracledb/impl/thick/cursor.pyx | 3 +- src/oracledb/impl/thick/var.pyx | 17 +- src/oracledb/impl/thin/connection.pyx | 13 +- src/oracledb/impl/thin/cursor.pyx | 10 +- tests/test_4000_cursor_executemany.py | 31 ++ tests/test_6100_cursor_executemany_async.py | 33 ++ tests/test_8700_sessionless_transaction.py | 18 ++ ...test_8800_sessionless_transaction_async.py | 18 ++ tests/test_8900_dataframe_ingestion.py | 64 ++++ tests/test_9000_dataframe_ingestion_async.py | 72 +++++ 22 files changed, 697 insertions(+), 147 deletions(-) create mode 100644 src/oracledb/impl/base/batch_load_manager.pyx diff --git a/doc/src/api_manual/async_cursor.rst b/doc/src/api_manual/async_cursor.rst index 7f53ab79..88bb2415 100644 --- a/doc/src/api_manual/async_cursor.rst +++ b/doc/src/api_manual/async_cursor.rst @@ -77,6 +77,10 @@ AsyncCursor Methods .. automethod:: AsyncCursor.executemany + .. versionchanged:: 3.4.0 + + The ``batch_size`` parameter was added. + .. versionchanged:: 3.3.0 Added support for passing data frames in the ``parameters`` parameter. 
diff --git a/doc/src/api_manual/cursor.rst b/doc/src/api_manual/cursor.rst index 68f64bd0..ac8669a2 100644 --- a/doc/src/api_manual/cursor.rst +++ b/doc/src/api_manual/cursor.rst @@ -83,6 +83,10 @@ Cursor Methods .. automethod:: Cursor.executemany + .. versionchanged:: 3.4.0 + + The ``batch_size`` parameter was added. + .. versionchanged:: 3.3.0 Added support for passing data frames in the ``parameters`` parameter. diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 3abde50f..263f90b0 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -47,6 +47,10 @@ Common Changes #) Added support for types ``date32`` and ``date64`` when ingesting data frames supporting the Arrow PyCapsule interface as requested (`issue 535 `__). +#) Added a ``batch_size`` parameter to :meth:`Cursor.executemany()` and + :meth:`AsyncCursor.executemany()` to let these methods operate on data in + batches. +#) Data frames with multiple chunks are now supported. #) Added ``fetch_lobs`` and ``fetch_decimals`` parameters where applicable to the methods used for fetching rows or data frames from the database. Note that for the creation of pipeline operations, if these parameters are not diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index 76d5038e..82dab353 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -70,12 +70,7 @@ Each tuple value maps to one of the bind variable placeholders. This code requires only one :ref:`round-trip ` from the client to the database instead of the five round-trips that would be required for -repeated calls to :meth:`~Cursor.execute()`. For very large data sets, there -may be an external buffer or network limits to how many rows can be processed, -so repeated calls to ``executemany()`` may be required. The limits are based -on both the number of rows being processed as well as the "size" of each row -that is being processed. Repeated calls to :meth:`~Cursor.executemany()` are -still better than repeated calls to :meth:`~Cursor.execute()`. +repeated calls to :meth:`~Cursor.execute()`. To insert a single column, make sure the bind variables are correctly created as tuples, for example: @@ -173,6 +168,38 @@ With named bind variables, use named parameters when calling values (:pid, :pdesc)""", data) +Batching of Large Datasets +-------------------------- + +For very large data sets, there may be a buffer or network limit on how many +rows can be processed. The limit is based on both the number of records as +well as the size of each record that is being processed. In other cases, it may +be faster to process smaller sets of records. + +To reduce the data sizes involved, you can either make repeated calls to +:meth:`~Cursor.executemany()` as shown later in the CSV examples, or you can +use the ``batch_size`` parameter to optimize transfer across the network to the +database. For example: + +.. code-block:: python + + data = [ + (1, "Parent 1"), + (2, "Parent 2"), + . . . + (9_999_999, "Parent 9,999,999"), + (10_000_000, "Parent 10,000,000"), + + ] + + cursor.executemany("insert into ParentTable values (:1, :2)", data, batch_size=200_000) + +This will send the data to the database in batches of 200,000 records until all +10,000,000 records have been inserted. + +If :attr:`Connection.autocommit` is ``True``, then a commit will take place per +batch of records processed. + .. 
_batchplsql: Batch Execution of PL/SQL @@ -446,8 +473,8 @@ And the schema: create table test (id number, name varchar2(25)); -Data loading can be done in batches of records since the number of records may -prevent all data being inserted at once: +Data loading can be done in batches of records since Python memory limitations +may prevent all the records being held in memory at once: .. code-block:: python diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 78b9012d..a839d77f 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -43,7 +43,8 @@ from .arrow_impl cimport ( ArrowTimeUnit, ArrowType, ArrowArrayImpl, - ArrowSchemaImpl + ArrowSchemaImpl, + DataFrameImpl, ) cdef enum: @@ -263,6 +264,30 @@ cdef class DefaultsImpl: cdef DefaultsImpl C_DEFAULTS +cdef class BatchLoadManager: + cdef: + readonly uint32_t num_rows + readonly uint64_t message_offset + uint64_t offset + BaseCursorImpl cursor_impl + uint32_t batch_size + uint32_t batch_num + object type_handler + object cursor + object conn + + cdef int _calculate_num_rows_in_batch(self, uint64_t total_rows) except -1 + cdef int _next_batch(self) except -1 + cdef int _setup_cursor(self) except -1 + @staticmethod + cdef BatchLoadManager create_for_executemany( + object cursor, + BaseCursorImpl cursor_impl, + object parameters, + uint32_t batch_size, + ) + + cdef class Buffer: cdef: ssize_t _max_size, _size, _pos @@ -689,7 +714,6 @@ cdef class BaseCursorImpl: object params, uint32_t num_rows, uint32_t row_num, bint defer_type_assignment) except -1 - cdef int _check_binds(self, uint32_t num_execs) except -1 cdef int _close(self, bint in_del) except -1 cdef BaseVarImpl _create_fetch_var(self, object conn, object cursor, object type_handler, bint @@ -706,10 +730,9 @@ cdef class BaseCursorImpl: cdef int _perform_binds(self, object conn, uint32_t num_execs) except -1 cdef int _prepare(self, str statement, str tag, bint cache_statement) except -1 - cdef int _reset_bind_vars(self, uint32_t num_rows) except -1 + cdef int _reset_bind_vars(self, uint64_t array_offset, + uint32_t num_rows) except -1 cdef int _verify_var(self, object var) except -1 - cdef object bind_arrow_arrays(self, object cursor, list arrays) - cdef int bind_many(self, object cursor, list parameters) except -1 cdef int bind_one(self, object cursor, object parameters) except -1 cdef object _finish_building_arrow_arrays(self) cdef int _create_arrow_arrays(self) except -1 @@ -749,7 +772,8 @@ cdef class BaseVarImpl: cdef DbType _get_adjusted_type(self, uint8_t ora_type_num) cdef list _get_array_value(self) cdef object _get_scalar_value(self, uint32_t pos) - cdef int _on_reset_bind(self, uint32_t num_rows) except -1 + cdef int _on_reset_bind(self, uint64_t array_offset, + uint32_t num_rows) except -1 cdef int _resize(self, uint32_t new_size) except -1 cdef int _set_metadata_from_type(self, object typ) except -1 cdef int _set_metadata_from_value(self, object value, @@ -857,9 +881,6 @@ cdef class BindVar: ssize_t pos bint has_value - cdef int _create_var_from_arrow_array(self, object conn, - BaseCursorImpl cursor_impl, - ArrowArrayImpl array) except -1 cdef int _create_var_from_type(self, object conn, BaseCursorImpl cursor_impl, object value) except -1 @@ -907,6 +928,7 @@ cdef class PipelineOpImpl: readonly uint8_t op_type readonly bint fetch_lobs readonly bint fetch_decimals + BatchLoadManager batch_load_manager uint32_t num_execs diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index bcf9e9dd..0efec02e 100644 --- 
a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -82,6 +82,8 @@ from .arrow_impl cimport ( import array import base64 +import collections +import copy import copy import datetime import decimal @@ -169,6 +171,7 @@ include "impl/base/pool.pyx" include "impl/base/cursor.pyx" include "impl/base/var.pyx" include "impl/base/bind_var.pyx" +include "impl/base/batch_load_manager.pyx" include "impl/base/dbobject.pyx" include "impl/base/lob.pyx" include "impl/base/soda.pyx" diff --git a/src/oracledb/cursor.py b/src/oracledb/cursor.py index 0cb53479..2172e9a7 100644 --- a/src/oracledb/cursor.py +++ b/src/oracledb/cursor.py @@ -851,9 +851,11 @@ def executemany( self, statement: Optional[str], parameters: Any, + *, batcherrors: bool = False, arraydmlrowcounts: bool = False, suspend_on_success: bool = False, + batch_size: int = 2**32 - 1, ) -> None: """ Executes a SQL statement once using all bind value mappings or @@ -900,6 +902,12 @@ def executemany( sessionless transaction will be suspended when ``executemany()`` completes successfully. See :ref:`suspendtxns`. + The ``batch_size`` parameter is used to split large data sets into + smaller pieces for sending to the database. It is the number of records + in each batch. This parameter can be used to tune performance. When + ``Connection.autocommit`` is *True*, a commit will take place for each + batch. + For maximum efficiency, it is best to use the :meth:`setinputsizes()` method to specify the bind value types and sizes. In particular, if the type is not explicitly specified, the value *None* is assumed to be a @@ -907,14 +915,22 @@ def executemany( dates will raise a TypeError exception. """ self._verify_open() - num_execs = self._impl._prepare_for_executemany( - self, self._normalize_statement(statement), parameters + manager = self._impl._prepare_for_executemany( + self, + self._normalize_statement(statement), + parameters, + batch_size, ) self._impl.suspend_on_success = suspend_on_success - if num_execs > 0: + while manager.num_rows > 0: self._impl.executemany( - self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) + self, + manager.num_rows, + batcherrors, + arraydmlrowcounts, + manager.message_offset, ) + manager.next_batch() def fetchall(self) -> list: """ @@ -1188,9 +1204,11 @@ async def executemany( self, statement: Optional[str], parameters: Any, + *, batcherrors: bool = False, arraydmlrowcounts: bool = False, suspend_on_success: bool = False, + batch_size: int = 2**32 - 1, ) -> None: """ Executes a SQL statement once using all bind value mappings or @@ -1236,6 +1254,12 @@ async def executemany( sessionless transaction will be suspended when ``executemany()`` completes successfully. See :ref:`suspendtxns`. + The ``batch_size`` parameter is used to split large data sets into + smaller pieces for sending to the database. It is the number of records + in each batch. This parameter can be used to tune performance. When + ``Connection.autocommit`` is *True*, a commit will take place for each + batch. Do not set ``batch_size`` when ``suspend_on_success`` is *True*. + For maximum efficiency, it is best to use the :meth:`setinputsizes()` method to specify the parameter types and sizes ahead of time. In particular, the value *None* is assumed to be a string of length 1 so @@ -1243,14 +1267,19 @@ async def executemany( TypeError exception. 
""" self._verify_open() - num_execs = self._impl._prepare_for_executemany( - self, self._normalize_statement(statement), parameters + manager = self._impl._prepare_for_executemany( + self, self._normalize_statement(statement), parameters, batch_size ) self._impl.suspend_on_success = suspend_on_success - if num_execs > 0: + while manager.num_rows > 0: await self._impl.executemany( - self, num_execs, bool(batcherrors), bool(arraydmlrowcounts) + self, + manager.num_rows, + batcherrors, + arraydmlrowcounts, + manager.message_offset, ) + manager.next_batch() async def fetchall(self) -> list: """ diff --git a/src/oracledb/impl/arrow/dataframe.pyx b/src/oracledb/impl/arrow/dataframe.pyx index ebf853fc..837b621d 100644 --- a/src/oracledb/impl/arrow/dataframe.pyx +++ b/src/oracledb/impl/arrow/dataframe.pyx @@ -62,15 +62,16 @@ cdef class DataFrameImpl: df_impl.schema_impls.append(schema_impl) # populate list of arrays - _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) - for i in range(arrow_schema.n_children): - array_impl = ArrowArrayImpl.__new__(ArrowArrayImpl) - array_impl.populate_from_array(df_impl.schema_impls[i], - arrow_array.children[i]) - df_impl.arrays.append(array_impl) - _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) - if arrow_array.release != NULL: - raise NotImplementedError("multiple chunks not supported") + while True: + _check_nanoarrow(arrow_stream.get_next(arrow_stream, &arrow_array)) + if arrow_array.release == NULL: + break + for i in range(arrow_schema.n_children): + array_impl = ArrowArrayImpl.__new__(ArrowArrayImpl) + array_impl.populate_from_array(df_impl.schema_impls[i], + arrow_array.children[i]) + df_impl.arrays.append(array_impl) + ArrowArrayStreamRelease(arrow_stream) return df_impl diff --git a/src/oracledb/impl/base/batch_load_manager.pyx b/src/oracledb/impl/base/batch_load_manager.pyx new file mode 100644 index 00000000..3ed57ebb --- /dev/null +++ b/src/oracledb/impl/base/batch_load_manager.pyx @@ -0,0 +1,284 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# batch_load_manager.pyx +# +# Cython file defining the BatchLoadManager implementation class (embedded in +# base_impl.pyx). 
+#------------------------------------------------------------------------------ + +cdef class BatchLoadManager: + + cdef int _calculate_num_rows_in_batch(self, uint64_t total_rows) except -1: + """ + Calculates the total number of rows to put in the next batch. + """ + cdef uint64_t rows_remaining = total_rows - self.offset + self.num_rows = min(rows_remaining, self.batch_size) + + cdef int _next_batch(self) except -1: + """ + Goes to the next batch in the set of data, if applicable. + """ + raise NotImplementedError() + + cdef int _setup_cursor(self) except -1: + """ + Called after the manager has been populated and helps set up the cursor + if one is being used. + """ + pass + + @staticmethod + cdef BatchLoadManager create_for_executemany( + object cursor, + BaseCursorImpl cursor_impl, + object parameters, + uint32_t batch_size, + ): + """ + Creates a batch load manager object for calling cursor.executemany() + with the given parameters. This allows splitting large source arrays + into multiple chunks and also supports data frames with multiple + chunks. + """ + cdef: + BatchLoadManager manager + DataFrameImpl df_impl + + # batch size must be a positive integer + if batch_size == 0: + raise TypeError("batch_size must be a positive integer") + + # create and populate manager object + # if parameters are an instance, the value refers to the number of + # times to execute the statement + if isinstance(parameters, int): + manager = PrePopulatedBatchLoadManager.create(parameters) + + # if parameters are a list, the value refers to the actual data that is + # to be loaded + elif isinstance(parameters, list): + manager = FullDataBatchLoadManager.create(parameters) + + # if parameters are an Oracle dataframe we can use it directly + elif isinstance(parameters, PY_TYPE_DATAFRAME): + manager = DataFrameBatchLoadManager.create(parameters._impl) + + # if parameters implement the Arrow PyCapsule stream interface, convert + # it to an Oracle dataframe for further processing + elif hasattr(parameters, "__arrow_c_stream__"): + df_impl = DataFrameImpl.from_arrow_stream(parameters) + manager = DataFrameBatchLoadManager.create(df_impl) + + # the parameters are of an unknown type + else: + errors._raise_err(errors.ERR_WRONG_EXECUTEMANY_PARAMETERS_TYPE) + + # setup cursor + manager.cursor_impl = cursor_impl + manager.cursor = cursor + manager.conn = cursor.connection + manager.batch_size = batch_size + manager._setup_cursor() + + # setup for first batch + manager._next_batch() + + return manager + + def next_batch(self): + """ + Goes to the next batch in the set of data, if applicable. + """ + self.offset += self.num_rows + self._next_batch() + + +@cython.final +cdef class DataFrameBatchLoadManager(BatchLoadManager): + cdef: + DataFrameImpl df_impl + ssize_t num_chunks + ssize_t chunk_index + ssize_t chunk_num + ssize_t num_cols + int64_t chunk_length + uint64_t num_rows_in_chunk + + cdef int _calculate_num_rows_in_chunk(self) except -1: + """ + Calculates the number of rows in the chunk and stores it for future + use. + """ + cdef: + ArrowArrayImpl array_impl + int64_t num_rows + array_impl = self.df_impl.arrays[self.chunk_index] + array_impl.get_length(&num_rows) + self.num_rows_in_chunk = num_rows + + cdef int _next_batch(self) except -1: + """ + Goes to the next batch of data. 
+ """ + cdef: + ssize_t array_index, num_cols + ArrowArrayImpl array_impl + BindVar bind_var + ssize_t i + self.message_offset = self.offset + self._calculate_num_rows_in_batch(self.num_rows_in_chunk) + if self.num_rows == 0: + self._next_chunk() + if self.num_rows > 0: + for i, bind_var in enumerate(self.cursor_impl.bind_vars): + array_impl = self.df_impl.arrays[self.chunk_index + i] + bind_var.var_impl._arrow_array = array_impl + bind_var.var_impl._on_reset_bind(self.offset, self.num_rows) + + cdef int _next_chunk(self) except -1: + """ + Goes to the next chunk in the list of chunks for the dataframe. + """ + while self.chunk_num + 1 < self.num_chunks: + self.offset = 0 + self.message_offset = 0 + self.chunk_num += 1 + self.chunk_index += self.num_cols + self._calculate_num_rows_in_chunk() + self._calculate_num_rows_in_batch(self.num_rows_in_chunk) + if self.num_rows > 0: + break + + cdef int _setup_cursor(self) except -1: + """ + Called after the manager has been populated and helps set up the cursor + if one is being used. + """ + cdef: + ArrowSchemaImpl schema_impl + BaseVarImpl var_impl + BindVar bind_var + ssize_t i + self.cursor_impl.bind_vars = [] + for i, schema_impl in enumerate(self.df_impl.schema_impls): + bind_var = BindVar.__new__(BindVar) + bind_var.pos = i + 1 + var_impl = self.cursor_impl._create_var_impl(self.conn) + var_impl.metadata = OracleMetadata.from_arrow_schema(schema_impl) + bind_var.var_impl = var_impl + self.cursor_impl.bind_vars.append(bind_var) + + @staticmethod + cdef BatchLoadManager create(DataFrameImpl df_impl): + """ + Creates a batch load manager given a dataframe. + """ + cdef DataFrameBatchLoadManager m + m = DataFrameBatchLoadManager.__new__(DataFrameBatchLoadManager) + m.df_impl = df_impl + m.num_cols = len(df_impl.schema_impls) + m.num_chunks = len(df_impl.arrays) // m.num_cols + m._calculate_num_rows_in_chunk() + return m + + +@cython.final +cdef class FullDataBatchLoadManager(BatchLoadManager): + cdef: + list all_rows + uint64_t total_num_rows + object type_handler + + cdef int _next_batch(self) except -1: + """ + Goes to the next batch of data. + """ + cdef: + bint defer_type_assignment = (self.offset == 0) + object row + ssize_t i + self._calculate_num_rows_in_batch(self.total_num_rows) + self.cursor_impl._reset_bind_vars(self.offset, self.num_rows) + for i in range(self.num_rows): + if i == self.num_rows - 1: + defer_type_assignment = False + row = self.all_rows[self.offset + i] + self.cursor_impl._bind_values(self.cursor, self.type_handler, + row, self.num_rows, i, + defer_type_assignment) + + cdef int _setup_cursor(self) except -1: + """ + Called after the manager has been populated and helps set up the cursor + if one is being used. + """ + self.type_handler = self.cursor_impl._get_input_type_handler() + + @staticmethod + cdef BatchLoadManager create(list all_rows): + """ + Creates a batch load manager given a list of rows. + """ + cdef FullDataBatchLoadManager m + m = FullDataBatchLoadManager.__new__(FullDataBatchLoadManager) + m.all_rows = all_rows + m.total_num_rows = len(all_rows) + return m + + +@cython.final +cdef class PrePopulatedBatchLoadManager(BatchLoadManager): + cdef: + uint64_t total_num_rows + + cdef int _next_batch(self) except -1: + """ + Goes to the next batch in the set of data, if applicable. 
+ """ + cdef: + BaseVarImpl var_impl + BindVar bind_var + self._calculate_num_rows_in_batch(self.total_num_rows) + if self.cursor_impl.bind_vars is not None: + for bind_var in self.cursor_impl.bind_vars: + if bind_var is None or bind_var.var_impl is None: + continue + var_impl = bind_var.var_impl + if var_impl.num_elements < self.num_rows: + errors._raise_err(errors.ERR_INCORRECT_VAR_ARRAYSIZE, + var_arraysize=var_impl.num_elements, + required_arraysize=self.num_rows) + + @staticmethod + cdef BatchLoadManager create(uint64_t total_num_rows): + """ + Creates a batch load manager given the number of pre-populated rows. + """ + cdef PrePopulatedBatchLoadManager m + m = PrePopulatedBatchLoadManager.__new__(PrePopulatedBatchLoadManager) + m.total_num_rows = total_num_rows + return m diff --git a/src/oracledb/impl/base/bind_var.pyx b/src/oracledb/impl/base/bind_var.pyx index 4bd64e1f..a3896255 100644 --- a/src/oracledb/impl/base/bind_var.pyx +++ b/src/oracledb/impl/base/bind_var.pyx @@ -32,24 +32,6 @@ @cython.freelist(20) cdef class BindVar: - cdef int _create_var_from_arrow_array(self, object conn, - BaseCursorImpl cursor_impl, - ArrowArrayImpl array_impl) except -1: - """ - Creates a variable given an Arrow array. - """ - cdef: - BaseVarImpl var_impl - int64_t length - var_impl = cursor_impl._create_var_impl(conn) - array_impl.get_length(&length) - var_impl.num_elements = length - var_impl.metadata = \ - OracleMetadata.from_arrow_schema(array_impl.schema_impl) - var_impl._arrow_array = array_impl - var_impl._finalize_init() - self.var_impl = var_impl - cdef int _create_var_from_type(self, object conn, BaseCursorImpl cursor_impl, object value) except -1: diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index 22a35f09..e4ea5110 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ -135,20 +135,6 @@ cdef class BaseCursorImpl: return json.loads(value) return converter - cdef int _check_binds(self, uint32_t num_execs) except -1: - """ - Checks that all binds are capable of handling the number of executions - provided. - """ - cdef BindVar bind_var - for bind_var in self.bind_vars: - if bind_var is None or bind_var.var_impl is None: - continue - if bind_var.var_impl.num_elements < num_execs: - errors._raise_err(errors.ERR_INCORRECT_VAR_ARRAYSIZE, - var_arraysize=bind_var.var_impl.num_elements, - required_arraysize=num_execs) - cdef int _close(self, bint in_del) except -1: """ Internal method for closing the cursor. @@ -421,12 +407,16 @@ cdef class BaseCursorImpl: self.warning = None self.rowcount = 0 - def _prepare_for_executemany(self, object cursor, str statement, - object parameters): + def _prepare_for_executemany( + self, + object cursor, + str statement, + object parameters, + uint32_t batch_size = 2 ** 32 - 1, + ): """ Internal method for preparing a statement for execution multiple times. 
""" - cdef DataFrameImpl df_impl # prepare statement, if necessary if statement is None and self.statement is None: @@ -437,31 +427,20 @@ cdef class BaseCursorImpl: finally: self.set_input_sizes = False - # perform bind, if applicable - if isinstance(parameters, int): - num_execs = parameters - if self.bind_vars is not None: - self._check_binds(num_execs) - elif isinstance(parameters, list): - num_execs = len(parameters) - if parameters: - self.bind_many(cursor, parameters) - elif isinstance(parameters, PY_TYPE_DATAFRAME): - df_impl = parameters._impl - num_execs = self.bind_arrow_arrays(cursor, df_impl.arrays) - elif hasattr(parameters, "__arrow_c_stream__"): - df_impl = DataFrameImpl.from_arrow_stream(parameters) - num_execs = self.bind_arrow_arrays(cursor, df_impl.arrays) - else: - errors._raise_err(errors.ERR_WRONG_EXECUTEMANY_PARAMETERS_TYPE) - # clear any warning and reset rowcount self.warning = None self.rowcount = 0 - return num_execs + # return a batch load manager + return BatchLoadManager.create_for_executemany( + cursor, + self, + parameters, + batch_size, + ) - cdef int _reset_bind_vars(self, uint32_t num_rows) except -1: + cdef int _reset_bind_vars(self, uint64_t array_offset, + uint32_t num_rows) except -1: """ Reset all of the existing bind variables. If any bind variables don't have enough space to store the number of rows specified, expand and @@ -474,7 +453,7 @@ cdef class BaseCursorImpl: for i in range(len(self.bind_vars)): bind_var = self.bind_vars[i] if bind_var.var_impl is not None: - bind_var.var_impl._on_reset_bind(num_rows) + bind_var.var_impl._on_reset_bind(array_offset, num_rows) bind_var.has_value = False def _set_oci_attr(self, uint32_t attr_num, uint32_t attr_type, @@ -493,44 +472,6 @@ cdef class BaseCursorImpl: var_arraysize=var.num_elements, required_arraysize=self.arraysize) - cdef object bind_arrow_arrays(self, object cursor, list arrays): - """ - Internal method for binding Arrow arrays. The number of elements in the - array is returned for use by the caller. - """ - cdef: - ArrowArrayImpl array - int64_t num_rows - BindVar bind_var - ssize_t i - conn = cursor.connection - array = arrays[0] - array.get_length(&num_rows) - self._reset_bind_vars(num_rows) - self.bind_vars = [] - for i, array in enumerate(arrays): - bind_var = BindVar.__new__(BindVar) - bind_var.pos = i + 1 - bind_var._create_var_from_arrow_array(conn, self, array) - self.bind_vars.append(bind_var) - return num_rows - - cdef int bind_many(self, object cursor, list parameters) except -1: - """ - Internal method used for binding multiple rows of data. - """ - cdef: - bint defer_type_assignment - ssize_t i, num_rows - object params_row - type_handler = self._get_input_type_handler() - num_rows = len(parameters) - self._reset_bind_vars(num_rows) - for i, params_row in enumerate(parameters): - defer_type_assignment = (i < num_rows - 1) - self._bind_values(cursor, type_handler, params_row, num_rows, i, - defer_type_assignment) - cdef int bind_one(self, object cursor, object parameters) except -1: """ Internal method used for binding a single row of data. 
@@ -543,7 +484,7 @@ cdef class BaseCursorImpl: BindVar bind_var dict dict_params type_handler = self._get_input_type_handler() - self._reset_bind_vars(num_rows) + self._reset_bind_vars(0, num_rows) self._bind_values(cursor, type_handler, parameters, num_rows, row_num, defer_type_assignment) @@ -601,7 +542,8 @@ cdef class BaseCursorImpl: def execute(self, cursor): errors._raise_not_supported("executing a statement") - def executemany(self, cursor, num_execs, batcherrors, arraydmlrowcounts): + def executemany(self, object cursor, uint32_t num_execs, bint batcherrors, + bint arraydmlrowcounts, uint32_t offset=0): errors._raise_not_supported("executing a statement in batch") def fetch_next_row(self, cursor): diff --git a/src/oracledb/impl/base/var.pyx b/src/oracledb/impl/base/var.pyx index d06e6153..ec2d704b 100644 --- a/src/oracledb/impl/base/var.pyx +++ b/src/oracledb/impl/base/var.pyx @@ -309,7 +309,8 @@ cdef class BaseVarImpl: """ raise NotImplementedError() - cdef int _on_reset_bind(self, uint32_t num_rows) except -1: + cdef int _on_reset_bind(self, uint64_t array_offset, + uint32_t num_rows) except -1: """ Called when the bind variable is being reset, just prior to performing a bind operation. diff --git a/src/oracledb/impl/thick/cursor.pyx b/src/oracledb/impl/thick/cursor.pyx index b08c1b90..7a049ceb 100644 --- a/src/oracledb/impl/thick/cursor.pyx +++ b/src/oracledb/impl/thick/cursor.pyx @@ -326,7 +326,8 @@ cdef class ThickCursorImpl(BaseCursorImpl): elif self._stmt_info.isReturning or self._stmt_info.isPLSQL: self._transform_binds() - def executemany(self, cursor, num_execs, batcherrors, arraydmlrowcounts): + def executemany(self, object cursor, uint32_t num_execs, bint batcherrors, + bint arraydmlrowcounts, uint32_t offset=0): """ Internal method for executing a statement multiple times. """ diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 9b9d79ae..8bf6f84b 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -103,9 +103,6 @@ cdef class ThickVarImpl(BaseVarImpl): ): self._values = [None] * self.num_elements self._create_handle() - if self._arrow_array is not None: - for i in range(self.num_elements): - self._transform_element_from_arrow(i) cdef list _get_array_value(self): """ @@ -203,7 +200,8 @@ cdef class ThickVarImpl(BaseVarImpl): self._values[pos] = value return value - cdef int _on_reset_bind(self, uint32_t num_rows) except -1: + cdef int _on_reset_bind(self, uint64_t array_offset, + uint32_t num_rows) except -1: """ Called when the bind variable is being reset, just prior to performing a bind operation. @@ -211,7 +209,10 @@ cdef class ThickVarImpl(BaseVarImpl): cdef: dpiStmtInfo stmt_info uint32_t i - BaseVarImpl._on_reset_bind(self, num_rows) + BaseVarImpl._on_reset_bind(self, array_offset, num_rows) + if self._arrow_array is not None: + for i in range(num_rows): + self._transform_element_from_arrow(array_offset, i) if self.metadata.dbtype.num == DB_TYPE_NUM_CURSOR: for i in range(self.num_elements): if self._data[i].isNull: @@ -356,7 +357,8 @@ cdef class ThickVarImpl(BaseVarImpl): cpython.PyList_SET_ITEM(return_value, i, element_value) return return_value - cdef int _transform_element_from_arrow(self, uint32_t pos): + cdef int _transform_element_from_arrow(self, uint64_t offset, + uint32_t pos): """ Transforms a single element from an Arrow array to the value required by ODPI-C. 
@@ -368,7 +370,8 @@ cdef class ThickVarImpl(BaseVarImpl): OracleData ora_data object value value = convert_arrow_to_oracle_data(self.metadata, &ora_data, - self._arrow_array, pos) + self._arrow_array, + (offset + pos)) data.isNull = ora_data.is_null if not ora_data.is_null: ora_type_num = self.metadata.dbtype.num diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index b7b3975f..c4c42fc1 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -871,12 +871,15 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): elif op_impl.op_type == PIPELINE_OP_TYPE_EXECUTE: cursor._prepare_for_execute(op_impl.statement, op_impl.parameters) elif op_impl.op_type == PIPELINE_OP_TYPE_EXECUTE_MANY: - num_execs = cursor_impl._prepare_for_executemany( - cursor, op_impl.statement, op_impl.parameters + op_impl.batch_load_manager = cursor_impl._prepare_for_executemany( + cursor, + op_impl.statement, + op_impl.parameters, + 2 ** 32 - 1 ) - op_impl.num_execs = num_execs - if cursor_impl._statement.requires_single_execute(): - num_execs = 1 + op_impl.num_execs = op_impl.batch_load_manager.num_rows + if not cursor_impl._statement.requires_single_execute(): + num_execs = op_impl.num_execs elif op_impl.op_type == PIPELINE_OP_TYPE_FETCH_ONE: cursor._prepare_for_execute(op_impl.statement, op_impl.parameters) cursor_impl.prefetchrows = 1 diff --git a/src/oracledb/impl/thin/cursor.pyx b/src/oracledb/impl/thin/cursor.pyx index 73697c9c..3310a514 100644 --- a/src/oracledb/impl/thin/cursor.pyx +++ b/src/oracledb/impl/thin/cursor.pyx @@ -280,7 +280,8 @@ cdef class ThinCursorImpl(BaseThinCursorImpl): if message.type_cache is not None: message.type_cache.populate_partial_types(conn) - def executemany(self, cursor, num_execs, batcherrors, arraydmlrowcounts): + def executemany(self, object cursor, uint32_t num_execs, bint batcherrors, + bint arraydmlrowcounts, uint32_t offset=0): cdef: Protocol protocol = self._conn_impl._protocol MessageWithData messsage @@ -293,6 +294,7 @@ cdef class ThinCursorImpl(BaseThinCursorImpl): message.num_execs = num_execs message.batcherrors = batcherrors message.arraydmlrowcounts = arraydmlrowcounts + message.offset = offset stmt = self._statement # only DML statements may use the batch errors or array DML row counts @@ -393,8 +395,9 @@ cdef class AsyncThinCursorImpl(BaseThinCursorImpl): if message.type_cache is not None: await message.type_cache.populate_partial_types(conn) - async def executemany(self, cursor, num_execs, batcherrors, - arraydmlrowcounts): + async def executemany(self, object cursor, uint32_t num_execs, + bint batcherrors, bint arraydmlrowcounts, + uint32_t offset): cdef: BaseAsyncProtocol protocol MessageWithData messsage @@ -408,6 +411,7 @@ cdef class AsyncThinCursorImpl(BaseThinCursorImpl): message.num_execs = num_execs message.batcherrors = batcherrors message.arraydmlrowcounts = arraydmlrowcounts + message.offset = offset stmt = self._statement # only DML statements may use the batch errors or array DML row counts diff --git a/tests/test_4000_cursor_executemany.py b/tests/test_4000_cursor_executemany.py index 2db8c8a3..19e6ef4c 100644 --- a/tests/test_4000_cursor_executemany.py +++ b/tests/test_4000_cursor_executemany.py @@ -460,3 +460,34 @@ def test_4029(cursor, test_env): cursor.executemany("", 5) with test_env.assert_raises_full_code("DPY-2066"): cursor.executemany(" ", 5) + + +def test_4030(cursor): + "4030 - test executemany with batch size 0" + rows = [[1], [2]] + with pytest.raises(TypeError): 
+ cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", + rows, + batch_size=0, + ) + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +def test_4031(batch_size, conn, cursor, empty_tab, round_trip_checker): + "4030 - test executemany with various batch sizes" + rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] + cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + rows, + batch_size=batch_size, + ) + expected_round_trips = len(rows) // batch_size + if len(rows) % batch_size: + expected_round_trips += 1 + assert round_trip_checker.get_value() == expected_round_trips + conn.commit() + cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert cursor.fetchall() == rows diff --git a/tests/test_6100_cursor_executemany_async.py b/tests/test_6100_cursor_executemany_async.py index 4d86160e..5fe3983f 100644 --- a/tests/test_6100_cursor_executemany_async.py +++ b/tests/test_6100_cursor_executemany_async.py @@ -403,3 +403,36 @@ async def test_6125(async_cursor, test_env): await async_cursor.executemany("", 5) with test_env.assert_raises_full_code("DPY-2066"): await async_cursor.executemany(" ", 5) + + +async def test_6126(cursor): + "6126 - test executemany with batch size 0" + rows = [[1], [2]] + with pytest.raises(TypeError): + await cursor.executemany( + "insert into TestTempTable (IntCol) values (:1)", + rows, + batch_size=0, + ) + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +async def test_6127( + batch_size, async_conn, async_cursor, empty_tab, round_trip_checker_async +): + "6127 - test executemany with various batch sizes" + rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] + await async_cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + rows, + batch_size=batch_size, + ) + num_round_trips = len(rows) // batch_size + if len(rows) % batch_size: + num_round_trips += 1 + assert await round_trip_checker_async.get_value_async() == num_round_trips + await async_conn.commit() + await async_cursor.execute( + "select IntCol, StringCol1 from TestTempTable order by IntCol" + ) + assert await async_cursor.fetchall() == rows diff --git a/tests/test_8700_sessionless_transaction.py b/tests/test_8700_sessionless_transaction.py index a0bd3b94..8a7ab2ff 100644 --- a/tests/test_8700_sessionless_transaction.py +++ b/tests/test_8700_sessionless_transaction.py @@ -671,3 +671,21 @@ def test_8715(conn, cursor, test_env): # drop temp table cursor.execute(f"drop table {temp_table_name} purge") + + +def test_8716(conn, cursor, test_env): + "8716 - test suspend_on_success with batch_size < total rows inserted" + cursor.execute("truncate table TestTempTable") + rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] + conn.begin_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=15, + defer_round_trip=True, + ) + with test_env.assert_raises_full_code("DPY-3036"): + cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + rows, + batch_size=75, + suspend_on_success=True, + ) diff --git a/tests/test_8800_sessionless_transaction_async.py b/tests/test_8800_sessionless_transaction_async.py index 7986fbd5..03b0b2f5 100644 --- a/tests/test_8800_sessionless_transaction_async.py +++ b/tests/test_8800_sessionless_transaction_async.py @@ -679,3 +679,21 @@ async def test_8815(async_conn, async_cursor, test_env): # drop temp table await async_cursor.execute(f"drop table 
{temp_table_name} purge") + + +async def test_8816(async_conn, async_cursor, test_env): + "8816 - test suspend_on_success with batch_size < total rows inserted" + await async_cursor.execute("truncate table TestTempTable") + rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] + await async_conn.begin_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=15, + defer_round_trip=True, + ) + with test_env.assert_raises_full_code("DPY-3036"): + await async_cursor.executemany( + "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", + rows, + batch_size=75, + suspend_on_success=True, + ) diff --git a/tests/test_8900_dataframe_ingestion.py b/tests/test_8900_dataframe_ingestion.py index a838f658..31f31eca 100644 --- a/tests/test_8900_dataframe_ingestion.py +++ b/tests/test_8900_dataframe_ingestion.py @@ -937,3 +937,67 @@ def test_8920(values, dtype, conn, cursor, empty_tab): ) fetched_values = [d for d, in cursor] assert fetched_values == values + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +def test_8921(batch_size, conn, cursor, empty_tab, round_trip_checker): + "8921 - test ingestion with various batch sizes" + names = ["Id", "FirstName"] + rows = [(i + 1, f"Name {i + 1}") for i in range(200)] + arrays = [ + pyarrow.array([i for i, _ in rows], pyarrow.int16()), + pyarrow.array([s for _, s in rows], pyarrow.string()), + ] + df = pyarrow.table(arrays, names) + cursor.executemany( + "insert into TestDataFrame (Id, FirstName) values (:1, :2)", + df, + batch_size=batch_size, + ) + expected_round_trips = len(rows) // batch_size + if len(rows) % batch_size: + expected_round_trips += 1 + assert round_trip_checker.get_value() == expected_round_trips + conn.commit() + cursor.execute("select Id, FirstName from TestDataFrame order by Id") + assert cursor.fetchall() == rows + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +def test_8922(batch_size, conn, cursor, empty_tab, round_trip_checker): + "8922 - test ingestion of multi chunk data frames with various batch sizes" + names = ["Id", "FirstName"] + rows = [(i + 1, f"Name {i + 1}") for i in range(200)] + int_arrays = [ + pyarrow.array([i for i, _ in rows[:25]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[25:59]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[59:75]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[75:190]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[190:]], pyarrow.int16()), + ] + str_arrays = [ + pyarrow.array([s for _, s in rows[:25]], pyarrow.string()), + pyarrow.array([s for _, s in rows[25:59]], pyarrow.string()), + pyarrow.array([s for _, s in rows[59:75]], pyarrow.string()), + pyarrow.array([s for _, s in rows[75:190]], pyarrow.string()), + pyarrow.array([s for _, s in rows[190:]], pyarrow.string()), + ] + chunked_arrays = [ + pyarrow.chunked_array(int_arrays), + pyarrow.chunked_array(str_arrays), + ] + df = pyarrow.table(chunked_arrays, names) + cursor.executemany( + "insert into TestDataFrame (Id, FirstName) values (:1, :2)", + df, + batch_size=batch_size, + ) + num_round_trips = 0 + for array in int_arrays: + num_round_trips += len(array) // batch_size + if len(array) % batch_size: + num_round_trips += 1 + assert round_trip_checker.get_value() == num_round_trips + conn.commit() + cursor.execute("select Id, FirstName from TestDataFrame order by Id") + assert cursor.fetchall() == rows diff --git a/tests/test_9000_dataframe_ingestion_async.py b/tests/test_9000_dataframe_ingestion_async.py index 66c6bdf4..b4326cb4 
100644 --- a/tests/test_9000_dataframe_ingestion_async.py +++ b/tests/test_9000_dataframe_ingestion_async.py @@ -947,3 +947,75 @@ async def test_9020(values, dtype, async_conn, async_cursor, empty_tab): ) fetched_values = [d async for d, in async_cursor] assert fetched_values == values + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +async def test_9021( + batch_size, async_conn, async_cursor, empty_tab, round_trip_checker_async +): + "8921 - test ingestion with various batch sizes" + names = ["Id", "FirstName"] + rows = [(i + 1, f"Name {i + 1}") for i in range(200)] + arrays = [ + pyarrow.array([i for i, _ in rows], pyarrow.int16()), + pyarrow.array([s for _, s in rows], pyarrow.string()), + ] + df = pyarrow.table(arrays, names) + await async_cursor.executemany( + "insert into TestDataFrame (Id, FirstName) values (:1, :2)", + df, + batch_size=batch_size, + ) + num_round_trips = len(rows) // batch_size + if len(rows) % batch_size: + num_round_trips += 1 + assert await round_trip_checker_async.get_value_async() == num_round_trips + await async_conn.commit() + await async_cursor.execute( + "select Id, FirstName from TestDataFrame order by Id" + ) + assert await async_cursor.fetchall() == rows + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +async def test_9022( + batch_size, async_conn, async_cursor, empty_tab, round_trip_checker_async +): + "9022 - test ingestion of multi chunk data frames with various batch sizes" + names = ["Id", "FirstName"] + rows = [(i + 1, f"Name {i + 1}") for i in range(200)] + int_arrays = [ + pyarrow.array([i for i, _ in rows[:25]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[25:59]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[59:75]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[75:190]], pyarrow.int16()), + pyarrow.array([i for i, _ in rows[190:]], pyarrow.int16()), + ] + str_arrays = [ + pyarrow.array([s for _, s in rows[:25]], pyarrow.string()), + pyarrow.array([s for _, s in rows[25:59]], pyarrow.string()), + pyarrow.array([s for _, s in rows[59:75]], pyarrow.string()), + pyarrow.array([s for _, s in rows[75:190]], pyarrow.string()), + pyarrow.array([s for _, s in rows[190:]], pyarrow.string()), + ] + chunked_arrays = [ + pyarrow.chunked_array(int_arrays), + pyarrow.chunked_array(str_arrays), + ] + df = pyarrow.table(chunked_arrays, names) + await async_cursor.executemany( + "insert into TestDataFrame (Id, FirstName) values (:1, :2)", + df, + batch_size=batch_size, + ) + num_round_trips = 0 + for array in int_arrays: + num_round_trips += len(array) // batch_size + if len(array) % batch_size: + num_round_trips += 1 + assert await round_trip_checker_async.get_value_async() == num_round_trips + await async_conn.commit() + await async_cursor.execute( + "select Id, FirstName from TestDataFrame order by Id" + ) + assert await async_cursor.fetchall() == rows From 59b8b49214b4aa72853ef54e9e1a85d8005e3430 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:46:20 -0600 Subject: [PATCH 208/239] Tweak tests. 
--- tests/test_8700_sessionless_transaction.py | 30 ++++++++++--------- ...test_8800_sessionless_transaction_async.py | 30 ++++++++++--------- 2 files changed, 32 insertions(+), 28 deletions(-) diff --git a/tests/test_8700_sessionless_transaction.py b/tests/test_8700_sessionless_transaction.py index 8a7ab2ff..6126d31c 100644 --- a/tests/test_8700_sessionless_transaction.py +++ b/tests/test_8700_sessionless_transaction.py @@ -673,19 +673,21 @@ def test_8715(conn, cursor, test_env): cursor.execute(f"drop table {temp_table_name} purge") -def test_8716(conn, cursor, test_env): +def test_8716(test_env): "8716 - test suspend_on_success with batch_size < total rows inserted" - cursor.execute("truncate table TestTempTable") - rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] - conn.begin_sessionless_transaction( - transaction_id=TRANSACTION_ID_CLIENT, - timeout=15, - defer_round_trip=True, - ) - with test_env.assert_raises_full_code("DPY-3036"): - cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - rows, - batch_size=75, - suspend_on_success=True, + with test_env.get_connection() as conn: + cursor = conn.cursor() + cursor.execute("truncate table TestTempTable") + rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] + conn.begin_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=5, + defer_round_trip=True, ) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + with test_env.assert_raises_full_code("DPY-3036"): + cursor.executemany( + sql, rows, batch_size=75, suspend_on_success=True + ) + with test_env.get_connection() as conn: + conn.resume_sessionless_transaction(TRANSACTION_ID_CLIENT) diff --git a/tests/test_8800_sessionless_transaction_async.py b/tests/test_8800_sessionless_transaction_async.py index 03b0b2f5..c9ee15c4 100644 --- a/tests/test_8800_sessionless_transaction_async.py +++ b/tests/test_8800_sessionless_transaction_async.py @@ -681,19 +681,21 @@ async def test_8815(async_conn, async_cursor, test_env): await async_cursor.execute(f"drop table {temp_table_name} purge") -async def test_8816(async_conn, async_cursor, test_env): +async def test_8816(test_env): "8816 - test suspend_on_success with batch_size < total rows inserted" - await async_cursor.execute("truncate table TestTempTable") - rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] - await async_conn.begin_sessionless_transaction( - transaction_id=TRANSACTION_ID_CLIENT, - timeout=15, - defer_round_trip=True, - ) - with test_env.assert_raises_full_code("DPY-3036"): - await async_cursor.executemany( - "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)", - rows, - batch_size=75, - suspend_on_success=True, + async with test_env.get_connection_async() as conn: + cursor = conn.cursor() + await cursor.execute("truncate table TestTempTable") + rows = [(i + 1, f"String for row {i + 1}") for i in range(200)] + await conn.begin_sessionless_transaction( + transaction_id=TRANSACTION_ID_CLIENT, + timeout=5, + defer_round_trip=True, ) + sql = "insert into TestTempTable (IntCol, StringCol1) values (:1, :2)" + with test_env.assert_raises_full_code("DPY-3036"): + await cursor.executemany( + sql, rows, batch_size=75, suspend_on_success=True + ) + async with test_env.get_connection_async() as conn: + await conn.resume_sessionless_transaction(TRANSACTION_ID_CLIENT) From 7b54dbc3342fe6f7ae34b614ec865ca3c2fe20b9 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:46:35 -0600 Subject: [PATCH 209/239] 
Reorder errors to be consistently in alphabetical order. --- src/oracledb/errors.py | 86 +++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 43 deletions(-) diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 175e0778..be4b71a2 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -548,14 +548,37 @@ def _raise_not_supported(feature: str) -> None: ERR_ACCESS_TOKEN_REQUIRES_TCPS: ( "access_token requires use of the tcps protocol" ), - ERR_ARGS_MUST_BE_LIST_OR_TUPLE: "arguments must be a list or tuple", ERR_ARGS_AND_KEYWORD_ARGS: ( "expecting positional arguments or keyword arguments, not both" ), + ERR_ARGS_MUST_BE_LIST_OR_TUPLE: "arguments must be a list or tuple", ERR_ARRAY_DML_ROW_COUNTS_NOT_ENABLED: ( "array DML row counts mode is not enabled" ), ERR_ARRAYS_OF_ARRAYS: "arrays of arrays are not supported", + ERR_ARROW_C_API_ERROR: ( + "Arrow C Data Interface operation failed with error code {code}" + ), + ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED: ( + "Apache Arrow format does not support sparse vectors with flexible " + "dimensions" + ), + ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT: ( + 'conversion from list with child Arrow format "{schema_format}" to ' + "Oracle Database vector is not supported" + ), + ERR_ARROW_UNSUPPORTED_DATA_FORMAT: ( + 'conversion from Arrow format "{schema_format}" to Oracle Database ' + "is not supported" + ), + ERR_ARROW_UNSUPPORTED_DATA_TYPE: ( + "conversion from Oracle Database type {db_type_name} to Apache " + "Arrow format is not supported" + ), + ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT: ( + "flexible vector formats are not supported. Only fixed 'FLOAT32', " + "'FLOAT64', 'INT8' or 'BINARY' formats are supported" + ), ERR_BUFFER_LENGTH_INSUFFICIENT: ( "internal error: buffer of length {actual_buffer_len} " "insufficient to hold {required_buffer_len} bytes" @@ -572,6 +595,7 @@ def _raise_not_supported(feature: str) -> None: "column truncated to {col_value_len} {unit}. " "Untruncated was {actual_len}" ), + ERR_CONNECTION_CLOSED: "the database or network closed the connection", ERR_CONNECTION_FAILED: ( "cannot connect to database (CONNECTION_ID={connection_id})." 
), @@ -579,8 +603,8 @@ def _raise_not_supported(feature: str) -> None: ERR_CURSOR_DIFF_CONNECTION: ( "binding a cursor from a different connection is not supported" ), - ERR_CURSOR_NOT_OPEN: "cursor is not open", ERR_CURSOR_HAS_BEEN_CLOSED: "cursor has been closed by the database", + ERR_CURSOR_NOT_OPEN: "cursor is not open", ERR_DBOBJECT_ATTR_MAX_SIZE_VIOLATED: ( "attribute {attr_name} of type {type_name} exceeds its maximum size " "(actual: {actual_size}, maximum: {max_size})" @@ -666,6 +690,9 @@ def _raise_not_supported(feature: str) -> None: ERR_INVALID_LOB_AMOUNT: "LOB amount must be greater than zero", ERR_INVALID_LOB_OFFSET: "LOB offset must be greater than zero", ERR_INVALID_MAKEDSN_ARG: '"{name}" argument contains invalid values', + ERR_INVALID_NETWORK_NAME: ( + '"{name}" includes characters that are not allowed' + ), ERR_INVALID_NUMBER: "invalid number", ERR_INVALID_OBJECT_TYPE_NAME: 'invalid object type name: "{name}"', ERR_INVALID_OCI_ATTR_TYPE: "invalid OCI attribute type {attr_type}", @@ -731,9 +758,9 @@ def _raise_not_supported(feature: str) -> None: ERR_MISSING_CONNECT_DESCRIPTOR: ( '"connect_descriptor" key missing from configuration' ), - ERR_MISSING_FILE: "file '{file_name}' is missing or unreadable", ERR_MISSING_ENDING_DOUBLE_QUOTE: 'missing ending quote (")', ERR_MISSING_ENDING_SINGLE_QUOTE: "missing ending quote (')", + ERR_MISSING_FILE: "file '{file_name}' is missing or unreadable", ERR_MISSING_TYPE_NAME_FOR_OBJECT_VAR: ( "no object type specified for object variable" ), @@ -828,11 +855,19 @@ def _raise_not_supported(feature: str) -> None: "scroll operation would go out of the result set" ), ERR_SELF_BIND_NOT_SUPPORTED: "binding to self is not supported", - ERR_CONNECTION_CLOSED: "the database or network closed the connection", ERR_SERVER_VERSION_NOT_SUPPORTED: ( "connections to this database server version are not supported " "by python-oracledb in thin mode" ), + ERR_SESSIONLESS_ALREADY_ACTIVE: ( + "suspend, commit, or rollback the current active sessionless " + "transaction before beginning or resuming another one" + ), + ERR_SESSIONLESS_DIFFERING_METHODS: ( + "suspending or resuming a Sessionless Transaction can be done with " + "DBMS_TRANSACTION or with python-oracledb, but not both" + ), + ERR_SESSIONLESS_INACTIVE: ("no Sessionless Transaction is active"), ERR_TDS_TYPE_NOT_SUPPORTED: "Oracle TDS data type {num} is not supported", ERR_THICK_MODE_ENABLED: ( "python-oracledb thin mode cannot be used because thick mode has " @@ -850,7 +885,6 @@ def _raise_not_supported(feature: str) -> None: ERR_TOO_MANY_BATCH_ERRORS: ( "the number of batch errors from executemany() exceeds 65535" ), - ERR_UNEXPECTED_PIPELINE_FAILURE: "unexpected pipeline failure", ERR_UNEXPECTED_DATA: "unexpected data received: {data}", ERR_UNEXPECTED_END_OF_DATA: ( "unexpected end of data: want {num_bytes_wanted} bytes but " @@ -864,6 +898,7 @@ def _raise_not_supported(feature: str) -> None: "the listener refused the connection but an unexpected error " "format was returned" ), + ERR_UNEXPECTED_PIPELINE_FAILURE: "unexpected pipeline failure", ERR_UNEXPECTED_XML_TYPE: "unexpected XMLType with flag {flag}", ERR_UNKNOWN_SERVER_PIGGYBACK: ( "internal error: unknown server side piggyback opcode {opcode}" @@ -874,12 +909,12 @@ def _raise_not_supported(feature: str) -> None: ERR_UNKNOWN_TRANSACTION_SYNC_VERSION: ( "internal error: unknown transaction sync version {version}" ), - ERR_UNSUPPORTED_PIPELINE_OPERATION: ( - "unsupported pipeline operation type: {op_type}" - ), 
ERR_UNSUPPORTED_INBAND_NOTIFICATION: ( "unsupported in-band notification with error number {err_num}" ), + ERR_UNSUPPORTED_PIPELINE_OPERATION: ( + "unsupported pipeline operation type: {op_type}" + ), ERR_UNSUPPORTED_PYTHON_TYPE_FOR_DB_TYPE: ( "unsupported Python type {py_type_name} for database type " "{db_type_name}" @@ -920,39 +955,4 @@ def _raise_not_supported(feature: str) -> None: "scroll mode must be relative, absolute, first or last" ), WRN_COMPILATION_ERROR: "creation succeeded with compilation errors", - ERR_INVALID_NETWORK_NAME: ( - '"{name}" includes characters that are not allowed' - ), - ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED: ( - "Apache Arrow format does not support sparse vectors with flexible " - "dimensions" - ), - ERR_ARROW_UNSUPPORTED_CHILD_DATA_FORMAT: ( - 'conversion from list with child Arrow format "{schema_format}" to ' - "Oracle Database vector is not supported" - ), - ERR_ARROW_UNSUPPORTED_DATA_FORMAT: ( - 'conversion from Arrow format "{schema_format}" to Oracle Database ' - "is not supported" - ), - ERR_ARROW_UNSUPPORTED_DATA_TYPE: ( - "conversion from Oracle Database type {db_type_name} to Apache " - "Arrow format is not supported" - ), - ERR_ARROW_C_API_ERROR: ( - "Arrow C Data Interface operation failed with error code {code}" - ), - ERR_ARROW_UNSUPPORTED_VECTOR_FORMAT: ( - "flexible vector formats are not supported. Only fixed 'FLOAT32', " - "'FLOAT64', 'INT8' or 'BINARY' formats are supported" - ), - ERR_SESSIONLESS_DIFFERING_METHODS: ( - "suspending or resuming a Sessionless Transaction can be done with " - "DBMS_TRANSACTION or with python-oracledb, but not both" - ), - ERR_SESSIONLESS_ALREADY_ACTIVE: ( - "suspend, commit, or rollback the current active sessionless " - "transaction before beginning or resuming another one" - ), - ERR_SESSIONLESS_INACTIVE: ("no Sessionless Transaction is active"), } From 7f06f2644742cca982b436d0cd2215f371bcfc4f Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:47:34 -0600 Subject: [PATCH 210/239] Add support for direct path load in thin mode. 
--- README.md | 20 +- doc/src/api_manual/async_connection.rst | 8 + doc/src/api_manual/connection.rst | 8 + doc/src/release_notes.rst | 3 + doc/src/user_guide/appendix_a.rst | 4 + doc/src/user_guide/batch_statement.rst | 157 +++++ doc/src/user_guide/dataframes.rst | 46 +- doc/src/user_guide/sql_execution.rst | 4 +- doc/src/user_guide/tuning.rst | 8 +- samples/data/load_csv_direct_path.csv | 92 +++ samples/dataframe_insert.py | 51 +- samples/direct_path_load.py | 67 ++ samples/direct_path_load_async.py | 77 +++ samples/load_csv.py | 78 ++- samples/load_csv_direct_path.py | 157 +++++ ...le-Database-The-New-Wave-of-Scripting.html | 176 ++++- samples/tutorial/async_gather.py | 2 +- samples/tutorial/direct_path.py | 76 +++ samples/tutorial/pipelining.py | 2 +- samples/tutorial/soda.py | 2 +- samples/tutorial/solutions/direct_path.py | 74 +++ samples/tutorial/solutions/soda.py | 2 +- src/oracledb/arrow_impl.pxd | 1 + src/oracledb/base_impl.pxd | 20 + src/oracledb/connection.py | 52 ++ src/oracledb/errors.py | 24 + src/oracledb/impl/arrow/schema.pyx | 6 + src/oracledb/impl/arrow/utils.pyx | 1 + src/oracledb/impl/base/batch_load_manager.pyx | 188 ++++-- src/oracledb/impl/base/buffer.pyx | 18 + src/oracledb/impl/base/connection.pyx | 7 + src/oracledb/impl/base/converters.pyx | 29 +- src/oracledb/impl/base/metadata.pyx | 68 +- src/oracledb/impl/base/types.pyx | 50 +- src/oracledb/impl/thin/connection.pyx | 78 +++ src/oracledb/impl/thin/constants.pxi | 58 ++ src/oracledb/impl/thin/messages/base.pyx | 9 +- .../thin/messages/direct_path_load_stream.pyx | 363 ++++++++++ .../impl/thin/messages/direct_path_op.pyx | 71 ++ .../thin/messages/direct_path_prepare.pyx | 153 +++++ src/oracledb/impl/thin/packet.pyx | 6 +- src/oracledb/thin_impl.pyx | 6 +- tests/test_9600_direct_path_load.py | 614 +++++++++++++++++ tests/test_9700_direct_path_load_async.py | 618 ++++++++++++++++++ utils/templates/connection.py | 48 ++ 45 files changed, 3465 insertions(+), 137 deletions(-) create mode 100644 samples/data/load_csv_direct_path.csv create mode 100644 samples/direct_path_load.py create mode 100644 samples/direct_path_load_async.py create mode 100644 samples/load_csv_direct_path.py create mode 100644 samples/tutorial/direct_path.py create mode 100644 samples/tutorial/solutions/direct_path.py create mode 100644 src/oracledb/impl/thin/messages/direct_path_load_stream.pyx create mode 100644 src/oracledb/impl/thin/messages/direct_path_op.pyx create mode 100644 src/oracledb/impl/thin/messages/direct_path_prepare.pyx create mode 100644 tests/test_9600_direct_path_load.py create mode 100644 tests/test_9700_direct_path_load_async.py diff --git a/README.md b/README.md index a80ec708..3d1c9a83 100644 --- a/README.md +++ b/README.md @@ -1,16 +1,16 @@ # python-oracledb -Python-oracledb is an open-source [Python][python] extension module allowing -Python programs to connect to [Oracle Database][oracledb]. The module conforms -to the [Python Database API 2.0 specification][pep249] with a considerable -number of additions and a couple of minor exclusions, see the [feature -list][features]. It is maintained by Oracle. - -Python-oracledb is used for executing SQL and PL/SQL; for calling NoSQL-style -document APIs; for working with data frames; for receiving database +Python-oracledb is the widely used, open-source [Python][python] extension +module allowing Python programs to connect to [Oracle Database][oracledb]. 
The +module conforms to the [Python Database API 2.0 specification][pep249] with a +considerable number of additions and a couple of minor exclusions, see the +[feature list][features]. It is maintained by Oracle. + +Python-oracledb is used for executing SQL and PL/SQL; for working with data +frames; for calling NoSQL-style document APIs; for receiving database notifications and messages; and for starting and stopping the database. It has -features for high availability and security. It is used by many Python -Frameworks, SQL Generators, ORMs, and libraries. +features for fast data loading, high availability, and security. It is used by +many Python frameworks, SQL generators, ORMs, and libraries. Synchronous and [concurrent][concurrent] coding styles are supported. Database operations can optionally be [pipelined][pipelining]. diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 46c782f9..86c1af16 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -77,6 +77,14 @@ AsyncConnection Methods .. versionadded:: 2.1.0 +.. automethod:: AsyncConnection.direct_path_load + + See :ref:`directpathloads`. + + .. versionadded:: 3.4.0 + + .. dbapimethodextension:: + .. automethod:: AsyncConnection.encode_oson .. versionadded:: 2.1.0 diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index d7d8f472..3cc944e6 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -73,6 +73,14 @@ Connection Methods .. dbapimethodextension:: +.. automethod:: Connection.direct_path_load + + See :ref:`directpathloads`. + + .. versionadded:: 3.4.0 + + .. dbapimethodextension:: + .. automethod:: Connection.encode_oson .. versionadded:: 2.1.0 diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 263f90b0..3ea4dd0d 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -19,6 +19,9 @@ oracledb `3.4.0 ` functionality which is very efficient for loading large + datasets into a database. #) Fixed bug when setting values of type ``datetime.date`` on variables (such as created by :meth:`Cursor.var()` or implicitly by :meth:`Cursor.setinputsizes()`) of types diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index 1bb51c30..f7a7eafd 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -245,6 +245,10 @@ For more details see :ref:`driverdiff` and :ref:`upgrading83`. - No - Yes - Yes + * - Direct Path Loads (see :ref:`directpathloads`) + - Yes + - No + - No * - Oracle Database 23ai JSON-Relational Duality Views (see :ref:`jsondualityviews`) - Yes - Yes diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index 82dab353..1359ca7c 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -14,6 +14,11 @@ easily optimize batch insertion, and also allows "noisy" data (values not in a suitable format) to be filtered for review while other, correct, values are inserted. +In addition to Oracle Database "Array DML" batch loading, +:ref:`directpathloads` can be used for very fast loading of large data sets if +certain schema criteria can be met. Another option for frequent, small inserts +is to load data using the Oracle Database :ref:`memoptimized`. + Related topics include :ref:`tuning` and :ref:`dataframeformat`. 
Batch Statement Execution @@ -618,3 +623,155 @@ B19E-449D-9968-1121AF06D793>`__ between the databases and using INSERT INTO SELECT or CREATE AS SELECT. You can control the data transfer by changing your SELECT statement. + +.. _directpathloads: + +Direct Path Loads +================= + +Direct Path Loads allows data being inserted into Oracle Database to bypass +code layers such as the database buffer cache. Also there are no INSERT +statements used. This can be very efficient for ingestion of huge amounts of +data but, as a consequence of the architecture, there are restrictions on when +Direct Path Loads can be used. For more information see Oracle Database +documentation such as on SQL*Loader `Direct Path Loads +`__ and on the Oracle Call Interface +`Direct Path Load Interface +`__. + +The end-to-end insertion time when using Direct Path Loads for smaller data +sets may not be faster than using :meth:`Cursor.executemany()`, however there +can still be reduced load on the database. + +.. note:: + + Direct Path Loads are only supported in python-oracledb Thin mode. + +Direct Path Loading is performed by the :meth:`Connection.direct_path_load()` +method. For example, if you have the table:: + + create table TestDirectPathLoad ( + id number(9), + name varchar2(20) + ); + +Then you can load data into it using the code: + +.. code-block:: python + + SCHEMA_NAME = "HR" + TABLE_NAME = "TESTDIRECTPATHLOAD" + COLUMN_NAMES = ["ID", "NAME"] + DATA = [ + (1, "A first row"), + (2, "A second row"), + (3, "A third row"), + ] + + connection.direct_path_load( + schema_name=SCHEMA_NAME, + table_name=TABLE_NAME, + column_names=COLUMN_NAMES, + data=DATA + ) + +The records are always implicitly committed. + +The ``data`` parameter can be a list of sequences, a :ref:`DataFrame +` object, or a third-party DataFrame instance that supports +the Apache Arrow PyCapsule Interface, see :ref:`dfppl`. + +To load into VECTOR columns, pass an appropriate `Python array.array() +`__ value, or a list of values. +For example, if you have the table:: + + create table TestDirectPathLoad ( + id number(9), + name varchar2(20), + v64 vector(3, float64) + ); + +Then you can load data into it using the code: + +.. code-block:: python + + SCHEMA_NAME = "HR" + TABLE_NAME = "TESTDIRECTPATHLOAD" + COLUMN_NAMES = ["ID", "NAME", "V64"] + DATA = [ + (1, "A first row", array.array("d", [1, 2, 3])), + (2, "A second row", [4, 5, 6]), + (3, "A third row", array.array("d", [7, 8, 9])), + ] + + connection.direct_path_load( + schema_name=SCHEMA_NAME, + table_name=TABLE_NAME, + column_names=COLUMN_NAMES, + data=DATA + ) + + +For more on vectors, see :ref:`vectors`. + +Runnable Direct Path Load examples are in the `GitHub examples +`__ directory. + +**Notes on Direct Path Loads** + +- Data is implicitly committed. +- Data being inserted into CLOB or BLOB columns must be strings or bytes, not + python-oracledb :ref:`LOB Objects `. +- Insertion of python-oracledb :ref:`DbObjectType Objects ` is + not supported + +Review Oracle Database documentation for database requirements and +restrictions. + +Batching of Direct Path Loads +----------------------------- + +If buffer, network, or database limits make it desirable to process smaller +sets of records, you can either make repeated calls to +:meth:`Connection.direct_path_load()` or you can use the ``batch_size`` +parameter. For example: + +.. 
code-block:: python + + SCHEMA_NAME = "HR" + TABLE_NAME = "TESTDIRECTPATHLOAD" + COLUMN_NAMES = ["ID", "NAME"] + DATA = [ + (1, "A first row"), + (2, "A second row"), + . . . + (10_000_000, "Ten millionth row"), + ] + + connection.direct_path_load( + schema_name=SCHEMA_NAME, + table_name=TABLE_NAME, + column_names=COLUMN_NAMES, + data=DATA, + batch_size=1_000_000 + ) + +This will send the data to the database in batches of 1,000,000 records until +all 10,000,000 records have been inserted. + +.. _memoptimized: + +Memoptimized Rowstore +===================== + +The Memoptimized Rowstore is another Oracle Database feature for data +ingestion, particularly for frequent single row inserts. It can also aid query +performance. Configuration and control is handled by database configuration and +the use of specific SQL statements. As a result, there is no specific +python-oracledb requirement or API needed to take advantage of the feature. + +To use the Memoptimized Rowstore see Oracle Database documentation `Enabling +High Performance Data Streaming with the Memoptimized Rowstore +`__. diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index f900ebb1..a8a3e4ad 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -647,7 +647,12 @@ Inserting Data Frames Python-oracledb :ref:`DataFrame ` instances, or third-party DataFrame instances that support the Apache Arrow PyCapsule Interface, can be inserted into Oracle Database by passing them directly to -:meth:`Cursor.executemany()` or :meth:`AsyncCursor.executemany()`. +:meth:`Cursor.executemany()` or :meth:`AsyncCursor.executemany()`. They can +also be passed to :meth:`Connection.direct_path_load()` and +:meth:`AsyncConnection.direct_path_load()`. + +Inserting Data Frames with executemany() +---------------------------------------- For example, with the table:: @@ -686,6 +691,45 @@ For general information about fast data ingestion, and discussion of :meth:`Cursor.executemany()` and :meth:`AsyncCursor.executemany()` options, see :ref:`batchstmnt`. +.. _dfppl: + +Inserting Data Frames with Direct Path Loads +-------------------------------------------- + +Very large :ref:`DataFrame ` objects can be efficiently +inserted using Oracle Database Direct Path Loading by passing them to +:meth:`Connection.direct_path_load()`. You can also pass third-party DataFrame +instances that support the Apache Arrow PyCapsule Interface. + +See :ref:`directpathloads` for general information about Direct Path Loads. + +For example, if the user "HR" has the table:: + + create table mytab ( + id number(9), + name varchar2(100)); + +The following code will insert a Pandas DataFrame: + +.. code-block:: python + + import pandas + + d = [ + (1, "Abigail"), + (2, "Anna"), + (3, "Janey"), + (4, "Jessica"), + ] + pdf = pandas.DataFrame(data=d) + + connection.direct_path_load( + schema_name="hr", + table_name="mytab", + column_names=["id", "name"], + data=pdf + ) + Explicit Conversion to DataFrame or ArrowArray ============================================== diff --git a/doc/src/user_guide/sql_execution.rst b/doc/src/user_guide/sql_execution.rst index 3d8c70b9..dfa9561a 100644 --- a/doc/src/user_guide/sql_execution.rst +++ b/doc/src/user_guide/sql_execution.rst @@ -1114,8 +1114,8 @@ easily be executed with python-oracledb. For example: Do not concatenate or interpolate user data into SQL statements. See :ref:`bind` instead. -When handling multiple data values, use :meth:`Cursor.executemany()` for -performance. 
See :ref:`batchstmnt` +When handling multiple data values, use :meth:`Cursor.executemany()` or +:meth:`Connection.direct_path_load()` for performance. See :ref:`batchstmnt` By default data is not committed to the database and other users will not be able to see your changes until your connection commits them by calling diff --git a/doc/src/user_guide/tuning.rst b/doc/src/user_guide/tuning.rst index 57e28cc6..048e8308 100644 --- a/doc/src/user_guide/tuning.rst +++ b/doc/src/user_guide/tuning.rst @@ -21,9 +21,11 @@ Some general tuning tips are: Make use of efficient python-oracledb functions. For example, to insert multiple rows use :meth:`Cursor.executemany()` instead of - :meth:`Cursor.execute()`. Another example is to fetch data directly as - :ref:`data frames ` when working with packages like Pandas - and NumPy. + :meth:`Cursor.execute()`. Alternatively use + :meth:`Connection.direct_path_load()` for inserting very large + datasets. Another example is to fetch data directly as :ref:`data frames + ` instead of using the traditional query code path when + working with packages like Pandas and NumPy. * Tune your SQL statements. See the `SQL Tuning Guide `__. diff --git a/samples/data/load_csv_direct_path.csv b/samples/data/load_csv_direct_path.csv new file mode 100644 index 00000000..16322263 --- /dev/null +++ b/samples/data/load_csv_direct_path.csv @@ -0,0 +1,92 @@ +1,Biologist +2,Doctor +4,Executive Director +5,Laboratory Technician +6,Cashier +8,Investment Advisor +9,HR Coordinator +10,HR Specialist +11,Investment Advisor +12,Baker +13,Baker +14,Cashier +15,CNC Operator +16,Software Engineer +18,Auditor +19,Accountant +20,Auditor +21,Loan Officer +22,Bellman +23,Cashier +24,Baker +25,HR Coordinator +26,Operator +27,Service Supervisor +28,Lecturer +29,IT Support Staff +30,Staffing Consultant +31,Paramedic +32,Associate Professor +33,HR Coordinator +34,HR Coordinator +35,Restaurant Manager +36,Webmaster +38,Bellman +39,Design Engineer +40,Cashier +42,Steward +43,Fabricator +44,Inspector +45,Pharmacist +46,Loan Officer +47,Staffing Consultant +48,Chef Manager +49,Biologist +50,Food Technologist +51,Biologist +53,Laboratory Technician +54,Design Engineer +55,Fabricator +56,Lecturer +57,Loan Officer +58,Investment Advisor +59,Treasurer +60,Cash Manager +61,Audiologist +62,Fabricator +63,Systems Administrator +64,Health Educator +65,Fabricator +66,HR Specialist +67,Physician +68,Staffing Consultant +69,Fabricator +70,Physician +71,Auditor +72,Biologist +73,Budget Analyst +74,Bellman +75,Restaurant Manager +76,Stockbroker +77,Paramedic +78,Front Desk Coordinator +79,Cashier +80,CNC Operator +81,Design Engineer +82,Audiologist +83,CNC Operator +84,IT Support Staff +85,Paramedic +86,Fabricator +87,Laboratory Technician +88,Mobile Developer +90,Budget Analyst +91,Lecturer +92,Physician +93,Retail Trainee +94,Pharmacist +95,Service Supervisor +96,Production Painter +97,Cash Manager +99,Treasurer +100,Pharmacist diff --git a/samples/dataframe_insert.py b/samples/dataframe_insert.py index 80c91ed4..f2787c31 100644 --- a/samples/dataframe_insert.py +++ b/samples/dataframe_insert.py @@ -25,9 +25,9 @@ # ----------------------------------------------------------------------------- # dataframe_insert.py # -# Shows how executemany() can be used to insert a Pandas dataframe directly -# into Oracle Database. The same technique can be used with data frames from -# many other libraries. +# Shows how executemany() and Direct Path Loads can be used to insert a Pandas +# dataframe directly into Oracle Database. 
The same technique can be used with +# data frames from many other Python libraries. # ----------------------------------------------------------------------------- import sys @@ -50,7 +50,7 @@ # ----------------------------------------------------------------------------- # -# Inserting a simple DataFrame +# Inserting a simple DataFrame using executemany() with connection.cursor() as cursor: @@ -64,7 +64,38 @@ # efficient "Array DML" method cursor.executemany("insert into mytab (id, data) values (:1, :2)", pdf) - # Check data + # Check data using a non-DataFrame fetch + print("\nOracle Database Query:") + cursor.execute("select * from mytab order by id") + columns = [col.name for col in cursor.description] + print(columns) + for r in cursor: + print(r) + + # Clean up for the next example + cursor.execute("truncate table mytab") + +# ----------------------------------------------------------------------------- +# +# Inserting a DataFrame using Direct Path Loading + +with connection.cursor() as cursor: + + # Create a Pandas DataFrame + print("\nPandas Dataframe 2:") + d = {"A": [202, 412, 487], "B": ["Abi", "Jessie", "Fay"]} + pdf = pandas.DataFrame(data=d) + print(pdf) + + # Insert using Direct Path Loads + connection.direct_path_load( + schema_name=sample_env.get_main_user(), + table_name="mytab", + column_names=["id", "data"], + data=pdf, + ) + + # Check data using a non-DataFrame fetch print("\nOracle Database Query:") cursor.execute("select * from mytab order by id") columns = [col.name for col in cursor.description] @@ -74,7 +105,7 @@ # ----------------------------------------------------------------------------- # -# Inserting VECTORs +# Inserting a DataFrame with VECTORs using executemany() # The VECTOR example only works with Oracle Database 23.4 or later if sample_env.get_server_version() < (23, 4): @@ -91,7 +122,7 @@ with connection.cursor() as cursor: # Create a Pandas DataFrame - print("\nPandas Dataframe 2:") + print("\nPandas Dataframe 3:") d = {"v": [[3.3, 1.32, 5.0], [2.2, 2.32, 2.0]]} pdf = pandas.DataFrame(data=d) print(pdf) @@ -100,8 +131,10 @@ # efficient "Array DML" method cursor.executemany("insert into SampleVectorTab (v64) values (:1)", pdf) - # Check data + # Check data using a non-DataFrame fetch print("\nOracle Database Query:") cursor.execute("select v64 from SampleVectorTab order by id") - for (r,) in cursor: + columns = [col.name for col in cursor.description] + print(columns) + for r in cursor: print(r) diff --git a/samples/direct_path_load.py b/samples/direct_path_load.py new file mode 100644 index 00000000..22670596 --- /dev/null +++ b/samples/direct_path_load.py @@ -0,0 +1,67 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# direct_path_load.py +# +# Shows how Direct Path Loads can be used to insert into Oracle Database. +# +# The same technique can be used with data frames, see dataframe_insert.py +# An example of loading from a CSV file is in load_csv.py +# ----------------------------------------------------------------------------- + +import oracledb +import sample_env + +connection = oracledb.connect( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + params=sample_env.get_connect_params(), +) + +# ----------------------------------------------------------------------------- + +DATA = [ + (1, "A first row"), + (2, "A second row"), + (3, "A third row"), +] + +connection.direct_path_load( + schema_name=sample_env.get_main_user(), + table_name="mytab", + column_names=["id", "data"], + data=DATA, +) + +with connection.cursor() as cursor: + + # Check the data was inserted + sql = "select * from mytab" + for r in cursor.execute(sql): + print(r) + + # Clean up the table so the sample can be re-run + cursor.execute("truncate table mytab") diff --git a/samples/direct_path_load_async.py b/samples/direct_path_load_async.py new file mode 100644 index 00000000..7e7cae1a --- /dev/null +++ b/samples/direct_path_load_async.py @@ -0,0 +1,77 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# direct_path_load_async.py +# +# An asynchronous version of direct_path_load.py +# +# Shows how Direct Path Loads can be used to insert into Oracle Database. 
+# ----------------------------------------------------------------------------- + +import asyncio + +import oracledb +import sample_env + +# ----------------------------------------------------------------------------- + + +async def main(): + + connection = await oracledb.connect_async( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + params=sample_env.get_connect_params(), + ) + + DATA = [ + (1, "A first row"), + (2, "A second row"), + (3, "A third row"), + ] + + await connection.direct_path_load( + schema_name=sample_env.get_main_user(), + table_name="mytab", + column_names=["id", "data"], + data=DATA, + ) + + with connection.cursor() as cursor: + + await cursor.execute("select * from mytab") + async for r in cursor: + print(r) + # Check the data was inserted + await cursor.execute("select * from mytab") + async for r in cursor: + print(r) + + # Clean up the table so the sample can be re-run + await cursor.execute("truncate table mytab") + + +asyncio.run(main()) diff --git a/samples/load_csv.py b/samples/load_csv.py index 397f278f..fc95df33 100644 --- a/samples/load_csv.py +++ b/samples/load_csv.py @@ -25,11 +25,17 @@ # ----------------------------------------------------------------------------- # load_csv.py # -# A sample showing how to load CSV data. +# A sample showing how to load CSV data using two methods: +# - executemany() of data read with Python's CSV module +# - executemany() of data read into dataframes by PyArrow's CSV module +# +# To run the second example, install pyarrow: +# python -m pip install pyarrow --upgrade # ----------------------------------------------------------------------------- import csv import os +import sys import oracledb import sample_env @@ -38,15 +44,14 @@ if sample_env.run_in_thick_mode(): oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) -# CSV file. This sample file has both valid rows and some rows with data too +# CSV file. This sample file has both valid rows and some rows with data too # large to insert. FILE_NAME = os.path.join("data", "load_csv.csv") # Adjust the number of rows to be inserted in each iteration to meet your -# memory and performance requirements. Typically this is a large-ish value to -# reduce the number of calls to executemany() to a reasonable size. For this -# demo with a small CSV file a smaller number is used to show the looping -# behavior of the code. +# memory and performance requirements. Typically this is a large-ish value to +# reduce the number of calls to executemany(). For this demo with a small CSV +# file, a small batch size is used to show the looping behavior of the code. 
BATCH_SIZE = 19 connection = oracledb.connect( @@ -56,9 +61,14 @@ params=sample_env.get_connect_params(), ) +# ----------------------------------------------------------------------------- +# Loading using executemany() which uses Oracle Database's "Array DML" support + +print("\nLoading with executemany() using Python's CSV module") -def process_batch(batch_number, cursor, data): - print("processing batch", batch_number + 1) + +def process_batch_em(cursor, batch_number, data): + print("Processing batch", batch_number + 1) cursor.executemany(sql, data, batcherrors=True) for error in cursor.getbatcherrors(): line_num = (batch_number * BATCH_SIZE) + error.offset + 1 @@ -86,14 +96,62 @@ def process_batch(batch_number, cursor, data): for line in csv_reader: data.append((line[0], line[1])) if len(data) % BATCH_SIZE == 0: - process_batch(batch_number, cursor, data) + process_batch_em(cursor, batch_number, data) data = [] batch_number += 1 if data: - process_batch(batch_number, cursor, data) + process_batch_em(cursor, batch_number, data) # In a production system you might choose to fix any invalid rows, # re-insert them, and then commit. Or you could rollback everything. # In this sample we simply commit and ignore the invalid rows that # couldn't be inserted. connection.commit() + + # Show how many rows were inserted + cursor.execute("select count(*) from LoadCsvTab") + (r,) = cursor.fetchone() + print(f"\n{r} valid rows were inserted") + +# ----------------------------------------------------------------------------- +# Loading with PyArrow's CSV package using executemany() +# +# Using PyArrow to read into a dataframe for insertion can be faster than using +# the Python CSV module. + +try: + import pyarrow.csv +except Exception: + print("\nTo use pyarrow dataframes, install pyarrow.") + sys.exit() + +print("\nLoading with executemany() using PyArrow's CSV module") + +with connection.cursor() as cursor: + # Clean up the table for demonstration purposes + cursor.execute("truncate table LoadCsvTab") + + # PyArrow uses a byte size for batching. For this demo, a size is + # semi-arbitrarily set to give similar behavior to other loading examples + read_options = pyarrow.csv.ReadOptions( + column_names=["id", "name"], block_size=BATCH_SIZE * 17 + ) + + csv_reader = pyarrow.csv.open_csv(FILE_NAME, read_options=read_options) + batch_number = 0 + record_start = 0 + for df in csv_reader: + if df is None: + break + batch_number += 1 + print("Processing batch", batch_number) + cursor.executemany(sql, df, batcherrors=True) + for error in cursor.getbatcherrors(): + line_num = record_start + error.offset + 1 + print("Error", error.message, "at line", line_num) + record_start += len(df) + + # Show how many rows were inserted + cursor.execute("select count(*) from LoadCsvTab") + (r,) = cursor.fetchone() + print(f"\n{r} rows were inserted") diff --git a/samples/load_csv_direct_path.py b/samples/load_csv_direct_path.py new file mode 100644 index 00000000..ee1e3c2b --- /dev/null +++ b/samples/load_csv_direct_path.py @@ -0,0 +1,157 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. 
+# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# load_csv_direct_path.py +# +# A sample showing how to load CSV data using two methods: +# - Direct Path Load of data read with Python's CSV module +# - Direct Path Load of data from PyArrow dataframes +# +# This is related to load_csv.py but the CSV data file for Direct Path Load can +# only contain valid data. +# +# Direct Path Loads can be faster for very large data sets than the +# executemany() code shown in load_csv.py. +# +# To run the second example, install pyarrow: +# python -m pip install pyarrow --upgrade +# ----------------------------------------------------------------------------- + +import csv +import os +import sys + +import oracledb +import sample_env + +# determine whether to use python-oracledb thin mode or thick mode +if sample_env.run_in_thick_mode(): + oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) + +# CSV file. This sample file has both valid rows +FILE_NAME = os.path.join("data", "load_csv_direct_path.csv") + +# Adjust the number of rows to be inserted in each iteration to meet your +# memory and performance requirements. Typically this is a large-ish value to +# reduce the number of calls to executemany() or direct_path_load(). For this +# demo with a small CSV file, a small batch size is used to show the looping +# behavior of the code. 
+BATCH_SIZE = 19 + +connection = oracledb.connect( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + params=sample_env.get_connect_params(), +) + +# ----------------------------------------------------------------------------- +# Loading with Python's CSV package using Direct Path Loads + +print("\nDirect Path Load using Python's CSV module") + + +def process_batch_dpl(connection, batch_number, data): + print("Processing batch", batch_number + 1) + connection.direct_path_load( + schema_name=sample_env.get_main_user(), + table_name="LoadCsvTab", + column_names=["id", "name"], + data=data, + ) + + +with connection.cursor() as cursor: + # Clean up the table for demonstration purposes + cursor.execute("truncate table LoadCsvTab") + +# Loop over the data and insert it in batches +with open(FILE_NAME, "r") as csv_file: + csv_reader = csv.reader(csv_file, delimiter=",") + data = [] + batch_number = 0 + for line in csv_reader: + data.append((float(line[0]), line[1])) + if len(data) % BATCH_SIZE == 0: + process_batch_dpl(connection, batch_number, data) + data = [] + batch_number += 1 + if data: + process_batch_dpl(connection, batch_number, data) + +# Show how many rows were inserted +with connection.cursor() as cursor: + cursor.execute("select count(*) from LoadCsvTab") + (r,) = cursor.fetchone() + print(f"\n{r} rows were inserted") + +# ----------------------------------------------------------------------------- +# Loading with PyArrow's CSV package using Direct Path Loads +# +# Using Direct Path Load in conjunction with PyArrow's CSV package can be the +# fastest way to load huge datasets. + +try: + import pyarrow.csv +except Exception: + print("\nTo use pyarrow dataframes, install pyarrow.") + sys.exit() + +print("\nDirect Path Load using PyArrow's CSV module") + +with connection.cursor() as cursor: + # Clean up the table for demonstration purposes + cursor.execute("truncate table LoadCsvTab") + +# PyArrow uses a byte size for batching. For this demo, a size is +# semi-arbitrarily set to give similar behavior to other loading examples +read_options = pyarrow.csv.ReadOptions( + column_names=["id", "name"], block_size=BATCH_SIZE * 18 +) + +csv_reader = pyarrow.csv.open_csv(FILE_NAME, read_options=read_options) +batch_number = 0 +for df in csv_reader: + if df is None: + break + batch_number += 1 + print("Processing batch", batch_number) + connection.direct_path_load( + schema_name=sample_env.get_main_user(), + table_name="LoadCsvTab", + column_names=["id", "name"], + data=df, + ) + +with connection.cursor() as cursor: + + # Show how many rows were inserted + cursor.execute("select count(*) from LoadCsvTab") + (r,) = cursor.fetchone() + print(f"\n{r} rows were inserted") + + # Direct Path Load always commits so clean up the table for demonstration + # purposes + cursor.execute("truncate table LoadCsvTab") diff --git a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html index ff437319..b0c23f62 100644 --- a/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html +++ b/samples/tutorial/Python-and-Oracle-Database-The-New-Wave-of-Scripting.html @@ -2,7 +2,7 @@ Python and Oracle Database Tutorial: The New Wave of Scripting - + @@ -133,32 +133,38 @@

      Contents

    • 15.1 Working with scrollable cursors
    -
  • 16. Dataframes +
  • 16. DataFrames
      -
    • 16.1 Fetching Dataframes
    • -
    • 16.2 Inserting Dataframes
    • +
    • 16.1 Fetching DataFrames
    • +
    • 16.2 Inserting DataFrames
  • -
  • 17. Concurrent Programming with asyncio +
  • 17. Direct Path Loads
      -
    • 17.1 Using asyncio
    • +
    • 17.1 Using Direct Path Loads
    • +
    • 17.2 Inserting DataFrames with Direct Path Loads
  • -
  • 18. Pipelining multiple operations +
  • 18. Concurrent Programming with asyncio
      -
    • 18.1 Using Pipelining
    • +
    • 18.1 Using asyncio
  • -
  • 19. Python-oracledb Thick mode +
  • 19. Pipelining multiple operations
      -
    • 19.1 Review the Oracle Client library path
    • -
    • 19.2 Review the configuration files for thick mode
    • +
    • 19.1 Using Pipelining
  • -
  • 20. Simple Oracle Document Access (SODA) +
  • 20. Python-oracledb Thick mode
      -
    • 20.1 Inserting JSON Documents
    • -
    • 20.2 Searching SODA Documents
    • +
    • 20.1 Review the Oracle Client library path
    • +
    • 20.2 Review the configuration files for thick mode
    • +
    +
  • +
  • 21. Simple Oracle Document Access (SODA) +
      +
    • 21.1 Inserting JSON Documents
    • +
    • 21.2 Searching SODA Documents
  • Summary
  • @@ -206,7 +212,7 @@

    Python-oracledb Architecture

    • -

      Get Access to Oracle Database

      +

      Get Access to Oracle Database

      This tutorial assumes you have DBA access to Oracle Database. This is needed to grant some privileges and roles.

      @@ -267,7 +273,7 @@

      Download the tutorial scripts

      >here and unzip it. Alternatively you can use 'git' to clone the repository:

      -
      git clone https://github.com/oracle/python-oracledb.git
      +
      git clone --recursive https://github.com/oracle/python-oracledb.git

      The samples/tutorial directory has scripts to run and modify. The samples/tutorial/solutions directory has @@ -3012,7 +3018,7 @@

      15. Scrollable cursors

    -

    16. Dataframes

    +

    16. DataFrames

    Python-oracledb has a DataFrame class that exposes an Apache Arrow PyCapsule interface. Data can be fetched directly from Oracle Database into DataFrame @@ -3032,7 +3038,7 @@

    16. Dataframes

    python -m pip install pyarrow pandas --upgrade
      -
    • 16.1 Fetching Dataframes

      +
    • 16.1 Fetching DataFrames

      Review the code contained in query_pandas.py:

      @@ -3085,14 +3091,13 @@

      16. Dataframes

    • -
    • 16.2 Inserting Dataframes

      +
    • 16.2 Inserting DataFrames

      Python-oracledb DataFrame instances, and instances of DataFrames from - third-party libraries that support the Apache Arrow PyCapsule Interface, + third-party libraries that support the Apache Arrow PyCapsule Interface can be inserted into Oracle Database by passing them directly to executemany().

      -

      Edit query_pandas.py and add this code at the bottom:

      @@ -3119,11 +3124,120 @@ 

      16. Dataframes

      inserts the DataFrame into Oracle Database. The new salaries are queried back to confirm the insertion was successful.

      +

      You can also insert DataFrames using Oracle Database Direct Path Loads, + as shown in a later section.

    • + +
    + +

    17. Direct Path Loads

    + +

    Oracle Database Direct Path Loads allow data being inserted to bypass code layers such as the database buffer cache. No INSERT statements are used. This can be very efficient for ingestion of huge amounts of data. Documentation link for further reading: Direct Path Loads

    + +
      +
    • 17.1 Using Direct Path Loads

      + +

      The python-oracledb method connection.direct_path_load() + accepts the schema name, table name, column names, and the data to be + loaded.

      + +

      Review the code contained in direct_path.py:

      + +
      import oracledb
      +import db_config
      +
      +con = oracledb.connect(user=db_config.user, password=db_config.pw, dsn=db_config.dsn)
      +
      +cur = con.cursor()
      +
      +# Create table
      +cur.execute(
      +    """
      +    begin
      +        execute immediate 'drop table testdpl';
      +    exception when others then
      +        if sqlcode <> -942 then
      +            raise;
      +        end if;
      +    end;
      +    """
      +)
      +cur.execute(
      +    """
      +    create table testdpl (
      +        id   number(9),
      +        name varchar2(100)
      +    )
      +    """
      +)
      +
      +DATA = [
      +    (1, "Adelaide"),
      +    (2, "Brisbane"),
      +    (3, "Canberra"),
      +]
      +
      +con.direct_path_load(
      +    schema_name=db_config.user,
      +    table_name="testdpl",
      +    column_names=["id", "name"],
      +    data=DATA,
      +)
      +
      +# Check the data was inserted
      +sql = "select * from testdpl"
      +for r in cur.execute(sql):
      +    print(r)
      +
      + +

      Run the script in a terminal window:

      + +
      python direct_path.py
      + +

      This inserts the data into the new table and queries it back to + confirm.

      + +

      For small numbers of rows, the elapsed time to insert data may not be + better than using executemany(). For larger numbers of rows + there can be time benefits and also reduced load on the database.

      +
    • +
    • 17.2 Inserting DataFrames with Direct Path Loads

      + +

      DataFrames from third-party libraries that support the Apache Arrow + PyCapsule Interface can be inserted into Oracle Database by passing them + directly to direct_path_load().

      + +

      For this example, the pandas package needs to be installed:

      + +
      python -m pip install pandas --upgrade
      + +

      Edit direct_path.py and import the Pandas package at the top:

      + +
      import pandas
      + +

      Change the data creation to use a Pandas DataFrame instead of the list + of tuples:

      + +
      d = {"A": [202, 412, 487], "B": ["Anna", "Bidisha", "Charlie"]}
      +DATA = pandas.DataFrame(data=d)
      +
      + +

      Run the script in a terminal window:

      + +
      python direct_path.py
      + +

      This displays the inserted values from the DataFrame.

      + +
    -

    17. Concurrent Programming with asyncio

    +

    18. Concurrent Programming with asyncio

    The Asynchronous I/O (asyncio) Python library can be used in python-oracledb Thin mode for concurrent programming. This library allows you to run operations @@ -3139,7 +3253,7 @@

    17. Concurrent Programming with asyncio

    Programming with asyncio
    .

    -

    18. Pipelining multiple operations

    +

    19. Pipelining multiple operations

    Pipelining allows python-oracledb Thin mode applications to send multiple, independent statements to Oracle Database with one call. The database is kept @@ -3236,7 +3350,7 @@

    18. Pipelining multiple operations

    Database Operations
    .

    -

    19. Python-oracledb Thick mode

    +

    20. Python-oracledb Thick mode

    All the above examples were run in python-oracledb's default Thin mode that connects directly to Oracle Database. Most could also have been run in Thick @@ -3325,7 +3439,7 @@

    19. Python-oracledb Thick mode

    • -

      19.1 Review the Oracle Client library path

      +

      20.1 Review the Oracle Client library path

      You additionally need to make Oracle Client libraries available. Follow the documentation on 19.1 Review the Oracle Client library path

    • -

      19.2 Review the configuration files for thick mode

      +

      20.2 Review the configuration files for thick mode

      Review db_config_thick.py (thick mode), and sql/db_config.sql files in the tutorial directory. @@ -3451,7 +3565,7 @@

      19.2 Review the configuration files for thick mode

      The following section is specific to the python-oracledb Thick mode in this release of python-oracledb.

      -

      20. Simple Oracle Document Access (SODA)

      +

      21. Simple Oracle Document Access (SODA)

      Simple Oracle Document Access (SODA) is a set of NoSQL-style APIs. Documents can be inserted, queried, and retrieved from Oracle Database. By @@ -3464,7 +3578,7 @@

      20. Simple Oracle Document Access (SODA)

      • -

        20.1 Inserting JSON Documents

        +

        21.1 Inserting JSON Documents

        Review soda.py :

        @@ -3535,7 +3649,7 @@

        20.1 Inserting JSON Documents

      • -

        20.2 Searching SODA Documents

        +

        21.2 Searching SODA Documents

        Extend soda.py to insert some more documents and perform a find filter operation:

        diff --git a/samples/tutorial/async_gather.py b/samples/tutorial/async_gather.py index c60180a4..4f7ed64e 100644 --- a/samples/tutorial/async_gather.py +++ b/samples/tutorial/async_gather.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# async_gather.py (Section 17.1) +# async_gather.py (Section 18.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- diff --git a/samples/tutorial/direct_path.py b/samples/tutorial/direct_path.py new file mode 100644 index 00000000..e9ad3185 --- /dev/null +++ b/samples/tutorial/direct_path.py @@ -0,0 +1,76 @@ +# ----------------------------------------------------------------------------- +# direct_path.py (Section 17.1) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) + +cur = con.cursor() + +# Create table +cur.execute( + """ + begin + execute immediate 'drop table testdpl'; + exception when others then + if sqlcode <> -942 then + raise; + end if; + end; + """ +) +cur.execute( + """ + create table testdpl ( + id number(9), + name varchar2(100) + ) + """ +) + +DATA = [ + (1, "Adelaide"), + (2, "Brisbane"), + (3, "Canberra"), +] + +con.direct_path_load( + schema_name=db_config.user, + table_name="testdpl", + column_names=["id", "name"], + data=DATA, +) + +# Check the data was inserted +cur.execute("select * from testdpl") +rows = cur.fetchall() +for row in rows: + print(row) diff --git a/samples/tutorial/pipelining.py b/samples/tutorial/pipelining.py index fb0a989f..4b5ef68c 100644 --- a/samples/tutorial/pipelining.py +++ b/samples/tutorial/pipelining.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# pipelining.py (Section 18.1) +# pipelining.py (Section 19.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- diff --git a/samples/tutorial/soda.py b/samples/tutorial/soda.py index 83c7b640..518506ad 100644 --- a/samples/tutorial/soda.py +++ b/samples/tutorial/soda.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# soda.py (Section 20.1) +# soda.py (Section 21.1) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- diff --git a/samples/tutorial/solutions/direct_path.py b/samples/tutorial/solutions/direct_path.py new file mode 100644 index 00000000..4e25635c --- /dev/null +++ b/samples/tutorial/solutions/direct_path.py @@ -0,0 +1,74 @@ +# ----------------------------------------------------------------------------- +# direct_path.py (Section 17.2) +# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +import pandas +import oracledb +import db_config + +con = oracledb.connect( + user=db_config.user, password=db_config.pw, dsn=db_config.dsn +) + +cur = con.cursor() + +# Create table +cur.execute( + """ + begin + execute immediate 'drop table testdpl'; + exception when others then + if sqlcode <> -942 then + raise; + end if; + end; + """ +) +cur.execute( + """ + create table testdpl ( + id number(9), + name varchar2(100) + ) + """ +) + +d = {"A": [202, 412, 487], "B": ["Anna", "Bidisha", "Charlie"]} +DATA = pandas.DataFrame(data=d) + +con.direct_path_load( + schema_name=db_config.user, + table_name="testdpl", + column_names=["id", "name"], + data=DATA, +) + +# Check the data was inserted +cur.execute("select * from testdpl") +rows = cur.fetchall() +for row in rows: + print(row) diff --git a/samples/tutorial/solutions/soda.py b/samples/tutorial/solutions/soda.py index 510e8979..8bc2ef13 100644 --- a/samples/tutorial/solutions/soda.py +++ b/samples/tutorial/solutions/soda.py @@ -1,5 +1,5 @@ # ----------------------------------------------------------------------------- -# soda.py (Section 20.2) +# soda.py (Section 21.2) # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index f8601f49..4ee0faa3 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -109,6 +109,7 @@ cdef class ArrowSchemaImpl: cdef bint _is_sparse_vector(self) except* cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1 cdef int _set_time_unit(self, ArrowTimeUnit time_unit) except -1 + cdef str get_type_name(self) cdef int populate_from_schema(self, ArrowSchema* schema) except -1 cdef int populate_from_metadata(self, ArrowType arrow_type, str name, int8_t precision, int8_t scale, diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index a839d77f..54ae3916 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -235,6 +235,7 @@ cdef class DbType: uint8_t _ora_type_num uint8_t _csfrm uint8_t _default_py_type_num + bint _is_fast @staticmethod cdef DbType _from_num(uint32_t num) @@ -277,8 +278,23 @@ cdef class BatchLoadManager: object conn cdef int _calculate_num_rows_in_batch(self, uint64_t total_rows) except -1 + @staticmethod + cdef BatchLoadManager _create( + object parameters, + uint32_t batch_size, + int error_num, + ) + cdef list _get_all_rows(self) + cdef list _get_arrow_arrays(self) cdef int _next_batch(self) except -1 cdef int _setup_cursor(self) except -1 + cdef int _verify_metadata(self, list column_metadata) except -1 + @staticmethod + cdef BatchLoadManager create_for_direct_path_load( + object parameters, + list column_metadata, + uint32_t batch_size, + ) @staticmethod cdef BatchLoadManager create_for_executemany( object cursor, @@ -348,6 +364,8 @@ cdef class Buffer: cdef int write_interval_ym(self, object value) except -1 cdef int write_oracle_date(self, object value, uint8_t length) except -1 cdef int write_oracle_number(self, bytes num_bytes) except -1 + cdef int write_oson(self, value, ssize_t max_fname_size, + bint write_length=*) except -1 cdef int write_raw(self, const char_type *data, ssize_t length) except -1 cdef int write_sb4(self, int32_t value) except -1 cdef int write_str(self, str value) except -1 @@ -359,6 +377,7 @@ cdef class Buffer: cdef int write_ub2(self, 
uint16_t value) except -1 cdef int write_ub4(self, uint32_t value) except -1 cdef int write_ub8(self, uint64_t value) except -1 + cdef int write_vector(self, value) except -1 cdef class GrowableBuffer(Buffer): @@ -475,6 +494,7 @@ cdef class OracleMetadata: cdef int _create_arrow_schema(self) except -1 cdef int _finalize_init(self) except -1 + cdef int _set_arrow_schema(self, ArrowSchemaImpl schema_impl) except -1 cdef OracleMetadata copy(self) @staticmethod cdef OracleMetadata from_arrow_schema(ArrowSchemaImpl schema_impl) diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 6d4d365b..012f2561 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -1067,6 +1067,32 @@ def cursor(self, scrollable: bool = False) -> Cursor: self._verify_connected() return Cursor(self, scrollable) + def direct_path_load( + self, + schema_name: str, + table_name: str, + column_names: list[str], + data: Any, + *, + batch_size: int = 2**32 - 1, + ) -> None: + """ + Load data into Oracle Database using the Direct Path Load interface. + It is available only in python-oracledb Thin mode. + + The ``data`` parameter can be a list of sequences, a DataFrame, or a + third-party DataFrame instance that supports the Apache Arrow PyCapsule + Interface. + + The ``batch_size`` parameter is used to split large data sets into + smaller pieces for sending to the database. It is the number of records + in each batch. This parameter can be used to tune performance. + """ + self._verify_connected() + self._impl.direct_path_load( + schema_name, table_name, column_names, data, batch_size + ) + def fetch_df_all( self, statement: str, @@ -2359,6 +2385,32 @@ async def fetchall( cursor.rowfactory = rowfactory return await cursor.fetchall() + async def direct_path_load( + self, + schema_name: str, + table_name: str, + column_names: list[str], + data: Any, + *, + batch_size: int = 2**32 - 1, + ) -> None: + """ + Load data into Oracle Database using the Direct Path Load interface. + It is available only in python-oracledb Thin mode. + + The ``data`` parameter can be a list of sequences, a DataFrame, or a + third-party DataFrame instance that supports the Apache Arrow PyCapsule + Interface. + + The ``batch_size`` parameter is used to split large data sets into + smaller pieces for sending to the database. It is the number of records + in each batch. This parameter can be used to tune performance. 
+ """ + self._verify_connected() + await self._impl.direct_path_load( + schema_name, table_name, column_names, data, batch_size + ) + async def fetch_df_all( self, statement: str, diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index be4b71a2..972b35f1 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -290,6 +290,7 @@ def _raise_not_supported(feature: str) -> None: ERR_POOL_MAX_LESS_THAN_MIN = 2064 ERR_ARROW_SPARSE_VECTOR_NOT_ALLOWED = 2065 ERR_EMPTY_STATEMENT = 2066 +ERR_WRONG_DIRECT_PATH_DATA_TYPE = 2067 # error numbers that result in NotSupportedError ERR_TIME_NOT_SUPPORTED = 3000 @@ -328,6 +329,8 @@ def _raise_not_supported(feature: str) -> None: ERR_SESSIONLESS_DIFFERING_METHODS = 3034 ERR_SESSIONLESS_ALREADY_ACTIVE = 3035 ERR_SESSIONLESS_INACTIVE = 3036 +ERR_UNSUPPORTED_ARROW_TYPE = 3037 +ERR_UNSUPPORTED_ARROW_TYPE_FOR_DB_TYPE = 3038 # error numbers that result in DatabaseError ERR_TNS_ENTRY_NOT_FOUND = 4000 @@ -395,6 +398,10 @@ def _raise_not_supported(feature: str) -> None: # error numbers that result in Warning WRN_COMPILATION_ERROR = 7000 +# error numbers that result in DataError +ERR_VALUE_TOO_LARGE = 8000 +ERR_NULLS_NOT_ALLOWED = 8001 + # Oracle error number cross reference ERR_ORACLE_ERROR_XREF = { 22: ERR_CONNECTION_CLOSED, @@ -530,6 +537,7 @@ def _raise_not_supported(feature: str) -> None: 5: exceptions.InternalError, 6: exceptions.OperationalError, 7: exceptions.Warning, + 8: exceptions.DataError, } # error messages that have a troubleshooting section available @@ -795,6 +803,9 @@ def _raise_not_supported(feature: str) -> None: ERR_NOT_A_QUERY: "the executed statement does not return rows", ERR_NOT_CONNECTED: "not connected to database", ERR_NOT_IMPLEMENTED: "not implemented", + ERR_NULLS_NOT_ALLOWED: ( + 'value for column "{column_name}" may not be null on row {row_num}' + ), ERR_NUMBER_STRING_OF_ZERO_LENGTH: "invalid number: zero length string", ERR_NUMBER_STRING_TOO_LONG: "invalid number: string too long", ERR_NUMBER_WITH_EMPTY_EXPONENT: "invalid number: empty exponent", @@ -909,6 +920,11 @@ def _raise_not_supported(feature: str) -> None: ERR_UNKNOWN_TRANSACTION_SYNC_VERSION: ( "internal error: unknown transaction sync version {version}" ), + ERR_UNSUPPORTED_ARROW_TYPE: 'unsupported Arrow type "{arrow_type}"', + ERR_UNSUPPORTED_ARROW_TYPE_FOR_DB_TYPE: ( + 'Arrow type "{arrow_type}" cannot be converted to database type ' + '"{db_type}"' + ), ERR_UNSUPPORTED_INBAND_NOTIFICATION: ( "unsupported in-band notification with error number {err_num}" ), @@ -924,6 +940,10 @@ def _raise_not_supported(feature: str) -> None: "password verifier type 0x{verifier_type:x} is not supported by " "python-oracledb in thin mode" ), + ERR_VALUE_TOO_LARGE: ( + "value of size {actual_size} exeeds maximum allowed size of " + '{max_size} for column "{column_name}" of row {row_num}' + ), ERR_VECTOR_FORMAT_NOT_SUPPORTED: ( "VECTOR type {vector_format} is not supported" ), @@ -934,6 +954,10 @@ def _raise_not_supported(feature: str) -> None: ERR_WRONG_ARRAY_DEFINITION: ( "expecting a list of two elements [type, numelems]" ), + ERR_WRONG_DIRECT_PATH_DATA_TYPE: ( + "expecting a list or an object implementing the Arrow PyCapsule " + "interface __arrow_c_stream__()" + ), ERR_WRONG_EXECUTE_PARAMETERS_TYPE: ( "expecting a dictionary, list or tuple, or keyword args" ), diff --git a/src/oracledb/impl/arrow/schema.pyx b/src/oracledb/impl/arrow/schema.pyx index ea4dae0d..68e4caa7 100644 --- a/src/oracledb/impl/arrow/schema.pyx +++ b/src/oracledb/impl/arrow/schema.pyx @@ -112,6 
+112,12 @@ cdef class ArrowSchemaImpl: schema_impl.populate_from_schema(arrow_schema) return schema_impl + cdef str get_type_name(self): + """ + Returns a string representation of the Arrow type. + """ + return ArrowTypeString(self.arrow_type).decode() + cdef int populate_from_schema(self, ArrowSchema* schema) except -1: """ Populate the schema from another schema. diff --git a/src/oracledb/impl/arrow/utils.pyx b/src/oracledb/impl/arrow/utils.pyx index 6f55da86..5773151f 100644 --- a/src/oracledb/impl/arrow/utils.pyx +++ b/src/oracledb/impl/arrow/utils.pyx @@ -150,6 +150,7 @@ cdef extern from "nanoarrow.c": ArrowErrorCode ArrowSchemaViewInit(ArrowSchemaView* schema_view, const ArrowSchema* schema, ArrowError* error) + const char* ArrowTypeString(ArrowType type) cdef int _check_nanoarrow(int code) except -1: """ diff --git a/src/oracledb/impl/base/batch_load_manager.pyx b/src/oracledb/impl/base/batch_load_manager.pyx index 3ed57ebb..39033730 100644 --- a/src/oracledb/impl/base/batch_load_manager.pyx +++ b/src/oracledb/impl/base/batch_load_manager.pyx @@ -38,6 +38,54 @@ cdef class BatchLoadManager: cdef uint64_t rows_remaining = total_rows - self.offset self.num_rows = min(rows_remaining, self.batch_size) + @staticmethod + cdef BatchLoadManager _create(object parameters, uint32_t batch_size, + int error_num): + """ + Creates a batch manager from the parameters and batch size. + """ + cdef DataFrameImpl df_impl + + # batch size must be a positive integer + if batch_size == 0: + raise TypeError("batch_size must be a positive integer") + + # if parameters are an int, the value refers to the number of times to + # execute the statement + if isinstance(parameters, int): + return PrePopulatedBatchLoadManager.create(parameters) + + # if parameters are a list, the value refers to the actual data that is + # to be loaded + elif isinstance(parameters, list): + return FullDataBatchLoadManager.create(parameters) + + # if parameters are an Oracle dataframe we can use it directly + elif isinstance(parameters, PY_TYPE_DATAFRAME): + return DataFrameBatchLoadManager.create(parameters._impl) + + # if parameters implement the Arrow PyCapsule stream interface, convert + # it to an Oracle dataframe for further processing + elif hasattr(parameters, "__arrow_c_stream__"): + df_impl = DataFrameImpl.from_arrow_stream(parameters) + return DataFrameBatchLoadManager.create(df_impl) + + # the parameters are of an unknown type + errors._raise_err(error_num) + + cdef list _get_all_rows(self): + """ + Returns the set of rows associated with the batch load manager, if + applicable. + """ + pass + + cdef list _get_arrow_arrays(self): + """ + Returns the Arrow arrays containing the data for the current batch. + """ + pass + cdef int _next_batch(self) except -1: """ Goes to the next batch in the set of data, if applicable. @@ -51,6 +99,36 @@ cdef class BatchLoadManager: """ pass + cdef int _verify_metadata(self, list column_metadata) except -1: + """ + Called after the manager has been populated and helps verify the column + metadata is consistent with the data being loaded. + """ + pass + + @staticmethod + cdef BatchLoadManager create_for_direct_path_load( + object parameters, + list column_metadata, + uint32_t batch_size, + ): + """ + Creates a batch load manager object for calling conn.direct_path_load() + with the given parameters. This allows splitting large source arrays + into multiple chunks and also supports data frames with multiple + chunks. 
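Because anything exposing __arrow_c_stream__ is routed through DataFrameImpl.from_arrow_stream(), a third-party Arrow table can be handed to direct_path_load() without an intermediate copy. A minimal sketch, assuming a recent pyarrow and the same illustrative HR.MY_TABLE as above:

    import pyarrow

    # pyarrow.Table implements the Arrow PyCapsule stream interface
    # (__arrow_c_stream__), so it is accepted directly as the data argument
    tbl = pyarrow.table(
        {
            "ID": pyarrow.array([1, 2, 3], pyarrow.int64()),
            "NAME": pyarrow.array(["Alice", "Bob", "Charlie"], pyarrow.string()),
        }
    )
    conn.direct_path_load(          # conn: an open (sync) oracledb connection
        schema_name="HR",
        table_name="MY_TABLE",
        column_names=["ID", "NAME"],
        data=tbl,
    )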
+ """ + cdef BatchLoadManager manager + manager = BatchLoadManager._create( + parameters, + batch_size, + errors.ERR_WRONG_DIRECT_PATH_DATA_TYPE, + ) + manager.batch_size = batch_size + manager._verify_metadata(column_metadata) + manager._next_batch() + return manager + @staticmethod cdef BatchLoadManager create_for_executemany( object cursor, @@ -64,40 +142,14 @@ cdef class BatchLoadManager: into multiple chunks and also supports data frames with multiple chunks. """ - cdef: - BatchLoadManager manager - DataFrameImpl df_impl - - # batch size must be a positive integer - if batch_size == 0: - raise TypeError("batch_size must be a positive integer") + cdef BatchLoadManager manager # create and populate manager object - # if parameters are an instance, the value refers to the number of - # times to execute the statement - if isinstance(parameters, int): - manager = PrePopulatedBatchLoadManager.create(parameters) - - # if parameters are a list, the value refers to the actual data that is - # to be loaded - elif isinstance(parameters, list): - manager = FullDataBatchLoadManager.create(parameters) - - # if parameters are an Oracle dataframe we can use it directly - elif isinstance(parameters, PY_TYPE_DATAFRAME): - manager = DataFrameBatchLoadManager.create(parameters._impl) - - # if parameters implement the Arrow PyCapsule stream interface, convert - # it to an Oracle dataframe for further processing - elif hasattr(parameters, "__arrow_c_stream__"): - df_impl = DataFrameImpl.from_arrow_stream(parameters) - manager = DataFrameBatchLoadManager.create(df_impl) - - # the parameters are of an unknown type - else: - errors._raise_err(errors.ERR_WRONG_EXECUTEMANY_PARAMETERS_TYPE) - - # setup cursor + manager = BatchLoadManager._create( + parameters, + batch_size, + errors.ERR_WRONG_EXECUTEMANY_PARAMETERS_TYPE, + ) manager.cursor_impl = cursor_impl manager.cursor = cursor manager.conn = cursor.connection @@ -140,6 +192,13 @@ cdef class DataFrameBatchLoadManager(BatchLoadManager): array_impl.get_length(&num_rows) self.num_rows_in_chunk = num_rows + cdef list _get_arrow_arrays(self): + """ + Returns the Arrow arrays containing the data for the current batch. + """ + cdef list arrays = self.df_impl.arrays + return arrays[self.chunk_index:self.chunk_index + self.num_cols] + cdef int _next_batch(self) except -1: """ Goes to the next batch of data. @@ -153,7 +212,7 @@ cdef class DataFrameBatchLoadManager(BatchLoadManager): self._calculate_num_rows_in_batch(self.num_rows_in_chunk) if self.num_rows == 0: self._next_chunk() - if self.num_rows > 0: + if self.num_rows > 0 and self.cursor_impl is not None: for i, bind_var in enumerate(self.cursor_impl.bind_vars): array_impl = self.df_impl.arrays[self.chunk_index + i] bind_var.var_impl._arrow_array = array_impl @@ -192,6 +251,25 @@ cdef class DataFrameBatchLoadManager(BatchLoadManager): bind_var.var_impl = var_impl self.cursor_impl.bind_vars.append(bind_var) + cdef int _verify_metadata(self, list column_metadata) except -1: + """ + Called after the manager has been populated and helps verify the column + metadata is consistent with the data being loaded. 
+ """ + cdef: + ArrowSchemaImpl schema_impl + OracleMetadata metadata + if len(column_metadata) != len(self.df_impl.schema_impls): + errors._raise_err( + errors.ERR_WRONG_NUMBER_OF_POSITIONAL_BINDS, + expected_num=len(column_metadata), + actual_num=len(self.df_impl.schema_impls) + ) + if self.num_chunks > 0: + for metadata, schema_impl in \ + zip(column_metadata, self.df_impl.schema_impls): + metadata._set_arrow_schema(schema_impl) + @staticmethod cdef BatchLoadManager create(DataFrameImpl df_impl): """ @@ -202,7 +280,8 @@ cdef class DataFrameBatchLoadManager(BatchLoadManager): m.df_impl = df_impl m.num_cols = len(df_impl.schema_impls) m.num_chunks = len(df_impl.arrays) // m.num_cols - m._calculate_num_rows_in_chunk() + if m.num_chunks > 0: + m._calculate_num_rows_in_chunk() return m @@ -213,6 +292,13 @@ cdef class FullDataBatchLoadManager(BatchLoadManager): uint64_t total_num_rows object type_handler + cdef list _get_all_rows(self): + """ + Returns the set of rows associated with the batch load manager, if + applicable. + """ + return self.all_rows + cdef int _next_batch(self) except -1: """ Goes to the next batch of data. @@ -222,14 +308,15 @@ cdef class FullDataBatchLoadManager(BatchLoadManager): object row ssize_t i self._calculate_num_rows_in_batch(self.total_num_rows) - self.cursor_impl._reset_bind_vars(self.offset, self.num_rows) - for i in range(self.num_rows): - if i == self.num_rows - 1: - defer_type_assignment = False - row = self.all_rows[self.offset + i] - self.cursor_impl._bind_values(self.cursor, self.type_handler, - row, self.num_rows, i, - defer_type_assignment) + if self.cursor_impl is not None: + self.cursor_impl._reset_bind_vars(self.offset, self.num_rows) + for i in range(self.num_rows): + if i == self.num_rows - 1: + defer_type_assignment = False + row = self.all_rows[self.offset + i] + self.cursor_impl._bind_values(self.cursor, self.type_handler, + row, self.num_rows, i, + defer_type_assignment) cdef int _setup_cursor(self) except -1: """ @@ -238,6 +325,23 @@ cdef class FullDataBatchLoadManager(BatchLoadManager): """ self.type_handler = self.cursor_impl._get_input_type_handler() + cdef int _verify_metadata(self, list column_metadata) except -1: + """ + Called after the manager has been populated and helps verify the column + metadata is consistent with the data being loaded. + """ + cdef: + ssize_t num_columns + object row + num_columns = len(column_metadata) + for row in self.all_rows: + if len(row) != num_columns: + errors._raise_err( + errors.ERR_WRONG_NUMBER_OF_POSITIONAL_BINDS, + expected_num=num_columns, + actual_num=len(row) + ) + @staticmethod cdef BatchLoadManager create(list all_rows): """ diff --git a/src/oracledb/impl/base/buffer.pyx b/src/oracledb/impl/base/buffer.pyx index 7f3f7cdb..ac2bfd2c 100644 --- a/src/oracledb/impl/base/buffer.pyx +++ b/src/oracledb/impl/base/buffer.pyx @@ -567,6 +567,16 @@ cdef class Buffer: encode_number(buf, &buflen, num_bytes) self._write_raw_bytes_and_length(buf, buflen) + cdef int write_oson(self, value, ssize_t max_fname_size, + bint write_length=True) except -1: + """ + Encodes the given value to OSON and then writes that to the buffer. + it. + """ + cdef OsonEncoder encoder = OsonEncoder.__new__(OsonEncoder) + encoder.encode(value, max_fname_size) + self._write_raw_bytes_and_length(encoder._data, encoder._pos) + cdef int write_raw(self, const char_type *data, ssize_t length) except -1: """ Writes raw bytes of the specified length to the buffer. 
@@ -702,6 +712,14 @@ cdef class Buffer: self.write_uint8(8) self.write_uint64be(value) + cdef int write_vector(self, value) except -1: + """ + Encodes the given value to VECTOR and then writes that to the buffer. + """ + cdef VectorEncoder encoder = VectorEncoder.__new__(VectorEncoder) + encoder.encode(value) + self._write_raw_bytes_and_length(encoder._data, encoder._pos) + cdef class GrowableBuffer(Buffer): diff --git a/src/oracledb/impl/base/connection.pyx b/src/oracledb/impl/base/connection.pyx index a9cdb07a..0c43100b 100644 --- a/src/oracledb/impl/base/connection.pyx +++ b/src/oracledb/impl/base/connection.pyx @@ -245,6 +245,13 @@ cdef class BaseConnImpl: def create_temp_lob_impl(self, DbType dbtype): errors._raise_not_supported("creating a temporary LOB") + def direct_path_load(self, str schema_name, str table_name, + list column_names, object data, + uint32_t batch_size): + errors._raise_not_supported( + "loading data via the Direct Path Load interface" + ) + def get_call_timeout(self): errors._raise_not_supported("getting the call timeout") diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index cbaca4ac..427a8377 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -39,6 +39,7 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, cdef: int64_t int_value, days, seconds, useconds SparseVectorImpl sparse_impl + uint32_t db_type_num ArrowType arrow_type uint64_t uint_value OracleRawBytes* rb @@ -48,6 +49,7 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, char buf[21] arrow_type = metadata._schema_impl.arrow_type + db_type_num = metadata.dbtype.num if arrow_type in ( NANOARROW_TYPE_INT8, NANOARROW_TYPE_INT16, @@ -74,10 +76,21 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, return temp_bytes elif arrow_type == NANOARROW_TYPE_DOUBLE: array_impl.get_double(array_index, &data.is_null, - &data.buffer.as_double) + &data.buffer.as_double) + if db_type_num == DB_TYPE_NUM_NUMBER: + temp_bytes = str(data.buffer.as_double).encode() + convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + return temp_bytes + elif db_type_num == DB_TYPE_NUM_BINARY_FLOAT: + data.buffer.as_float = data.buffer.as_double elif arrow_type == NANOARROW_TYPE_FLOAT: - array_impl.get_float(array_index, &data.is_null, - &data.buffer.as_float) + array_impl.get_float(array_index, &data.is_null, &data.buffer.as_float) + if db_type_num == DB_TYPE_NUM_NUMBER: + temp_bytes = str(data.buffer.as_float).encode() + convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + return temp_bytes + elif db_type_num == DB_TYPE_NUM_BINARY_DOUBLE: + data.buffer.as_double = data.buffer.as_float elif arrow_type == NANOARROW_TYPE_BOOL: array_impl.get_bool(array_index, &data.is_null, &data.buffer.as_bool) elif arrow_type in ( @@ -89,7 +102,13 @@ cdef object convert_arrow_to_oracle_data(OracleMetadata metadata, ): rb = &data.buffer.as_raw_bytes array_impl.get_bytes(array_index, &data.is_null, &rb.ptr, - &rb.num_bytes) + &rb.num_bytes) + if rb.num_bytes == 0: + data.is_null = True + elif db_type_num == DB_TYPE_NUM_LONG_NVARCHAR: + temp_bytes = rb.ptr[:rb.num_bytes].decode().encode(ENCODING_UTF16) + convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + return temp_bytes elif arrow_type in (NANOARROW_TYPE_TIMESTAMP, NANOARROW_TYPE_DATE64): array_impl.get_int(arrow_type, array_index, &data.is_null, &int_value) if not data.is_null: @@ -577,6 +596,8 @@ cdef object convert_python_to_oracle_data(OracleMetadata 
metadata, else: temp_bytes = ( value).encode(ENCODING_UTF16) convert_bytes_to_oracle_data(&data.buffer, temp_bytes) + if data.buffer.as_raw_bytes.num_bytes == 0: + data.is_null = True return temp_bytes elif ora_type_num in (ORA_TYPE_NUM_RAW, ORA_TYPE_NUM_LONG_RAW): convert_bytes_to_oracle_data(&data.buffer, value) diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 43981f04..63724e8d 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -131,6 +131,71 @@ cdef class OracleMetadata: else: self._py_type_num = PY_TYPE_NUM_FLOAT + cdef int _set_arrow_schema(self, ArrowSchemaImpl schema_impl) except -1: + """ + Sets an Arrow schema, which checks to see that the Arrow type is + compatible with the database type. + """ + cdef: + ArrowType arrow_type = schema_impl.arrow_type + uint32_t db_type_num = self.dbtype.num + bint ok = False + + if arrow_type in (NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_LARGE_BINARY): + if db_type_num in (DB_TYPE_NUM_RAW, DB_TYPE_NUM_LONG_RAW): + ok = True + elif arrow_type == NANOARROW_TYPE_BOOL: + if db_type_num in (DB_TYPE_NUM_BOOLEAN): + ok = True + elif arrow_type in (NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, + NANOARROW_TYPE_INT64, + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64): + if db_type_num == DB_TYPE_NUM_NUMBER: + ok = True + elif arrow_type in (NANOARROW_TYPE_DATE32, + NANOARROW_TYPE_DATE64, + NANOARROW_TYPE_TIMESTAMP): + if db_type_num in (DB_TYPE_NUM_DATE, + DB_TYPE_NUM_TIMESTAMP, + DB_TYPE_NUM_TIMESTAMP_LTZ, + DB_TYPE_NUM_TIMESTAMP_TZ): + ok = True + elif arrow_type == NANOARROW_TYPE_FLOAT: + if db_type_num in (DB_TYPE_NUM_BINARY_DOUBLE, + DB_TYPE_NUM_BINARY_FLOAT, + DB_TYPE_NUM_NUMBER): + ok = True + elif arrow_type == NANOARROW_TYPE_DOUBLE: + if db_type_num in (DB_TYPE_NUM_BINARY_DOUBLE, + DB_TYPE_NUM_BINARY_FLOAT, + DB_TYPE_NUM_NUMBER): + ok = True + elif arrow_type in (NANOARROW_TYPE_STRING, + NANOARROW_TYPE_LARGE_STRING): + if db_type_num in (DB_TYPE_NUM_CHAR, + DB_TYPE_NUM_LONG_VARCHAR, + DB_TYPE_NUM_VARCHAR, + DB_TYPE_NUM_NCHAR, + DB_TYPE_NUM_LONG_NVARCHAR, + DB_TYPE_NUM_NVARCHAR): + ok = True + + if not ok: + errors._raise_err(errors.ERR_UNSUPPORTED_ARROW_TYPE_FOR_DB_TYPE, + arrow_type=schema_impl.get_type_name(), + db_type=self.dbtype.name) + + self._finalize_init() + self._schema_impl = schema_impl + cdef OracleMetadata copy(self): """ Create a copy of the metadata and return it. 
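In practical terms the compatibility table above means, for example, that integer, decimal128, float, and double Arrow columns may target a NUMBER column, while a string column may not. A sketch of the failure path (schema and table are illustrative assumptions):

    import oracledb
    import pyarrow

    # ID is assumed to be a NUMBER column; a string Arrow column targeting it
    # is rejected with DPY-3038 before any rows are streamed
    bad = pyarrow.table({"ID": pyarrow.array(["1", "2"], pyarrow.string())})
    try:
        conn.direct_path_load(          # conn: an open oracledb connection
            schema_name="HR",
            table_name="MY_TABLE",
            column_names=["ID"],
            data=bad,
        )
    except oracledb.NotSupportedError as exc:
        print(exc)   # expected full code: DPY-3038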
@@ -198,7 +263,8 @@ cdef class OracleMetadata: NANOARROW_TYPE_FIXED_SIZE_LIST): metadata.dbtype = DB_TYPE_VECTOR else: - errors._raise_err(errors.ERR_UNEXPECTED_DATA, data=arrow_type) + errors._raise_err(errors.ERR_UNSUPPORTED_ARROW_TYPE, + arrow_type=schema_impl.get_type_name()) metadata._schema_impl = schema_impl metadata.name = schema_impl.name metadata.precision = schema_impl.precision diff --git a/src/oracledb/impl/base/types.pyx b/src/oracledb/impl/base/types.pyx index c00ce582..33619d57 100644 --- a/src/oracledb/impl/base/types.pyx +++ b/src/oracledb/impl/base/types.pyx @@ -58,7 +58,7 @@ cdef class DbType: def __init__(self, num, name, ora_name, native_num=0, ora_type_num=0, default_py_type_num=0, default_size=0, csfrm=0, - buffer_size_factor=0): + buffer_size_factor=0, is_fast=True): cdef uint16_t ora_type_key = csfrm * 256 + ora_type_num self.num = num self.name = name @@ -69,12 +69,18 @@ cdef class DbType: self._default_py_type_num = default_py_type_num self._csfrm = csfrm self._buffer_size_factor = buffer_size_factor + self._is_fast = is_fast if num != 0: num -= DB_TYPE_NUM_MIN db_type_by_num[num] = self db_type_by_ora_name[ora_name] = self db_type_by_ora_type_num[ora_type_key] = self + # add a second key for LONG metadata which doesn't always contain the + # character set form value in metadata received by the database + if ora_type_num == ORA_TYPE_NUM_LONG and csfrm == CS_FORM_IMPLICIT: + db_type_by_ora_type_num[ora_type_num] = self + def __reduce__(self): return self.name @@ -118,7 +124,8 @@ DB_TYPE_BFILE = DbType( NATIVE_TYPE_NUM_LOB, ORA_TYPE_NUM_BFILE, PY_TYPE_NUM_ORACLE_LOB, - buffer_size_factor=4000 + buffer_size_factor=4000, + is_fast=False ) DB_TYPE_BINARY_DOUBLE = DbType( @@ -158,7 +165,8 @@ DB_TYPE_BLOB = DbType( NATIVE_TYPE_NUM_LOB, ORA_TYPE_NUM_BLOB, PY_TYPE_NUM_ORACLE_LOB, - buffer_size_factor=112 + buffer_size_factor=112, + is_fast=False ) DB_TYPE_BOOLEAN = DbType( @@ -191,7 +199,8 @@ DB_TYPE_CLOB = DbType( ORA_TYPE_NUM_CLOB, PY_TYPE_NUM_ORACLE_LOB, csfrm=CS_FORM_IMPLICIT, - buffer_size_factor=112 + buffer_size_factor=112, + is_fast=False ) DB_TYPE_CURSOR = DbType( @@ -201,7 +210,8 @@ DB_TYPE_CURSOR = DbType( NATIVE_TYPE_NUM_STMT, ORA_TYPE_NUM_CURSOR, PY_TYPE_NUM_ORACLE_CURSOR, - buffer_size_factor=4 + buffer_size_factor=4, + is_fast=False ) DB_TYPE_DATE = DbType( @@ -240,7 +250,8 @@ DB_TYPE_JSON = DbType( "JSON", NATIVE_TYPE_NUM_JSON, ORA_TYPE_NUM_JSON, - PY_TYPE_NUM_OBJECT + PY_TYPE_NUM_OBJECT, + is_fast=False ) DB_TYPE_LONG = DbType( @@ -251,7 +262,8 @@ DB_TYPE_LONG = DbType( ORA_TYPE_NUM_LONG, PY_TYPE_NUM_STR, csfrm=CS_FORM_IMPLICIT, - buffer_size_factor=2147483647 + buffer_size_factor=2147483647, + is_fast=False ) DB_TYPE_LONG_NVARCHAR = DbType( @@ -262,7 +274,8 @@ DB_TYPE_LONG_NVARCHAR = DbType( ORA_TYPE_NUM_LONG, PY_TYPE_NUM_STR, csfrm=CS_FORM_NCHAR, - buffer_size_factor=2147483647 + buffer_size_factor=2147483647, + is_fast=False ) DB_TYPE_LONG_RAW = DbType( @@ -272,7 +285,8 @@ DB_TYPE_LONG_RAW = DbType( NATIVE_TYPE_NUM_BYTES, ORA_TYPE_NUM_LONG_RAW, PY_TYPE_NUM_BYTES, - buffer_size_factor=2147483647 + buffer_size_factor=2147483647, + is_fast=False ) DB_TYPE_NCHAR = DbType( @@ -295,7 +309,8 @@ DB_TYPE_NCLOB = DbType( ORA_TYPE_NUM_CLOB, PY_TYPE_NUM_ORACLE_LOB, csfrm=CS_FORM_NCHAR, - buffer_size_factor=112 + buffer_size_factor=112, + is_fast=False ) DB_TYPE_NUMBER = DbType( @@ -347,7 +362,8 @@ DB_TYPE_ROWID = DbType( NATIVE_TYPE_NUM_ROWID, ORA_TYPE_NUM_ROWID, PY_TYPE_NUM_STR, - buffer_size_factor=ORA_TYPE_SIZE_ROWID + buffer_size_factor=ORA_TYPE_SIZE_ROWID, + 
is_fast=False ) DB_TYPE_TIMESTAMP = DbType( @@ -383,7 +399,8 @@ DB_TYPE_TIMESTAMP_TZ = DbType( DB_TYPE_UNKNOWN = DbType( DB_TYPE_NUM_UNKNOWN, "DB_TYPE_UNKNOWN", - "UNKNOWN" + "UNKNOWN", + is_fast=False ) DB_TYPE_UROWID = DbType( @@ -392,7 +409,8 @@ DB_TYPE_UROWID = DbType( "UROWID", NATIVE_TYPE_NUM_BYTES, ORA_TYPE_NUM_UROWID, - PY_TYPE_NUM_STR + PY_TYPE_NUM_STR, + is_fast=False ) DB_TYPE_VARCHAR = DbType( @@ -413,7 +431,8 @@ DB_TYPE_VECTOR = DbType( "VECTOR", NATIVE_TYPE_NUM_VECTOR, ORA_TYPE_NUM_VECTOR, - PY_TYPE_NUM_ARRAY + PY_TYPE_NUM_ARRAY, + is_fast=False ) DB_TYPE_XMLTYPE = DbType( @@ -424,7 +443,8 @@ DB_TYPE_XMLTYPE = DbType( ORA_TYPE_NUM_OBJECT, PY_TYPE_NUM_STR, csfrm=CS_FORM_IMPLICIT, - buffer_size_factor=2147483647 + buffer_size_factor=2147483647, + is_fast=False ) # additional aliases diff --git a/src/oracledb/impl/thin/connection.pyx b/src/oracledb/impl/thin/connection.pyx index c4c42fc1..8a823b0b 100644 --- a/src/oracledb/impl/thin/connection.pyx +++ b/src/oracledb/impl/thin/connection.pyx @@ -546,6 +546,45 @@ cdef class ThinConnImpl(BaseThinConnImpl): lob_impl.create_temp() return lob_impl + def direct_path_load(self, str schema_name, str table_name, + list column_names, object data, + uint32_t batch_size): + cdef: + Protocol protocol = self._protocol + DirectPathPrepareMessage prepare_message + DirectPathLoadStreamMessage load_message + DirectPathOpMessage op_message + BatchLoadManager manager + + # prepare message + prepare_message = self._create_message(DirectPathPrepareMessage) + prepare_message.schema_name = schema_name + prepare_message.table_name = table_name + prepare_message.column_names = column_names + protocol._process_single_message(prepare_message) + + # setup op message + op_message = self._create_message(DirectPathOpMessage) + op_message.prepare(prepare_message.cursor_id, TNS_DP_OP_ABORT) + + # load message + load_message = self._create_message(DirectPathLoadStreamMessage) + try: + manager = BatchLoadManager.create_for_direct_path_load( + data, prepare_message.column_metadata, batch_size + ) + while manager.num_rows > 0: + load_message.prepare( + prepare_message.cursor_id, + manager, + prepare_message.column_metadata + ) + protocol._process_single_message(load_message) + manager.next_batch() + op_message.op_code = TNS_DP_OP_FINISH + finally: + protocol._process_single_message(op_message) + def get_type(self, object conn, str name): cdef ThinDbObjectTypeCache cache = \ get_dbobject_type_cache(self._dbobject_type_cache_num) @@ -1124,6 +1163,45 @@ cdef class AsyncThinConnImpl(BaseThinConnImpl): await lob_impl.create_temp() return lob_impl + async def direct_path_load(self, str schema_name, str table_name, + list column_names, object data, + uint32_t batch_size): + cdef: + BaseAsyncProtocol protocol = self._protocol + DirectPathPrepareMessage prepare_message + DirectPathLoadStreamMessage load_message + DirectPathOpMessage op_message + BatchLoadManager manager + + # prepare message + prepare_message = self._create_message(DirectPathPrepareMessage) + prepare_message.schema_name = schema_name + prepare_message.table_name = table_name + prepare_message.column_names = column_names + await protocol._process_single_message(prepare_message) + + # setup op message + op_message = self._create_message(DirectPathOpMessage) + op_message.prepare(prepare_message.cursor_id, TNS_DP_OP_ABORT) + + # load message + load_message = self._create_message(DirectPathLoadStreamMessage) + try: + manager = BatchLoadManager.create_for_direct_path_load( + data, prepare_message.column_metadata, 
batch_size + ) + while manager.num_rows > 0: + load_message.prepare( + prepare_message.cursor_id, + manager, + prepare_message.column_metadata + ) + await protocol._process_single_message(load_message) + manager.next_batch() + op_message.op_code = TNS_DP_OP_FINISH + finally: + await protocol._process_single_message(op_message) + async def get_type(self, object conn, str name): cdef AsyncThinDbObjectTypeCache cache = \ get_dbobject_type_cache(self._dbobject_type_cache_num) diff --git a/src/oracledb/impl/thin/constants.pxi b/src/oracledb/impl/thin/constants.pxi index 9d72320f..79f80b2d 100644 --- a/src/oracledb/impl/thin/constants.pxi +++ b/src/oracledb/impl/thin/constants.pxi @@ -356,6 +356,9 @@ cdef enum: TNS_FUNC_AUTH_PHASE_TWO = 115 TNS_FUNC_CLOSE_CURSORS = 105 TNS_FUNC_COMMIT = 14 + TNS_FUNC_DIRECT_PATH_LOAD_STREAM = 129 + TNS_FUNC_DIRECT_PATH_OP = 130 + TNS_FUNC_DIRECT_PATH_PREPARE = 128 TNS_FUNC_EXECUTE = 94 TNS_FUNC_FETCH = 5 TNS_FUNC_LOB_OP = 96 @@ -577,6 +580,61 @@ cdef enum: TNS_SESSION_STATE_REQUEST_END = 0x08 TNS_SESSION_STATE_EXPLICIT_BOUNDARY = 0x40 +# direct path constants +cdef enum: + TNS_DP_INTERFACE_VERSION = 400 + TNS_DP_STREAM_VERSION = 400 + +# direct path op codes +cdef enum: + TNS_DPP_OP_CODE_LOAD = 1 + TNS_DPP_OP_CODE_UNLOAD = 2 + TNS_DPP_OP_CODE_CONVERT = 3 + +# direct path prepare input indexes +cdef enum: + TNS_DPP_IN_INDEX_INTERFACE_VERSION = 0 + TNS_DPP_IN_INDEX_STREAM_VERSION = 1 + TNS_DPP_IN_INDEX_LOCK_WAIT = 14 + TNS_DPP_IN_MAX_PARAMS = 36 + +# direct path prepare keyword indexes +cdef enum: + TNS_DPP_KW_INDEX_OBJECT_NAME = 1 + TNS_DPP_KW_INDEX_SUBOBJECT_NAME = 2 + TNS_DPP_KW_INDEX_SCHEMA_NAME = 3 + TNS_DPP_KW_INDEX_COLUMN_NAME = 4 + TNS_DPP_KW_INDEX_VARRAY_TABLE = 15 + TNS_DPP_KW_INDEX_NFOBJ_OID_POS = 11 + TNS_DPP_KW_INDEX_NFOBJ_SID_POS = 16 + TNS_DPP_KW_INDEX_NFOBJ_VARRAY_INDEX = 17 + +# direct path prepare output indexes +cdef enum: + TNS_DPP_OUT_INDEX_CURSOR = 3 + TNS_DPP_OUT_MAX_PARAMS = 14 + +# direct path row header flags +cdef enum: + TNS_DPLS_ROW_HEADER_FAST_PIECE = 0x10 + TNS_DPLS_ROW_HEADER_FAST_ROW = 0x20 + TNS_DPLS_ROW_HEADER_FIRST = 0x08 + TNS_DPLS_ROW_HEADER_LAST = 0x04 + TNS_DPLS_ROW_HEADER_SPLIT_WITH_PREV = 0x02 + TNS_DPLS_ROW_HEADER_SPLIT_WITH_NEXT = 0x01 + +# other direct path load stream constants +cdef enum: + TNS_DPLS_MAX_SHORT_LENGTH = 0xfa + TNS_DPLS_MAX_PIECE_SIZE = 0xfff0 + TNS_DPLS_FAST_HEADER_SIZE = 4 + TNS_DPLS_SLOW_HEADER_SIZE = 2 + +# direct path operation codes +cdef enum: + TNS_DP_OP_ABORT = 1 + TNS_DP_OP_FINISH = 2 + # other constants cdef enum: TNS_ESCAPE_CHAR = 253 diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index 80dce97e..ec83e782 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -330,7 +330,14 @@ cdef class Message: buf.skip_ub2() # version buf.skip_ub2() # character set id buf.read_ub1(&csfrm) # character set form - metadata.dbtype = DbType._from_ora_type_and_csfrm(ora_type_num, csfrm) + # in some cases the metadata returned contains an invalid character + # set form for data types that do not actually require it; if the + # lookup fails, try again with a zero character set form + try: + metadata.dbtype = \ + DbType._from_ora_type_and_csfrm(ora_type_num, csfrm) + except: + metadata.dbtype = DbType._from_ora_type_and_csfrm(ora_type_num, 0) buf.read_ub4(&metadata.max_size) if ora_type_num == ORA_TYPE_NUM_RAW: metadata.max_size = metadata.buffer_size diff --git 
a/src/oracledb/impl/thin/messages/direct_path_load_stream.pyx b/src/oracledb/impl/thin/messages/direct_path_load_stream.pyx new file mode 100644 index 00000000..1e6cc25e --- /dev/null +++ b/src/oracledb/impl/thin/messages/direct_path_load_stream.pyx @@ -0,0 +1,363 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# direct_path_load_stream.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for sending direct path data to the +# database (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class DirectPathPiece: + cdef: + bint is_fast + bint is_first + bint is_last + bint is_split_with_prev + bint is_split_with_next + uint8_t flags + uint8_t num_segments + uint32_t length + bytes data + + cdef int finalize(self, PieceBuffer buf) except -1: + """ + Finalizes the piece in preparation for writing to the message. + """ + self.length = buf._pos + self.data = buf._data[:buf._pos] + buf._pos = 0 + if self.is_first: + self.flags |= TNS_DPLS_ROW_HEADER_FIRST + elif self.is_split_with_prev: + self.flags |= TNS_DPLS_ROW_HEADER_SPLIT_WITH_PREV + if self.is_last: + self.flags |= TNS_DPLS_ROW_HEADER_LAST + elif self.is_split_with_next: + self.flags |= TNS_DPLS_ROW_HEADER_SPLIT_WITH_NEXT + if self.is_fast_row(): + self.flags |= TNS_DPLS_ROW_HEADER_FAST_ROW + self.flags |= TNS_DPLS_ROW_HEADER_FAST_PIECE + + cdef uint32_t header_length(self): + """ + Returns the length of the piece header. + """ + cdef uint32_t length = 2 + if self.is_fast_row(): + length += 2 + return length + + cdef bint is_fast_row(self): + """ + Returns true if the piece contains all of the data for a row and all of + the segments are fast segments. + """ + return self.is_first and self.is_last and self.is_fast + + cdef int write_to_message(self, WriteBuffer buf) except -1: + """ + Writes the piece to the message. 
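A small sketch of how the row-header flags combine, mirroring DirectPathPiece.finalize(); the flag values are the TNS_DPLS_ROW_HEADER_* constants defined earlier in this patch, and a row that fits in a single piece using only "fast" column types gets the four-byte fast header:

    # values from the TNS_DPLS_ROW_HEADER_* constants in constants.pxi
    FIRST, LAST = 0x08, 0x04
    FAST_ROW, FAST_PIECE = 0x20, 0x10
    SPLIT_WITH_PREV, SPLIT_WITH_NEXT = 0x02, 0x01

    def header_flags(is_first, is_last, is_fast, split_prev=False, split_next=False):
        # FIRST/LAST take precedence over the split markers, and the FAST bits
        # are only set when the whole row fits in one piece and every segment
        # used the fast encoding
        flags = FIRST if is_first else (SPLIT_WITH_PREV if split_prev else 0)
        flags |= LAST if is_last else (SPLIT_WITH_NEXT if split_next else 0)
        if is_first and is_last and is_fast:
            flags |= FAST_ROW | FAST_PIECE
        return flags

    assert header_flags(True, True, True) == 0x3C                   # complete fast row
    assert header_flags(True, False, False, split_next=True) == 0x09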
+ """ + buf.write_uint8(self.flags) + if self.is_fast_row(): + buf.write_uint16be(self.length + self.header_length()) + buf.write_uint8(self.num_segments) + buf.write_raw(self.data, self.length) + + +@cython.final +cdef class PieceBuffer(Buffer): + cdef: + DirectPathPiece current_piece + uint32_t total_piece_length + list pieces + + cdef int _finalize_piece(self) except -1: + """ + Finalizes the piece by adding the data in the buffer and calculating + the piece length, then resetting the buffer. + """ + self.current_piece.finalize(self) + self.pieces.append(self.current_piece) + self.total_piece_length += \ + self.current_piece.length + self.current_piece.header_length() + + cdef int _write_more_data(self, ssize_t num_bytes_available, + ssize_t num_bytes_wanted) except -1: + """ + Called when the amount of buffer available is less than the amount of + data requested. This finalizes the current piece and creates a new one + to write to. + """ + self._finalize_piece() + self.current_piece = DirectPathPiece.__new__(DirectPathPiece) + + cdef int _write_raw_bytes_and_length(self, const char_type *ptr, + ssize_t num_bytes) except -1: + """ + Helper function that writes the length in the format required before + writing the bytes. It also splits the pieces as needed. + """ + cdef: + ssize_t bytes_left = self._max_size - self._pos + ssize_t bytes_to_write + if num_bytes <= TNS_DPLS_MAX_SHORT_LENGTH: + if num_bytes + 1 > bytes_left: + self._finalize_piece() + self.current_piece = DirectPathPiece.__new__(DirectPathPiece) + self.write_uint8( num_bytes) + self.write_raw(ptr, num_bytes) + self.current_piece.num_segments += 1 + else: + while num_bytes + 3 > bytes_left: + bytes_to_write = bytes_left - 3 + self.write_uint8(TNS_LONG_LENGTH_INDICATOR) + self.write_uint16be(bytes_to_write) + self.write_raw(ptr, bytes_to_write) + num_bytes -= bytes_to_write + bytes_left = self._max_size + ptr += bytes_to_write + self.current_piece.is_split_with_next = True + self.current_piece.num_segments += 1 + self._finalize_piece() + self.current_piece = DirectPathPiece.__new__(DirectPathPiece) + self.current_piece.is_split_with_prev = num_bytes > 0 + if num_bytes > 0: + self.current_piece.num_segments += 1 + self.write_uint8(TNS_LONG_LENGTH_INDICATOR) + self.write_uint16be( num_bytes) + self.write_raw(ptr, num_bytes) + + cdef int add_column_value( + self, + BaseConnImpl conn_impl, + OracleMetadata metadata, + OracleData *data, + object value, + uint64_t row_num + ): + """ + Adds column data to the piece (or pieces, if the column value cannot + fit inside the current piece). 
+ """ + cdef uint8_t ora_type_num + + # check that the number of segments hasn't already reached the maximum + # allowable; if it has finalize the current piece and create a new one + if self.current_piece.num_segments == 255: + self._finalize_piece() + self.current_piece = DirectPathPiece.__new__(DirectPathPiece) + + # clear the is_fast flag if the current data type is not one of the + # fast types + if not metadata.dbtype._is_fast: + self.current_piece.is_fast = False + + # write data to the buffer; retain current buffer length in case buffer + # needs to be split across pieces + ora_type_num = metadata.dbtype._ora_type_num + if data.is_null: + if not metadata.nulls_allowed: + errors._raise_err(errors.ERR_NULLS_NOT_ALLOWED, + column_name=metadata.name, row_num=row_num) + self.write_uint8(TNS_NULL_LENGTH_INDICATOR) + self.current_piece.num_segments += 1 + elif ora_type_num in (ORA_TYPE_NUM_VARCHAR, + ORA_TYPE_NUM_CHAR, + ORA_TYPE_NUM_LONG, + ORA_TYPE_NUM_RAW, + ORA_TYPE_NUM_LONG_RAW): + if metadata.max_size > 0 \ + and data.buffer.as_raw_bytes.num_bytes > metadata.max_size: + errors._raise_err( + errors.ERR_VALUE_TOO_LARGE, + max_size=metadata.max_size, + actual_size=data.buffer.as_raw_bytes.num_bytes, + row_num=row_num, + column_name=metadata.name + ) + self._write_raw_bytes_and_length( + data.buffer.as_raw_bytes.ptr, + data.buffer.as_raw_bytes.num_bytes + ) + elif ora_type_num == ORA_TYPE_NUM_NUMBER: + self.write_oracle_number(value) + elif ora_type_num == ORA_TYPE_NUM_BINARY_DOUBLE: + self.write_binary_double(data.buffer.as_double) + elif ora_type_num == ORA_TYPE_NUM_BINARY_FLOAT: + self.write_binary_float(data.buffer.as_float) + elif ora_type_num in (ORA_TYPE_NUM_DATE, + ORA_TYPE_NUM_TIMESTAMP, + ORA_TYPE_NUM_TIMESTAMP_TZ, + ORA_TYPE_NUM_TIMESTAMP_LTZ): + self.write_oracle_date(value, metadata.dbtype._buffer_size_factor) + elif ora_type_num == ORA_TYPE_NUM_INTERVAL_DS: + self.write_interval_ds(value) + elif ora_type_num == ORA_TYPE_NUM_INTERVAL_YM: + self.write_interval_ym(value) + elif ora_type_num == ORA_TYPE_NUM_BOOLEAN: + self.write_bool(data.buffer.as_bool) + elif ora_type_num == ORA_TYPE_NUM_JSON: + self.write_oson(value, conn_impl._oson_max_fname_size) + elif ora_type_num == ORA_TYPE_NUM_VECTOR: + self.write_vector(value) + else: + errors._raise_err(errors.ERR_DB_TYPE_NOT_SUPPORTED, + name=metadata.dbtype.name) + + cdef int finish_row(self) except -1: + """ + Called when the row is finished. The current piece is finalized. + """ + self.current_piece.is_last = True + self._finalize_piece() + self.current_piece = None + + cdef int initialize(self) except -1: + """ + Initializes the piece buffer to the maximum size allowed. A list of + pieces is maintained and populated as data is written to the buffer. + """ + self._initialize(TNS_DPLS_MAX_PIECE_SIZE) + self.pieces = [] + + cdef int start_row(self) except -1: + """ + Called when a row is being started. A new piece is created. + """ + self.current_piece = DirectPathPiece.__new__(DirectPathPiece) + self.current_piece.is_first = True + self.current_piece.is_fast = True + + +@cython.final +cdef class DirectPathLoadStreamMessage(Message): + cdef: + uint64_t current_row_num + uint32_t total_piece_length + uint16_t cursor_id + list row_pieces + + cdef int _calculate_pieces( + self, + BatchLoadManager manager, + list column_metadata + ) except -1: + """ + Calculates the list of pieces that will be sent to the server. Due to + the nature of the protocol, this must be calculated in advance. 
+ """ + cdef: + object row = None, col = None + ArrowArrayImpl array_impl + uint64_t overall_row_num + OracleMetadata metadata + list all_rows, arrays + uint32_t row_num + PieceBuffer buf + OracleData data + ssize_t col_num + + # create buffer used for writing column data + buf = PieceBuffer.__new__(PieceBuffer) + buf.initialize() + + # acquire information from the manager + all_rows = manager._get_all_rows() + arrays = manager._get_arrow_arrays() + + # calculate pieces + for row_num in range(manager.num_rows): + overall_row_num = manager.offset + row_num + if all_rows is not None: + row = all_rows[overall_row_num] + self.current_row_num += 1 + buf.start_row() + for col_num, metadata in enumerate(column_metadata): + if all_rows is not None: + col = self.conn_impl._check_value(metadata, row[col_num], + NULL) + col = convert_python_to_oracle_data(metadata, &data, col) + else: + array_impl = arrays[col_num] + col = convert_arrow_to_oracle_data( + metadata, &data, array_impl, overall_row_num + ) + buf.add_column_value(self.conn_impl, metadata, &data, col, + self.current_row_num) + buf.finish_row() + + # retain pieces for writing to message + self.total_piece_length = buf.total_piece_length + self.row_pieces = buf.pieces + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_DIRECT_PATH_LOAD_STREAM + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the return parameters sent by the database. + """ + cdef uint16_t num_out_values, i + buf.read_ub2(&num_out_values) + for i in range(num_out_values): + buf.skip_ub4() + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Writes the message to the database. + """ + cdef DirectPathPiece piece + + # write header and initial data + self._write_function_code(buf) + buf.write_ub2(self.cursor_id) + buf.write_uint8(1) # pointer (buffer) + buf.write_ub4(self.total_piece_length) + buf.write_ub4(TNS_DP_STREAM_VERSION) + buf.write_uint8(0) # pointer (input values) + buf.write_ub4(0) # number of input values + buf.write_uint8(1) # pointer (output values) + buf.write_uint8(1) # pointer (output values length) + + # write all pieces + for piece in self.row_pieces: + piece.write_to_message(buf) + + cdef int prepare(self, + uint16_t cursor_id, + BatchLoadManager manager, + list column_metadata + ): + """ + Prepares the values for writing to the message. + """ + self.cursor_id = cursor_id + self._calculate_pieces(manager, column_metadata) diff --git a/src/oracledb/impl/thin/messages/direct_path_op.pyx b/src/oracledb/impl/thin/messages/direct_path_op.pyx new file mode 100644 index 00000000..9f7c7dfa --- /dev/null +++ b/src/oracledb/impl/thin/messages/direct_path_op.pyx @@ -0,0 +1,71 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# direct_path_op.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for sending direct path operations +# to the database (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class DirectPathOpMessage(Message): + cdef: + uint16_t cursor_id + uint32_t op_code + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_DIRECT_PATH_OP + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the return parameters sent by the database. + """ + cdef uint16_t num_out_values, i + buf.read_ub2(&num_out_values) + for i in range(num_out_values): + buf.skip_ub4() + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Writes the message to the database. + """ + self._write_function_code(buf) + buf.write_ub4(self.op_code) + buf.write_ub2(self.cursor_id) + buf.write_uint8(0) # pointer (input values) + buf.write_ub4(0) # number of input values + buf.write_uint8(1) # pointer (output values) + buf.write_uint8(1) # pointer (output values length) + + cdef int prepare(self, uint16_t cursor_id, uint32_t op_code) except -1: + """ + Prepares the values for writing to the message. + """ + self.cursor_id = cursor_id + self.op_code = op_code diff --git a/src/oracledb/impl/thin/messages/direct_path_prepare.pyx b/src/oracledb/impl/thin/messages/direct_path_prepare.pyx new file mode 100644 index 00000000..fe5502e4 --- /dev/null +++ b/src/oracledb/impl/thin/messages/direct_path_prepare.pyx @@ -0,0 +1,153 @@ +#------------------------------------------------------------------------------ +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#------------------------------------------------------------------------------ + +#------------------------------------------------------------------------------ +# direct_path_prepare.pyx +# +# Cython file defining the messages that are sent to the database and the +# responses that are received by the client for preparing a direct path cursor +# for execution (embedded in thin_impl.pyx). +#------------------------------------------------------------------------------ + +@cython.final +cdef class DirectPathPrepareMessage(Message): + cdef: + str schema_name + str table_name + list column_names + list column_metadata + uint32_t in_values[TNS_DPP_IN_MAX_PARAMS] + uint16_t in_values_length + uint32_t *out_values + uint16_t out_values_length + uint16_t cursor_id + + def __dealloc__(self): + if self.out_values != NULL: + cpython.PyMem_Free(self.out_values) + + cdef int _initialize_hook(self) except -1: + """ + Perform initialization. + """ + self.function_code = TNS_FUNC_DIRECT_PATH_PREPARE + memset(self.in_values, 0, sizeof(self.in_values)) + self.in_values[TNS_DPP_KW_INDEX_NFOBJ_OID_POS] = 0xffff + self.in_values[TNS_DPP_KW_INDEX_NFOBJ_SID_POS] = 0xffff + self.in_values[TNS_DPP_KW_INDEX_NFOBJ_VARRAY_INDEX] = 0xffff + + cdef OracleMetadata _process_metadata(self, ReadBuffer buf): + """ + Process metadata returned by the database. CLOB and BLOB are always + treated as strings and bytes when using direct path. + """ + cdef OracleMetadata metadata + metadata = Message._process_metadata(self, buf) + if metadata.dbtype._ora_type_num == ORA_TYPE_NUM_CLOB: + metadata.dbtype = DbType._from_ora_type_and_csfrm( + ORA_TYPE_NUM_LONG, CS_FORM_NCHAR + ) + elif metadata.dbtype._ora_type_num == ORA_TYPE_NUM_BLOB: + metadata.dbtype = DbType._from_ora_type_and_csfrm( + ORA_TYPE_NUM_LONG_RAW, 0 + ) + return metadata + + cdef int _process_return_parameters(self, ReadBuffer buf) except -1: + """ + Process the return parameters sent by the database. + """ + cdef: + uint32_t i, num_columns + uint16_t num_params + ssize_t num_bytes + buf.read_ub4(&num_columns) + self.column_metadata = [ + self._process_metadata(buf) for i in range(num_columns) + ] + buf.read_ub2(&num_params) + if num_params > 0: + raise Exception("FIX ME!") + buf.read_ub2(&self.out_values_length) + num_bytes = sizeof(uint32_t) * self.out_values_length + self.out_values = cpython.PyMem_Malloc(num_bytes) + for i in range(self.out_values_length): + buf.read_ub4(&self.out_values[i]) + self.cursor_id = self.out_values[TNS_DPP_OUT_INDEX_CURSOR] + + cdef void _set_in_value(self, uint32_t key, uint32_t value): + """ + Sets the value in the input array and the maximum value set. + """ + self.in_values[key] = value + self.in_values_length = max(self.in_values_length, key + 1) + + cdef int _write_keyword_param(self, WriteBuffer buf, uint32_t index, + str value) except -1: + """ + Writes a keyword parameter to the buffer. + """ + cdef bytes value_bytes = value.encode() + buf.write_ub2(0) # text length + buf.write_ub2(len(value_bytes)) + buf.write_bytes_with_length(value_bytes) + buf.write_ub2(index) + + cdef int _write_message(self, WriteBuffer buf) except -1: + """ + Writes the message to the database. 
+ """ + cdef: + uint32_t i, keyword_parameters_length + str name + + # initialize input array + self._set_in_value(TNS_DPP_IN_INDEX_INTERFACE_VERSION, + TNS_DP_INTERFACE_VERSION) + self._set_in_value(TNS_DPP_IN_INDEX_STREAM_VERSION, + TNS_DP_STREAM_VERSION) + self._set_in_value(TNS_DPP_IN_INDEX_LOCK_WAIT, 1) + + # write message + self._write_function_code(buf) + keyword_parameters_length = len(self.column_names) + 2 + buf.write_ub4(TNS_DPP_OP_CODE_LOAD) + buf.write_uint8(1) # keyword parameters (pointer) + buf.write_ub4(keyword_parameters_length) + buf.write_uint8(1) # input array (pointer) + buf.write_ub2(self.in_values_length) + buf.write_uint8(1) # metadata (pointer) + buf.write_uint8(1) # metadata length (pointer) + buf.write_uint8(1) # parameters (pointer) + buf.write_uint8(1) # parameters length (pointer) + buf.write_uint8(1) # output array (pointer) + buf.write_uint8(1) # output array length (pointer) + self._write_keyword_param(buf, TNS_DPP_KW_INDEX_SCHEMA_NAME, + self.schema_name) + self._write_keyword_param(buf, TNS_DPP_KW_INDEX_OBJECT_NAME, + self.table_name) + for name in self.column_names: + self._write_keyword_param(buf, TNS_DPP_KW_INDEX_COLUMN_NAME, name) + for i in range(self.in_values_length): + buf.write_ub4(self.in_values[i]) diff --git a/src/oracledb/impl/thin/packet.pyx b/src/oracledb/impl/thin/packet.pyx index a97b2437..b63172c3 100644 --- a/src/oracledb/impl/thin/packet.pyx +++ b/src/oracledb/impl/thin/packet.pyx @@ -941,8 +941,8 @@ cdef class WriteBuffer(Buffer): self.write_uint64be(0) # unused self.write_uint64be(0) # unused - cdef object write_oson(self, value, ssize_t max_fname_size, - bint write_length=True): + cdef int write_oson(self, value, ssize_t max_fname_size, + bint write_length=True) except -1: """ Encodes the given value to OSON and then writes that to the buffer. it. @@ -958,7 +958,7 @@ cdef class WriteBuffer(Buffer): self._seq_num = 1 self.write_uint8(self._seq_num) - cdef object write_vector(self, value): + cdef int write_vector(self, value) except -1: """ Encodes the given value to VECTOR and then writes that to the buffer. """ diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index c63f55e3..44ecb189 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -96,6 +96,7 @@ from .base_impl cimport ( BaseParser, BasePoolImpl, BaseVarImpl, + BatchLoadManager, PipelineOpImpl, PipelineOpResultImpl, PIPELINE_OP_TYPE_CALL_FUNC, @@ -198,7 +199,7 @@ from .base_impl import ( DB_TYPE_XMLTYPE, ) -from .arrow_impl cimport ArrowArrayImpl +from .arrow_impl cimport ArrowArrayImpl, DataFrameImpl ctypedef unsigned char char_type @@ -220,6 +221,9 @@ include "impl/thin/messages/auth.pyx" include "impl/thin/messages/commit.pyx" include "impl/thin/messages/connect.pyx" include "impl/thin/messages/data_types.pyx" +include "impl/thin/messages/direct_path_prepare.pyx" +include "impl/thin/messages/direct_path_load_stream.pyx" +include "impl/thin/messages/direct_path_op.pyx" include "impl/thin/messages/end_pipeline.pyx" include "impl/thin/messages/execute.pyx" include "impl/thin/messages/fetch.pyx" diff --git a/tests/test_9600_direct_path_load.py b/tests/test_9600_direct_path_load.py new file mode 100644 index 00000000..c437b5f9 --- /dev/null +++ b/tests/test_9600_direct_path_load.py @@ -0,0 +1,614 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. 
+# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +Module for testing the Direct Path Load interface. +""" + +import datetime +import decimal + +import pandas +import pyarrow +import pytest + +TABLE_NAME = "TestDataFrame" + + +@pytest.fixture(autouse=True) +def module_checks(skip_unless_thin_mode): + pass + + +@pytest.fixture +def empty_tab(cursor): + cursor.execute(f"delete from {TABLE_NAME}") + cursor.connection.commit() + + +def _verify_data(conn, data, column_names): + """ + Verifies that the data matches what is stored in the table. + """ + select_items = ",".join(column_names) + sql = f"select {select_items} from {TABLE_NAME} order by Id" + with conn.cursor() as cursor: + cursor.execute(sql) + assert cursor.fetchall() == data + + +def _verify_data_frame(conn, df, column_names, test_env): + """ + Verifies that the contents of the data frame matches what is stored in the + table. 
+ """ + data = test_env.get_data_from_df(df) + _verify_data(conn, data, column_names) + + +def test_9600(empty_tab, conn, test_env): + "9600 - test basic direct path load with list of tuples" + data = [ + ( + 1, + "Alice", + "Smith", + "New York", + "USA", + datetime.datetime(1990, 1, 15), + 50000.50, + 750, + ), + ( + 2, + "Bob", + "Johnson", + "London", + "UK", + datetime.datetime(1985, 6, 20), + 60000.75, + 680, + ), + ( + 3, + "Charlie", + "Brown", + "Paris", + "France", + datetime.datetime(1992, 3, 10), + 70000.25, + 720, + ), + ] + column_names = [ + "Id", + "FirstName", + "LastName", + "City", + "Country", + "DateOfBirth", + "Salary", + "CreditScore", + ] + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=data, + ) + _verify_data(conn, data, column_names) + + +def test_9601(empty_tab, conn, test_env): + "9601 - test basic direct path load with dataframe" + data = { + "Id": [1, 2, 3, 4, 5], + "FirstName": ["Alice", "Bob", "Charlie", "David", "Eve"], + "LastName": ["Smith", "Johnson", "Brown", "Wilson", "Davis"], + "City": ["New York", "London", "Paris", "Tokyo", "Sydney"], + "Country": ["USA", "UK", "France", "Japan", "Australia"], + "DateOfBirth": [ + datetime.datetime(1990, 1, 15), + datetime.datetime(1985, 6, 20), + datetime.datetime(1992, 3, 10), + datetime.datetime(1988, 12, 1), + datetime.datetime(1995, 5, 5), + ], + "Salary": [50000.50, 60000.75, 70000.25, 80000.00, 90000.50], + "CreditScore": [750, 680, 720, 810, 690], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + _verify_data_frame(conn, df, column_names, test_env) + + +def test_9602(empty_tab, conn, test_env): + "960f - test with empty data" + data = [] + column_names = ["Id", "FirstName"] + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=data, + ) + _verify_data(conn, data, column_names) + + +def test_9603(empty_tab, conn, test_env): + "9603 - test with empty data frame" + data = { + "Id": [], + "FirstName": [], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + _verify_data_frame(conn, df, column_names, test_env) + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +def test_9604(batch_size, conn, empty_tab, round_trip_checker, test_env): + "9604 - test with various batch sizes" + data = [(i + 1, f"String for row {i + 1}") for i in range(200)] + column_names = ["Id", "FirstName"] + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=data, + batch_size=batch_size, + ) + num_round_trips = 2 + len(data) // batch_size + if len(data) % batch_size: + num_round_trips += 1 + assert round_trip_checker.get_value() == num_round_trips + _verify_data(conn, data, column_names) + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +def test_9605(batch_size, conn, empty_tab, round_trip_checker, test_env): + "9605 - test with various batch sizes with a data frame" + names = ["Id", "FirstName"] + rows = [(i + 1, f"Name {i + 1}") for i in range(200)] + arrays = [ + pyarrow.array([i for i, _ in rows], pyarrow.int16()), + pyarrow.array([s for _, s in rows], pyarrow.string()), + ] + df = 
pyarrow.table(arrays, names).to_pandas() + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=names, + data=df, + batch_size=batch_size, + ) + num_round_trips = 2 + len(rows) // batch_size + if len(rows) % batch_size: + num_round_trips += 1 + assert round_trip_checker.get_value() == num_round_trips + _verify_data_frame(conn, df, names, test_env) + + +def test_9606(empty_tab, disable_fetch_lobs, conn, test_env): + "9607 - test with all basic data types" + column_names = [ + "Id", + "FirstName", + "DateOfBirth", + "LastUpdated", + "Salary", + "CreditScore", + "IntegerData", + "LongIntegerData", + "FloatData", + "DoubleData", + "RawData", + "LongData", + "LongRawData", + ] + current_time = datetime.datetime.now() + rows = [ + ( + 1, + "Test1", + datetime.datetime(1990, 1, 1), + current_time, + 12345.50, + 700, + 123456789, + 123456789012345, + 1.625, + 9.87654321, + b"\x01\x02\x03\x04\x05", + "This is a long text description", + b"blob_data_1", + ), + ( + 2, + "Test2", + datetime.datetime(1991, 2, 2), + current_time, + 23456.75, + 750, + 987654321, + 987654321098765, + 5.5, + 1.23456789, + b"\xff\xfe\xfd\xfc\xfb", + "Another long description here", + b"blob_data_2", + ), + ] + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + _verify_data(conn, rows, column_names) + + +def test_9607(empty_tab, disable_fetch_lobs, conn, test_env): + "9607 - test with all basic data types with a data frame" + current_time = datetime.datetime.now() + column_names = [ + "Id", + "FirstName", + "DateOfBirth", + "LastUpdated", + "Salary", + "CreditScore", + "IntegerData", + "LongIntegerData", + "FloatData", + "DoubleData", + "RawData", + "LongData", + "LongRawData", + ] + arrays = [ + pyarrow.array([1, 2], pyarrow.int8()), + pyarrow.array(["Test1", "Test2"], pyarrow.string()), + pyarrow.array( + [datetime.datetime(1990, 1, 1), datetime.datetime(1991, 2, 2)], + pyarrow.timestamp("s"), + ), + pyarrow.array( + [current_time, current_time], + pyarrow.timestamp("us"), + ), + pyarrow.array([12345.50, 23456.75], pyarrow.float32()), + pyarrow.array([700, 750], pyarrow.int16()), + pyarrow.array([123456789, 987654321], pyarrow.uint32()), + pyarrow.array([123456789012345, 987654321098765], pyarrow.uint64()), + pyarrow.array([1.625, 5.675], pyarrow.float32()), + pyarrow.array([9.87654321, 1.23456789], pyarrow.float64()), + pyarrow.array( + [b"\x01\x02\x03\x04\x05", b"\xff\xfe\xfd\xfc\xfb"], + pyarrow.binary(), + ), + pyarrow.array( + [ + "This is a long text description", + "Another long description here", + ], + pyarrow.string(), + ), + pyarrow.array([b"blob_data_1", b"blob_data_2"], pyarrow.binary()), + ] + df = pyarrow.table(arrays, column_names).to_pandas() + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + _verify_data_frame(conn, df, column_names, test_env) + + +def test_9608(empty_tab, conn, test_env): + "9608 - test with null values" + column_names = [ + "Id", + "FirstName", + "LastName", + "City", + "Country", + "DateOfBirth", + "Salary", + "CreditScore", + ] + rows = [ + ( + 1, + "Alice", + "Smith", + "New York", + None, + datetime.datetime(1990, 1, 15), + 50_000.50, + 750, + ), + (2, None, "Johnson", None, "UK", None, None, 680), + (3, "Charlie", None, "Paris", "France", None, 70_000.25, None), + ( + 4, + None, + None, + "Tokyo", + None, + datetime.datetime(1995, 5, 5), + 80_000.00, + 690, + ), + ] + 
conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + _verify_data(conn, rows, column_names) + + +def test_9609(empty_tab, conn, test_env): + "9609 - test with null values using a data frame" + data = { + "Id": [1, 2, 3, 4], + "FirstName": ["Alice", None, "Charlie", None], + "LastName": ["Smith", "Johnson", None, None], + "City": ["New York", None, "Paris", "Tokyo"], + "Country": [None, "UK", "France", None], + "DateOfBirth": [ + datetime.datetime(1990, 1, 15), + None, + None, + datetime.datetime(1995, 5, 5), + ], + "Salary": [50000.50, None, 70000.25, 80000.00], + "CreditScore": [750, 680, None, 690], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + _verify_data_frame(conn, df, column_names, test_env) + + +def test_9610(empty_tab, conn, test_env): + "9610 - test with the wrong number of columns" + column_names = ["Id", "FirstName", "LastName"] + rows = [(1, "Alice"), (2, "Joe")] + with test_env.assert_raises_full_code("DPY-4009"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + + +def test_9611(empty_tab, conn, test_env): + "9611 - test with the wrong number of columns using a data frame" + column_names = ["Id", "FirstName"] + data = { + "Id": [1, 2], + "FirstName": ["Alice", "Joe"], + "LastName": ["Smith", "Johnson"], + } + df = pandas.DataFrame(data) + with test_env.assert_raises_full_code("DPY-4009"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + + +def test_9612(empty_tab, conn, test_env): + "9612 - test with decimal data" + column_names = ["Id", "FirstName", "DecimalData"] + rows = [ + (decimal.Decimal("1"), "Sally", decimal.Decimal("1234567.8910")), + (decimal.Decimal("2"), "Jill", decimal.Decimal("9876543.2109")), + (decimal.Decimal("3"), "John", decimal.Decimal("5555555.5555")), + ] + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + with test_env.defaults_context_manager("fetch_decimals", True): + _verify_data(conn, rows, column_names) + + +def test_9613(empty_tab, conn, test_env): + "9613 - test with decimal data using a data frame" + data = { + "Id": [ + decimal.Decimal("1"), + decimal.Decimal("2"), + decimal.Decimal("3"), + ], + "FirstName": ["Sally", "Jill", "John"], + "DecimalData": [ + decimal.Decimal("1234567.8910"), + decimal.Decimal("9876543.2109"), + decimal.Decimal("5555555.5555"), + ], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + with test_env.defaults_context_manager("fetch_decimals", True): + _verify_data_frame(conn, df, column_names, test_env) + + +def test_9614(empty_tab, conn, test_env): + "9614 - test string data that exceeds the maximum length" + column_names = ["Id", "FirstName"] + rows = [(1, "Sally"), (2, "Jill" * 26)] + with test_env.assert_raises_full_code("DPY-8000"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + + +def test_9615(empty_tab, conn, test_env): + "9615 - test string data that exceeds the maximum length with a data frame" + 
data = { + "Id": [1, 2, 3], + "FirstName": ["Sally", "Jill", "John" * 26], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + with test_env.assert_raises_full_code("DPY-8000"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + + +def test_9616(conn, test_env): + "9616 - test data that is null" + column_names = ["IntCol", "StringCol", "RawCol", "FixedCharCol"] + rows = [(100, "String 100", b"Raw", "Fixed"), (2, None, b"Raw", "Fixed")] + with test_env.assert_raises_full_code("DPY-8001"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=rows, + ) + + +def test_9617(conn, test_env): + "9617 - test data that is null in a data frame" + data = { + "IntCol": [100, 200, 300], + "StringCol": ["String 100", None, "String 300"], + "RawCol": [b"Raw", b"Raw", b"Raw"], + "FixedCharCol": ["Fixed", "Fixed", "Fixed"], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + with test_env.assert_raises_full_code("DPY-8001"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=df, + ) + + +def test_9618(conn, test_env): + "9618 - test data containing empty string" + column_names = ["IntCol", "StringCol", "RawCol", "FixedCharCol"] + rows = [(100, "String 100", b"Raw", "Fixed"), (2, "", b"Raw", "Fixed")] + with test_env.assert_raises_full_code("DPY-8001"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=rows, + ) + + +def test_9619(conn, test_env): + "9619 - test data containing empty string in a data frame" + data = { + "IntCol": [100, 200, 300], + "StringCol": ["String 100", "", "String 300"], + "RawCol": [b"Raw", b"Raw", b"Raw"], + "FixedCharCol": ["Fixed", "Fixed", "Fixed"], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + with test_env.assert_raises_full_code("DPY-8001"): + conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=df, + ) + + +def test_9620(empty_tab, conn, test_env): + "9620 - test data is committed on success" + column_names = ["Id", "FirstName"] + rows = [(1, "Sally"), (2, "Jill")] + with test_env.get_connection() as other_conn: + other_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + _verify_data(conn, rows, column_names) + + +def test_9621(empty_tab, conn, test_env): + "9621 - test data is committed on success using a data frame" + data = { + "Id": [1, 2, 3], + "FirstName": ["Sally", "Jill", "John"], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + with test_env.get_connection() as other_conn: + other_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + _verify_data_frame(conn, df, column_names, test_env) diff --git a/tests/test_9700_direct_path_load_async.py b/tests/test_9700_direct_path_load_async.py new file mode 100644 index 00000000..812c455d --- /dev/null +++ b/tests/test_9700_direct_path_load_async.py @@ -0,0 +1,618 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. 
+# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +Module for testing the Direct Path Load interface with asyncio. +""" + +import datetime +import decimal + +import pandas +import pyarrow +import pytest + +TABLE_NAME = "TestDataFrame" + + +@pytest.fixture(autouse=True) +async def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.fixture +async def empty_tab(async_cursor): + await async_cursor.execute(f"delete from {TABLE_NAME}") + await async_cursor.connection.commit() + + +async def _verify_data(conn, data, column_names): + """ + Verifies that the data matches what is stored in the table. + """ + select_items = ",".join(column_names) + sql = f"select {select_items} from {TABLE_NAME} order by Id" + with conn.cursor() as cursor: + await cursor.execute(sql) + assert await cursor.fetchall() == data + + +async def _verify_data_frame(conn, df, column_names, test_env): + """ + Verifies that the contents of the data frame matches what is stored in the + table. 
+ """ + data = test_env.get_data_from_df(df) + await _verify_data(conn, data, column_names) + + +async def test_9700(empty_tab, async_conn, test_env): + "9700 - test basic direct path load with list of tuples" + data = [ + ( + 1, + "Alice", + "Smith", + "New York", + "USA", + datetime.datetime(1990, 1, 15), + 50000.50, + 750, + ), + ( + 2, + "Bob", + "Johnson", + "London", + "UK", + datetime.datetime(1985, 6, 20), + 60000.75, + 680, + ), + ( + 3, + "Charlie", + "Brown", + "Paris", + "France", + datetime.datetime(1992, 3, 10), + 70000.25, + 720, + ), + ] + column_names = [ + "Id", + "FirstName", + "LastName", + "City", + "Country", + "DateOfBirth", + "Salary", + "CreditScore", + ] + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=data, + ) + await _verify_data(async_conn, data, column_names) + + +async def test_9701(empty_tab, async_conn, test_env): + "9701 - test basic direct path load with Pandas dataframe" + data = { + "Id": [1, 2, 3, 4, 5], + "FirstName": ["Alice", "Bob", "Charlie", "David", "Eve"], + "LastName": ["Smith", "Johnson", "Brown", "Wilson", "Davis"], + "City": ["New York", "London", "Paris", "Tokyo", "Sydney"], + "Country": ["USA", "UK", "France", "Japan", "Australia"], + "DateOfBirth": [ + datetime.datetime(1990, 1, 15), + datetime.datetime(1985, 6, 20), + datetime.datetime(1992, 3, 10), + datetime.datetime(1988, 12, 1), + datetime.datetime(1995, 5, 5), + ], + "Salary": [50000.50, 60000.75, 70000.25, 80000.00, 90000.50], + "CreditScore": [750, 680, 720, 810, 690], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + await _verify_data_frame(async_conn, df, column_names, test_env) + + +async def test_9702(empty_tab, async_conn, test_env): + "9702 - test with empty data" + data = [] + column_names = ["Id", "FirstName"] + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=data, + ) + await _verify_data(async_conn, data, column_names) + + +async def test_9703(empty_tab, async_conn, test_env): + "9703 - test with empty data frame" + data = { + "Id": [], + "FirstName": [], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + await _verify_data_frame(async_conn, df, column_names, test_env) + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +async def test_9704( + batch_size, async_conn, empty_tab, round_trip_checker_async, test_env +): + "9704 - test with various batch sizes" + data = [(i + 1, f"String for row {i + 1}") for i in range(200)] + column_names = ["Id", "FirstName"] + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=data, + batch_size=batch_size, + ) + num_round_trips = 2 + len(data) // batch_size + if len(data) % batch_size: + num_round_trips += 1 + assert await round_trip_checker_async.get_value_async() == num_round_trips + await _verify_data(async_conn, data, column_names) + + +@pytest.mark.parametrize("batch_size", [1, 5, 99, 199, 200]) +async def test_9705( + batch_size, async_conn, empty_tab, round_trip_checker_async, test_env +): + "9705 - test with various batch sizes with a data 
frame" + names = ["Id", "FirstName"] + rows = [(i + 1, f"Name {i + 1}") for i in range(200)] + arrays = [ + pyarrow.array([i for i, _ in rows], pyarrow.int16()), + pyarrow.array([s for _, s in rows], pyarrow.string()), + ] + df = pyarrow.table(arrays, names).to_pandas() + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=names, + data=df, + batch_size=batch_size, + ) + num_round_trips = 2 + len(rows) // batch_size + if len(rows) % batch_size: + num_round_trips += 1 + assert await round_trip_checker_async.get_value_async() == num_round_trips + await _verify_data_frame(async_conn, df, names, test_env) + + +async def test_9706(empty_tab, disable_fetch_lobs, async_conn, test_env): + "9707 - test with all basic data types" + column_names = [ + "Id", + "FirstName", + "DateOfBirth", + "LastUpdated", + "Salary", + "CreditScore", + "IntegerData", + "LongIntegerData", + "FloatData", + "DoubleData", + "RawData", + "LongData", + "LongRawData", + ] + current_time = datetime.datetime.now() + rows = [ + ( + 1, + "Test1", + datetime.datetime(1990, 1, 1), + current_time, + 12345.50, + 700, + 123456789, + 123456789012345, + 1.625, + 9.87654321, + b"\x01\x02\x03\x04\x05", + "This is a long text description", + b"blob_data_1", + ), + ( + 2, + "Test2", + datetime.datetime(1991, 2, 2), + current_time, + 23456.75, + 750, + 987654321, + 987654321098765, + 5.5, + 1.23456789, + b"\xff\xfe\xfd\xfc\xfb", + "Another long description here", + b"blob_data_2", + ), + ] + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + await _verify_data(async_conn, rows, column_names) + + +async def test_9707(empty_tab, disable_fetch_lobs, async_conn, test_env): + "9707 - test with all basic data types with a data frame" + current_time = datetime.datetime.now() + column_names = [ + "Id", + "FirstName", + "DateOfBirth", + "LastUpdated", + "Salary", + "CreditScore", + "IntegerData", + "LongIntegerData", + "FloatData", + "DoubleData", + "RawData", + "LongData", + "LongRawData", + ] + arrays = [ + pyarrow.array([1, 2], pyarrow.int8()), + pyarrow.array(["Test1", "Test2"], pyarrow.string()), + pyarrow.array( + [datetime.datetime(1990, 1, 1), datetime.datetime(1991, 2, 2)], + pyarrow.timestamp("s"), + ), + pyarrow.array( + [current_time, current_time], + pyarrow.timestamp("us"), + ), + pyarrow.array([12345.50, 23456.75], pyarrow.float32()), + pyarrow.array([700, 750], pyarrow.int16()), + pyarrow.array([123456789, 987654321], pyarrow.uint32()), + pyarrow.array([123456789012345, 987654321098765], pyarrow.uint64()), + pyarrow.array([1.625, 5.675], pyarrow.float32()), + pyarrow.array([9.87654321, 1.23456789], pyarrow.float64()), + pyarrow.array( + [b"\x01\x02\x03\x04\x05", b"\xff\xfe\xfd\xfc\xfb"], + pyarrow.binary(), + ), + pyarrow.array( + [ + "This is a long text description", + "Another long description here", + ], + pyarrow.string(), + ), + pyarrow.array([b"blob_data_1", b"blob_data_2"], pyarrow.binary()), + ] + df = pyarrow.table(arrays, column_names).to_pandas() + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + await _verify_data_frame(async_conn, df, column_names, test_env) + + +async def test_9708(empty_tab, async_conn, test_env): + "9708 - test with null values" + column_names = [ + "Id", + "FirstName", + "LastName", + "City", + "Country", + "DateOfBirth", + "Salary", + "CreditScore", + ] + rows = 
[ + ( + 1, + "Alice", + "Smith", + "New York", + None, + datetime.datetime(1990, 1, 15), + 50_000.50, + 750, + ), + (2, None, "Johnson", None, "UK", None, None, 680), + (3, "Charlie", None, "Paris", "France", None, 70_000.25, None), + ( + 4, + None, + None, + "Tokyo", + None, + datetime.datetime(1995, 5, 5), + 80_000.00, + 690, + ), + ] + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + await _verify_data(async_conn, rows, column_names) + + +async def test_9709(empty_tab, async_conn, test_env): + "9709 - test with null values using a data frame" + data = { + "Id": [1, 2, 3, 4], + "FirstName": ["Alice", None, "Charlie", None], + "LastName": ["Smith", "Johnson", None, None], + "City": ["New York", None, "Paris", "Tokyo"], + "Country": [None, "UK", "France", None], + "DateOfBirth": [ + datetime.datetime(1990, 1, 15), + None, + None, + datetime.datetime(1995, 5, 5), + ], + "Salary": [50000.50, None, 70000.25, 80000.00], + "CreditScore": [750, 680, None, 690], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + await _verify_data_frame(async_conn, df, column_names, test_env) + + +async def test_9710(empty_tab, async_conn, test_env): + "9710 - test with the wrong number of columns" + column_names = ["Id", "FirstName", "LastName"] + rows = [(1, "Alice"), (2, "Joe")] + with test_env.assert_raises_full_code("DPY-4009"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + + +async def test_9711(empty_tab, async_conn, test_env): + "9711 - test with the wrong number of columns using a data frame" + column_names = ["Id", "FirstName"] + data = { + "Id": [1, 2], + "FirstName": ["Alice", "Joe"], + "LastName": ["Smith", "Johnson"], + } + df = pandas.DataFrame(data) + with test_env.assert_raises_full_code("DPY-4009"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + + +async def test_9712(empty_tab, async_conn, test_env): + "9712 - test with decimal data" + column_names = ["Id", "FirstName", "DecimalData"] + rows = [ + (decimal.Decimal("1"), "Sally", decimal.Decimal("1234567.8910")), + (decimal.Decimal("2"), "Jill", decimal.Decimal("9876543.2109")), + (decimal.Decimal("3"), "John", decimal.Decimal("5555555.5555")), + ] + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + with test_env.defaults_context_manager("fetch_decimals", True): + await _verify_data(async_conn, rows, column_names) + + +async def test_9713(empty_tab, async_conn, test_env): + "9713 - test with decimal data using a data frame" + data = { + "Id": [ + decimal.Decimal("1"), + decimal.Decimal("2"), + decimal.Decimal("3"), + ], + "FirstName": ["Sally", "Jill", "John"], + "DecimalData": [ + decimal.Decimal("1234567.8910"), + decimal.Decimal("9876543.2109"), + decimal.Decimal("5555555.5555"), + ], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + with test_env.defaults_context_manager("fetch_decimals", True): + await _verify_data_frame(async_conn, df, 
column_names, test_env) + + +async def test_9714(empty_tab, async_conn, test_env): + "9714 - test string data that exceeds the maximum length" + column_names = ["Id", "FirstName"] + rows = [(1, "Sally"), (2, "Jill" * 26)] + with test_env.assert_raises_full_code("DPY-8000"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + + +async def test_9715(empty_tab, async_conn, test_env): + "9715 - test string data that exceeds the maximum length with a data frame" + data = { + "Id": [1, 2, 3], + "FirstName": ["Sally", "Jill", "John" * 26], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + with test_env.assert_raises_full_code("DPY-8000"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + + +async def test_9716(async_conn, test_env): + "9716 - test data that is null" + column_names = ["IntCol", "StringCol", "RawCol", "FixedCharCol"] + rows = [(100, "String 100", b"Raw", "Fixed"), (2, None, b"Raw", "Fixed")] + with test_env.assert_raises_full_code("DPY-8001"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=rows, + ) + + +async def test_9717(async_conn, test_env): + "9717 - test data that is null in a data frame" + data = { + "IntCol": [100, 200, 300], + "StringCol": ["String 100", None, "String 300"], + "RawCol": [b"Raw", b"Raw", b"Raw"], + "FixedCharCol": ["Fixed", "Fixed", "Fixed"], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + with test_env.assert_raises_full_code("DPY-8001"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=df, + ) + + +async def test_9718(async_conn, test_env): + "9718 - test data containing empty string" + column_names = ["IntCol", "StringCol", "RawCol", "FixedCharCol"] + rows = [(100, "String 100", b"Raw", "Fixed"), (2, "", b"Raw", "Fixed")] + with test_env.assert_raises_full_code("DPY-8001"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=rows, + ) + + +async def test_9719(async_conn, test_env): + "9719 - test data containing empty string in a data frame" + data = { + "IntCol": [100, 200, 300], + "StringCol": ["String 100", "", "String 300"], + "RawCol": [b"Raw", b"Raw", b"Raw"], + "FixedCharCol": ["Fixed", "Fixed", "Fixed"], + } + df = pandas.DataFrame(data) + column_names = list(df.columns.tolist()) + with test_env.assert_raises_full_code("DPY-8001"): + await async_conn.direct_path_load( + schema_name=test_env.main_user, + table_name="TestStrings", + column_names=column_names, + data=df, + ) + + +async def test_9720(empty_tab, async_conn, test_env): + "9720 - test data is committed on success" + column_names = ["Id", "FirstName"] + rows = [(1, "Sally"), (2, "Jill")] + async with test_env.get_connection_async() as other_conn: + await other_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=rows, + ) + await _verify_data(async_conn, rows, column_names) + + +async def test_9721(empty_tab, async_conn, test_env): + "9721 - test data is committed on success using a data frame" + data = { + "Id": [1, 2, 3], + "FirstName": ["Sally", "Jill", "John"], + } + df = pandas.DataFrame(data) + column_names = 
list(df.columns.tolist()) + async with test_env.get_connection_async() as other_conn: + await other_conn.direct_path_load( + schema_name=test_env.main_user, + table_name=TABLE_NAME, + column_names=column_names, + data=df, + ) + await _verify_data_frame(async_conn, df, column_names, test_env) diff --git a/utils/templates/connection.py b/utils/templates/connection.py index ad962c91..c5c65d81 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -1065,6 +1065,32 @@ def cursor(self, scrollable: bool = False) -> Cursor: self._verify_connected() return Cursor(self, scrollable) + def direct_path_load( + self, + schema_name: str, + table_name: str, + column_names: list[str], + data: Any, + *, + batch_size: int = 2**32 - 1, + ) -> None: + """ + Load data into Oracle Database using the Direct Path Load interface. + It is available only in python-oracledb Thin mode. + + The ``data`` parameter can be a list of sequences, a DataFrame, or a + third-party DataFrame instance that supports the Apache Arrow PyCapsule + Interface. + + The ``batch_size`` parameter is used to split large data sets into + smaller pieces for sending to the database. It is the number of records + in each batch. This parameter can be used to tune performance. + """ + self._verify_connected() + self._impl.direct_path_load( + schema_name, table_name, column_names, data, batch_size + ) + def fetch_df_all( self, statement: str, @@ -2042,6 +2068,28 @@ async def fetchall( cursor.rowfactory = rowfactory return await cursor.fetchall() + async def direct_path_load( + self, + schema_name: str, + table_name: str, + column_names: list[str], + data: Any, + *, + batch_size: int = 2**32 - 1, + ) -> None: + """ + Load data into Oracle Database using the Direct Path Load interface. + It is available only in python-oracledb Thin mode. + + The ``data`` parameter can be a list of sequences, a DataFrame, or a + third-party DataFrame instance that supports the Apache Arrow PyCapsule + Interface. + """ + self._verify_connected() + await self._impl.direct_path_load( + schema_name, table_name, column_names, data, batch_size + ) + async def fetch_df_all( self, statement: str, From e3ac33383ac66d7fefa4eaab8b9f97c85eaa467c Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:48:20 -0600 Subject: [PATCH 211/239] Doc updates. --- doc/src/api_manual/async_connection.rst | 10 ---------- doc/src/api_manual/connection.rst | 10 ---------- doc/src/api_manual/dataframe.rst | 5 ----- doc/src/release_notes.rst | 8 ++++++++ doc/src/user_guide/dataframes.rst | 5 ----- 5 files changed, 8 insertions(+), 30 deletions(-) diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 86c1af16..1c393975 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -103,11 +103,6 @@ AsyncConnection Methods See :ref:`dataframeformat` for the supported data types and examples. - .. note:: - - The data frame support in python-oracledb 3.3 is a pre-release and may - change in a future version. - .. versionchanged:: 3.4.0 The ``fetch_decimals`` parameter was added. @@ -118,11 +113,6 @@ AsyncConnection Methods See :ref:`dataframeformat` for the supported data types and examples. - .. note:: - - The data frame support in python-oracledb 3.3 is a pre-release and may - change in a future version. - .. versionchanged:: 3.4.0 The ``fetch_decimals`` parameter was added. 
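The ``direct_path_load()`` connection methods introduced earlier in this patch series take the owning schema, the table name, the column names, and either a list of sequences or a DataFrame. The following is a minimal sketch of how the synchronous form might be called; the connection details, the ``HR.EMPLOYEES`` table, and its column names are hypothetical, and the ``batch_size`` value is only illustrative:

.. code-block:: python

    import datetime

    import oracledb

    # hypothetical credentials and DSN; Direct Path Load is available only in
    # python-oracledb Thin mode
    connection = oracledb.connect(
        user="hr", password="hr_password", dsn="localhost/orclpdb1"
    )

    # rows can be plain sequences; a DataFrame that supports the Apache Arrow
    # PyCapsule interface can be passed instead
    rows = [
        (1, "Alice", datetime.datetime(1990, 1, 15)),
        (2, "Bob", datetime.datetime(1985, 6, 20)),
    ]

    # batch_size splits large data sets into several round-trips; the data is
    # committed when the load completes successfully
    connection.direct_path_load(
        schema_name="HR",
        table_name="EMPLOYEES",
        column_names=["ID", "FIRST_NAME", "DATE_OF_BIRTH"],
        data=rows,
        batch_size=1000,
    )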
diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 3cc944e6..577b1ea6 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -91,11 +91,6 @@ Connection Methods See :ref:`dataframeformat` for the supported data types and examples. - .. note:: - - The data frame support in python-oracledb 3.3 is a pre-release and may - change in a future version. - .. dbapimethodextension:: .. versionchanged:: 3.4.0 @@ -108,11 +103,6 @@ Connection Methods See :ref:`dataframeformat` for the supported data types and examples. - .. note:: - - The data frame support in python-oracledb 3.3 is a pre-release and may - change in a future version. - .. dbapimethodextension:: .. versionchanged:: 3.4.0 diff --git a/doc/src/api_manual/dataframe.rst b/doc/src/api_manual/dataframe.rst index 664f3f6e..3ce11d6b 100644 --- a/doc/src/api_manual/dataframe.rst +++ b/doc/src/api_manual/dataframe.rst @@ -13,11 +13,6 @@ libraries. See :ref:`dataframeformat` for more information, including the type mapping from Oracle Database types to Arrow data types. -.. note:: - - The data frame support in python-oracledb 3.3 is a pre-release and may - change in a future version. - .. _oracledataframeobj: DataFrame Class diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index 3ea4dd0d..ce09f08b 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -73,8 +73,16 @@ Common Changes the `cryptography deprecation announcement `__. +#) Connectivity and interoperability with Oracle Database and Oracle Client + libraries older than version 19 is deprecated and will be removed in a + future version of python-oracledb. Production use, and availability of + database and client software, is detailed in `Release Schedule of Current + Database Releases `__. #) Pin Cython to 3.1.x instead of 3.1.0 as requested (`issue 530 `__). +#) Support for :ref:`data frames ` is no longer considered a + pre-release. #) Fixed bug when attempting to execute an empty statement (`issue 525 `__). #) Fixed bug when attempting to convert an integer that cannot be represented diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst index a8a3e4ad..4c0fe1fe 100644 --- a/doc/src/user_guide/dataframes.rst +++ b/doc/src/user_guide/dataframes.rst @@ -19,11 +19,6 @@ Python-oracledb has a :ref:`DataFrame ` object that exposes an Apache Arrow ArrowArrayStream PyCapsule Interface. This enables zero-copy data interchanges to the data frame objects of other libraries. -.. note:: - - The data frame support in python-oracledb 3.3 is a pre-release and may - change in a future version. - .. _dfquery: Fetching Data Frames From e92bc7b51d3d88f5af978ad00295ecf830094a21 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:51:46 -0600 Subject: [PATCH 212/239] Support type mapping in Dataframe queries (#494). 
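The ``requested_schema`` parameter added by this patch is also accepted by the batched fetch methods. A minimal sketch, assuming pyarrow is installed, a ``connection`` already exists, and a hypothetical ``SALES`` table with a NUMBER column named ``AMOUNT``:

.. code-block:: python

    import pyarrow

    # ask for the NUMBER column to be fetched as int32 rather than the
    # default double; the schema needs one entry per queried column
    schema = pyarrow.schema([("AMOUNT", pyarrow.int32())])

    # each DataFrame yielded by fetch_df_batches() uses the requested
    # Arrow types and names
    for odf in connection.fetch_df_batches(
        "select amount from sales", size=1000, requested_schema=schema
    ):
        tab = pyarrow.table(odf)
        print(tab.num_rows, tab.schema)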
--- doc/src/api_manual/async_connection.rst | 4 +- doc/src/api_manual/connection.rst | 4 +- doc/src/release_notes.rst | 31 +- doc/src/user_guide/batch_statement.rst | 2 +- doc/src/user_guide/dataframes.rst | 105 ++- samples/dataframe_types.py | 63 ++ src/oracledb/arrow_impl.pxd | 8 +- src/oracledb/arrow_impl.pyx | 1 + src/oracledb/base_impl.pxd | 10 +- src/oracledb/base_impl.pyx | 2 +- src/oracledb/connection.py | 37 + src/oracledb/errors.py | 23 +- src/oracledb/impl/arrow/array.pyx | 98 ++- src/oracledb/impl/arrow/schema.pyx | 68 +- src/oracledb/impl/arrow/utils.pyx | 1 + src/oracledb/impl/base/buffer.pyx | 14 +- src/oracledb/impl/base/converters.pyx | 116 ++- src/oracledb/impl/base/cursor.pyx | 14 +- src/oracledb/impl/base/metadata.pyx | 92 ++- src/oracledb/impl/thick/odpi.pxd | 1 + src/oracledb/impl/thick/var.pyx | 60 +- src/oracledb/impl/thin/dbobject.pyx | 3 +- src/oracledb/impl/thin/messages/base.pyx | 10 +- src/oracledb/impl/thin/statement.pyx | 1 + src/oracledb/thick_impl.pyx | 1 + src/oracledb/thin_impl.pyx | 2 +- tests/test_8000_dataframe.py | 16 + tests/test_8100_dataframe_async.py | 16 + tests/test_9300_dataframe_requested_schema.py | 721 +++++++++++++++++ ...t_9400_dataframe_requested_schema_async.py | 744 ++++++++++++++++++ utils/templates/connection.py | 41 + 31 files changed, 2164 insertions(+), 145 deletions(-) create mode 100644 samples/dataframe_types.py create mode 100644 tests/test_9300_dataframe_requested_schema.py create mode 100644 tests/test_9400_dataframe_requested_schema_async.py diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 1c393975..2cca1110 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -105,7 +105,7 @@ AsyncConnection Methods .. versionchanged:: 3.4.0 - The ``fetch_decimals`` parameter was added. + The ``fetch_decimals`` and ``requested_schema`` parameters were added. .. versionadded:: 3.0.0 @@ -115,7 +115,7 @@ AsyncConnection Methods .. versionchanged:: 3.4.0 - The ``fetch_decimals`` parameter was added. + The ``fetch_decimals`` and ``requested_schema`` parameters were added. .. versionadded:: 3.0.0 diff --git a/doc/src/api_manual/connection.rst b/doc/src/api_manual/connection.rst index 577b1ea6..7df813d4 100644 --- a/doc/src/api_manual/connection.rst +++ b/doc/src/api_manual/connection.rst @@ -95,7 +95,7 @@ Connection Methods .. versionchanged:: 3.4.0 - The ``fetch_decimals`` parameter was added. + The ``fetch_decimals`` and ``requested_schema`` parameters were added. .. versionadded:: 3.0.0 @@ -107,7 +107,7 @@ Connection Methods .. versionchanged:: 3.4.0 - The ``fetch_decimals`` parameter was added. + The ``fetch_decimals`` and ``requested_schema`` parameters were added. .. versionadded:: 3.0.0 diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index ce09f08b..b6a8d32b 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -21,7 +21,8 @@ Thin Mode Changes #) Added support for Oracle Database's :ref:`Direct Path Load ` functionality which is very efficient for loading large - datasets into a database. + datasets into a database. Data may be a list of sequences or a DataFrame + object. 
#) Fixed bug when setting values of type ``datetime.date`` on variables (such as created by :meth:`Cursor.var()` or implicitly by :meth:`Cursor.setinputsizes()`) of types @@ -44,16 +45,26 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Added support for all of the signed and unsigned fixed width integer types - when ingesting data frames supporting the Arrow PyCapsule interface. - Previously only ``int64`` was supported. -#) Added support for types ``date32`` and ``date64`` when ingesting data - frames supporting the Arrow PyCapsule interface as requested - (`issue 535 `__). +#) Changes to :ref:`data frame ` support: + + - Support for data frames is no longer considered a pre-release. + - Added parameter ``requested_schema`` to :meth:`Connection.fetch_df_all()` + and :meth:`Connection.fetch_df_batches()` to support type mapping when + querying. + - Added support for all of the signed and unsigned fixed width integer + types when ingesting data frames supporting the Arrow PyCapsule + interface. Previously only ``int64`` was supported. + - Added support for types ``date32`` and ``date64`` when ingesting data + frames supporting the Arrow PyCapsule interface as requested + (`issue 535 `__). + - Data frames with multiple chunks are now supported. + - Fixed bug when fetching NCHAR and NVARCHAR2 column data. + - Fixed bug when attempting to convert an integer that cannot be + represented as a native C ``int`` value to an Arrow data frame. + #) Added a ``batch_size`` parameter to :meth:`Cursor.executemany()` and :meth:`AsyncCursor.executemany()` to let these methods operate on data in batches. -#) Data frames with multiple chunks are now supported. #) Added ``fetch_lobs`` and ``fetch_decimals`` parameters where applicable to the methods used for fetching rows or data frames from the database. Note that for the creation of pipeline operations, if these parameters are not @@ -81,12 +92,8 @@ Common Changes DocumentDisplay?id=742060.1>`__. #) Pin Cython to 3.1.x instead of 3.1.0 as requested (`issue 530 `__). -#) Support for :ref:`data frames ` is no longer considered a - pre-release. #) Fixed bug when attempting to execute an empty statement (`issue 525 `__). -#) Fixed bug when attempting to convert an integer that cannot be represented - as a native C ``int`` value to an Arrow data frame. #) Fixed bug when attempting to append an element to a :ref:`DbObject ` which is not actually a collection. #) API documentation is now generated from the source code. diff --git a/doc/src/user_guide/batch_statement.rst b/doc/src/user_guide/batch_statement.rst index 1359ca7c..fdf24050 100644 --- a/doc/src/user_guide/batch_statement.rst +++ b/doc/src/user_guide/batch_statement.rst @@ -629,7 +629,7 @@ You can control the data transfer by changing your SELECT statement. Direct Path Loads ================= -Direct Path Loads allows data being inserted into Oracle Database to bypass +Direct Path Loads allow data being inserted into Oracle Database to bypass code layers such as the database buffer cache. Also there are no INSERT statements used. 
This can be very efficient for ingestion of huge amounts of data but, as a
 consequence of the architecture, there are restrictions on when
diff --git a/doc/src/user_guide/dataframes.rst b/doc/src/user_guide/dataframes.rst
index 4c0fe1fe..5a0d874d 100644
--- a/doc/src/user_guide/dataframes.rst
+++ b/doc/src/user_guide/dataframes.rst
@@ -110,14 +110,18 @@ Or to iterate:
 Data Frame Type Mapping
 -----------------------
 
+Default Data Frame Type Mapping
++++++++++++++++++++++++++++++++
+
 Internally, python-oracledb's :ref:`DataFrame ` support makes use of
 `Apache nanoarrow `__ libraries to build data
 frames.
 
-The following data type mapping occurs from Oracle Database types to the Arrow
-types used in python-oracledb DataFrame objects. Querying any other data types
-from Oracle Database will result in an exception. :ref:`Output type handlers
-` cannot be used to map data types.
+When querying, the following default data type mapping occurs from Oracle
+Database types to the Arrow types used in python-oracledb DataFrame
+objects. Querying any other data types from Oracle Database will result in an
+exception. :ref:`Output type handlers ` cannot be used to
+map data types.
 
 .. list-table-with-summary:: Mapping from Oracle Database to Arrow data types
    :header-rows: 1
@@ -258,6 +262,99 @@ When converting Oracle Database DATEs and TIMESTAMPs:
    * - 7 - 9
      - nanoseconds
 
+Explicit Data Frame Type Mapping
+++++++++++++++++++++++++++++++++
+
+You can explicitly set the data types and names that a :ref:`DataFrame
+` will use for query results. This provides fine-grained
+control over the physical data representation of the resulting Arrow arrays. It
+allows you to specify a representation that is more efficient for your specific
+use case. This can reduce memory consumption and improve processing speed.
+
+The ``requested_schema`` parameter to
+:meth:`Connection.fetch_df_all()`, :meth:`Connection.fetch_df_batches()`,
+:meth:`AsyncConnection.fetch_df_all()`, or
+:meth:`AsyncConnection.fetch_df_batches()` should be an object implementing the
+`Arrow PyCapsule schema interface
+`__.
+
+For example, the ``pyarrow.schema()`` factory function can be used to create a
+new schema. This takes a list of field definitions as input. Each field can be
+a tuple of ``(name, DataType)``:
+
+.. code-block:: python
+
+    import pyarrow
+
+    # Default fetch
+
+    odf = connection.fetch_df_all(
+        "select 123 c1, 'Scott' c2 from dual"
+    )
+    tab = pyarrow.table(odf)
+    print("Default Output:", tab)
+
+    # Fetching with an explicit schema
+
+    schema = pyarrow.schema([
+        ("col_1", pyarrow.int16()),
+        ("C2", pyarrow.string())
+    ])
+
+    odf = connection.fetch_df_all(
+        "select 456 c1, 'King' c2 from dual",
+        requested_schema=schema
+    )
+    tab = pyarrow.table(odf)
+    print("\nNew Output:", tab)
+
+The schema should have an entry for each queried column.
+
+Running the example shows that the number column with the explicit schema was
+fetched into the requested type INT16. Its name has also changed::
+
+    Default Output: pyarrow.Table
+    C1: double
+    C2: string
+    ----
+    C1: [[123]]
+    C2: [["Scott"]]
+
+    New Output: pyarrow.Table
+    col_1: int16
+    C2: string
+    ----
+    col_1: [[456]]
+    C2: [["King"]]
+
+**Supported Explicit Type Mapping**
+
+The following table shows the explicit type mappings that are supported. An
+error will occur if the database type or the data cannot be represented in the
+requested schema type.
+
+    .. 
list-table-with-summary:: + :header-rows: 1 + :class: wy-table-responsive + :widths: 1 1 + :align: left + :summary: The first column is the Oracle Database data type. The second column shows supported Arrow data types. + + * - Oracle Database Type + - Arrow Data Types + * - DB_TYPE_NUMBER + - INT8, INT16, INT32, INT64, UINT8, UINT16, UINT32, UINT64, DECIMAL128(p, s), DOUBLE, FLOAT + * - DB_TYPE_RAW, DB_TYPE_LONG_RAW + - BINARY, FIXED SIZE BINARY, LARGE BINARY + * - DB_TYPE_BOOLEAN + - BOOLEAN + * - DB_TYPE_DATE, DB_TYPE_TIMESTAMP, DB_TYPE_TIMESTAMP_LTZ, DB_TYPE_TIMESTAMP_TZ + - DATE32, DATE64, TIMESTAMP + * - DB_TYPE_BINARY_DOUBLE, DB_TYPE_BINARY_FLOAT + - DOUBLE, FLOAT + * - DB_TYPE_VARCHAR, DB_TYPE_CHAR, DB_TYPE_LONG, DB_TYPE_NVARCHAR, DB_TYPE_NCHAR, DB_TYPE_LONG_NVARCHAR + - STRING, LARGE_STRING + .. _convertingodf: Converting python-oracledb's DataFrame to Other Data Frames diff --git a/samples/dataframe_types.py b/samples/dataframe_types.py new file mode 100644 index 00000000..7c0c572a --- /dev/null +++ b/samples/dataframe_types.py @@ -0,0 +1,63 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ----------------------------------------------------------------------------- + +# ----------------------------------------------------------------------------- +# dataframe_types.py +# +# Shows how to change the schema types and names of a dataframe +# ----------------------------------------------------------------------------- + +import pyarrow + +import oracledb +import sample_env + +# determine whether to use python-oracledb thin mode or thick mode +if sample_env.run_in_thick_mode(): + oracledb.init_oracle_client(lib_dir=sample_env.get_oracle_client()) + +connection = oracledb.connect( + user=sample_env.get_main_user(), + password=sample_env.get_main_password(), + dsn=sample_env.get_connect_string(), + params=sample_env.get_connect_params(), +) + + +SQL = "select * from SampleQueryTab where id < 5" + +# Default fetch with no type mapping + +odf = connection.fetch_df_all(SQL) +tab = pyarrow.table(odf) +print("Default Output:", tab) + +# Fetching with an explicit schema + +schema = pyarrow.schema( + [("COL_1", pyarrow.int16()), ("COL_2", pyarrow.string())] +) +odf = connection.fetch_df_all(SQL, requested_schema=schema) +tab = pyarrow.table(odf) +print("\nNew Output:", tab) diff --git a/src/oracledb/arrow_impl.pxd b/src/oracledb/arrow_impl.pxd index 4ee0faa3..a73550b9 100644 --- a/src/oracledb/arrow_impl.pxd +++ b/src/oracledb/arrow_impl.pxd @@ -105,6 +105,7 @@ cdef class ArrowSchemaImpl: ArrowSchema *arrow_schema ArrowType child_arrow_type int child_element_size + list child_schemas cdef bint _is_sparse_vector(self) except* cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1 @@ -122,6 +123,10 @@ cdef class ArrowArrayImpl: ArrowArray *arrow_array ArrowSchemaImpl schema_impl + cdef int _extract_int(self, const void* ptr, ArrowType arrow_type, + int64_t index, int64_t* value) except -1 + cdef int _extract_uint(self, const void* ptr, ArrowType arrow_type, + int64_t index, uint64_t* value) except -1 cdef int _get_is_null(self, int64_t index, bint* is_null) except -1 cdef int _get_list_info(self, int64_t index, ArrowArray* arrow_array, int64_t* offset, int64_t* num_elements) except -1 @@ -129,12 +134,13 @@ cdef class ArrowArrayImpl: cdef int append_decimal(self, void* ptr, int64_t num_bytes) except -1 cdef int append_double(self, double value) except -1 cdef int append_float(self, float value) except -1 - cdef int append_int64(self, int64_t value) except -1 + cdef int append_int(self, int64_t value) except -1 cdef int append_last_value(self, ArrowArrayImpl array) except -1 cdef int append_null(self) except -1 cdef int append_sparse_vector(self, int64_t num_dimensions, array.array indices, array.array values) except -1 + cdef int append_uint(self, uint64_t value) except -1 cdef int append_vector(self, array.array value) except -1 cdef int finish_building(self) except -1 cdef int get_bool(self, int64_t index, bint* is_null, diff --git a/src/oracledb/arrow_impl.pyx b/src/oracledb/arrow_impl.pyx index 5cbe629a..0385ab4f 100644 --- a/src/oracledb/arrow_impl.pyx +++ b/src/oracledb/arrow_impl.pyx @@ -32,6 +32,7 @@ cimport cpython +from libc.errno cimport EINVAL from libc.stdint cimport uintptr_t from libc.string cimport memcpy, memset, strlen, strchr from cpython cimport array diff --git a/src/oracledb/base_impl.pxd b/src/oracledb/base_impl.pxd index 54ae3916..4a654072 100644 --- a/src/oracledb/base_impl.pxd +++ b/src/oracledb/base_impl.pxd @@ -337,8 +337,9 @@ cdef class Buffer: cdef int read_sb4(self, int32_t *value) except -1 cdef int read_sb8(self, int64_t *value) except 
-1 cdef bytes read_null_terminated_bytes(self) - cdef int read_oracle_data(self, OracleMetadata metadata, - OracleData* data, bint from_dbobject) except -1 + cdef object read_oracle_data(self, OracleMetadata metadata, + OracleData* data, bint from_dbobject, + bint decode_str) cdef object read_str(self, int csfrm, const char* encoding_errors=*) cdef object read_str_with_length(self) cdef int read_ub1(self, uint8_t *value) except -1 @@ -495,6 +496,10 @@ cdef class OracleMetadata: cdef int _create_arrow_schema(self) except -1 cdef int _finalize_init(self) except -1 cdef int _set_arrow_schema(self, ArrowSchemaImpl schema_impl) except -1 + cdef int check_convert_from_arrow(self, + ArrowSchemaImpl schema_impl) except -1 + cdef int check_convert_to_arrow(self, + ArrowSchemaImpl schema_impl) except -1 cdef OracleMetadata copy(self) @staticmethod cdef OracleMetadata from_arrow_schema(ArrowSchemaImpl schema_impl) @@ -718,6 +723,7 @@ cdef class BaseCursorImpl: public bint suspend_on_success public bint fetch_lobs public bint fetch_decimals + public ArrowSchemaImpl schema_impl uint32_t _buffer_rowcount uint32_t _buffer_index uint32_t _fetch_array_size diff --git a/src/oracledb/base_impl.pyx b/src/oracledb/base_impl.pyx index 0efec02e..5eadf008 100644 --- a/src/oracledb/base_impl.pyx +++ b/src/oracledb/base_impl.pyx @@ -39,7 +39,7 @@ from libc cimport errno from libc.stdint cimport int8_t, int16_t, int32_t, int64_t from libc.stdint cimport uint8_t, uint16_t, uint32_t, uint64_t from libc.stdint cimport UINT8_MAX, UINT16_MAX, UINT32_MAX, UINT64_MAX -from libc.stdlib cimport strtod, strtoll +from libc.stdlib cimport strtod, strtof, strtoll, strtoull from libc.string cimport memcpy from cpython cimport array from cpython.conversion cimport PyOS_snprintf diff --git a/src/oracledb/connection.py b/src/oracledb/connection.py index 012f2561..687bb2a1 100644 --- a/src/oracledb/connection.py +++ b/src/oracledb/connection.py @@ -45,6 +45,7 @@ from . import base_impl, driver_mode, errors, thick_impl, thin_impl from . import pool as pool_module from .aq import AsyncQueue, Queue, MessageProperties +from .arrow_impl import ArrowSchemaImpl from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType from .connect_params import ConnectParams from .cursor import AsyncCursor, Cursor @@ -1100,6 +1101,7 @@ def fetch_df_all( arraysize: Optional[int] = None, *, fetch_decimals: Optional[bool] = None, + requested_schema: Optional[Any] = None, ) -> DataFrame: """ Fetches all rows of the SQL query ``statement``, returning them in a @@ -1123,10 +1125,18 @@ def fetch_df_all( value is :data:`oracledb.defaults.fetch_decimals `. + The ``requested_schema`` parameter specifies an object that implements + the Arrow PyCapsule schema interface. The DataFrame returned by + ``fetch_df_all()`` will have the data types and names of the schema. + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() cursor._impl.fetching_arrow = True + if requested_schema is not None: + cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema( + requested_schema + ) if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize @@ -1144,6 +1154,7 @@ def fetch_df_batches( size: Optional[int] = None, *, fetch_decimals: Optional[bool] = None, + requested_schema: Optional[Any] = None, ) -> Iterator[DataFrame]: """ This returns an iterator yielding the next ``size`` rows of the SQL @@ -1169,10 +1180,18 @@ def fetch_df_batches( value is :data:`oracledb.defaults.fetch_decimals `. 
+        The ``requested_schema`` parameter specifies an object that implements
+        the Arrow PyCapsule schema interface. Each DataFrame yielded by this
+        method will have the data types and names of the schema.
+
+        Any LOB fetched must be less than 1 GB.
         """
         cursor = self.cursor()
         cursor._impl.fetching_arrow = True
+        if requested_schema is not None:
+            cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema(
+                requested_schema
+            )
         if size is not None:
             cursor.arraysize = size
             cursor.prefetchrows = cursor.arraysize
@@ -2418,6 +2437,7 @@ async def fetch_df_all(
         arraysize: Optional[int] = None,
         *,
         fetch_decimals: Optional[bool] = None,
+        requested_schema: Optional[Any] = None,
     ) -> DataFrame:
         """
         Fetches all rows of the SQL query ``statement``, returning them in a
@@ -2440,9 +2460,17 @@ async def fetch_df_all(
         capable of being represented in Arrow Decimal128 format. The default
         value is :data:`oracledb.defaults.fetch_decimals `.
+
+        The ``requested_schema`` parameter specifies an object that implements
+        the Arrow PyCapsule schema interface. The DataFrame returned by
+        ``fetch_df_all()`` will have the data types and names of the schema.
         """
         cursor = self.cursor()
         cursor._impl.fetching_arrow = True
+        if requested_schema is not None:
+            cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema(
+                requested_schema
+            )
         if arraysize is not None:
             cursor.arraysize = arraysize
             cursor.prefetchrows = cursor.arraysize
@@ -2460,6 +2488,7 @@ async def fetch_df_batches(
         size: Optional[int] = None,
         *,
         fetch_decimals: Optional[bool] = None,
+        requested_schema: Optional[Any] = None,
     ) -> Iterator[DataFrame]:
         """
         This returns an iterator yielding the next ``size`` rows of the SQL
@@ -2484,9 +2513,17 @@ async def fetch_df_batches(
         capable of being represented in Arrow Decimal128 format. The default
         value is :data:`oracledb.defaults.fetch_decimals `.
+
+        The ``requested_schema`` parameter specifies an object that implements
+        the Arrow PyCapsule schema interface. Each DataFrame yielded by this
+        method will have the data types and names of the schema.
""" cursor = self.cursor() cursor._impl.fetching_arrow = True + if requested_schema is not None: + cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema( + requested_schema + ) if size is not None: cursor.arraysize = size cursor.prefetchrows = cursor.arraysize diff --git a/src/oracledb/errors.py b/src/oracledb/errors.py index 972b35f1..91a80bba 100644 --- a/src/oracledb/errors.py +++ b/src/oracledb/errors.py @@ -330,7 +330,8 @@ def _raise_not_supported(feature: str) -> None: ERR_SESSIONLESS_ALREADY_ACTIVE = 3035 ERR_SESSIONLESS_INACTIVE = 3036 ERR_UNSUPPORTED_ARROW_TYPE = 3037 -ERR_UNSUPPORTED_ARROW_TYPE_FOR_DB_TYPE = 3038 +ERR_CANNOT_CONVERT_TO_ARROW_TYPE = 3038 +ERR_CANNOT_CONVERT_FROM_ARROW_TYPE = 3039 # error numbers that result in DatabaseError ERR_TNS_ENTRY_NOT_FOUND = 4000 @@ -369,6 +370,8 @@ def _raise_not_supported(feature: str) -> None: ERR_INVALID_SERVER_RESPONSE = 4035 ERR_CANNOT_CONVERT_TO_ARROW_INTEGER = 4036 ERR_CANNOT_CONVERT_TO_ARROW_DOUBLE = 4037 +ERR_INVALID_INTEGER = 4038 +ERR_CANNOT_CONVERT_TO_ARROW_FLOAT = 4039 # error numbers that result in InternalError ERR_MESSAGE_TYPE_UNKNOWN = 5000 @@ -592,12 +595,23 @@ def _raise_not_supported(feature: str) -> None: "insufficient to hold {required_buffer_len} bytes" ), ERR_CALL_TIMEOUT_EXCEEDED: "call timeout of {timeout} ms exceeded", + ERR_CANNOT_CONVERT_FROM_ARROW_TYPE: ( + 'Arrow type "{arrow_type}" cannot be converted to database type ' + '"{db_type}"' + ), ERR_CANNOT_CONVERT_TO_ARROW_DOUBLE: ( "{value} cannot be converted to an Arrow double" ), + ERR_CANNOT_CONVERT_TO_ARROW_FLOAT: ( + "{value} cannot be converted to an Arrow float" + ), ERR_CANNOT_CONVERT_TO_ARROW_INTEGER: ( "{value} cannot be converted to an Arrow integer" ), + ERR_CANNOT_CONVERT_TO_ARROW_TYPE: ( + 'database type "{db_type}" cannot be converted to Arrow type ' + '"{arrow_type}"' + ), ERR_CANNOT_PARSE_CONNECT_STRING: 'cannot parse connect string "{data}"', ERR_COLUMN_TRUNCATED: ( "column truncated to {col_value_len} {unit}. " @@ -695,6 +709,9 @@ def _raise_not_supported(feature: str) -> None: "{max_index}" ), ERR_INVALID_ENUM_VALUE: "invalid value for enumeration {name}: {value}", + ERR_INVALID_INTEGER: ( + "integer {value} cannot be represented as Arrow type {arrow_type}" + ), ERR_INVALID_LOB_AMOUNT: "LOB amount must be greater than zero", ERR_INVALID_LOB_OFFSET: "LOB offset must be greater than zero", ERR_INVALID_MAKEDSN_ARG: '"{name}" argument contains invalid values', @@ -921,10 +938,6 @@ def _raise_not_supported(feature: str) -> None: "internal error: unknown transaction sync version {version}" ), ERR_UNSUPPORTED_ARROW_TYPE: 'unsupported Arrow type "{arrow_type}"', - ERR_UNSUPPORTED_ARROW_TYPE_FOR_DB_TYPE: ( - 'Arrow type "{arrow_type}" cannot be converted to database type ' - '"{db_type}"' - ), ERR_UNSUPPORTED_INBAND_NOTIFICATION: ( "unsupported in-band notification with error number {err_num}" ), diff --git a/src/oracledb/impl/arrow/array.pyx b/src/oracledb/impl/arrow/array.pyx index 32efea15..e60a68c5 100644 --- a/src/oracledb/impl/arrow/array.pyx +++ b/src/oracledb/impl/arrow/array.pyx @@ -40,6 +40,36 @@ cdef class ArrowArrayImpl: ArrowArrayRelease(self.arrow_array) cpython.PyMem_Free(self.arrow_array) + cdef int _extract_int(self, const void* ptr, ArrowType arrow_type, + int64_t index, int64_t* value) except -1: + """ + Return an int64_t value at the specified index from the buffer for all + signed integer types. 
+ """ + if arrow_type == NANOARROW_TYPE_INT8: + value[0] = ( ptr)[index] + elif arrow_type == NANOARROW_TYPE_INT16: + value[0] = ( ptr)[index] + elif arrow_type in (NANOARROW_TYPE_INT32, NANOARROW_TYPE_DATE32): + value[0] = ( ptr)[index] + else: + value[0] = ( ptr)[index] + + cdef int _extract_uint(self, const void* ptr, ArrowType arrow_type, + int64_t index, uint64_t* value) except -1: + """ + Return a uint64_t value at the specified index from the buffer for all + unsigned integer types. + """ + if arrow_type == NANOARROW_TYPE_UINT8: + value[0] = ( ptr)[index] + elif arrow_type == NANOARROW_TYPE_UINT16: + value[0] = ( ptr)[index] + elif arrow_type == NANOARROW_TYPE_UINT32: + value[0] = ( ptr)[index] + else: + value[0] = ( ptr)[index] + cdef int _get_is_null(self, int64_t index, bint* is_null) except -1: """ Returns whether or not the value at the specified index is null. @@ -111,11 +141,19 @@ cdef class ArrowArrayImpl: """ self.append_double(value) - cdef int append_int64(self, int64_t value) except -1: + cdef int append_int(self, int64_t value) except -1: """ - Append a value of type int64_t to the array. + Append a signed integer value to the array. """ - _check_nanoarrow(ArrowArrayAppendInt(self.arrow_array, value)) + cdef: + str arrow_type + int result + result = ArrowArrayAppendInt(self.arrow_array, value) + if result == EINVAL: + arrow_type = ArrowTypeString(self.schema_impl.arrow_type).decode() + errors._raise_err(errors.ERR_INVALID_INTEGER, value=value, + arrow_type=arrow_type) + _check_nanoarrow(result) cdef int append_last_value(self, ArrowArrayImpl array) except -1: """ @@ -125,7 +163,9 @@ cdef class ArrowArrayImpl: int32_t start_offset, end_offset ArrowBuffer *offsets_buffer ArrowBuffer *data_buffer + uint64_t uint64_value ArrowDecimal decimal + int64_t int64_value int64_t *as_int64 int32_t *as_int32 double *as_double @@ -142,12 +182,26 @@ cdef class ArrowArrayImpl: if is_null: self.append_null() elif array.schema_impl.arrow_type in ( + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, NANOARROW_TYPE_INT64, NANOARROW_TYPE_TIMESTAMP ): - data_buffer = ArrowArrayBuffer(array.arrow_array, 1) - as_int64 = data_buffer.data - self.append_int64(as_int64[index]) + self._extract_int(ArrowArrayBuffer(array.arrow_array, 1).data, + array.schema_impl.arrow_type, index, + &int64_value) + self.append_int(int64_value) + elif array.schema_impl.arrow_type in ( + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64 + ): + self._extract_uint(ArrowArrayBuffer(array.arrow_array, 1).data, + array.schema_impl.arrow_type, index, + &uint64_value) + self.append_uint(uint64_value) elif array.schema_impl.arrow_type == NANOARROW_TYPE_DOUBLE: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_double = data_buffer.data @@ -159,7 +213,7 @@ cdef class ArrowArrayImpl: elif array.schema_impl.arrow_type == NANOARROW_TYPE_BOOL: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) as_bool = ArrowBitGet(data_buffer.data, index) - self.append_int64(as_bool) + self.append_int(as_bool) elif array.schema_impl.arrow_type == NANOARROW_TYPE_DECIMAL128: data_buffer = ArrowArrayBuffer(array.arrow_array, 1) ArrowDecimalInit(&decimal, 128, self.schema_impl.precision, @@ -208,6 +262,12 @@ cdef class ArrowArrayImpl: """ _check_nanoarrow(ArrowArrayAppendNull(self.arrow_array, 1)) + cdef int append_uint(self, uint64_t value) except -1: + """ + Append an unsigned integer to the array. 
+ """ + _check_nanoarrow(ArrowArrayAppendUInt(self.arrow_array, value)) + cdef int append_vector(self, array.array value) except -1: """ Append a vector to the array. @@ -357,18 +417,10 @@ cdef class ArrowArrayImpl: Return an int64_t value at the specified index from the Arrow array for all signed integer types. """ - cdef const void* ptr self._get_is_null(index, is_null) if not is_null[0]: - ptr = self.arrow_array.buffers[1] - if arrow_type == NANOARROW_TYPE_INT8: - value[0] = ( ptr)[index] - elif arrow_type == NANOARROW_TYPE_INT16: - value[0] = ( ptr)[index] - elif arrow_type in (NANOARROW_TYPE_INT32, NANOARROW_TYPE_DATE32): - value[0] = ( ptr)[index] - else: - value[0] = ( ptr)[index] + self._extract_int(self.arrow_array.buffers[1], arrow_type, index, + value) cdef int get_length(self, int64_t* length) except -1: """ @@ -429,18 +481,10 @@ cdef class ArrowArrayImpl: Return a uint64_t value at the specified index from the Arrow array for all unsigned integer types. """ - cdef const void* ptr self._get_is_null(index, is_null) if not is_null[0]: - ptr = self.arrow_array.buffers[1] - if arrow_type == NANOARROW_TYPE_UINT8: - value[0] = ( ptr)[index] - elif arrow_type == NANOARROW_TYPE_UINT16: - value[0] = ( ptr)[index] - elif arrow_type == NANOARROW_TYPE_UINT32: - value[0] = ( ptr)[index] - else: - value[0] = ( ptr)[index] + self._extract_uint(self.arrow_array.buffers[1], arrow_type, index, + value) cdef object get_vector(self, int64_t index, bint* is_null): """ diff --git a/src/oracledb/impl/arrow/schema.pyx b/src/oracledb/impl/arrow/schema.pyx index 68e4caa7..b11f095b 100644 --- a/src/oracledb/impl/arrow/schema.pyx +++ b/src/oracledb/impl/arrow/schema.pyx @@ -67,8 +67,6 @@ cdef class ArrowSchemaImpl: _check_nanoarrow(ArrowSchemaViewInit(&view, schema, NULL)) if view.type != NANOARROW_TYPE_LIST or schema.name != b"values": return False - _check_nanoarrow(ArrowSchemaViewInit(&view, schema.children[0], NULL)) - self._set_child_arrow_type(view.type) return True cdef int _set_child_arrow_type(self, ArrowType child_arrow_type) except -1: @@ -122,7 +120,10 @@ cdef class ArrowSchemaImpl: """ Populate the schema from another schema. 
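+
+        For a requested schema the struct is simply the list of column
+        fields, for example (illustrative only)::
+
+            import pyarrow
+
+            schema = pyarrow.schema(
+                [("ID", pyarrow.int64()), ("NAME", pyarrow.string())]
+            )
+            # each field populates one entry in self.child_schemas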
""" - cdef ArrowSchemaView schema_view + cdef: + ArrowSchemaView schema_view + ArrowSchemaImpl schema_impl + int64_t i ArrowSchemaMove(schema, self.arrow_schema) memset(&schema_view, 0, sizeof(ArrowSchemaView)) _check_nanoarrow( @@ -133,7 +134,25 @@ cdef class ArrowSchemaImpl: self.precision = schema_view.decimal_precision self.scale = schema_view.decimal_scale self.fixed_size = schema_view.fixed_size - if schema_view.type == NANOARROW_TYPE_TIMESTAMP: + if schema_view.type == NANOARROW_TYPE_STRUCT: + + # struct may refer to a sparse vector + if self._is_sparse_vector(): + _check_nanoarrow( + ArrowSchemaViewInit(&schema_view, + schema.children[2].children[0], NULL) + ) + self._set_child_arrow_type(schema_view.type) + + # otherwise, it is treated as a list of columns such as those used + # for a requested schema + else: + self.child_schemas = [] + for i in range(schema.n_children): + schema_impl = ArrowSchemaImpl.__new__(ArrowSchemaImpl) + schema_impl.populate_from_schema(schema.children[i]) + self.child_schemas.append(schema_impl) + elif schema_view.type == NANOARROW_TYPE_TIMESTAMP: self._set_time_unit(schema_view.time_unit) elif schema_view.type == NANOARROW_TYPE_DATE64: self._set_time_unit(NANOARROW_TIME_UNIT_MILLI) @@ -148,28 +167,25 @@ cdef class ArrowSchemaImpl: ) self._set_child_arrow_type(schema_view.type) elif schema_view.type not in ( - NANOARROW_TYPE_BINARY, - NANOARROW_TYPE_BOOL, - NANOARROW_TYPE_DECIMAL128, - NANOARROW_TYPE_DATE32, - NANOARROW_TYPE_DATE64, - NANOARROW_TYPE_DOUBLE, - NANOARROW_TYPE_FIXED_SIZE_BINARY, - NANOARROW_TYPE_FLOAT, - NANOARROW_TYPE_INT8, - NANOARROW_TYPE_INT16, - NANOARROW_TYPE_INT32, - NANOARROW_TYPE_INT64, - NANOARROW_TYPE_LARGE_BINARY, - NANOARROW_TYPE_LARGE_STRING, - NANOARROW_TYPE_STRING, - NANOARROW_TYPE_UINT8, - NANOARROW_TYPE_UINT16, - NANOARROW_TYPE_UINT32, - NANOARROW_TYPE_UINT64, - ) and not ( - schema_view.type == NANOARROW_TYPE_STRUCT - and self._is_sparse_vector() + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_BOOL, + NANOARROW_TYPE_DATE32, + NANOARROW_TYPE_DATE64, + NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_DOUBLE, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_FLOAT, + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, + NANOARROW_TYPE_INT64, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING, + NANOARROW_TYPE_STRING, + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64, ): errors._raise_err(errors.ERR_ARROW_UNSUPPORTED_DATA_FORMAT, schema_format=schema.format.decode()) diff --git a/src/oracledb/impl/arrow/utils.pyx b/src/oracledb/impl/arrow/utils.pyx index 5773151f..67d7374b 100644 --- a/src/oracledb/impl/arrow/utils.pyx +++ b/src/oracledb/impl/arrow/utils.pyx @@ -90,6 +90,7 @@ cdef extern from "nanoarrow.c": double value) ArrowErrorCode ArrowArrayAppendInt(ArrowArray* arrow_array, int64_t value) ArrowErrorCode ArrowArrayAppendNull(ArrowArray* arrow_array, int64_t n) + ArrowErrorCode ArrowArrayAppendUInt(ArrowArray * arrow_array, uint64_t n) ArrowBuffer* ArrowArrayBuffer(ArrowArray* arrow_array, int64_t i) ArrowErrorCode ArrowArrayFinishBuildingDefault(ArrowArray* arrow_array, ArrowError* error) diff --git a/src/oracledb/impl/base/buffer.pyx b/src/oracledb/impl/base/buffer.pyx index ac2bfd2c..dd72f900 100644 --- a/src/oracledb/impl/base/buffer.pyx +++ b/src/oracledb/impl/base/buffer.pyx @@ -162,12 +162,15 @@ cdef class Buffer: """ return self._size - self._pos - cdef int read_oracle_data(self, OracleMetadata metadata, - OracleData* data, bint from_dbobject) 
except -1: + cdef object read_oracle_data(self, OracleMetadata metadata, + OracleData* data, bint from_dbobject, + bint decode_str): """ Reads Oracle data of the given type from the buffer. """ cdef: + const char *encoding_errors = NULL + bytes temp_bytes = None uint8_t ora_type_num const uint8_t* ptr ssize_t num_bytes @@ -188,8 +191,15 @@ cdef class Buffer: ORA_TYPE_NUM_RAW, ORA_TYPE_NUM_VARCHAR, ): + if decode_str and metadata.dbtype._csfrm == CS_FORM_NCHAR: + temp_bytes = \ + ptr[:num_bytes].decode(ENCODING_UTF16, + encoding_errors).encode() + ptr = temp_bytes + num_bytes = len(temp_bytes) data.buffer.as_raw_bytes.ptr = ptr data.buffer.as_raw_bytes.num_bytes = num_bytes + return temp_bytes elif ora_type_num in ( ORA_TYPE_NUM_DATE, ORA_TYPE_NUM_TIMESTAMP, diff --git a/src/oracledb/impl/base/converters.pyx b/src/oracledb/impl/base/converters.pyx index 427a8377..27ed64dd 100644 --- a/src/oracledb/impl/base/converters.pyx +++ b/src/oracledb/impl/base/converters.pyx @@ -161,6 +161,22 @@ cdef cydatetime.datetime convert_date_to_python(OracleDataBuffer *buffer): return output +cdef int convert_date_to_arrow_date32(ArrowArrayImpl array_impl, + OracleDataBuffer *buffer) except -1: + """ + Converts a DATE, TIMESTAMP, TIMESTAMP WITH LOCAL TIME ZONE or TIMESTMP + WITH TIMEZONE value stored in the buffer to Arrow date32. + """ + cdef: + cydatetime.timedelta td + cydatetime.datetime dt + int32_t days + dt = convert_date_to_python(buffer) + td = dt - EPOCH_DATE + days = td.days + array_impl.append_int(days) + + cdef int convert_date_to_arrow_timestamp(ArrowArrayImpl array_impl, OracleDataBuffer *buffer) except -1: """ @@ -174,7 +190,7 @@ cdef int convert_date_to_arrow_timestamp(ArrowArrayImpl array_impl, dt = convert_date_to_python(buffer) td = dt - EPOCH_DATE ts = int(cydatetime.total_seconds(td) * array_impl.schema_impl.time_factor) - array_impl.append_int64(ts) + array_impl.append_int(ts) cdef object convert_interval_ds_to_python(OracleDataBuffer *buffer): @@ -268,10 +284,29 @@ cdef int convert_number_to_arrow_double(ArrowArrayImpl array_impl, array_impl.append_double(double_value) -cdef int convert_number_to_arrow_int64(ArrowArrayImpl array_impl, +cdef int convert_number_to_arrow_float(ArrowArrayImpl array_impl, OracleDataBuffer *buffer) except -1: """ - Converts a NUMBER value stored in the buffer to Arrow INT64. + Converts a NUMBER value stored in the buffer to Arrow float + """ + cdef: + OracleNumber *value = &buffer.as_number + float float_value + if value.is_max_negative_value: + array_impl.append_float(-1.0e126) + else: + errno.errno = 0 + float_value = strtof(( value.chars), NULL) + if errno.errno != 0: + errors._raise_err(errors.ERR_CANNOT_CONVERT_TO_ARROW_FLOAT, + value=value.chars[:value.num_chars].decode()) + array_impl.append_float(float_value) + + +cdef int convert_number_to_arrow_int(ArrowArrayImpl array_impl, + OracleDataBuffer *buffer) except -1: + """ + Converts a NUMBER value stored in the buffer to an Arrow integer. """ cdef: OracleNumber *value = &buffer.as_number @@ -281,7 +316,23 @@ cdef int convert_number_to_arrow_int64(ArrowArrayImpl array_impl, if errno.errno != 0: errors._raise_err(errors.ERR_CANNOT_CONVERT_TO_ARROW_INTEGER, value=value.chars[:value.num_chars].decode()) - array_impl.append_int64(int64_value) + array_impl.append_int(int64_value) + + +cdef int convert_number_to_arrow_uint(ArrowArrayImpl array_impl, + OracleDataBuffer *buffer) except -1: + """ + Converts a NUMBER value stored in the buffer to an Arrow unsigned integer. 
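+
+    A rough Python sketch of the intent (hypothetical helper; the real code
+    calls strtoull on the NUMBER's textual form and raises
+    ERR_CANNOT_CONVERT_TO_ARROW_INTEGER when the text cannot be parsed)::
+
+        def number_to_uint(text: str) -> int:
+            value = int(text)  # e.g. "123" -> 123
+            if not 0 <= value < 2**64:
+                raise ValueError("out of range for an Arrow unsigned integer")
+            return value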
+ """ + cdef: + OracleNumber *value = &buffer.as_number + uint64_t uint64_value + errno.errno = 0 + uint64_value = strtoull(( value.chars), NULL, 0) + if errno.errno != 0: + errors._raise_err(errors.ERR_CANNOT_CONVERT_TO_ARROW_INTEGER, + value=value.chars[:value.num_chars].decode()) + array_impl.append_uint(uint64_value) cdef object convert_number_to_python_decimal(OracleDataBuffer *buffer): @@ -344,6 +395,16 @@ cdef int convert_bytes_to_oracle_data(OracleDataBuffer *buffer, cpython.PyBytes_AsStringAndSize(value, &rb.ptr, &rb.num_bytes) +cdef int convert_str_to_arrow(ArrowArrayImpl array_impl, + OracleDataBuffer *buffer) except -1: + """ + Converts a CHAR, NCHAR, LONG, VARCHAR, or NVARCHAR value stored in the + buffer to an Arrow string. + """ + cdef OracleRawBytes *rb = &buffer.as_raw_bytes + array_impl.append_bytes( rb.ptr, rb.num_bytes) + + cdef object convert_str_to_python(OracleDataBuffer *buffer, uint8_t csfrm, const char* encoding_errors): """ @@ -366,7 +427,6 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, cdef: ArrowType arrow_type uint32_t db_type_num - OracleRawBytes* rb # NULL values if data.is_null: @@ -374,27 +434,51 @@ cdef int convert_oracle_data_to_arrow(OracleMetadata from_metadata, arrow_type = to_metadata._schema_impl.arrow_type db_type_num = from_metadata.dbtype.num - if arrow_type == NANOARROW_TYPE_INT64: - convert_number_to_arrow_int64(array_impl, &data.buffer) + if arrow_type in ( + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, + NANOARROW_TYPE_INT64, + ): + convert_number_to_arrow_int(array_impl, &data.buffer) + elif arrow_type in ( + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64, + ): + convert_number_to_arrow_uint(array_impl, &data.buffer) elif arrow_type == NANOARROW_TYPE_DOUBLE: if db_type_num == DB_TYPE_NUM_NUMBER: convert_number_to_arrow_double(array_impl, &data.buffer) + elif db_type_num == DB_TYPE_NUM_BINARY_FLOAT: + array_impl.append_float(data.buffer.as_float) else: array_impl.append_double(data.buffer.as_double) elif arrow_type == NANOARROW_TYPE_FLOAT: - array_impl.append_float(data.buffer.as_float) + if db_type_num == DB_TYPE_NUM_NUMBER: + convert_number_to_arrow_float(array_impl, &data.buffer) + elif db_type_num == DB_TYPE_NUM_BINARY_DOUBLE: + array_impl.append_double(data.buffer.as_double) + else: + array_impl.append_float(data.buffer.as_float) elif arrow_type == NANOARROW_TYPE_BOOL: - array_impl.append_int64(data.buffer.as_bool) + array_impl.append_int(data.buffer.as_bool) elif arrow_type in ( - NANOARROW_TYPE_BINARY, - NANOARROW_TYPE_STRING, - NANOARROW_TYPE_LARGE_BINARY, - NANOARROW_TYPE_LARGE_STRING + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_STRING, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_LARGE_BINARY, + NANOARROW_TYPE_LARGE_STRING + ): + convert_str_to_arrow(array_impl, &data.buffer) + elif arrow_type in ( + NANOARROW_TYPE_DATE64, + NANOARROW_TYPE_TIMESTAMP ): - rb = &data.buffer.as_raw_bytes - array_impl.append_bytes( rb.ptr, rb.num_bytes) - elif arrow_type == NANOARROW_TYPE_TIMESTAMP: convert_date_to_arrow_timestamp(array_impl, &data.buffer) + elif arrow_type == NANOARROW_TYPE_DATE32: + convert_date_to_arrow_date32(array_impl, &data.buffer) elif arrow_type == NANOARROW_TYPE_DECIMAL128: convert_number_to_arrow_decimal(array_impl, &data.buffer) diff --git a/src/oracledb/impl/base/cursor.pyx b/src/oracledb/impl/base/cursor.pyx index e4ea5110..b78e7d74 100644 --- a/src/oracledb/impl/base/cursor.pyx +++ b/src/oracledb/impl/base/cursor.pyx @@ 
-152,6 +152,7 @@ cdef class BaseCursorImpl: adjustments. """ cdef: + ArrowSchemaImpl schema_impl object var, pub_metadata BaseConnImpl conn_impl BaseVarImpl var_impl @@ -191,7 +192,18 @@ cdef class BaseCursorImpl: # otherwise, create a new variable using the provided fetch metadata var_impl = self._create_var_impl(conn) var_impl.num_elements = self._fetch_array_size - var_impl.metadata = metadata.copy() + if self.schema_impl is not None: + schema_impl = self.schema_impl.child_schemas[pos] + metadata.check_convert_to_arrow(schema_impl) + var_impl.metadata = OracleMetadata.from_arrow_schema(schema_impl) + if metadata.dbtype.num in (DB_TYPE_NUM_NUMBER, + DB_TYPE_NUM_BINARY_DOUBLE, + DB_TYPE_NUM_BINARY_FLOAT) \ + and schema_impl.arrow_type in (NANOARROW_TYPE_DOUBLE, + NANOARROW_TYPE_FLOAT): + var_impl.metadata.dbtype = metadata.dbtype + else: + var_impl.metadata = metadata.copy() var_impl._fetch_metadata = metadata # adjust the variable based on the defaults specified by the user, if diff --git a/src/oracledb/impl/base/metadata.pyx b/src/oracledb/impl/base/metadata.pyx index 63724e8d..1cb17a6f 100644 --- a/src/oracledb/impl/base/metadata.pyx +++ b/src/oracledb/impl/base/metadata.pyx @@ -136,6 +136,16 @@ cdef class OracleMetadata: Sets an Arrow schema, which checks to see that the Arrow type is compatible with the database type. """ + self.check_convert_from_arrow(schema_impl) + self._finalize_init() + self._schema_impl = schema_impl + + cdef int check_convert_from_arrow(self, + ArrowSchemaImpl schema_impl) except -1: + """ + Check that the conversion from the Arrow type to the database type is + supported. + """ cdef: ArrowType arrow_type = schema_impl.arrow_type uint32_t db_type_num = self.dbtype.num @@ -189,12 +199,88 @@ cdef class OracleMetadata: ok = True if not ok: - errors._raise_err(errors.ERR_UNSUPPORTED_ARROW_TYPE_FOR_DB_TYPE, + errors._raise_err(errors.ERR_CANNOT_CONVERT_FROM_ARROW_TYPE, arrow_type=schema_impl.get_type_name(), db_type=self.dbtype.name) - self._finalize_init() - self._schema_impl = schema_impl + cdef int check_convert_to_arrow(self, + ArrowSchemaImpl schema_impl) except -1: + """ + Check that the conversion to the Arrow type from the database type is + supported. 
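+
+        For example, requesting an Arrow boolean for a VARCHAR2 column is
+        expected to be rejected; an illustrative sketch (``conn`` and
+        ``pyarrow`` as in the tests, error text approximate)::
+
+            schema = pyarrow.schema([("NAME", pyarrow.bool_())])
+            conn.fetch_df_all(
+                "select 'x' as name from dual", requested_schema=schema
+            )
+            # raises DPY-3038: database type "DB_TYPE_VARCHAR" cannot be
+            # converted to Arrow type "bool"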
+ """ + cdef: + ArrowType arrow_type = schema_impl.arrow_type + uint32_t db_type_num = self.dbtype.num + bint ok = False + + if db_type_num == DB_TYPE_NUM_NUMBER: + if arrow_type in ( + NANOARROW_TYPE_DECIMAL128, + NANOARROW_TYPE_DOUBLE, + NANOARROW_TYPE_FLOAT, + NANOARROW_TYPE_INT8, + NANOARROW_TYPE_INT16, + NANOARROW_TYPE_INT32, + NANOARROW_TYPE_INT64, + NANOARROW_TYPE_UINT8, + NANOARROW_TYPE_UINT16, + NANOARROW_TYPE_UINT32, + NANOARROW_TYPE_UINT64 + ): + ok = True + elif db_type_num in ( + DB_TYPE_NUM_BLOB, + DB_TYPE_NUM_RAW, + DB_TYPE_NUM_LONG_RAW + ): + if arrow_type in ( + NANOARROW_TYPE_BINARY, + NANOARROW_TYPE_FIXED_SIZE_BINARY, + NANOARROW_TYPE_LARGE_BINARY + ): + ok = True + elif db_type_num == DB_TYPE_NUM_BOOLEAN: + if arrow_type == NANOARROW_TYPE_BOOL: + ok = True + elif db_type_num in ( + DB_TYPE_NUM_DATE, + DB_TYPE_NUM_TIMESTAMP, + DB_TYPE_NUM_TIMESTAMP_LTZ, + DB_TYPE_NUM_TIMESTAMP_TZ + ): + if arrow_type in ( + NANOARROW_TYPE_DATE32, + NANOARROW_TYPE_DATE64, + NANOARROW_TYPE_TIMESTAMP + ): + ok = True + elif db_type_num in ( + DB_TYPE_NUM_BINARY_DOUBLE, + DB_TYPE_NUM_BINARY_FLOAT + ): + if arrow_type in (NANOARROW_TYPE_DOUBLE, NANOARROW_TYPE_FLOAT): + ok = True + elif db_type_num in ( + DB_TYPE_NUM_CHAR, + DB_TYPE_NUM_CLOB, + DB_TYPE_NUM_LONG_NVARCHAR, + DB_TYPE_NUM_LONG_VARCHAR, + DB_TYPE_NUM_VARCHAR, + DB_TYPE_NUM_NCHAR, + DB_TYPE_NUM_NCLOB, + DB_TYPE_NUM_NVARCHAR + ): + if arrow_type in ( + NANOARROW_TYPE_STRING, + NANOARROW_TYPE_LARGE_STRING + ): + ok = True + + if not ok: + errors._raise_err(errors.ERR_CANNOT_CONVERT_TO_ARROW_TYPE, + arrow_type=schema_impl.get_type_name(), + db_type=self.dbtype.name) cdef OracleMetadata copy(self): """ diff --git a/src/oracledb/impl/thick/odpi.pxd b/src/oracledb/impl/thick/odpi.pxd index e325f735..0f23a9e7 100644 --- a/src/oracledb/impl/thick/odpi.pxd +++ b/src/oracledb/impl/thick/odpi.pxd @@ -89,6 +89,7 @@ cdef extern from "impl/thick/odpi/embed/dpi.c": DPI_NATIVE_TYPE_FLOAT DPI_NATIVE_TYPE_INT64 DPI_NATIVE_TYPE_INTERVAL_DS + DPI_NATIVE_TYPE_INTERVAL_YM DPI_NATIVE_TYPE_JSON DPI_NATIVE_TYPE_JSON_ARRAY DPI_NATIVE_TYPE_JSON_OBJECT diff --git a/src/oracledb/impl/thick/var.pyx b/src/oracledb/impl/thick/var.pyx index 8bf6f84b..db69138f 100644 --- a/src/oracledb/impl/thick/var.pyx +++ b/src/oracledb/impl/thick/var.pyx @@ -426,39 +426,35 @@ cdef class ThickVarImpl(BaseVarImpl): """ cdef: dpiData *data = &self._data[pos] - uint32_t ora_type_num + uint32_t native_type_num + OracleNumber *as_number OracleData ora_data dpiBytes *as_bytes object vector ora_data.is_null = data.isNull if not data.isNull: - ora_type_num = self._fetch_metadata.dbtype.num - if ora_type_num == DPI_ORACLE_TYPE_NATIVE_DOUBLE: - ora_data.buffer.as_double = data.value.asDouble - elif ora_type_num == DPI_ORACLE_TYPE_NATIVE_FLOAT: + native_type_num = self.metadata.dbtype._native_num + if native_type_num == DPI_NATIVE_TYPE_FLOAT: ora_data.buffer.as_float = data.value.asFloat - elif ora_type_num == DPI_ORACLE_TYPE_BOOLEAN: + elif native_type_num == DPI_NATIVE_TYPE_DOUBLE: + ora_data.buffer.as_double = data.value.asDouble + elif native_type_num == DPI_NATIVE_TYPE_BOOLEAN: ora_data.buffer.as_bool = data.value.asBoolean - elif ora_type_num in ( - DPI_ORACLE_TYPE_CHAR, - DPI_ORACLE_TYPE_LONG_NVARCHAR, - DPI_ORACLE_TYPE_LONG_VARCHAR, - DPI_ORACLE_TYPE_LONG_RAW, - DPI_ORACLE_TYPE_NCHAR, - DPI_ORACLE_TYPE_NVARCHAR, - DPI_ORACLE_TYPE_RAW, - DPI_ORACLE_TYPE_VARCHAR, - ): + elif native_type_num == DPI_NATIVE_TYPE_BYTES: as_bytes = &data.value.asBytes; - ora_data.buffer.as_raw_bytes.ptr = 
\ - as_bytes.ptr; - ora_data.buffer.as_raw_bytes.num_bytes = as_bytes.length; - elif ora_type_num in ( - DPI_ORACLE_TYPE_DATE, - DPI_ORACLE_TYPE_TIMESTAMP, - DPI_ORACLE_TYPE_TIMESTAMP_LTZ, - DPI_ORACLE_TYPE_TIMESTAMP_TZ, - ): + if self._fetch_metadata.dbtype.num == DPI_ORACLE_TYPE_NUMBER: + as_number = &ora_data.buffer.as_number + as_number.is_max_negative_value = 0; + as_number.is_integer = \ + memchr(as_bytes.ptr, b'.', as_bytes.length) == NULL; + memcpy(as_number.chars, as_bytes.ptr, as_bytes.length) + as_number.chars[as_bytes.length] = 0 + as_number.num_chars = as_bytes.length + else: + ora_data.buffer.as_raw_bytes.ptr = \ + as_bytes.ptr; + ora_data.buffer.as_raw_bytes.num_bytes = as_bytes.length; + elif native_type_num == DPI_NATIVE_TYPE_TIMESTAMP: ora_data.buffer.as_date.year = data.value.asTimestamp.year; ora_data.buffer.as_date.month = data.value.asTimestamp.month; ora_data.buffer.as_date.day = data.value.asTimestamp.day; @@ -471,7 +467,7 @@ cdef class ThickVarImpl(BaseVarImpl): data.value.asTimestamp.tzHourOffset; ora_data.buffer.as_date.tz_minute_offset = \ data.value.asTimestamp.tzMinuteOffset; - elif ora_type_num == DPI_ORACLE_TYPE_INTERVAL_DS: + elif native_type_num == DPI_NATIVE_TYPE_INTERVAL_DS: ora_data.buffer.as_interval_ds.days = \ data.value.asIntervalDS.days; ora_data.buffer.as_interval_ds.hours = \ @@ -482,20 +478,12 @@ cdef class ThickVarImpl(BaseVarImpl): data.value.asIntervalDS.seconds; ora_data.buffer.as_interval_ds.fseconds = \ data.value.asIntervalDS.fseconds; - elif ora_type_num == DPI_ORACLE_TYPE_INTERVAL_YM: + elif native_type_num == DPI_NATIVE_TYPE_INTERVAL_YM: ora_data.buffer.as_interval_ym.years = \ data.value.asIntervalYM.years; ora_data.buffer.as_interval_ym.months = \ data.value.asIntervalYM.months; - elif ora_type_num == DPI_ORACLE_TYPE_NUMBER: - as_bytes = &data.value.asBytes; - ora_data.buffer.as_number.is_max_negative_value = 0; - ora_data.buffer.as_number.is_integer = \ - memchr(as_bytes.ptr, b'.', as_bytes.length) == NULL; - memcpy(ora_data.buffer.as_number.chars, as_bytes.ptr, - as_bytes.length + 1); - ora_data.buffer.as_number.num_chars = as_bytes.length; - elif ora_type_num == DPI_ORACLE_TYPE_VECTOR: + elif native_type_num == DPI_NATIVE_TYPE_VECTOR: vector = _convert_vector_to_python(data.value.asVector) return convert_vector_to_arrow(self._arrow_array, vector) else: diff --git a/src/oracledb/impl/thin/dbobject.pyx b/src/oracledb/impl/thin/dbobject.pyx index 5b3e48da..078eafdd 100644 --- a/src/oracledb/impl/thin/dbobject.pyx +++ b/src/oracledb/impl/thin/dbobject.pyx @@ -391,7 +391,8 @@ cdef class ThinDbObjectImpl(BaseDbObjectImpl): else: obj_impl._unpack_data_from_buf(buf) return PY_TYPE_DB_OBJECT._from_impl(obj_impl) - buf.read_oracle_data(metadata, &data, from_dbobject=True) + buf.read_oracle_data(metadata, &data, from_dbobject=True, + decode_str=False) if metadata.dbtype._csfrm == CS_FORM_NCHAR: conn_impl = self.type._conn_impl conn_impl._protocol._caps._check_ncharset_id() diff --git a/src/oracledb/impl/thin/messages/base.pyx b/src/oracledb/impl/thin/messages/base.pyx index ec83e782..39d4eae4 100644 --- a/src/oracledb/impl/thin/messages/base.pyx +++ b/src/oracledb/impl/thin/messages/base.pyx @@ -971,12 +971,14 @@ cdef class MessageWithData(Message): # variables in order to take the new type handler into account conn = self.cursor.connection type_handler = cursor_impl._get_output_type_handler(&uses_metadata) - if type_handler is not statement._last_output_type_handler: + if type_handler is not statement._last_output_type_handler \ + or 
cursor_impl.schema_impl is not statement._last_schema_impl: for i, var_impl in enumerate(cursor_impl.fetch_var_impls): cursor_impl._create_fetch_var(conn, self.cursor, type_handler, uses_metadata, i, var_impl._fetch_metadata) statement._last_output_type_handler = type_handler + statement._last_schema_impl = cursor_impl.schema_impl # create Arrow arrays if fetching arrow is enabled if cursor_impl.fetching_arrow: @@ -1072,7 +1074,10 @@ cdef class MessageWithData(Message): else: column_value = PY_TYPE_DB_OBJECT._from_impl(obj_impl) else: - buf.read_oracle_data(metadata, &data, from_dbobject=False) + column_value = buf.read_oracle_data( + metadata, &data, from_dbobject=False, + decode_str=self.cursor_impl.fetching_arrow + ) if metadata.dbtype._csfrm == CS_FORM_NCHAR: buf._caps._check_ncharset_id() if self.cursor_impl.fetching_arrow: @@ -1147,6 +1152,7 @@ cdef class MessageWithData(Message): stmt._fetch_var_impls = cursor_impl.fetch_var_impls stmt._num_columns = cursor_impl._num_columns stmt._last_output_type_handler = type_handler + stmt._last_schema_impl = cursor_impl.schema_impl cdef int _process_error_info(self, ReadBuffer buf) except -1: cdef: diff --git a/src/oracledb/impl/thin/statement.pyx b/src/oracledb/impl/thin/statement.pyx index 3e926dfa..abdd25f7 100644 --- a/src/oracledb/impl/thin/statement.pyx +++ b/src/oracledb/impl/thin/statement.pyx @@ -299,6 +299,7 @@ cdef class Statement: list _fetch_var_impls object _bind_info_dict object _last_output_type_handler + ArrowSchemaImpl _last_schema_impl uint32_t _num_columns bint _executed bint _binds_changed diff --git a/src/oracledb/thick_impl.pyx b/src/oracledb/thick_impl.pyx index 72d52b62..54b9e168 100644 --- a/src/oracledb/thick_impl.pyx +++ b/src/oracledb/thick_impl.pyx @@ -77,6 +77,7 @@ from .base_impl cimport ( ENCODING_UTF8, OracleData, OracleMetadata, + OracleNumber, PURITY_DEFAULT, PY_TYPE_DATE, PY_TYPE_DATETIME, diff --git a/src/oracledb/thin_impl.pyx b/src/oracledb/thin_impl.pyx index 44ecb189..3fbcca48 100644 --- a/src/oracledb/thin_impl.pyx +++ b/src/oracledb/thin_impl.pyx @@ -199,7 +199,7 @@ from .base_impl import ( DB_TYPE_XMLTYPE, ) -from .arrow_impl cimport ArrowArrayImpl, DataFrameImpl +from .arrow_impl cimport ArrowArrayImpl, ArrowSchemaImpl, DataFrameImpl ctypedef unsigned char char_type diff --git a/tests/test_8000_dataframe.py b/tests/test_8000_dataframe.py index 7193c33e..4948bd93 100644 --- a/tests/test_8000_dataframe.py +++ b/tests/test_8000_dataframe.py @@ -1921,3 +1921,19 @@ def test_8077(conn, test_env): ) fetched_df = pyarrow.table(ora_df).to_pandas() assert [data] == test_env.get_data_from_df(fetched_df) + + +def test_8078(conn, test_env): + "8078 - test fetching NCHAR and NVARCHAR data" + value = "test_8078" + value_len = len(value) + ora_df = conn.fetch_df_all( + f""" + select + cast('{value}' as nchar({value_len})), + cast('{value}' as nvarchar2({value_len})) + from dual + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert test_env.get_data_from_df(fetched_df) == [(value, value)] diff --git a/tests/test_8100_dataframe_async.py b/tests/test_8100_dataframe_async.py index 0b6762aa..00e9c651 100644 --- a/tests/test_8100_dataframe_async.py +++ b/tests/test_8100_dataframe_async.py @@ -1696,3 +1696,19 @@ async def test_8166(async_conn, test_env): ) fetched_df = pyarrow.table(ora_df).to_pandas() assert [data] == test_env.get_data_from_df(fetched_df) + + +async def test_8167(conn, test_env): + "8167 - test fetching NCHAR and NVARCHAR data" + value = "test_8167" + value_len = len(value) + ora_df = 
conn.fetch_df_all( + f""" + select + cast('{value}' as nchar({value_len})), + cast('{value}' as nvarchar2({value_len})) + from dual + """ + ) + fetched_df = pyarrow.table(ora_df).to_pandas() + assert test_env.get_data_from_df(fetched_df) == [(value, value)] diff --git a/tests/test_9300_dataframe_requested_schema.py b/tests/test_9300_dataframe_requested_schema.py new file mode 100644 index 00000000..97ff82b0 --- /dev/null +++ b/tests/test_9300_dataframe_requested_schema.py @@ -0,0 +1,721 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +Module for testing user requested schema in fetch_df APIs +""" + +import datetime + +import pyarrow +import pytest + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +def test_9300(dtype, conn): + "9300 - fetch_df_all() with fixed width integer types" + statement = "select 1 from dual" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == dtype + assert tab["INT_COL"][0].as_py() == 1 + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +def test_9301(dtype, conn): + "9301 - fetch_df_all() with duplicate fixed width integer types" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + ora_df = conn.fetch_df_all( + """ + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert len(tab) == 6 + assert tab.field("INT_COL").type == dtype + for value in tab["INT_COL"]: + assert value.as_py() == 99 + + +def test_9302(conn): + "9302 - fetch_df_all() requested_schema honored for repeated execution" + statement = "select 1 as int_col from dual" + ora_df = conn.fetch_df_all(statement) + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + requested_schema = pyarrow.schema([("INT_COL", pyarrow.int8())]) + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = 
pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.int8() + ora_df = conn.fetch_df_all(statement) + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +def test_9303(dtype, conn): + "9303 - fetch_df_batches() with fixed width integer types" + statement = "select 1 from dual" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + for ora_df in conn.fetch_df_batches( + statement, requested_schema=requested_schema + ): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == dtype + assert tab["INT_COL"][0].as_py() == 1 + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +def test_9304(dtype, conn): + "9304 - fetch_df_batches() with duplicate fixed width integer types" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + for ora_df in conn.fetch_df_batches( + """ + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + """, + requested_schema=requested_schema, + ): + tab = pyarrow.table(ora_df) + assert len(tab) == 6 + assert tab.field("INT_COL").type == dtype + for value in tab["INT_COL"]: + assert value.as_py() == 99 + + +def test_9305(conn): + "9305 - fetch_df_batches() requested_schema honored for repeated execution" + statement = "select 1 as int_col from dual" + for ora_df in conn.fetch_df_batches(statement): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + requested_schema = pyarrow.schema([("INT_COL", pyarrow.int8())]) + for ora_df in conn.fetch_df_batches( + statement, requested_schema=requested_schema + ): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.int8() + for ora_df in conn.fetch_df_batches(statement): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.decimal128(precision=3, scale=2), + pyarrow.float32(), + pyarrow.float64(), + ], +) +def test_9306(dtype, conn): + "9306 - fetch_df_all() for NUMBER" + value = 2.75 + requested_schema = pyarrow.schema([("DECIMAL_COL", dtype)]) + statement = f"select {value} from dual" + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("DECIMAL_COL").type == dtype + assert tab["DECIMAL_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +def test_9307(dtype, conn): + "9307 - fetch_df_all() for BINARY_DOUBLE" + value = 123.25 + requested_schema = pyarrow.schema([("BINARY_DOUBLE_COL", dtype)]) + statement = f"select to_binary_double({value}) from dual" + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_DOUBLE_COL").type == dtype + assert tab["BINARY_DOUBLE_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +def test_9308(dtype, conn): + "9308 - fetch_df_all() for BINARY_FLOAT" + value = 123.625 + requested_schema = pyarrow.schema([("BINARY_FLOAT_COL", 
dtype)]) + statement = f"select to_binary_float({value}) from dual" + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_FLOAT_COL").type == dtype + assert tab["BINARY_FLOAT_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.binary(length=6), pyarrow.binary(), pyarrow.large_binary()], +) +def test_9309(dtype, conn): + "9309 - fetch_df_all() for RAW" + value = "ABCDEF" + requested_schema = pyarrow.schema([("RAW_COL", dtype)]) + statement = f"select utl_raw.cast_to_raw('{value}') as raw_col from dual" + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("RAW_COL").type == dtype + assert tab["RAW_COL"][0].as_py() == value.encode() + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9310(dtype, value_is_date, conn): + "9310 - fetch_df_all() for DATE" + requested_schema = pyarrow.schema([("DATE_COL", dtype)]) + value = datetime.datetime(2025, 2, 18) + statement = "select cast(:1 as date) from dual" + ora_df = conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("DATE_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["DATE_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9311(dtype, value_is_date, conn): + "9311 - fetch_df_all() for TIMESTAMP" + requested_schema = pyarrow.schema([("TIMESTAMP_COL", dtype)]) + value = datetime.datetime(2025, 1, 15) + statement = "select cast(:1 as timestamp) from dual" + ora_df = conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["TIMESTAMP_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9312(dtype, value_is_date, conn): + "9312 - fetch_df_all() for TIMESTAMP WITH LOCAL TIME ZONE" + requested_schema = pyarrow.schema([("TIMESTAMP_LTZ_COL", dtype)]) + value = datetime.datetime(2025, 3, 4) + statement = "select cast(:1 as timestamp with local time zone) from dual" + ora_df = conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_LTZ_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["TIMESTAMP_LTZ_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9313(dtype, value_is_date, conn): + "9313 - fetch_df_all() for TIMESTAMP WITH TIME ZONE" + requested_schema = 
pyarrow.schema([("TIMESTAMP_TZ_COL", dtype)]) + value = datetime.datetime(2025, 3, 4) + statement = "select cast(:1 as timestamp with time zone) from dual" + ora_df = conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_TZ_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["TIMESTAMP_TZ_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.binary(length=6), pyarrow.binary(), pyarrow.large_binary()], +) +def test_9314(dtype, conn): + "9314 - fetch_df_all() for BLOB" + value = "GHIJKL" + requested_schema = pyarrow.schema([("BLOB_COL", dtype)]) + statement = f"select to_blob(utl_raw.cast_to_raw('{value}')) from dual" + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("BLOB_COL").type == dtype + assert tab["BLOB_COL"][0].as_py() == value.encode() + + +@pytest.mark.parametrize( + "db_type_name", + ["CHAR", "NCHAR", "VARCHAR2", "NVARCHAR2"], +) +@pytest.mark.parametrize("dtype", [pyarrow.string(), pyarrow.large_string()]) +def test_9315(db_type_name, dtype, conn): + "9315 - fetch_df_all() for string types" + value = "test_9315" + requested_schema = pyarrow.schema([("STRING_COL", dtype)]) + statement = f"select cast('{value}' as {db_type_name}(9)) from dual" + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("STRING_COL").type == dtype + assert tab["STRING_COL"][0].as_py() == value + + +@pytest.mark.parametrize("db_type_name", ["CLOB", "NCLOB"]) +@pytest.mark.parametrize("dtype", [pyarrow.string(), pyarrow.large_string()]) +def test_9316(db_type_name, dtype, conn): + "9316 - fetch_df_all() for CLOB types" + value = "test_9316" + requested_schema = pyarrow.schema([("CLOB_COL", dtype)]) + statement = f"select to_{db_type_name.lower()}('{value}') from dual" + ora_df = conn.fetch_df_all(statement, requested_schema=requested_schema) + tab = pyarrow.table(ora_df) + assert tab.field("CLOB_COL").type == dtype + assert tab["CLOB_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.decimal128(precision=3, scale=2), + pyarrow.float32(), + pyarrow.float64(), + ], +) +def test_9317(dtype, conn): + "9317 - fetch_df_all() for NUMBER duplicate values" + value = 93.25 + requested_schema = pyarrow.schema([("DECIMAL_COL", dtype)]) + ora_df = conn.fetch_df_all( + f""" + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("DECIMAL_COL").type == dtype + for fetched_value in tab["DECIMAL_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +def test_9318(dtype, conn): + "9318 - fetch_df_all() for BINARY_DOUBLE duplicate values" + value = 523.75 + requested_schema = pyarrow.schema([("BINARY_DOUBLE_COL", dtype)]) + ora_df = conn.fetch_df_all( + f""" + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + 
""", + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_DOUBLE_COL").type == dtype + for fetched_value in tab["BINARY_DOUBLE_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +def test_9319(dtype, conn): + "9319 - fetch_df_all() for BINARY_FLOAT duplicate values" + value = 9308.125 + requested_schema = pyarrow.schema([("BINARY_FLOAT_COL", dtype)]) + ora_df = conn.fetch_df_all( + f""" + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_FLOAT_COL").type == dtype + for fetched_value in tab["BINARY_FLOAT_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.binary(length=6), pyarrow.binary(), pyarrow.large_binary()], +) +def test_9320(dtype, conn): + "9320 - fetch_df_all() for RAW duplicate values" + value = "A23456" + requested_schema = pyarrow.schema([("RAW_COL", dtype)]) + ora_df = conn.fetch_df_all( + f""" + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("RAW_COL").type == dtype + for fetched_value in tab["RAW_COL"]: + assert fetched_value.as_py() == value.encode() + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9321(dtype, value_is_date, conn): + "9321 - fetch_df_all() for DATE duplicate values" + requested_schema = pyarrow.schema([("DATE_COL", dtype)]) + value = datetime.datetime(2025, 3, 1) + parameters = dict(value=value) + ora_df = conn.fetch_df_all( + """ + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("DATE_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["DATE_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9322(dtype, value_is_date, conn): + "9322 - fetch_df_all() for TIMESTAMP duplicate values" + requested_schema = pyarrow.schema([("TIMESTAMP_COL", dtype)]) + value = datetime.datetime(2025, 1, 14) + parameters = dict(value=value) + ora_df = 
conn.fetch_df_all( + """ + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["TIMESTAMP_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9323(dtype, value_is_date, conn): + "9323 - fetch_df_all() for TIMESTAMP WITH LOCAL TIME ZONE duplicate values" + requested_schema = pyarrow.schema([("TIMESTAMP_LTZ_COL", dtype)]) + value = datetime.datetime(2025, 3, 6) + parameters = dict(value=value) + ora_df = conn.fetch_df_all( + """ + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_LTZ_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["TIMESTAMP_LTZ_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +def test_9324(dtype, value_is_date, conn): + "9324 - fetch_df_all() for TIMESTAMP WITH TIME ZONE duplicate values" + requested_schema = pyarrow.schema([("TIMESTAMP_TZ_COL", dtype)]) + value = datetime.datetime(2025, 2, 28) + parameters = dict(value=value) + ora_df = conn.fetch_df_all( + """ + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_TZ_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["TIMESTAMP_TZ_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "db_type_name", + ["CHAR", "NCHAR", "VARCHAR2", "NVARCHAR2"], +) +@pytest.mark.parametrize("dtype", [pyarrow.string(), pyarrow.large_string()]) +def test_9325(db_type_name, dtype, conn): + "9325 - fetch_df_all() for string types duplicate values" + value = "test_9325" + requested_schema = pyarrow.schema([("STRING_COL", dtype)]) + ora_df = 
conn.fetch_df_all( + f""" + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("STRING_COL").type == dtype + for fetched_value in tab["STRING_COL"]: + assert fetched_value.as_py() == value diff --git a/tests/test_9400_dataframe_requested_schema_async.py b/tests/test_9400_dataframe_requested_schema_async.py new file mode 100644 index 00000000..67f8f031 --- /dev/null +++ b/tests/test_9400_dataframe_requested_schema_async.py @@ -0,0 +1,744 @@ +# ----------------------------------------------------------------------------- +# Copyright (c) 2025, Oracle and/or its affiliates. +# +# This software is dual-licensed to you under the Universal Permissive License +# (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl and Apache License +# 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose +# either license. +# +# If you elect to accept the software under the Apache License, Version 2.0, +# the following applies: +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ----------------------------------------------------------------------------- + +""" +Module for testing user requested schema in fetch_df APIs using asyncio. 
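+
+These tests mirror test_9300_dataframe_requested_schema.py using the asyncio
+API; the typical call pattern exercised here is::
+
+    schema = pyarrow.schema([("INT_COL", pyarrow.int8())])
+    ora_df = await async_conn.fetch_df_all(
+        "select 1 from dual", requested_schema=schema
+    )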
+""" + +import datetime + +import pyarrow +import pytest + + +@pytest.fixture(autouse=True) +def module_checks(anyio_backend, skip_unless_thin_mode): + pass + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +async def test_9400(dtype, async_conn): + "9400 - fetch_df_all() with fixed width integer types" + statement = "select 1 from dual" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == dtype + assert tab["INT_COL"][0].as_py() == 1 + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +async def test_9401(dtype, async_conn): + "9401 - fetch_df_all() with duplicate fixed width integer types" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + ora_df = await async_conn.fetch_df_all( + """ + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert len(tab) == 6 + assert tab.field("INT_COL").type == dtype + for value in tab["INT_COL"]: + assert value.as_py() == 99 + + +async def test_9402(async_conn): + "9402 - fetch_df_all() requested_schema honored for repeated execution" + statement = "select 1 as int_col from dual" + ora_df = await async_conn.fetch_df_all(statement) + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + requested_schema = pyarrow.schema([("INT_COL", pyarrow.int8())]) + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.int8() + ora_df = await async_conn.fetch_df_all(statement) + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +async def test_9403(dtype, async_conn): + "9403 - fetch_df_batches() with fixed width integer types" + statement = "select 1 from dual" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + async for ora_df in async_conn.fetch_df_batches( + statement, requested_schema=requested_schema + ): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == dtype + assert tab["INT_COL"][0].as_py() == 1 + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.int8(), + pyarrow.int16(), + pyarrow.int32(), + pyarrow.int64(), + pyarrow.uint8(), + pyarrow.uint16(), + pyarrow.uint32(), + pyarrow.uint64(), + ], +) +async def test_9404(dtype, async_conn): + "9404 - fetch_df_batches() with duplicate fixed width integer types" + requested_schema = pyarrow.schema([("INT_COL", dtype)]) + async for ora_df in async_conn.fetch_df_batches( + """ + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + union all + select 99 from dual + """, + requested_schema=requested_schema, + ): 
+ tab = pyarrow.table(ora_df) + assert len(tab) == 6 + assert tab.field("INT_COL").type == dtype + for value in tab["INT_COL"]: + assert value.as_py() == 99 + + +async def test_9405(async_conn): + "9405 - fetch_df_batches() requested_schema honored for repeated execution" + statement = "select 1 as int_col from dual" + async for ora_df in async_conn.fetch_df_batches(statement): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + requested_schema = pyarrow.schema([("INT_COL", pyarrow.int8())]) + async for ora_df in async_conn.fetch_df_batches( + statement, requested_schema=requested_schema + ): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.int8() + async for ora_df in async_conn.fetch_df_batches(statement): + tab = pyarrow.table(ora_df) + assert tab.field("INT_COL").type == pyarrow.float64() + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.decimal128(precision=3, scale=2), + pyarrow.float32(), + pyarrow.float64(), + ], +) +async def test_9406(dtype, async_conn): + "9406 - fetch_df_all() for NUMBER" + value = 2.75 + requested_schema = pyarrow.schema([("DECIMAL_COL", dtype)]) + statement = f"select {value} from dual" + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("DECIMAL_COL").type == dtype + assert tab["DECIMAL_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +async def test_9407(dtype, async_conn): + "9407 - fetch_df_all() for BINARY_DOUBLE" + value = 123.25 + requested_schema = pyarrow.schema([("BINARY_DOUBLE_COL", dtype)]) + statement = f"select to_binary_double({value}) from dual" + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_DOUBLE_COL").type == dtype + assert tab["BINARY_DOUBLE_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +async def test_9408(dtype, async_conn): + "9408 - fetch_df_all() for BINARY_FLOAT" + value = 123.625 + requested_schema = pyarrow.schema([("BINARY_FLOAT_COL", dtype)]) + statement = f"select to_binary_float({value}) from dual" + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_FLOAT_COL").type == dtype + assert tab["BINARY_FLOAT_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.binary(length=6), pyarrow.binary(), pyarrow.large_binary()], +) +async def test_9409(dtype, async_conn): + "9409 - fetch_df_all() for RAW" + value = "ABCDEF" + requested_schema = pyarrow.schema([("RAW_COL", dtype)]) + statement = f"select utl_raw.cast_to_raw('{value}') as raw_col from dual" + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("RAW_COL").type == dtype + assert tab["RAW_COL"][0].as_py() == value.encode() + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9410(dtype, value_is_date, async_conn): + "9410 - fetch_df_all() for DATE" + requested_schema = pyarrow.schema([("DATE_COL", dtype)]) + value = datetime.datetime(2025, 2, 18) + statement = 
"select cast(:1 as date) from dual" + ora_df = await async_conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("DATE_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["DATE_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9411(dtype, value_is_date, async_conn): + "9411 - fetch_df_all() for TIMESTAMP" + requested_schema = pyarrow.schema([("TIMESTAMP_COL", dtype)]) + value = datetime.datetime(2025, 1, 15) + statement = "select cast(:1 as timestamp) from dual" + ora_df = await async_conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["TIMESTAMP_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9412(dtype, value_is_date, async_conn): + "9412 - fetch_df_all() for TIMESTAMP WITH LOCAL TIME ZONE" + requested_schema = pyarrow.schema([("TIMESTAMP_LTZ_COL", dtype)]) + value = datetime.datetime(2025, 3, 4) + statement = "select cast(:1 as timestamp with local time zone) from dual" + ora_df = await async_conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_LTZ_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["TIMESTAMP_LTZ_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9413(dtype, value_is_date, async_conn): + "9413 - fetch_df_all() for TIMESTAMP WITH TIME ZONE" + requested_schema = pyarrow.schema([("TIMESTAMP_TZ_COL", dtype)]) + value = datetime.datetime(2025, 3, 4) + statement = "select cast(:1 as timestamp with time zone) from dual" + ora_df = await async_conn.fetch_df_all( + statement, [value], requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_TZ_COL").type == dtype + if value_is_date: + value = value.date() + assert tab["TIMESTAMP_TZ_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.binary(length=6), pyarrow.binary(), pyarrow.large_binary()], +) +async def test_9414(dtype, async_conn): + "9414 - fetch_df_all() for BLOB" + value = "GHIJKL" + requested_schema = pyarrow.schema([("BLOB_COL", dtype)]) + statement = f"select to_blob(utl_raw.cast_to_raw('{value}')) from dual" + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("BLOB_COL").type == dtype + assert tab["BLOB_COL"][0].as_py() == value.encode() + + +@pytest.mark.parametrize( + "db_type_name", + ["CHAR", "NCHAR", "VARCHAR2", "NVARCHAR2"], +) +@pytest.mark.parametrize("dtype", [pyarrow.string(), pyarrow.large_string()]) +async def 
test_9415(db_type_name, dtype, async_conn): + "9415 - fetch_df_all() for string types" + value = "test_9415" + requested_schema = pyarrow.schema([("STRING_COL", dtype)]) + statement = f"select cast('{value}' as {db_type_name}(9)) from dual" + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("STRING_COL").type == dtype + assert tab["STRING_COL"][0].as_py() == value + + +@pytest.mark.parametrize("db_type_name", ["CLOB", "NCLOB"]) +@pytest.mark.parametrize("dtype", [pyarrow.string(), pyarrow.large_string()]) +async def test_9416(db_type_name, dtype, async_conn): + "9416 - fetch_df_all() for CLOB types" + value = "test_9416" + requested_schema = pyarrow.schema([("CLOB_COL", dtype)]) + statement = f"select to_{db_type_name.lower()}('{value}') from dual" + ora_df = await async_conn.fetch_df_all( + statement, requested_schema=requested_schema + ) + tab = pyarrow.table(ora_df) + assert tab.field("CLOB_COL").type == dtype + assert tab["CLOB_COL"][0].as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [ + pyarrow.decimal128(precision=3, scale=2), + pyarrow.float32(), + pyarrow.float64(), + ], +) +async def test_9417(dtype, async_conn): + "9417 - fetch_df_all() for NUMBER duplicate values" + value = 93.25 + requested_schema = pyarrow.schema([("DECIMAL_COL", dtype)]) + ora_df = await async_conn.fetch_df_all( + f""" + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + union all + select {value} from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("DECIMAL_COL").type == dtype + for fetched_value in tab["DECIMAL_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +async def test_9418(dtype, async_conn): + "9418 - fetch_df_all() for BINARY_DOUBLE duplicate values" + value = 523.75 + requested_schema = pyarrow.schema([("BINARY_DOUBLE_COL", dtype)]) + ora_df = await async_conn.fetch_df_all( + f""" + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + union all + select to_binary_double({value}) from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_DOUBLE_COL").type == dtype + for fetched_value in tab["BINARY_DOUBLE_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.float32(), pyarrow.float64()], +) +async def test_9419(dtype, async_conn): + "9419 - fetch_df_all() for BINARY_FLOAT duplicate values" + value = 9308.125 + requested_schema = pyarrow.schema([("BINARY_FLOAT_COL", dtype)]) + ora_df = await async_conn.fetch_df_all( + f""" + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + union all + select to_binary_float({value}) from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("BINARY_FLOAT_COL").type == dtype + for fetched_value in 
tab["BINARY_FLOAT_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype", + [pyarrow.binary(length=6), pyarrow.binary(), pyarrow.large_binary()], +) +async def test_9420(dtype, async_conn): + "9420 - fetch_df_all() for RAW duplicate values" + value = "A23456" + requested_schema = pyarrow.schema([("RAW_COL", dtype)]) + ora_df = await async_conn.fetch_df_all( + f""" + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + union all + select utl_raw.cast_to_raw('{value}') from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("RAW_COL").type == dtype + for fetched_value in tab["RAW_COL"]: + assert fetched_value.as_py() == value.encode() + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9421(dtype, value_is_date, async_conn): + "9421 - fetch_df_all() for DATE duplicate values" + requested_schema = pyarrow.schema([("DATE_COL", dtype)]) + value = datetime.datetime(2025, 3, 1) + parameters = dict(value=value) + ora_df = await async_conn.fetch_df_all( + """ + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + union all + select cast(:value as date) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("DATE_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["DATE_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9422(dtype, value_is_date, async_conn): + "9422 - fetch_df_all() for TIMESTAMP duplicate values" + requested_schema = pyarrow.schema([("TIMESTAMP_COL", dtype)]) + value = datetime.datetime(2025, 1, 14) + parameters = dict(value=value) + ora_df = await async_conn.fetch_df_all( + """ + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + union all + select cast(:value as timestamp) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["TIMESTAMP_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9423(dtype, 
value_is_date, async_conn): + "9423 - fetch_df_all() for TIMESTAMP WITH LOCAL TIME ZONE duplicate values" + requested_schema = pyarrow.schema([("TIMESTAMP_LTZ_COL", dtype)]) + value = datetime.datetime(2025, 3, 6) + parameters = dict(value=value) + ora_df = await async_conn.fetch_df_all( + """ + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + union all + select cast(:value as timestamp with local time zone) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_LTZ_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["TIMESTAMP_LTZ_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "dtype,value_is_date", + [ + (pyarrow.date32(), True), + (pyarrow.date64(), True), + (pyarrow.timestamp("s"), False), + (pyarrow.timestamp("us"), False), + (pyarrow.timestamp("ms"), False), + (pyarrow.timestamp("ns"), False), + ], +) +async def test_9424(dtype, value_is_date, async_conn): + "9424 - fetch_df_all() for TIMESTAMP WITH TIME ZONE duplicate values" + requested_schema = pyarrow.schema([("TIMESTAMP_TZ_COL", dtype)]) + value = datetime.datetime(2025, 2, 28) + parameters = dict(value=value) + ora_df = await async_conn.fetch_df_all( + """ + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + union all + select cast(:value as timestamp with time zone) from dual + """, + parameters, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("TIMESTAMP_TZ_COL").type == dtype + if value_is_date: + value = value.date() + for fetched_value in tab["TIMESTAMP_TZ_COL"]: + assert fetched_value.as_py() == value + + +@pytest.mark.parametrize( + "db_type_name", + ["CHAR", "NCHAR", "VARCHAR2", "NVARCHAR2"], +) +@pytest.mark.parametrize("dtype", [pyarrow.string(), pyarrow.large_string()]) +async def test_9425(db_type_name, dtype, async_conn): + "9425 - fetch_df_all() for string types duplicate values" + value = "test_9425" + requested_schema = pyarrow.schema([("STRING_COL", dtype)]) + ora_df = await async_conn.fetch_df_all( + f""" + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + union all + select cast('{value}' as {db_type_name}(9)) from dual + """, + requested_schema=requested_schema, + ) + tab = pyarrow.table(ora_df) + assert tab.field("STRING_COL").type == dtype + for fetched_value in tab["STRING_COL"]: + assert fetched_value.as_py() == value diff --git a/utils/templates/connection.py b/utils/templates/connection.py index c5c65d81..5a3293a5 100644 --- a/utils/templates/connection.py +++ b/utils/templates/connection.py @@ -43,6 +43,7 @@ from . 
import base_impl, driver_mode, errors, thick_impl, thin_impl from . import pool as pool_module from .aq import AsyncQueue, Queue, MessageProperties +from .arrow_impl import ArrowSchemaImpl from .base_impl import DB_TYPE_BLOB, DB_TYPE_CLOB, DB_TYPE_NCLOB, DbType from .connect_params import ConnectParams from .cursor import AsyncCursor, Cursor @@ -1098,6 +1099,7 @@ def fetch_df_all( arraysize: Optional[int] = None, *, fetch_decimals: Optional[bool] = None, + requested_schema: Optional[Any] = None, ) -> DataFrame: """ Fetches all rows of the SQL query ``statement``, returning them in a @@ -1121,10 +1123,18 @@ def fetch_df_all( value is :data:`oracledb.defaults.fetch_decimals `. + The ``requested_schema`` parameter specifies an object that implements + the Arrow PyCapsule schema interface. The DataFrame returned by + ``fetch_df_all()`` will have the data types and names of the schema. + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() cursor._impl.fetching_arrow = True + if requested_schema is not None: + cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema( + requested_schema + ) if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize @@ -1142,6 +1152,7 @@ def fetch_df_batches( size: Optional[int] = None, *, fetch_decimals: Optional[bool] = None, + requested_schema: Optional[Any] = None, ) -> Iterator[DataFrame]: """ This returns an iterator yielding the next ``size`` rows of the SQL @@ -1167,10 +1178,18 @@ def fetch_df_batches( value is :data:`oracledb.defaults.fetch_decimals `. + The ``requested_schema`` parameter specifies an object that implements + the Arrow PyCapsule schema interface. The DataFrame returned by + ``fetch_df_all()`` will have the data types and names of the schema. + Any LOB fetched must be less than 1 GB. """ cursor = self.cursor() cursor._impl.fetching_arrow = True + if requested_schema is not None: + cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema( + requested_schema + ) if size is not None: cursor.arraysize = size cursor.prefetchrows = cursor.arraysize @@ -2084,6 +2103,10 @@ async def direct_path_load( The ``data`` parameter can be a list of sequences, a DataFrame, or a third-party DataFrame instance that supports the Apache Arrow PyCapsule Interface. + + The ``batch_size`` parameter is used to split large data sets into + smaller pieces for sending to the database. It is the number of records + in each batch. This parameter can be used to tune performance. """ self._verify_connected() await self._impl.direct_path_load( @@ -2097,6 +2120,7 @@ async def fetch_df_all( arraysize: Optional[int] = None, *, fetch_decimals: Optional[bool] = None, + requested_schema: Optional[Any] = None, ) -> DataFrame: """ Fetches all rows of the SQL query ``statement``, returning them in a @@ -2119,9 +2143,17 @@ async def fetch_df_all( capable of being represented in Arrow Decimal128 format. The default value is :data:`oracledb.defaults.fetch_decimals `. + + The ``requested_schema`` parameter specifies an object that implements + the Arrow PyCapsule schema interface. The DataFrame returned by + ``fetch_df_all()`` will have the data types and names of the schema. 
""" cursor = self.cursor() cursor._impl.fetching_arrow = True + if requested_schema is not None: + cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema( + requested_schema + ) if arraysize is not None: cursor.arraysize = arraysize cursor.prefetchrows = cursor.arraysize @@ -2139,6 +2171,7 @@ async def fetch_df_batches( size: Optional[int] = None, *, fetch_decimals: Optional[bool] = None, + requested_schema: Optional[Any] = None, ) -> Iterator[DataFrame]: """ This returns an iterator yielding the next ``size`` rows of the SQL @@ -2163,9 +2196,17 @@ async def fetch_df_batches( capable of being represented in Arrow Decimal128 format. The default value is :data:`oracledb.defaults.fetch_decimals `. + + The ``requested_schema`` parameter specifies an object that implements + the Arrow PyCapsule schema interface. The DataFrame returned by + ``fetch_df_all()`` will have the data types and names of the schema. """ cursor = self.cursor() cursor._impl.fetching_arrow = True + if requested_schema is not None: + cursor._impl.schema_impl = ArrowSchemaImpl.from_arrow_schema( + requested_schema + ) if size is not None: cursor.arraysize = size cursor.prefetchrows = cursor.arraysize From 0304898b03ac149f13d417c00f2fb997fbda7878 Mon Sep 17 00:00:00 2001 From: Anthony Tuininga Date: Thu, 2 Oct 2025 15:55:10 -0600 Subject: [PATCH 213/239] Doc updates. --- README.md | 2 +- doc/src/api_manual/async_connection.rst | 2 +- doc/src/api_manual/connect_params.rst | 2 +- doc/src/api_manual/deprecations.rst | 4 +- doc/src/api_manual/pipeline.rst | 3 +- doc/src/index.rst | 1 + doc/src/release_notes.rst | 72 +- doc/src/user_guide/appendix_a.rst | 10 +- doc/src/user_guide/aq.rst | 2 +- doc/src/user_guide/asyncio.rst | 3 +- doc/src/user_guide/authentication_methods.rst | 1579 +++++++++++++++++ doc/src/user_guide/bind.rst | 8 +- doc/src/user_guide/connection_handling.rst | 1478 +-------------- doc/src/user_guide/installation.rst | 52 +- doc/src/user_guide/introduction.rst | 4 +- doc/src/user_guide/json_data_type.rst | 8 +- doc/src/user_guide/soda.rst | 3 +- doc/src/user_guide/troubleshooting.rst | 2 +- doc/src/user_guide/vector_data_type.rst | 24 +- samples/containers/app_dev/Dockerfile | 10 +- samples/containers/app_dev/README.md | 4 +- .../containers/app_dev/sample_app/customer.py | 4 +- samples/containers/samples_and_db/Dockerfile | 4 +- samples/containers/samples_and_db/README.md | 3 +- samples/json_duality.py | 2 +- samples/json_duality_async.py | 4 +- samples/pipelining_basic.py | 6 +- samples/pipelining_error.py | 6 +- samples/pipelining_parallel.py | 6 +- samples/sessionless_transactions.py | 2 +- samples/soda_json_duality.py | 6 +- ...le-Database-The-New-Wave-of-Scripting.html | 8 +- samples/tutorial/sql/setup_tutorial.sql | 2 +- samples/vector.py | 2 +- samples/vector_async.py | 4 +- samples/vector_numpy.py | 4 +- samples/vector_numpy_async.py | 6 +- samples/vector_string.py | 8 +- src/oracledb/errors.py | 2 +- src/oracledb/fetch_info.py | 16 +- src/oracledb/impl/base/queue.pyx | 5 +- src/oracledb/impl/thin/connection.pyx | 2 +- src/oracledb/impl/thin/packet.pyx | 10 +- src/oracledb/impl/thin/protocol.pyx | 8 +- tests/ext/sample_config.ini | 6 +- tests/test_3000_subscription.py | 2 +- tests/test_3100_boolean_var.py | 2 +- tests/test_3400_soda_collection.py | 12 +- tests/test_6700_json_23.py | 2 +- 49 files changed, 1803 insertions(+), 1614 deletions(-) create mode 100644 doc/src/user_guide/authentication_methods.rst diff --git a/README.md b/README.md index 3d1c9a83..fdba06ff 100644 --- a/README.md +++ 
b/README.md @@ -70,7 +70,7 @@ with oracledb.connect(user=un, password=pw, dsn=cs) as connection: available when optional Oracle Client libraries are loaded by python-oracledb. Libraries are available in the free [Oracle Instant Client][instantclient] packages. Python-oracledb can use Oracle Client - libraries 11.2 through 23ai. + libraries versions 11.2 through 23, inclusive. - Oracle Database diff --git a/doc/src/api_manual/async_connection.rst b/doc/src/api_manual/async_connection.rst index 2cca1110..c31cab3b 100644 --- a/doc/src/api_manual/async_connection.rst +++ b/doc/src/api_manual/async_connection.rst @@ -159,7 +159,7 @@ AsyncConnection Methods .. note:: - True pipelining requires Oracle Database 23ai. + True pipelining requires Oracle Database version 23, or later. When you connect to an older database, operations are sequentially executed by python-oracledb. Each operation concludes before the next diff --git a/doc/src/api_manual/connect_params.rst b/doc/src/api_manual/connect_params.rst index cecbf6c4..6d58b339 100644 --- a/doc/src/api_manual/connect_params.rst +++ b/doc/src/api_manual/connect_params.rst @@ -230,7 +230,7 @@ All properties are read only. pooled DRCP or PRCP connections are implicitly released back to the DRCP or PRCP pool when either one of the methods :meth:`Connection.commit()` or :meth:`Connection.rollback()` are called. This attribute requires the use - of DRCP or PRCP with Oracle Database 23ai (or later). See + of DRCP or PRCP with Oracle Database version 23, or later. See :ref:`implicitconnpool` for more information. This attribute is supported in both python-oracledb Thin and Thick modes. diff --git a/doc/src/api_manual/deprecations.rst b/doc/src/api_manual/deprecations.rst index a8f71cd9..ba55373e 100644 --- a/doc/src/api_manual/deprecations.rst +++ b/doc/src/api_manual/deprecations.rst @@ -23,8 +23,10 @@ used for new development. * - Name - Comments - * - The x86_64 macOS and 32-bit Windows platforms are deprecated. They will be desupported when the `cryptography `__ package desupports them, see the `cryptography deprecation announcement `__. + * - The x86_64 macOS and 32-bit Windows platforms are deprecated. They will be desupported before, or when, the `cryptography `__ package desupports them. See the `cryptography deprecation announcement `__. - Use arm64 macOS or 64-bit Windows instead. + * - Connectivity and interoperability with Oracle Database and Oracle Client libraries older than version 19 is deprecated and will be removed in a future version of python-oracledb. Production use, and availability of database and client software, is detailed in `Release Schedule of Current Database Releases `__. + - Upgrade the database and client library versions. .. list-table-with-summary:: Deprecated in python-oracledb 3.0 :header-rows: 1 diff --git a/doc/src/api_manual/pipeline.rst b/doc/src/api_manual/pipeline.rst index 7bceaedb..f1b6e096 100644 --- a/doc/src/api_manual/pipeline.rst +++ b/doc/src/api_manual/pipeline.rst @@ -12,7 +12,8 @@ information about pipelining. .. note:: - True pipelining is only available when connected to Oracle Database 23ai. + True pipelining is only available when connected to Oracle Database version + 23, or later. .. 
versionadded:: 2.4.0 diff --git a/doc/src/index.rst b/doc/src/index.rst index 43ed1d5a..9a4936ee 100644 --- a/doc/src/index.rst +++ b/doc/src/index.rst @@ -21,6 +21,7 @@ User Guide user_guide/installation.rst user_guide/initialization.rst user_guide/connection_handling.rst + user_guide/authentication_methods.rst user_guide/sql_execution.rst user_guide/plsql_execution.rst user_guide/bind.rst diff --git a/doc/src/release_notes.rst b/doc/src/release_notes.rst index b6a8d32b..36570fc8 100644 --- a/doc/src/release_notes.rst +++ b/doc/src/release_notes.rst @@ -79,9 +79,10 @@ Common Changes support (`issue 512 `__). #) The x86_64 macOS and 32-bit Windows platforms are :ref:`deprecated - `. They will be desupported when the `cryptography - `__ package desupports them, see - the `cryptography deprecation announcement `. They will be desupported in a future release before, or + when, the `cryptography `__ package + desupports them. See the `cryptography deprecation announcement + `__. #) Connectivity and interoperability with Oracle Database and Oracle Client @@ -135,7 +136,7 @@ Common Changes #) Pre-built binaries are now being created for Python 3.14. Note this Python version is currently in release candidate phase. -#) Added support for Oracle Database 23ai :ref:`Sessionless Transactions +#) Added support for Oracle Database 23.6 :ref:`Sessionless Transactions `. #) Changes to :ref:`data frame ` support: @@ -166,7 +167,7 @@ Common Changes - Fixed bug when fetching numeric data with precision that exceeds 38 as decimal data. - Fixed bug when fetching large amounts of data in one round-trip when - using asyncio with Oracle Database versions before 23ai. + using asyncio with Oracle Database 21c and earlier. Note the data frame support in python-oracledb 3.3 is a pre-release, and may change in a future version. @@ -193,8 +194,8 @@ Thin Mode Changes closed connection (`issue 482 `__). #) Fixed bug when connecting with asyncio using the parameter ``https_proxy``. -#) Fixed bug when fetching LOBs with asyncio from databases prior to Oracle - Database 23ai +#) Fixed bug when fetching LOBs with asyncio from Oracle Database 21c and + earlier (`issue 500 `__). #) Fixed regression when connecting where only the host specified by the ``https_proxy`` parameter can successfully perform name resolution. @@ -243,7 +244,7 @@ Common Changes (`issue 505 `__). #) Added parameter ``pool_name`` to connection and pool creation methods to - support Oracle Database 23ai multi-pool :ref:`drcp`. + support Oracle Database 23.4 multi-pool :ref:`drcp`. #) :ref:`GitHub Action ` workflow updates: - Use GitHub Arm Linux runner for builds. Supplied by wojiushixiaobai @@ -705,13 +706,13 @@ oracledb `2.4.0 `. #) Fixed bug resulting in a segfault when a closed cursor is bound as a REF CURSOR (`issue 368 `__). -#) Fixed bug resulting in an inability to connect to Oracle Database 23ai - instances which have fast authentication disabled. +#) Fixed bug resulting in an inability to connect to Oracle Database version + 23 instances which have fast authentication disabled. #) Fixed error message when idle time is exceeded by a connection. The error ``DPY-4033: the database closed the connection because the connection's idle time has been exceeded`` is now raised when this situation is @@ -787,7 +788,7 @@ Thick Mode Changes Common Changes ++++++++++++++ -#) Added support for Oracle Database 23ai +#) Added support for Oracle Database 23.5 :ref:`BINARY vector format `. 
#) Replaced integer constants for :ref:`connection authorization modes `, @@ -888,12 +889,12 @@ Thin Mode Changes #) Fixed bug that would cause an internal error to be raised when attempting to close a connection that has been forcibly closed by the database. #) Internal change: further efforts to tighten code looking for the end of a - database request made to Oracle Database 23ai. + database request made to Oracle Database version 23. Common Changes ++++++++++++++ -#) Added support for Oracle Database 23ai columns of type :ref:`VECTOR +#) Added support for Oracle Database 23.4 columns of type :ref:`VECTOR `. #) Added support for columns of type INTERVAL YEAR TO MONTH which can be represented in Python by instances of the new @@ -962,7 +963,7 @@ oracledb `2.1.0 `, @@ -1010,8 +1011,8 @@ Thin Mode Changes Thick Mode Changes ++++++++++++++++++ -#) Added support for internal use of JSON in SODA with Oracle Client 23. This - allows for seamless transfer of extended data types. +#) Added support for internal use of JSON in SODA with Oracle Client version + 23. This allows for seamless transfer of extended data types. #) Fixed bug when calling :meth:`SodaDoc.getContent()` for SODA documents that do not contain JSON. #) Corrected support for Oracle Sharding. @@ -1031,7 +1032,7 @@ Common Changes returned by SODA in Oracle Database 23.4 and later in the ``_id`` attribute of documents stored in native collections. #) Added support for columns of type VECTOR usable with a limited - availability release of Oracle Database 23. + availability release of Oracle Database version 23. #) Errors raised when calling :meth:`Cursor.executemany()` with PL/SQL now have the :data:`oracledb._Error.offset` attribute populated with the last iteration that succeeded @@ -1126,8 +1127,8 @@ Thin Mode Changes #) Added parameter :data:`ConnectParams.ssl_context` to modify the SSL context used when connecting via TLS (`issue 259 `__). -#) Added support for an Oracle Database 23ai JSON feature allowing field names - with more than 255 UTF-8 encoded bytes. +#) Added support for an Oracle Database version 23 JSON feature allowing + fieldnames with more than 255 UTF-8 encoded bytes. #) Added support for the ``FAILOVER`` clause in full connect descriptors. #) Fixed bug in detecting the current time zone (`issue 257 `__). @@ -1137,8 +1138,8 @@ Thin Mode Changes multiple line comments with multiple asterisks before the closing slash. #) A more meaningful error is raised when the wrong type of data is passed to :meth:`LOB.write()`. -#) Internal change to support an Oracle Database 23ai JSON feature improving - JSON storage usage. +#) Internal change to support an Oracle Database version 23 JSON feature + improving JSON storage usage. #) Internal change to ensure that all connections in a pool have been closed gracefully before the pool is closed. #) Internal changes to improve handling of the network protocol between @@ -1175,8 +1176,9 @@ Common Changes 17D3A9C6-D993-4E94-BF6B-CACA56581F41>`__ and `annotations `__ associated with columns that are being - fetched. SQL domains and annotations require Oracle Database 23ai. If using - python-oracledb Thick mode, Oracle Client 23ai is also required. + fetched. SQL domains and annotations require Oracle Database version 23, + or later. If using python-oracledb Thick mode, Oracle Client version 23, + or later, is also required. #) Added parameter ``data`` to :meth:`Connection.createlob()` to allow data to be written at LOB creation time. 
#) Added type :data:`~oracledb.DB_TYPE_XMLTYPE` to represent data of type @@ -1196,7 +1198,7 @@ Common Changes #) Errors that have entries in the :ref:`troubleshooting documentation ` now have links to that documentation shown in the message text. -#) Fixed bug with binding boolean values with Oracle Database 23ai +#) Fixed bug with binding boolean values with Oracle Database version 23 (`issue 263 `__). #) Fixed bug with getting unknown attributes from :ref:`Oracle Object ` instances. @@ -1266,9 +1268,9 @@ oracledb `1.4.0 `__). -#) Fixed bug with Oracle Database 23ai when SQL is executed after first being - parsed. +#) Fixed bug with Oracle Database version 23 when SQL is executed after first + being parsed. #) Fixed bug when :data:`ConnectionPool.timeout` is not *None* when creating a connection pool (`issue 166 `__). diff --git a/doc/src/user_guide/appendix_a.rst b/doc/src/user_guide/appendix_a.rst index f7a7eafd..cfce9f18 100644 --- a/doc/src/user_guide/appendix_a.rst +++ b/doc/src/user_guide/appendix_a.rst @@ -169,7 +169,7 @@ For more details see :ref:`driverdiff` and :ref:`upgrading83`. - Yes - Yes - Yes - * - Oracle Database 23ai Implicit Connection Pooling with :ref:`DRCP ` and PRCP (see :ref:`implicitconnpool`) + * - Oracle Database version 23 Implicit Connection Pooling with :ref:`DRCP ` and PRCP (see :ref:`implicitconnpool`) - Yes - Yes - No @@ -249,7 +249,7 @@ For more details see :ref:`driverdiff` and :ref:`upgrading83`. - Yes - No - No - * - Oracle Database 23ai JSON-Relational Duality Views (see :ref:`jsondualityviews`) + * - Oracle Database version 23 JSON-Relational Duality Views (see :ref:`jsondualityviews`) - Yes - Yes - No @@ -309,7 +309,7 @@ For more details see :ref:`driverdiff` and :ref:`upgrading83`. - Yes - No - No - * - Oracle Database 23ai Pipelining (see :ref:`pipelining`) + * - Oracle Database version 23 Pipelining (see :ref:`pipelining`) - Yes - No - No @@ -325,7 +325,7 @@ For more details see :ref:`driverdiff` and :ref:`upgrading83`. - Yes - Yes - Yes - * - Oracle Database 23ai Sessionless Transactions (see :ref:`sessionlesstxns`) + * - Oracle Database version 23 Sessionless Transactions (see :ref:`sessionlesstxns`) - Yes - Yes - No @@ -401,7 +401,7 @@ example when binding numeric values. - :data:`~oracledb.DB_TYPE_DATE` - datetime.date, datetime.datetime - No relevant notes - * - BOOLEAN (PL/SQL and Oracle Database 23ai SQL) + * - BOOLEAN (PL/SQL and Oracle Database version 23 SQL) - :data:`~oracledb.DB_TYPE_BOOLEAN` - Any type convertible to bool - No relevant notes diff --git a/doc/src/user_guide/aq.rst b/doc/src/user_guide/aq.rst index 1105d3a8..6db66b5a 100644 --- a/doc/src/user_guide/aq.rst +++ b/doc/src/user_guide/aq.rst @@ -48,7 +48,7 @@ types which are detailed below. Array enqueuing and dequeuing is not supported for JSON payloads. * - JMS - Supported - - Supported for single and array message enqueuing and dequeuing when using Oracle Client 19c (or later) and Oracle Database 23ai. + - Supported for single and array message enqueuing and dequeuing when using Oracle Client 19 (or later) and Oracle Database version 23 (or later). **Usage Notes** diff --git a/doc/src/user_guide/asyncio.rst b/doc/src/user_guide/asyncio.rst index afb20fee..1bb6633c 100644 --- a/doc/src/user_guide/asyncio.rst +++ b/doc/src/user_guide/asyncio.rst @@ -381,7 +381,8 @@ about Oracle Database Pipelining. .. note:: - True pipelining only occurs when you are connected to Oracle Database 23ai. 
+ True pipelining only occurs when you are connected to Oracle Database + version 23, or later. When you connect to an older database, operations are sequentially executed by python-oracledb. Each operation concludes before the next is diff --git a/doc/src/user_guide/authentication_methods.rst b/doc/src/user_guide/authentication_methods.rst new file mode 100644 index 00000000..aa7ec0c3 --- /dev/null +++ b/doc/src/user_guide/authentication_methods.rst @@ -0,0 +1,1579 @@ +.. _authenticationmethods: + +.. currentmodule:: oracledb + +********************** +Authentication Options +********************** + +Authentication allows only authorized users to access Oracle Database after +successful verification of their identity. This section details the various +Oracle Database authentication options supported in python-oracledb. + +The Oracle Client libraries used by python-oracledb Thick mode may support +additional authentication options that are configured independently of the +driver. + +.. _dbauthentication: + +Database Authentication +======================= + +Database Authentication is the most basic authentication method that allows +users to connect to Oracle Database by using a valid database username and +their associated password. Oracle Database verifies the username and password +specified in the python-oracledb connection method with the information stored +in the database. See `Database Authentication of Users `__ +for more information. + +:ref:`Standalone connections ` and +:ref:`pooled connections ` can be created in python-oracledb Thin +and Thick modes using database authentication. This can be done by specifying +the database username and the associated password in the ``user`` and +``password`` parameters of :meth:`oracledb.connect()`, +:meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, or +:meth:`oracledb.create_pool_async()`. An example is: + +.. code-block:: python + + import oracledb + import getpass + + userpwd = getpass.getpass("Enter password: ") + + connection = oracledb.connect(user="hr", password=userpwd, + dsn="dbhost.example.com/orclpdb") + +.. _proxyauth: + +Proxy Authentication +==================== + +Proxy authentication allows a user (the "session user") to connect to Oracle +Database using the credentials of a "proxy user". Statements will run as the +session user. Proxy authentication is generally used in three-tier +applications where one user owns the schema while multiple end-users access +the data. For more information about proxy authentication, see the `Oracle +documentation `__. + +An alternative to using proxy users is to set +:attr:`Connection.client_identifier` after connecting and use its value in +statements and in the database, for example for :ref:`monitoring +`. + +The following proxy examples use these schemas. The ``mysessionuser`` schema +is granted access to use the password of ``myproxyuser``: + +.. code-block:: sql + + CREATE USER myproxyuser IDENTIFIED BY myproxyuserpw; + GRANT CREATE SESSION TO myproxyuser; + + CREATE USER mysessionuser IDENTIFIED BY itdoesntmatter; + GRANT CREATE SESSION TO mysessionuser; + + ALTER USER mysessionuser GRANT CONNECT THROUGH myproxyuser; + +After connecting to the database, the following query can be used to show the +session and proxy users: + +.. code-block:: sql + + SELECT SYS_CONTEXT('USERENV', 'PROXY_USER'), + SYS_CONTEXT('USERENV', 'SESSION_USER') + FROM DUAL; + +Standalone connection examples: + +.. 
code-block:: python + + # Basic Authentication without a proxy + connection = oracledb.connect(user="myproxyuser", password="myproxyuserpw", + dsn="dbhost.example.com/orclpdb") + # PROXY_USER: None + # SESSION_USER: MYPROXYUSER + + # Basic Authentication with a proxy + connection = oracledb.connect(user="myproxyuser[mysessionuser]", password="myproxyuserpw", + dsn="dbhost.example.com/orclpdb") + # PROXY_USER: MYPROXYUSER + # SESSION_USER: MYSESSIONUSER + +Pooled connection examples: + +.. code-block:: python + + # Basic Authentication without a proxy + pool = oracledb.create_pool(user="myproxyuser", password="myproxyuserpw", + dsn="dbhost.example.com/orclpdb") + connection = pool.acquire() + # PROXY_USER: None + # SESSION_USER: MYPROXYUSER + + # Basic Authentication with proxy + pool = oracledb.create_pool(user="myproxyuser[mysessionuser]", password="myproxyuserpw", + dsn="dbhost.example.com/orclpdb", + homogeneous=False) + + connection = pool.acquire() + # PROXY_USER: MYPROXYUSER + # SESSION_USER: MYSESSIONUSER + +Note the use of a :ref:`heterogeneous ` pool in the example +above. This is required in this scenario. + +.. _extauth: + +External Authentication +======================= + +Instead of storing the database username and password in Python scripts or +environment variables, database access can be authenticated by an outside +system. External Authentication allows applications to validate user access +with an external password store (such as an +:ref:`Oracle Wallet `), with the +:ref:`operating system `, or with an external authentication +service. + +.. note:: + + Connecting to Oracle Database using external authentication is only + supported in python-oracledb Thick mode. See :ref:`enablingthick`. + +.. _extauthwithwallet: + +Using an Oracle Wallet for External Authentication +-------------------------------------------------- + +The following steps give an overview of using an Oracle Wallet. Wallets should +be kept securely. Wallets can be managed with `Oracle Wallet Manager +`__. + +In this example the wallet is created for the ``myuser`` schema in the +directory ``/home/oracle/wallet_dir``. The ``mkstore`` command is available +from a full Oracle Client or Oracle Database installation. If you have been +given wallet by your DBA, skip to step 3. + +1. First create a new wallet as the ``oracle`` user:: + + mkstore -wrl "/home/oracle/wallet_dir" -create + + This will prompt for a new password for the wallet. + +2. Create the entry for the database user name and password that are currently + hardcoded in your Python scripts. Use either of the methods shown below. + They will prompt for the wallet password that was set in the first step. + + **Method 1 - Using an Easy Connect string**:: + + mkstore -wrl "/home/oracle/wallet_dir" -createCredential dbhost.example.com/orclpdb myuser myuserpw + + **Method 2 - Using a connect name identifier**:: + + mkstore -wrl "/home/oracle/wallet_dir" -createCredential mynetalias myuser myuserpw + + The alias key ``mynetalias`` immediately following the + ``-createCredential`` option will be the connect name to be used in Python + scripts. If your application connects with multiple different database + users, you could create a wallet entry with different connect names for + each. + + You can see the newly created credential with:: + + mkstore -wrl "/home/oracle/wallet_dir" -listCredential + +3. Skip this step if the wallet was created using an Easy Connect String. 
+ Otherwise, add an entry in :ref:`tnsnames.ora ` for the + connect name as follows:: + + mynetalias = + (DESCRIPTION = + (ADDRESS = (PROTOCOL = TCP)(HOST = dbhost.example.com)(PORT = 1521)) + (CONNECT_DATA = + (SERVER = DEDICATED) + (SERVICE_NAME = orclpdb) + ) + ) + + The file uses the description for your existing database and sets the + connect name alias to ``mynetalias``, which is the identifier used when + adding the wallet entry. + +4. Add the following wallet location entry in the :ref:`sqlnet.ora + ` file, using the ``DIRECTORY`` you created the wallet in:: + + WALLET_LOCATION = + (SOURCE = + (METHOD = FILE) + (METHOD_DATA = + (DIRECTORY = /home/oracle/wallet_dir) + ) + ) + SQLNET.WALLET_OVERRIDE = TRUE + + Examine the Oracle documentation for full settings and values. + +5. Ensure the configuration files are in a default location or TNS_ADMIN is + set to the directory containing them. See :ref:`optnetfiles`. + +With an Oracle wallet configured, and readable by you, your scripts +can connect to Oracle Database with: + +- Standalone connections by setting the ``externalauth`` parameter to *True* + in :meth:`oracledb.connect()`: + + .. code-block:: python + + connection = oracledb.connect(externalauth=True, dsn="mynetalias") + +- Or pooled connections by setting the ``externalauth`` parameter to *True* + in :meth:`oracledb.create_pool()`. Additionally in python-oracledb Thick + mode, you must set the ``homogeneous`` parameter to *False* as shown below + since heterogeneous pools can only be used with external authentication: + + .. code-block:: python + + pool = oracledb.create_pool(externalauth=True, homogeneous=False, + dsn="mynetalias") + pool.acquire() + +The ``dsn`` used in :meth:`oracledb.connect()` and +:meth:`oracledb.create_pool()` must match the one used in the wallet. + +After connecting, the query:: + + SELECT SYS_CONTEXT('USERENV', 'SESSION_USER') FROM DUAL; + +will show:: + + MYUSER + +.. note:: + + Wallets are also used to configure Transport Layer Security (TLS) connections. + If you are using a wallet like this, you may need a database username and password + in :meth:`oracledb.connect()` and :meth:`oracledb.create_pool()` calls. + +**External Authentication and Proxy Authentication** + +The following examples show external wallet authentication combined with +:ref:`proxy authentication `. These examples use the wallet +configuration from above, with the addition of a grant to another user:: + + ALTER USER mysessionuser GRANT CONNECT THROUGH myuser; + +After connection, you can check who the session user is with: + +.. code-block:: sql + + SELECT SYS_CONTEXT('USERENV', 'PROXY_USER'), + SYS_CONTEXT('USERENV', 'SESSION_USER') + FROM DUAL; + +Standalone connection example: + +.. code-block:: python + + # External Authentication with proxy + connection = oracledb.connect(user="[mysessionuser]", dsn="mynetalias") + # PROXY_USER: MYUSER + # SESSION_USER: MYSESSIONUSER + +You can also set the ``externalauth`` parameter to *True* in standalone +connections: + +.. code-block:: python + + # External Authentication with proxy when externalauth is set to True + connection = oracledb.connect(user="[mysessionuser]", dsn="mynetalias", + externalauth=True) + # PROXY_USER: MYUSER + # SESSION_USER: MYSESSIONUSER + +A connection pool example is: + +.. 
code-block:: python + + # External Authentication with proxy + pool = oracledb.create_pool(externalauth=True, homogeneous=False, + dsn="mynetalias") + pool.acquire(user="[mysessionuser]") + # PROXY_USER: MYUSER + # SESSION_USER: MYSESSIONUSER + +The following usage is not supported: + +.. code-block:: python + + pool = oracledb.create_pool(user="[mysessionuser]", externalauth=True, + homogeneous=False, dsn="mynetalias") + pool.acquire() + +.. _opsysauth: + +Operating System Authentication +------------------------------- + +With `Operating System `__ authentication, Oracle allows +user authentication to be performed by the operating system. The following +steps give an overview of how to implement OS Authentication on Linux. + +1. Log in to your computer. The commands used in these steps assume the + operating system user name is "oracle". + +2. Log in to SQL*Plus as the SYSTEM user and verify the value for the + ``OS_AUTHENT_PREFIX`` parameter:: + + SQL> SHOW PARAMETER os_authent_prefix + + NAME TYPE VALUE + ------------------------------------ ----------- ------------------------------ + os_authent_prefix string ops$ + +3. Create an Oracle database user using the ``os_authent_prefix`` determined in + step 2, and the operating system user name: + + .. code-block:: sql + + CREATE USER ops$oracle IDENTIFIED EXTERNALLY; + GRANT CONNECT, RESOURCE TO ops$oracle; + +In Python, connect using the following code: + +.. code-block:: python + + connection = oracledb.connect(dsn="mynetalias") + +Your session user will be ``OPS$ORACLE``. + +If your database is not on the same computer as Python, you can perform testing +by setting the database configuration parameter ``remote_os_authent=true``. +Beware of security concerns because this is insecure. + +See `Oracle Database Security Guide +`__ for more information about +Operating System Authentication. + +.. _tokenauth: + +Token-Based Authentication +========================== + +Token-Based Authentication allows users to connect to a database by using an +encrypted authentication token without having to enter a database username and +password. The authentication token must be valid and not expired for the +connection to be successful. Users already connected will be able to continue +work after their token has expired but they will not be able to reconnect +without getting a new token. + +The two authentication methods supported by python-oracledb are +:ref:`Open Authorization (OAuth 2.0) ` and :ref:`Oracle +Cloud Infrastructure (OCI) Identity and Access Management (IAM) `. +These authentication methods can use Cloud Native Authentication with the +support of the Azure SDK or OCI SDK to generate access tokens and connect to +Oracle Database. Alternatively, these methods can use a Python script that +contains a class to generate access tokens to connect to Oracle Database. + +.. _iamauth: + +OCI IAM Token-Based Authentication +---------------------------------- + +Oracle Cloud Infrastructure (OCI) Identity and Access Management (IAM) provides +its users with a centralized database authentication and authorization system. +Using this authentication method, users can use the database access token +issued by OCI IAM to authenticate to the Oracle Autonomous Database. Both Thin +and Thick modes of the python-oracledb driver support OCI IAM token-based +authentication. + +When using python-oracledb in Thick mode, Oracle Client libraries 19.14 (or +later), or 21.5 (or later) are needed. 
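+
+If you are unsure whether the Oracle Client libraries in use meet this
+requirement, Thick mode applications can check the loaded version with
+:meth:`oracledb.clientversion()`. The following is only a minimal sketch of
+such a check; it assumes Thick mode has already been enabled with
+:meth:`oracledb.init_oracle_client()`:
+
+.. code:: python
+
+    import oracledb
+
+    # Enable Thick mode (a lib_dir argument may be needed on some platforms)
+    oracledb.init_oracle_client()
+
+    # clientversion() returns a 5-tuple such as (21, 5, 0, 0, 0)
+    major, minor = oracledb.clientversion()[:2]
+    supported = (
+        (major == 19 and minor >= 14)
+        or (major == 21 and minor >= 5)
+        or major >= 23
+    )
+    if not supported:
+        print("Oracle Client is too old for OCI IAM token authentication")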
+ +Standalone connections and pooled connections can be created in python-oracledb +Thick and Thin modes using OCI IAM token-based authentication. This can be done +by using a class like the sample :ref:`TokenHandlerIAM class ` or +by using python-oracledb's :ref:`OCI Cloud Native Authentication Plugin +(oci_tokens) `. Tokens can be specified using the +connection parameter introduced in python-oracledb 1.1. Users of earlier +python-oracledb versions can alternatively use :ref:`OCI IAM Token-Based +Authentication Connection Strings `. + +OCI IAM Token Generation and Extraction ++++++++++++++++++++++++++++++++++++++++ + +Authentication tokens can be generated using python-oracledb's +:ref:`oci_tokens ` plugin. + +Alternatively, authentication tokens can be generated through execution of an +Oracle Cloud Infrastructure command line interface (OCI-CLI) command :: + + oci iam db-token get + +On Linux, a folder ``.oci/db-token`` will be created in your home directory. +It will contain the token and private key files needed by python-oracledb. + +.. _iamhandler: + +**Example of Generating an IAM Token** + +Here, as an example, we are using a Python script to automate the process of +generating and reading OCI IAM tokens. + +.. code:: python + + import os + + import oracledb + + class TokenHandlerIAM: + + def __init__(self, + dir_name="dir_name", + command="oci iam db-token get"): + self.dir_name = dir_name + self.command = command + self.token = None + self.private_key = None + + def __call__(self, refresh): + if refresh: + if os.system(self.command) != 0: + raise Exception("token command failed!") + if self.token is None or refresh: + self.read_token_info() + return (self.token, self.private_key) + + def read_token_info(self): + token_file_name = os.path.join(self.dir_name, "token") + pkey_file_name = os.path.join(self.dir_name, "oci_db_key.pem") + with open(token_file_name) as f: + self.token = f.read().strip() + with open(pkey_file_name) as f: + if oracledb.is_thin_mode(): + self.private_key = f.read().strip() + else: + lines = [s for s in f.read().strip().split("\n") + if s not in ('-----BEGIN PRIVATE KEY-----', + '-----END PRIVATE KEY-----')] + self.private_key = "".join(lines) + +The TokenHandlerIAM class uses a callable to generate and read OCI IAM tokens. +When the callable in the TokenHandlerIAM class is invoked for the first time +to create a standalone connection or pool, the ``refresh`` parameter is +*False* which allows the callable to return a cached token, if desired. The +expiry date is then extracted from this token and compared with the current +date. If the token has not expired, then it will be used directly. If the token +has expired, the callable is invoked the second time with the ``refresh`` +parameter set to *True*. + +The TokenHandlerIAM class defined here is used in the examples shown in +:ref:`conncreationociiam`. + +.. _conncreationociiam: + +Connection Creation with OCI IAM Access Tokens +++++++++++++++++++++++++++++++++++++++++++++++ + +For OCI IAM Token-Based Authentication with a class such as the sample +:ref:`TokenHandlerIAM class `, the ``access_token`` connection +parameter must be specified. This parameter should be a 2-tuple (or a callable +that returns a 2-tuple) containing the token and private key. In the examples +used below, the ``access_token`` parameter is set to a callable. 
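+
+If token refresh is not needed, for example in a short-lived script, a static
+2-tuple can also be passed directly. The following minimal sketch is for Thin
+mode and reads the files written by the OCI CLI from their default location on
+Linux; the paths are illustrative, and a callable (as used in the examples
+below) is generally preferable because it allows python-oracledb to obtain a
+new token when the cached one has expired:
+
+.. code:: python
+
+    import os
+
+    import oracledb
+
+    token_dir = os.path.expanduser("~/.oci/db-token")
+    with open(os.path.join(token_dir, "token")) as f:
+        token = f.read().strip()
+    # In Thick mode, strip the BEGIN/END PRIVATE KEY lines as shown in the
+    # TokenHandlerIAM class above
+    with open(os.path.join(token_dir, "oci_db_key.pem")) as f:
+        private_key = f.read().strip()
+
+    connection = oracledb.connect(
+        access_token=(token, private_key),  # static 2-tuple, not refreshed
+        dsn=mydb_low,
+        config_dir="path_to_unzipped_wallet",
+        wallet_location="location_of_pem_file",
+        wallet_password=wp)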
+ +The examples used in the subsequent sections use the +:ref:`TokenHandlerIAM class ` to generate OCI IAM tokens to connect +to Oracle Autonomous Database with mutual TLS (mTLS). See :ref:`autonomousdb`. + +**Standalone Connections in Thin Mode Using OCI IAM Tokens** + +When using a class such as the :ref:`TokenHandlerIAM class ` to +generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, +you need to explicitly set the ``access_token`` parameter of +:func:`~oracledb.connect`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. For example: + +.. code:: python + + connection = oracledb.connect( + access_token=TokenHandlerIAM(), + dsn=mydb_low, + config_dir="path_to_unzipped_wallet", + wallet_location="location_of_pem_file", + wallet_password=wp) + +**Connection Pools in Thin Mode Using OCI IAM Tokens** + +When using a class such as :ref:`TokenHandlerIAM class ` to +generate OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, +you need to explicitly set the ``access_token`` parameter of +:func:`~oracledb.create_pool`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous`` +parameter must be *True* (its default value). For example: + +.. code:: python + + connection = oracledb.create_pool( + access_token=TokenHandlerIAM(), + homogeneous=True, # must always be True for connection pools + dsn=mydb_low, + config_dir="path_to_unzipped_wallet", + wallet_location="location_of_pem_file", + wallet_password=wp + min=1, max=5, increment=2) + +Note that the ``access_token`` parameter should be set to a callable. This is +useful when the connection pool needs to expand and create new connections but +the current token has expired. In such a case, the callable should return a +string specifying the new, valid access token. + +**Standalone Connections in Thick Mode Using OCI IAM Tokens** + +When using a class such as :ref:`TokenHandlerIAM class ` to +generate OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, +you need to explicitly set the ``access_token`` and ``externalAuth`` parameters +of :func:`~oracledb.connect`. For example: + +.. code:: python + + connection = oracledb.connect( + access_token=TokenHandlerIAM(), + externalauth=True, # must always be True in Thick mode + dsn=mydb_low) + +**Connection Pools in Thick Mode Using OCI IAM Tokens** + +When using a class such as :ref:`TokenHandlerIAM class ` to +generate OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, +you need to explicitly set the ``access_token`` and ``externalauth`` parameters +of :func:`oracledb.create_pool`. The ``homogeneous`` parameter must be *True* +(its default value). For example: + +.. code:: python + + pool = oracledb.create_pool( + access_token=TokenHandlerIAM(), + externalauth=True, # must always be True in Thick mode + homogeneous=True, # must always be True for connection pools + dsn=mydb_low, min=1, max=5, increment=2) + +Note that the ``access_token`` parameter should be set to a callable. This is +useful when the connection pool needs to expand and create new connections but +the current token has expired. In such a case, the callable should return a +string specifying the new, valid access token. + +.. 
_iamauthconnstr: + +OCI IAM Token-Based Authentication Connection Strings ++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +The connection string used by python-oracledb can specify the directory where +the token and private key files are located. This syntax is usable with older +versions of python-oracledb. However, it is recommended to use connection +parameters introduced in python-oracledb 1.1 instead. See +:ref:`OCI IAM Token-Based Authentication`. + +.. note:: + + OCI IAM Token-Based Authentication Connection Strings is only supported in + python-oracledb Thick mode. See :ref:`enablingthick`. + +The Oracle Cloud Infrastructure command line interface (OCI-CLI) can be used +externally to get tokens and private keys from OCI IAM, for example with the +OCI-CLI ``oci iam db-token get`` command. + +The Oracle Net parameter ``TOKEN_AUTH`` must be set when you are using the +connection string syntax. Also, the ``PROTOCOL`` parameter must be ``tcps`` +and ``SSL_SERVER_DN_MATCH`` should be ``ON``. + +You can set ``TOKEN_AUTH=OCI_TOKEN`` in a ``sqlnet.ora`` file. Alternatively, +you can specify it in a :ref:`Connect Descriptor `, for example +when using a :ref:`tnsnames.ora ` file:: + + db_alias = + (DESCRIPTION = + (ADDRESS=(PROTOCOL=TCPS)(PORT=1522)(HOST=xxx.oraclecloud.com)) + (CONNECT_DATA=(SERVICE_NAME=xxx.adb.oraclecloud.com)) + (SECURITY = + (SSL_SERVER_CERT_DN="CN=xxx.oraclecloud.com, \ + O=Oracle Corporation,L=Redwood City,ST=California,C=US") + (TOKEN_AUTH=OCI_TOKEN) + ) + ) + +The default location for the token and private key is the same default location +that the OCI-CLI tool writes to. For example ``~/.oci/db-token/`` on Linux. + +If the token and private key files are not in the default location then their +directory must be specified with the ``TOKEN_LOCATION`` parameter in a +:ref:`sqlnet.ora ` file or in a :ref:`Connect Descriptor +`, for example when using a :ref:`tnsnames.ora ` +file:: + + db_alias = + (DESCRIPTION = + (ADDRESS=(PROTOCOL=TCPS)(PORT=1522)(HOST=xxx.oraclecloud.com)) + (CONNECT_DATA=(SERVICE_NAME=xxx.adb.oraclecloud.com)) + (SECURITY = + (SSL_SERVER_CERT_DN="CN=xxx.oraclecloud.com, \ + O=Oracle Corporation,L=Redwood City,ST=California,C=US") + (TOKEN_AUTH=OCI_TOKEN) + (TOKEN_LOCATION="/path/to/token/folder") + ) + ) + +The ``TOKEN_AUTH`` and ``TOKEN_LOCATION`` values in a connection string take +precedence over the ``sqlnet.ora`` settings. + +Standalone connection example: + +.. code-block:: python + + connection = oracledb.connect(dsn=db_alias, externalauth=True) + +Connection pool example: + +.. code-block:: python + + pool = oracledb.create_pool(dsn=db_alias, externalauth=True, + homogeneous=False, min=1, max=2, increment=1) + + connection = pool.acquire() + +.. _cloudnativeauthoci: + +OCI Cloud Native Authentication with the oci_tokens Plugin +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +With Cloud Native Authentication, python-oracledb's :ref:`oci_tokens +` plugin can automatically generate and refresh OCI +IAM tokens when required with the support of the `Oracle Cloud Infrastructure +(OCI) Software Development Kit (SDK) +`__. + +The :ref:`oci_tokens ` plugin can be imported +like: + +.. code-block:: python + + import oracledb.plugins.oci_tokens + +The plugin has a Python package dependency which needs to be installed +separately before the plugin can be used, see :ref:`ocitokenmodules`. 
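+
+If the dependency is not installed, importing the plugin will typically raise
+an ``ImportError``. A minimal sketch of guarding the import is shown below;
+the ``oci`` package (the OCI SDK for Python) is assumed to be the missing
+dependency, but see :ref:`ocitokenmodules` for the definitive requirement:
+
+.. code-block:: python
+
+    try:
+        import oracledb.plugins.oci_tokens
+    except ImportError as e:
+        raise RuntimeError(
+            "The oci_tokens plugin needs the OCI SDK for Python; "
+            "install it with: python -m pip install oci"
+        ) from e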
+ +The ``oci_tokens`` plugin defines and registers a :ref:`parameter hook +` function which uses the connection parameter +``extra_auth_params`` passed to :meth:`oracledb.connect()`, +:meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, or +:meth:`oracledb.create_pool_async()`. Using this parameter's values, the hook +function sets the ``access_token`` parameter of a :ref:`ConnectParams object +` to a callable which generates an OCI IAM token. Python-oracledb +then acquires and uses a token to transparently complete connection or pool +creation calls. + +For OCI Cloud Native Authentication connection and pool creation, the +``extra_auth_params`` parameter should be a dictionary with keys as shown in +the following table. + +.. list-table-with-summary:: OCI Cloud Native Authentication Configuration Keys + :header-rows: 1 + :class: wy-table-responsive + :widths: 10 25 15 + :name: _oci_configuration_parameters + :summary: The first column displays the name of the dictionary key. The second column displays its description. The third column displays whether the attribute is required or optional. + + * - Key + - Description + - Required or Optional + * - ``auth_type`` + - The authentication type. The value should be the string "ConfigFileAuthentication", "SimpleAuthentication", or "InstancePrincipal". + + With Configuration File Authentication, the location of a configuration file containing the necessary information must be provided. By default, this file is located at */home/username/.oci/config*, unless a custom location is specified during OCI IAM setup. + + With Simple Authentication, the individual configuration parameters can be provided at runtime. + + With Instance Principal Authentication, OCI compute instances can be authorized to access services on Oracle Cloud such as Oracle Autonomous Database. Python-oracledb applications running on such a compute instance are automatically authenticated, eliminating the need to provide database user credentials. This authentication method will only work on compute instances where internal network endpoints are reachable. See :ref:`instanceprincipalauth`. + + See `OCI SDK Authentication Methods `__ for more information. + - Required + * - ``user`` + - The Oracle Cloud Identifier (OCID) of the user invoking the API. For example, *ocid1.user.oc1..*. + + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication". + - Required + * - ``key_file`` + - The full path and filename of the private key. + + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication". + - Required + * - ``fingerprint`` + - The fingerprint associated with the public key that has been added to this user. + + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication". + - Required + * - ``tenancy`` + - The OCID of your tenancy. For example, *ocid1.tenancy.oc1..*. + + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication". + - Required + * - ``region`` + - The Oracle Cloud Infrastructure region. For example, *ap-mumbai-1*. + + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication". + - Required + * - ``profile`` + - The configuration profile name to load. + + Multiple profiles can be created, each with distinct values for necessary parameters. If not specified, the DEFAULT profile is used. 
+ + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication" or "ConfigFileAuthentication". If it is not specified when using "ConfigFileAuthentication", the default value is taken. + - Required + * - ``file_location`` + - The configuration file location. The default value is *~/.oci/config*. + + This parameter can be specified when the value of the ``auth_type`` key is "ConfigFileAuthentication". + - Optional + * - ``scope`` + - This parameter identifies all databases in the cloud tenancy of the authenticated user. The default value is *urn:oracle:db::id::**. + + A scope that authorizes access to all databases within a compartment has the format *urn:oracle:db::id::*, for example, urn:oracle:db::id::ocid1.compartment.oc1..xxxxxxxx. + + A scope that authorizes access to a single database within a compartment has the format *urn:oracle:db::id::::*, for example, urn:oracle:db::id::ocid1.compartment.oc1..xxxxxx::ocid1.autonomousdatabase.oc1.phx.xxxxxx. + + This parameter can be specified when the value of the ``auth_type`` key is "SimpleAuthentication", "ConfigFileAuthentication", or "InstancePrincipal". + - Optional + +All keys and values other than ``auth_type`` are used by the `OCI SDK +`__ API +calls in the plugin. The plugin implementation can be seen in +`plugins/oci_tokens.py +`__. + +For information on the OCI specific configuration parameters, see `OCI SDK +`__. + +The examples in the subsequent sections use the :ref:`oci_tokens +` plugin to generate OCI IAM tokens to connect to +Oracle Autonomous Database with mutual TLS (mTLS). See :ref:`autonomousdb`. + +**Standalone Connections in Thin Mode Using OCI IAM Tokens** + +When using the :ref:`oci_tokens ` plugin to generate +OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, you need +to explicitly set the ``extra_auth_params`` parameter of +:func:`~oracledb.connect`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. For example: + +.. code:: python + + import oracledb.plugins.oci_tokens + + token_based_auth = { # OCI specific configuration + "auth_type": "ConfigFileAuthentication", # parameters to be set when using + "profile": , # the oci_tokens plugin with + "file_location": , # configuration file authentication + } + + connection = oracledb.connect( + dsn=mydb_low, + config_dir="path_to_unzipped_wallet", + wallet_location="location_of_pem_file", + wallet_password=wp, + extra_auth_params=token_based_auth) + +**Connection Pools in Thin Mode Using OCI IAM Tokens** + +When using the :ref:`oci_tokens ` plugin to generate +OCI IAM tokens to connect to Oracle Autonomous Database in Thin mode, you need +to explicitly set the ``extra_auth_params`` parameter of +:func:`~oracledb.create_pool`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous`` +parameter must be *True* (its default value). For example: + +.. 
code:: python + + import oracledb.plugins.oci_tokens + + token_based_auth = { + "auth_type": "SimpleAuthentication", # OCI specific configuration + "user": , # parameters to be set when using + "key_file": , # the oci_tokens plugin with + "fingerprint": , # simple authentication + "tenancy": , + "region": , + "profile": + } + + connection = oracledb.create_pool( + dsn=mydb_low, + config_dir="path_to_unzipped_wallet", + homogeneous=true, # must always be True for connection pools + wallet_location="location_of_pem_file", + wallet_password=wp, + extra_auth_params=token_based_auth) + +**Standalone Connections in Thick Mode Using OCI IAM Tokens** + +When using the :ref:`oci_tokens ` plugin to generate +OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, you need +to explicitly set the ``externalauth`` and ``extra_auth_params`` parameters of +:func:`oracledb.connect`. For example: + +.. code:: python + + import oracledb.plugins.oci_tokens + + token_based_auth = { + "auth_type": "SimpleAuthentication", # OCI specific configuration + "user": , # parameters to be set when using + "key_file": , # the oci_tokens plugin with + "fingerprint": , # simple authentication + "tenancy": , + "region": , + "profile": + } + connection = oracledb.connect( + externalauth=True, + dsn=mydb_low, + extra_auth_params=token_based_auth) + +**Connection Pools in Thick Mode Using OCI IAM Tokens** + +When using the :ref:`oci_tokens ` plugin to generate +OCI IAM tokens to connect to Oracle Autonomous Database in Thick mode, you need +to explicitly set the ``extra_auth_params`` and ``externalauth`` parameters of +:func:`~oracledb.create_pool`. The ``homogeneous`` parameter must be *True* +(its default value). For example: + +.. code:: python + + import oracledb.plugins.oci_tokens + + token_based_auth = { # OCI specific configuration + "auth_type": "ConfigFileAuthentication", # parameters to be set when using + "profile": , # the oci_tokens plugin with + "file_location": , # configuration file authentication + } + + connection = oracledb.create_pool( + externalauth=True, # must always be True in Thick mode + homogeneous=True, # must always be True for connection pools + dsn=mydb_low, + extra_auth_params=token_based_auth) + +.. _oauth2: + +OAuth 2.0 Token-Based Authentication +------------------------------------ + +Oracle Cloud Infrastructure (OCI) users can be centrally managed in a Microsoft +Entra ID (formerly Microsoft Azure Active Directory) service. Open +Authorization (OAuth 2.0) token-based authentication allows users to +authenticate to Oracle Database using Entra ID OAuth2 tokens. Ensure that you +have a Microsoft Azure account and your Oracle Database is registered with +Microsoft Entra ID. See `Configuring the Oracle Database for Microsoft Entra +ID Integration `_ for more information. Both Thin +and Thick modes of the python-oracledb driver support OAuth 2.0 token-based +authentication. + +When using python-oracledb in Thick mode, Oracle Client libraries 19.15 (or +later), or 21.7 (or later) are needed. + +Standalone connections and pooled connections can be created in python-oracledb +Thick and Thin modes using OAuth 2.0 token-based authentication. This can be +done or by using a class such as the example :ref:`TokenHandlerOAuth Class +` or by using python-oracledb's :ref:`Azure Cloud Native +Authentication Plugin (azure_tokens) `. Tokens can be +specified using the connection parameter introduced in python-oracledb 1.1. 
+Users of earlier python-oracledb versions can alternatively use :ref:`OAuth 2.0 +Token-Based Authentication Connection Strings `. + +OAuth2 Token Generation And Extraction +++++++++++++++++++++++++++++++++++++++ + +There are different ways to retrieve Entra ID OAuth2 tokens. You can use +python-oracledb's :ref:`azure_tokens ` plugin to generate +tokens. Some of the other ways to retrieve OAuth2 tokens are detailed in +`Examples of Retrieving Entra ID OAuth2 Tokens `_. You +can also retrieve Entra ID OAuth2 tokens by using `Azure Identity client +library for Python `_. + +.. _oauthhandler: + +**Example of Generating an OAuth2 Token** + +An example of automating the process of generating and reading Entra ID OAuth2 +tokens is: + +.. code:: python + + import json + import os + + import oracledb + import requests + + class TokenHandlerOAuth: + + def __init__(self, + file_name="cached_token_file_name", + api_key="api_key", + client_id="client_id", + client_secret="client_secret"): + self.token = None + self.file_name = file_name + self.url = \ + f"https://login.microsoftonline.com/{api_key}/oauth2/v2.0/token" + self.scope = \ + f"https://oracledevelopment.onmicrosoft.com/{client_id}/.default" + if os.path.exists(file_name): + with open(file_name) as f: + self.token = f.read().strip() + self.api_key = api_key + self.client_id = client_id + self.client_secret = client_secret + + def __call__(self, refresh): + if self.token is None or refresh: + post_data = dict(client_id=self.client_id, + grant_type="client_credentials", + scope=self.scope, + client_secret=self.client_secret) + r = requests.post(url=self.url, data=post_data) + result = json.loads(r.text) + self.token = result["access_token"] + with open(self.file_name, "w") as f: + f.write(self.token) + return self.token + +The TokenHandlerOAuth class uses a callable to generate and read OAuth2 +tokens. When the callable in the TokenHandlerOAuth class is invoked for the +first time to create a standalone connection or pool, the ``refresh`` parameter +is *False* which allows the callable to return a cached token, if desired. The +expiry date is then extracted from this token and compared with the current +date. If the token has not expired, then it will be used directly. If the token +has expired, the callable is invoked the second time with the ``refresh`` +parameter set to *True*. + +The TokenHandlerOAuth class defined here is used in the examples shown in +:ref:`conncreationoauth2`. + +**Example of Using a Curl Command** + +See using a :ref:`curl ` command for an alternative way to generate the +tokens. + +.. _conncreationoauth2: + +Connection Creation with OAuth2 Access Tokens ++++++++++++++++++++++++++++++++++++++++++++++ + +For OAuth 2.0 Token-Based Authentication using a class such as the sample +:ref:`TokenHandlerOAuth class `, the ``access_token`` connection +parameter must be specified. This parameter should be a string (or a callable +that returns a string) specifying an Entra ID OAuth2 token. In the examples +used below, the ``access_token`` parameter is set to a callable. + +The examples used in the subsequent sections use the +:ref:`TokenHandlerOAuth class ` to generate OAuth2 tokens to +connect to Oracle Autonomous Database with mutual TLS (mTLS). See +:ref:`autonomousdb`. 
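+
+If a valid token has already been obtained outside the application, the
+``access_token`` parameter can alternatively be set directly to that string
+instead of a callable. A minimal sketch (the ``token_string`` variable below
+is a placeholder for a token you have acquired yourself) is:
+
+.. code:: python
+
+    token_string = "<previously acquired Entra ID OAuth2 token>"
+
+    connection = oracledb.connect(
+        access_token=token_string,
+        dsn=mydb_low,
+        config_dir="path_to_unzipped_wallet",
+        wallet_location="location_of_pem_file",
+        wallet_password=wp)
+
+A fixed string cannot be refreshed automatically, so the callable form shown
+in the following examples is preferred, particularly for connection pools that
+may need new tokens when they grow.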
+
+**Standalone Connections in Thin Mode Using OAuth2 Tokens**
+
+When using a class such as the :ref:`TokenHandlerOAuth class ` to
+generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode,
+you need to explicitly set the ``access_token``, and also any desired
+``config_dir``, ``wallet_location``, and ``wallet_password`` parameters of
+:func:`~oracledb.connect`. For example:
+
+.. code:: python
+
+    connection = oracledb.connect(
+        access_token=TokenHandlerOAuth(),
+        dsn=mydb_low,
+        config_dir="path_to_unzipped_wallet",
+        wallet_location="location_of_pem_file",
+        wallet_password=wp)
+
+**Connection Pools in Thin Mode Using OAuth2 Tokens**
+
+When using a class such as the :ref:`TokenHandlerOAuth class ` to
+generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode,
+you need to explicitly set the ``access_token`` parameter of
+:func:`~oracledb.create_pool`, and also any desired ``config_dir``,
+``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous``
+parameter must be *True* (its default value). For example:
+
+.. code:: python
+
+    pool = oracledb.create_pool(
+        access_token=TokenHandlerOAuth(),
+        homogeneous=True,  # must always be True for connection pools
+        dsn=mydb_low,
+        config_dir="path_to_unzipped_wallet",
+        wallet_location="location_of_pem_file",
+        wallet_password=wp,
+        min=1, max=5, increment=2)
+
+Note that the ``access_token`` parameter should be set to a callable. This is
+useful when the connection pool needs to expand and create new connections but
+the current token has expired. In such a case, the callable should return a
+string specifying the new, valid Entra ID OAuth2 token.
+
+**Standalone Connections in Thick Mode Using OAuth2 Tokens**
+
+When using a class such as the :ref:`TokenHandlerOAuth class `
+to generate OAuth2 tokens to connect to Oracle Autonomous Database in Thick
+mode, you need to explicitly set the ``access_token`` and ``externalauth``
+parameters of :func:`~oracledb.connect`. For example:
+
+.. code:: python
+
+    connection = oracledb.connect(
+        access_token=TokenHandlerOAuth(),
+        externalauth=True,  # must always be True in Thick mode
+        dsn=mydb_low)
+
+**Connection Pools in Thick Mode Using OAuth2 Tokens**
+
+When using a class such as the :ref:`TokenHandlerOAuth class ` to
+generate OAuth2 tokens to connect to Oracle Autonomous Database in Thick mode,
+you need to explicitly set the ``access_token`` and ``externalauth`` parameters
+of :func:`~oracledb.create_pool`. The ``homogeneous`` parameter must be *True*
+(which is its default value). For example:
+
+.. code:: python
+
+    pool = oracledb.create_pool(
+        access_token=TokenHandlerOAuth(),
+        externalauth=True,  # must always be True in Thick mode
+        homogeneous=True,   # must always be True for connection pools
+        dsn=mydb_low, min=1, max=5, increment=2)
+
+Note that the ``access_token`` parameter should be set to a callable. This is
+useful when the connection pool needs to expand and create new connections but
+the current token has expired. In such a case, the callable should return a
+string specifying the new, valid Entra ID OAuth2 token.
+
+.. _oauth2connstr:
+
+OAuth 2.0 Token-Based Authentication Connection Strings
++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+The connection string used by python-oracledb can specify the directory where
+the token file is located. This syntax is usable with older versions of
+python-oracledb. However, it is recommended to use connection parameters
+introduced in python-oracledb 1.1 instead.
See +:ref:`OAuth 2.0 Token-Based Authentication`. + +.. note:: + + OAuth 2.0 Token-Based Authentication Connection Strings is only supported + in python-oracledb Thick mode. See :ref:`enablingthick`. + +There are different ways to retrieve Entra ID OAuth2 tokens. Some of the ways to +retrieve OAuth2 tokens are detailed in `Examples of Retrieving Entra ID OAuth2 +Tokens `_. You can also retrieve Entra ID OAuth2 +tokens by using `Azure Identity client library for Python +`_. + +.. _curl: + +**Example of Using a Curl Command** + +Here, as an example, we are using Curl with a Resource Owner +Password Credential (ROPC) Flow, that is, a ``curl`` command is used against +the Entra ID API to get the Entra ID OAuth2 token:: + + curl -X POST -H 'Content-Type: application/x-www-form-urlencoded' + https://login.microsoftonline.com/your_tenant_id/oauth2/v2.0/token + -d 'client_id=your_client_id' + -d 'grant_type=client_credentials' + -d 'scope=https://oracledevelopment.onmicrosoft.com/your_client_id/.default' + -d 'client_secret=your_client_secret' + +This command generates a JSON response with token type, expiration, and access +token values. The JSON response needs to be parsed so that only the access +token is written and stored in a file. You can save the value of +``access_token`` generated to a file and set ``TOKEN_LOCATION`` to the location +of token file. See :ref:`TokenHandlerOAuth class ` for an example +of generating tokens. + +The Oracle Net parameters ``TOKEN_AUTH`` and ``TOKEN_LOCATION`` must be set when +you are using the connection string syntax. Also, the ``PROTOCOL`` +parameter must be ``tcps`` and ``SSL_SERVER_DN_MATCH`` should be ``ON``. + +You can set ``TOKEN_AUTH=OAUTH``. There is no default location set in this +case, so you must set ``TOKEN_LOCATION`` to either of the following: + +* A directory, in which case, you must create a file named ``token`` which + contains the token value +* A fully qualified file name, in which case, you must specify the entire path + of the file which contains the token value + +You can either set ``TOKEN_AUTH`` and ``TOKEN_LOCATION`` in a :ref:`sqlnet.ora +` file or alternatively, you can specify it inside a :ref:`Connect +Descriptor `, for example when using a :ref:`tnsnames.ora +` file:: + + db_alias = + (DESCRIPTION = + (ADDRESS=(PROTOCOL=TCPS)(PORT=1522)(HOST=xxx.oraclecloud.com)) + (CONNECT_DATA=(SERVICE_NAME=xxx.adb.oraclecloud.com)) + (SECURITY = + (SSL_SERVER_CERT_DN="CN=xxx.oraclecloud.com, \ + O=Oracle Corporation,L=Redwood City,ST=California,C=US") + (TOKEN_AUTH=OAUTH) + (TOKEN_LOCATION="/home/user1/mytokens/oauthtoken") + ) + ) + +The ``TOKEN_AUTH`` and ``TOKEN_LOCATION`` values in a connection string take +precedence over the ``sqlnet.ora`` settings. + +Standalone connection example: + +.. code-block:: python + + connection = oracledb.connect(dsn=db_alias, externalauth=True) + +Connection pool example: + +.. code-block:: python + + pool = oracledb.create_pool(dsn=db_alias, externalauth=True, + homogeneous=False, min=1, max=2, increment=1) + + connection = pool.acquire() + +.. _cloudnativeauthoauth: + +Azure Cloud Native Authentication with the azure_tokens Plugin +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + +With Cloud Native Authentication, python-oracledb's :ref:`azure_tokens +` plugin can automatically generate and refresh +OAuth2 tokens when required with the support of the `Microsoft Authentication +Library (MSAL) `__. + +The :ref:`azure_tokens ` plugin can be imported +like: + +.. 
code-block:: python + + import oracledb.plugins.azure_tokens + +The plugin has a Python package dependency which needs to be installed +separately before the plugin can be used, see :ref:`azuretokenmodules`. + +The ``azure_tokens`` plugin defines and registers a :ref:`parameter hook +` function which uses the connection parameter +``extra_auth_params`` passed to :meth:`oracledb.connect()`, +:meth:`oracledb.create_pool()`, :meth:`oracledb.connect_async()`, or +:meth:`oracledb.create_pool_async()`. Using this parameter's values, the hook +function sets the ``access_token`` parameter of a :ref:`ConnectParams object +` to a callable which generates an OAuth2 token. Python-oracledb +then acquires and uses a token to transparently complete connection or pool +creation calls. + +For OAuth 2.0 Token-Based Authentication connection and pool creation, the +``extra_auth_params`` parameter should be a dictionary with keys as shown in +the following table. + +.. list-table-with-summary:: Azure Cloud Native Authentication Configuration Keys + :header-rows: 1 + :class: wy-table-responsive + :widths: 10 30 10 + :name: _azure_configuration_parameters + :summary: The first column displays the dictionary key. The second column displays the description of the key. The third column displays whether the parameter is required or optional. + + * - Key + - Description + - Required or Optional + * - ``auth_type`` + - The authentication type. + + This must be the string "AzureServicePrincipal". This type makes the plugin acquire Azure service principal access tokens through a client credential flow. + - Required + * - ``authority`` + - This parameter must be set as a string in the URI format with the tenant ID, for example ``https://{identity provider instance}/{tenantId}``. + + The tenantId is the directory tenant against which the application operates, in either GUID or domain-name format. + - Required + * - ``client_id`` + - The application ID that is assigned to your application. + + This information can be found in the portal where the application was registered. + - Required + * - ``client_credential`` + - The client secret that was generated for your application in the application registration portal. + - Required + * - ``scopes`` + - This parameter represents the value of the scope for the request. + + It should be the resource identifier (application ID URI) of the desired resource, with the suffix ".default". For example, ``https://{uri}/clientID/.default``. + - Required + +All keys and values other than ``auth_type`` are used by the `Microsoft +Authentication Library (MSAL) `__ API calls in the plugin. The plugin +implementation can be seen in `plugins/azure_tokens.py +`__. + +For information on the Azure specific configuration parameters, see `MSAL +`__. + +The examples in the subsequent sections use the :ref:`azure_tokens +` plugin to generate OAuth2 tokens to connect to +Oracle Autonomous Database with mutual TLS (mTLS). See :ref:`autonomousdb`. + +**Standalone Connections in Thin Mode Using OAuth2 Tokens** + +When using the :ref:`azure_tokens ` plugin to +generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode, +you need to explicitly set the ``extra_auth_params`` parameter, and also any +required ``config_dir``, ``wallet_location``, and ``wallet_password`` +parameters of :func:`~oracledb.connect`. For example: + +.. 
code:: python + + import oracledb.plugins.azure_tokens + + token_based_auth = { + "auth_type": "AzureServicePrincipal", # Azure specific configuration + "authority": , # parameters to be set when using + "client_id": , # the azure_tokens plugin + "client_credential": , + "scopes": + } + + connection = oracledb.connect( + dsn=mydb_low, + config_dir="path_to_unzipped_wallet", + wallet_location="location_of_pem_file", + wallet_password=wp, + extra_auth_params=token_based_auth) + +**Connection Pools in Thin Mode Using OAuth2 Tokens** + +When using the :ref:`azure_tokens ` plugin to +generate OAuth2 tokens to connect to Oracle Autonomous Database in Thin mode, +you need to explicitly set the ``extra_auth_params`` parameter of +:func:`~oracledb.create_pool`, and also any desired ``config_dir``, +``wallet_location``, and ``wallet_password`` parameters. The ``homogeneous`` +parameter must be *True* (its default value). For example: + +.. code:: python + + import oracledb.plugins.azure_tokens + + token_based_auth = { + "auth_type": "AzureServicePrincipal", # Azure specific configuration + "authority": , # parameters to be set when using + "client_id": , # the azure_tokens plugin + "client_credential": , + "scopes": + } + + connection = oracledb.create_pool( + dsn=mydb_low, + config_dir="path_to_unzipped_wallet", + homogeneous=true, # must always be True for connection pools + wallet_location="location_of_pem_file", + wallet_password=wp, + extra_auth_params=token_based_auth) + +**Standalone Connections Thick Mode Using OAuth2 Tokens** + +When using the :ref:`azure_tokens ` plugin to +generate OAuth2 tokens to connect to Oracle Autonomous Database in Thick mode, +you need to explicitly set the ``extra_auth_params`` and ``externalauth`` +parameters of :func:`~oracledb.connect`. For example: + +.. code:: python + + import oracledb.plugins.azure_tokens + + token_based_auth = { + "auth_type": "AzureServicePrincipal", # Azure specific configuration + "authority": , # parameters to be set when using + "client_id": , # the azure_tokens plugin + "client_credential": , + "scopes": + } + + connection = oracledb.connect( + externalauth=True, # must always be True in Thick mode + dsn=mydb_low, + extra_auth_params=token_based_auth) + +**Connection Pools in Thick Mode Using OAuth2 Tokens** + +When using the :ref:`azure_tokens ` plugin to +generate OAuth2 tokens to connect to Oracle Autonomous Database in Thick mode, +you need to explicitly set the ``extra_auth_params`` and ``externalauth`` +parameters of :func:`~oracledb.create_pool`. The ``homogeneous`` parameter must +be *True* (its default value). For example: + +.. code:: python + + import oracledb.plugins.azure_tokens + + token_based_auth = { + "auth_type": "AzureServicePrincipal", # Azure specific configuration + "authority": , # parameters to be set when using + "client_id": , # the azure_tokens plugin + "client_credential": , + "scopes": + } + + connection = oracledb.create_pool( + externalauth=True, # must always be True in Thick mode + homogeneous=True, # must always be True for connection pools + dsn=mydb_low, + extra_auth_params=token_based_auth) + +.. _instanceprincipalauth: + +Instance Principal Authentication +================================= + +With Instance Principal Authentication, Oracle Cloud Infrastructure (OCI) +compute instances can be authorized to access services on Oracle Cloud such as +Oracle Autonomous Database. Python-oracledb applications running on such a +compute instance do not need to provide database user credentials. 
+ +Each compute instance behaves as a distinct type of Identity and Access +Management (IAM) Principal, that is, each compute instance has a unique +identity in the form of a digital certificate which is managed by OCI. When +using Instance Principal Authentication, a compute instance authenticates with +OCI IAM using this identity and obtains a short-lived token. This token is +then used to access Oracle Cloud services without storing or managing any +secrets in your application. + +The example below demonstrates how to connect to Oracle Autonomous +Database using Instance Principal authentication. To enable this, use +python-oracledb's :ref:`oci_tokens ` plugin which +is pre-installed with the ``oracledb`` module. + +**Step 1: Create an OCI Compute Instance** + +An `OCI compute instance `__ is a virtual machine running +within OCI that provides compute resources for your application. This compute +instance will be used to authenticate access to Oracle Cloud services when +using Instance Principal Authentication. + +To create an OCI compute instance, see the steps in `Creating an Instance +`__ section of the Oracle Cloud Infrastructure +documentation. + +For more information on OCI compute instances, see `Calling Services from a +Compute Instance `__. + +**Step 2: Install the OCI CLI on your compute instance** + +The `OCI Command Line Interface (CLI) `__ that can be used on its own or with +the Oracle Cloud console to complete OCI tasks. + +To install the OCI CLI on your compute instance, see the installation +instructions in the `Installing the CLI `__ section of Oracle Cloud Infrastructure +documentation. + +**Step 3: Create a Dynamic Group** + +A Dynamic Group is used to define rules to group the compute instances that +require access. + +To create a dynamic group using the Oracle Cloud console, see the steps in the +`To create a dynamic group `__ section of the Oracle Cloud +Infrastructure documentation. + +**Step 4: Create an IAM Policy** + +An IAM Policy is used to grant a dynamic group permission to access the +required OCI services such as Oracle Autonomous Database. If the scope is not +set, the policy should be for the specified tenancy. + +To create an IAM policy using Oracle Cloud console, see the steps in the +`Create an IAM Policy `__ section of the Oracle Cloud +Infrastructure documentation. + +**Step 5: Map an Instance Principal to an Oracle Database User** + +You must map the Instance Principal to an Oracle Database user. For more +information, see `Accessing the Database Using an Instance Principal +`__. + +Also, make sure that external authentication is enabled on Oracle ADB and +Oracle Database parameter ``IDENTITY_PROVIDER_TYPE`` is set to *OCI_IAM*. For +the steps, see `Enable IAM Authentication on ADB `__. + +**Step 6: Deploy your application on the Compute Instance** + +To use Instance Principal authentication, set ``extra_auth_params`` when +creating a standalone connection or a connection pool. The defined IAM policy +must allow access according to the specified scope. For information on the +keys of the ``extra_auth_params`` parameter, see +:ref:`_oci_configuration_parameters`. + +An example of connecting using Instance Principal: + +.. code-block:: python + + import oracledb + import oracledb.plugins.oci_tokens + + token_based_auth = { + "auth_type": "InstancePrincipal" + } + + connection = oracledb.connect( + dsn=mydb_low, + extra_auth_params=token_based_auth + ) + +.. 
_configproviderauthmethods:
+
+Authentication Methods for Centralized Configuration Providers
+==============================================================
+
+You may need to provide authentication methods to access a centralized
+configuration provider. The authentication methods for the following
+centralized configuration providers are detailed in this section:
+
+- :ref:`OCI Object Storage Centralized Configuration Provider
+  `
+
+- :ref:`Azure App Centralized Configuration Provider `
+
+.. _ociobjectstorageauthmethods:
+
+OCI Object Storage Configuration Provider Authentication Methods
+----------------------------------------------------------------
+
+An Oracle Cloud Infrastructure (OCI) authentication method can be used to
+access the OCI Object Storage centralized configuration provider. The
+authentication method can be set in the ``