diff --git a/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/ConflictWorker.py b/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/ConflictWorker.py index 6bdc3b2e6cc7..b79d29648f68 100644 --- a/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/ConflictWorker.py +++ b/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/ConflictWorker.py @@ -64,57 +64,57 @@ def initialize_async(self): udp_collection = self.try_create_document_collection(create_client, database, udp_collection) lww_sproc = {'id':'resolver', - 'body': "function resolver(incomingRecord, existingRecord, isTombstone, conflictingRecords) {\r\n" + - " var collection = getContext().getCollection();\r\n" + - "\r\n" + - " if (!incomingRecord) {\r\n" + - " if (existingRecord) {\r\n" + - "\r\n" + - " collection.deleteDocument(existingRecord._self, {}, function(err, responseOptions) {\r\n" + - " if (err) throw err;\r\n" + - " });\r\n" + - " }\r\n" + - " } else if (isTombstone) {\r\n" + - " // delete always wins.\r\n" + - " } else {\r\n" + - " var documentToUse = incomingRecord;\r\n" + - "\r\n" + - " if (existingRecord) {\r\n" + - " if (documentToUse.regionId < existingRecord.regionId) {\r\n" + - " documentToUse = existingRecord;\r\n" + - " }\r\n" + - " }\r\n" + - "\r\n" + - " var i;\r\n" + - " for (i = 0; i < conflictingRecords.length; i++) {\r\n" + - " if (documentToUse.regionId < conflictingRecords[i].regionId) {\r\n" + - " documentToUse = conflictingRecords[i];\r\n" + - " }\r\n" + - " }\r\n" + - "\r\n" + - " tryDelete(conflictingRecords, incomingRecord, existingRecord, documentToUse);\r\n" + - " }\r\n" + - "\r\n" + - " function tryDelete(documents, incoming, existing, documentToInsert) {\r\n" + - " if (documents.length > 0) {\r\n" + - " collection.deleteDocument(documents[0]._self, {}, function(err, responseOptions) {\r\n" + - " if (err) throw err;\r\n" + - "\r\n" + - " documents.shift();\r\n" + - " tryDelete(documents, incoming, existing, documentToInsert);\r\n" + - " });\r\n" + - " } else if (existing) {\r\n" + - " collection.replaceDocument(existing._self, documentToInsert,\r\n" + - " function(err, documentCreated) {\r\n" + - " if (err) throw err;\r\n" + - " });\r\n" + - " } else {\r\n" + - " collection.createDocument(collection.getSelfLink(), documentToInsert,\r\n" + - " function(err, documentCreated) {\r\n" + - " if (err) throw err;\r\n" + - " });\r\n" + - " }\r\n" + - " }\r\n" + + 'body': "function resolver(incomingRecord, existingRecord, isTombstone, conflictingRecords) {\r\n" + + " var collection = getContext().getCollection();\r\n" + + "\r\n" + + " if (!incomingRecord) {\r\n" + + " if (existingRecord) {\r\n" + + "\r\n" + + " collection.deleteDocument(existingRecord._self, {}, function(err, responseOptions) {\r\n" + + " if (err) throw err;\r\n" + + " });\r\n" + + " }\r\n" + + " } else if (isTombstone) {\r\n" + + " // delete always wins.\r\n" + + " } else {\r\n" + + " var documentToUse = incomingRecord;\r\n" + + "\r\n" + + " if (existingRecord) {\r\n" + + " if (documentToUse.regionId < existingRecord.regionId) {\r\n" + + " documentToUse = existingRecord;\r\n" + + " }\r\n" + + " }\r\n" + + "\r\n" + + " var i;\r\n" + + " for (i = 0; i < conflictingRecords.length; i++) {\r\n" + + " if (documentToUse.regionId < conflictingRecords[i].regionId) {\r\n" + + " documentToUse = conflictingRecords[i];\r\n" + + " }\r\n" + + " }\r\n" + + "\r\n" + + " tryDelete(conflictingRecords, incomingRecord, existingRecord, documentToUse);\r\n" + + " }\r\n" + + "\r\n" + + " function tryDelete(documents, incoming, existing, 
documentToInsert) {\r\n" + + " if (documents.length > 0) {\r\n" + + " collection.deleteDocument(documents[0]._self, {}, function(err, responseOptions) {\r\n" + + " if (err) throw err;\r\n" + + "\r\n" + + " documents.shift();\r\n" + + " tryDelete(documents, incoming, existing, documentToInsert);\r\n" + + " });\r\n" + + " } else if (existing) {\r\n" + + " collection.replaceDocument(existing._self, documentToInsert,\r\n" + + " function(err, documentCreated) {\r\n" + + " if (err) throw err;\r\n" + + " });\r\n" + + " } else {\r\n" + + " collection.createDocument(collection.getSelfLink(), documentToInsert,\r\n" + + " function(err, documentCreated) {\r\n" + + " if (err) throw err;\r\n" + + " });\r\n" + + " }\r\n" + + " }\r\n" + "}" } try: @@ -205,7 +205,7 @@ def run_update_conflict_on_manual_async(self): conflict_document_for_insertion = {'id': id, 'regionId': 0, 'regionEndpoint': self.clients[0].ReadEndpoint} conflict_document_for_insertion = self.try_insert_document(self.clients[0], self.manual_collection_link, conflict_document_for_insertion) time.sleep(1) #1 Second for write to sync. - + print("1) Performing conflicting update across %d regions on %s" % (len(self.clients), self.manual_collection_link)); i = 0 @@ -243,7 +243,7 @@ def run_delete_conflict_on_manual_async(self): conflict_document_for_insertion = {'id': id, 'regionId': 0, 'regionEndpoint': self.clients[0].ReadEndpoint} conflict_document_for_insertion = self.try_insert_document(self.clients[0], self.manual_collection_link, conflict_document_for_insertion) time.sleep(1) #1 Second for write to sync. - + print("1) Performing conflicting delete across %d regions on %s" % (len(self.clients), self.manual_collection_link)); i = 0 @@ -375,7 +375,7 @@ def run_delete_conflict_on_LWW_async(self): def run_insert_conflict_on_UDP_async(self): while True: print("1) Performing conflicting insert across 3 regions on %s" % self.udp_collection_link) - + id = str(uuid.uuid4()) i = 0 pool = ThreadPool(processes = len(self.clients)) @@ -587,7 +587,7 @@ def validate_LWW_async_internal(self, client, conflict_document, has_delete_conf options = {'partitionKey': conflict_document[0]['id']} client.ReadItem(conflict_document[0]['_self'], options) - self.trace_error("Delete conflict for document %s didnt win @ %s" % + self.trace_error("Delete conflict for document %s didnt win @ %s" % (conflict_document[0]['id'], client.ReadEndpoint)) time.sleep(0.5) @@ -614,7 +614,7 @@ def validate_LWW_async_internal(self, client, conflict_document, has_delete_conf existing_document = client.ReadItem(winner_document['_self'], options) if int(existing_document['regionId']) == int(winner_document['regionId']): - print("Winner document from region %d found at %s" % + print("Winner document from region %d found at %s" % (int(existing_document['regionId']), client.ReadEndpoint)) break else: @@ -623,9 +623,9 @@ def validate_LWW_async_internal(self, client, conflict_document, has_delete_conf time.sleep(0.5) except exceptions.AzureError as e: - self.trace_error("Winner document from region %d is not found @ %s, retrying..." % + self.trace_error("Winner document from region %d is not found @ %s, retrying..." 
% (int(winner_document["regionId"]), client.WriteEndpoint)) - + time.sleep(0.5) def validate_UDP_async(self, clients, conflict_document, has_delete_conflict): @@ -677,7 +677,7 @@ def validate_UDP_async_internal(self, client, conflict_document, has_delete_conf existing_document = client.ReadItem(self.udp_collection_link + "/docs/" + winner_document['id'], options) if int(existing_document['regionId']) == int(winner_document['regionId']): - print("Winner document from region %d found at %s" % + print("Winner document from region %d found at %s" % (int(existing_document["regionId"]), client.ReadEndpoint)) break else: diff --git a/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/MultiMasterScenario.py b/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/MultiMasterScenario.py index 453f8caa9fe9..c834dafe282e 100644 --- a/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/MultiMasterScenario.py +++ b/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/MultiMasterScenario.py @@ -48,7 +48,7 @@ def run_basic_async(self): print("####################################################") print("1) Starting insert loops across multiple regions ...") - + documents_to_insert_per_worker = 100 run_loop_futures = [] diff --git a/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/Worker.py b/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/Worker.py index c8d1dc1640bf..26ba54c91dc5 100644 --- a/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/Worker.py +++ b/sdk/cosmos/azure-cosmos/samples/MultiMasterOperations/Worker.py @@ -19,7 +19,7 @@ def run_loop_async(self, documents_to_insert): start = int(round(time.time() * 1000)) self.client.CreateItem(self.document_collection_link, document) end = int(round(time.time() * 1000)) - + latency.append(end - start) latency = sorted(latency) diff --git a/sdk/cosmos/azure-cosmos/samples/change_feed_management.py b/sdk/cosmos/azure-cosmos/samples/change_feed_management.py index 42427fb3dce7..e12d27db0994 100644 --- a/sdk/cosmos/azure-cosmos/samples/change_feed_management.py +++ b/sdk/cosmos/azure-cosmos/samples/change_feed_management.py @@ -8,12 +8,12 @@ import config # ---------------------------------------------------------------------------------------------------------- -# Prerequistes - -# -# 1. An Azure Cosmos account - +# Prerequistes - +# +# 1. An Azure Cosmos account - # https:#azure.microsoft.com/en-us/documentation/articles/documentdb-create-account/ # -# 2. Microsoft Azure Cosmos PyPi package - +# 2. Microsoft Azure Cosmos PyPi package - # https://pypi.python.org/pypi/azure-cosmos/ # ---------------------------------------------------------------------------------------------------------- # Sample - demonstrates how to consume the Change Feed and iterate on the results. @@ -85,7 +85,7 @@ def run_sample(): except exceptions.CosmosHttpResponseError as e: print('\nrun_sample has caught an error. 
{0}'.format(e.message)) - + finally: print("\nrun_sample done") diff --git a/sdk/cosmos/azure-cosmos/samples/collection_management.py b/sdk/cosmos/azure-cosmos/samples/container_management.py similarity index 81% rename from sdk/cosmos/azure-cosmos/samples/collection_management.py rename to sdk/cosmos/azure-cosmos/samples/container_management.py index 326fc8a6f1b8..9f0177f82b14 100644 --- a/sdk/cosmos/azure-cosmos/samples/collection_management.py +++ b/sdk/cosmos/azure-cosmos/samples/container_management.py @@ -5,29 +5,29 @@ import config # ---------------------------------------------------------------------------------------------------------- -# Prerequistes - -# -# 1. An Azure Cosmos account - +# Prerequistes - +# +# 1. An Azure Cosmos account - # https://azure.microsoft.com/en-us/documentation/articles/documentdb-create-account/ # -# 2. Microsoft Azure Cosmos PyPi package - +# 2. Microsoft Azure Cosmos PyPi package - # https://pypi.python.org/pypi/azure-cosmos/ # ---------------------------------------------------------------------------------------------------------- # Sample - demonstrates the basic CRUD operations on a Container resource for Azure Cosmos -# +# # 1. Query for Container -# +# # 2. Create Container # 2.1 - Basic Create # 2.2 - Create container with custom IndexPolicy -# 2.3 - Create container with offer throughput set +# 2.3 - Create container with provisioned throughput set # 2.4 - Create container with unique key -# 2.5 - Create Collection with partition key V2 -# 2.6 - Create Collection with partition key V1 +# 2.5 - Create Container with partition key V2 +# 2.6 - Create Container with partition key V1 # -# 3. Manage Container Offer Throughput -# 3.1 - Get Container performance tier -# 3.2 - Change performance tier +# 3. Manage Container Provisioned Throughput +# 3.1 - Get Container provisioned throughput (RU/s) +# 3.2 - Change provisioned throughput (RU/s) # # 4. Get a Container by its Id property # @@ -35,11 +35,11 @@ # # 6. Delete Container # ---------------------------------------------------------------------------------------------------------- -# Note - -# +# Note - +# # Running this sample will create (and delete) multiple Containers on your account. # Each time a Container is created the account will be billed for 1 hour of usage based on -# the performance tier of that account. +# the provisioned throughput (RU/s) of that account. # ---------------------------------------------------------------------------------------------------------- HOST = config.settings['host'] @@ -64,24 +64,24 @@ def find_container(db, id): print('Container with id \'{0}\' was found'.format(id)) else: print('No container with id \'{0}\' was found'. format(id)) - + def create_container(db, id): - """ Execute the most basic Create of container. + """ Execute the most basic Create of container. 
This will create a container with 400 RUs throughput and default indexing policy """ partition_key = PartitionKey(path='/id', kind='Hash') print("\n2.1 Create Container - Basic") - + try: db.create_container(id=id, partition_key=partition_key) print('Container with id \'{0}\' created'.format(id)) except exceptions.CosmosResourceExistsError: - print('A container with id \'{0}\' already exists'.format(id)) + print('A container with id \'{0}\' already exists'.format(id)) print("\n2.2 Create Container - With custom index policy") - + try: coll = { "id": "container_custom_index_policy", @@ -100,11 +100,11 @@ def create_container(db, id): print('Container with id \'{0}\' created'.format(container.id)) print('IndexPolicy Mode - \'{0}\''.format(properties['indexingPolicy']['indexingMode'])) print('IndexPolicy Automatic - \'{0}\''.format(properties['indexingPolicy']['automatic'])) - + except exceptions.CosmosResourceExistsError: print('A container with id \'{0}\' already exists'.format(coll['id'])) - print("\n2.3 Create Container - With custom offer throughput") + print("\n2.3 Create Container - With custom provisioned throughput") try: coll = {"id": "container_custom_throughput"} @@ -114,7 +114,7 @@ def create_container(db, id): offer_throughput=400 ) print('Container with id \'{0}\' created'.format(container.id)) - + except exceptions.CosmosResourceExistsError: print('A container with id \'{0}\' already exists'.format(coll['id'])) @@ -130,15 +130,15 @@ def create_container(db, id): unique_key_paths = properties['uniqueKeyPolicy']['uniqueKeys'][0]['paths'] print('Container with id \'{0}\' created'.format(container.id)) print('Unique Key Paths - \'{0}\', \'{1}\''.format(unique_key_paths[0], unique_key_paths[1])) - + except exceptions.CosmosResourceExistsError: print('A container with id \'container_unique_keys\' already exists') - print("\n2.5 Create Collection - With Partition key V2 (Default)") + print("\n2.5 Create Container - With Partition key V2 (Default)") try: container = db.create_container( - id="collection_partition_key_v2", + id="container_partition_key_v2", partition_key=PartitionKey(path='/id', kind='Hash') ) properties = container.read() @@ -146,13 +146,13 @@ def create_container(db, id): print('Partition Key - \'{0}\''.format(properties['partitionKey'])) except exceptions.CosmosResourceExistsError: - print('A container with id \'collection_partition_key_v2\' already exists') + print('A container with id \'container_partition_key_v2\' already exists') - print("\n2.6 Create Collection - With Partition key V1") + print("\n2.6 Create Container - With Partition key V1") try: container = db.create_container( - id="collection_partition_key_v1", + id="container_partition_key_v1", partition_key=PartitionKey(path='/id', kind='Hash', version=1) ) properties = container.read() @@ -160,37 +160,37 @@ def create_container(db, id): print('Partition Key - \'{0}\''.format(properties['partitionKey'])) except exceptions.CosmosResourceExistsError: - print('A container with id \'collection_partition_key_v1\' already exists') + print('A container with id \'container_partition_key_v1\' already exists') + +def manage_provisioned_throughput(db, id): + print("\n3.1 Get Container provisioned throughput (RU/s)") -def manage_offer_throughput(db, id): - print("\n3.1 Get Container Performance tier") - - #A Container's Offer Throughput determines the performance throughput of a container. + #A Container's Provisioned Throughput determines the performance throughput of a container. 
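A minimal, self-contained sketch of the read-and-replace throughput flow this renamed sample walks through, assuming the same azure-cosmos 4.x client used throughout these samples; the account endpoint, key, database id, and container id below are placeholders, not values from this patch.

import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey

client = cosmos_client.CosmosClient("https://<your-account>.documents.azure.com:443/", {'masterKey': '<your-key>'})
db = client.get_database_client("<your-database>")

try:
    # Create the container with an initial provisioned throughput of 400 RU/s
    container = db.create_container(
        id="throughput_sketch",
        partition_key=PartitionKey(path='/id', kind='Hash'),
        offer_throughput=400
    )
except exceptions.CosmosResourceExistsError:
    container = db.get_container_client("throughput_sketch")

# Read the offer backing the container, then raise its throughput by 100 RU/s
offer = container.read_offer()
print('Current provisioned throughput: {0}'.format(offer.properties['content']['offerThroughput']))

offer = container.replace_throughput(offer.offer_throughput + 100)
print('New provisioned throughput: {0}'.format(offer.properties['content']['offerThroughput']))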
#A Container is loosely coupled to Offer through the Offer's offerResourceId #Offer.offerResourceId == Container._rid #Offer.resource == Container._self - + try: # read the container, so we can get its _self container = db.get_container_client(container=id) # now use its _self to query for Offers offer = container.read_offer() - + print('Found Offer \'{0}\' for Container \'{1}\' and its throughput is \'{2}\''.format(offer.properties['id'], container.id, offer.properties['content']['offerThroughput'])) except exceptions.CosmosResourceExistsError: print('A container with id \'{0}\' does not exist'.format(id)) - print("\n3.2 Change Offer Throughput of Container") - - #The Offer Throughput of a container controls the throughput allocated to the Container + print("\n3.2 Change Provisioned Throughput of Container") + + #The Provisioned Throughput of a container controls the throughput allocated to the Container #The following code shows how you can change Container's throughput offer = container.replace_throughput(offer.offer_throughput + 100) - print('Replaced Offer. Offer Throughput is now \'{0}\''.format(offer.properties['content']['offerThroughput'])) - + print('Replaced Offer. Provisioned Throughput is now \'{0}\''.format(offer.properties['content']['offerThroughput'])) + def read_Container(db, id): print("\n4. Get a Container by id") @@ -200,26 +200,26 @@ def read_Container(db, id): print('Container with id \'{0}\' was found, it\'s link is {1}'.format(container.id, container.container_link)) except exceptions.CosmosResourceNotFoundError: - print('A container with id \'{0}\' does not exist'.format(id)) + print('A container with id \'{0}\' does not exist'.format(id)) def list_Containers(db): print("\n5. List all Container in a Database") - + print('Containers:') - + containers = list(db.list_containers()) - + if not containers: return for container in containers: print(container['id']) - + def delete_Container(db, id): print("\n6. Delete Container") - + try: db.delete_container(id) @@ -236,18 +236,18 @@ def run_sample(): # setup database for this sample try: db = client.create_database(id=DATABASE_ID) - + except exceptions.CosmosResourceExistsError: db = client.get_database_client(DATABASE_ID) - - # query for a container + + # query for a container find_container(db, CONTAINER_ID) - + # create a container create_container(db, CONTAINER_ID) - - # get & change Offer Throughput of container - manage_offer_throughput(db, CONTAINER_ID) + + # get & change Provisioned Throughput of container + manage_provisioned_throughput(db, CONTAINER_ID) # get a container using its id read_Container(db, CONTAINER_ID) @@ -261,13 +261,13 @@ def run_sample(): # cleanup database after sample try: client.delete_database(db) - + except exceptions.CosmosResourceNotFoundError: pass except exceptions.CosmosHttpResponseError as e: print('\nrun_sample has caught an error. {0}'.format(e.message)) - + finally: print("\nrun_sample done") diff --git a/sdk/cosmos/azure-cosmos/samples/database_management.py b/sdk/cosmos/azure-cosmos/samples/database_management.py index 26645a6f1a08..fe5017c5684b 100644 --- a/sdk/cosmos/azure-cosmos/samples/database_management.py +++ b/sdk/cosmos/azure-cosmos/samples/database_management.py @@ -4,12 +4,12 @@ import config # ---------------------------------------------------------------------------------------------------------- -# Prerequistes - -# -# 1. An Azure Cosmos account - +# Prerequistes - +# +# 1. 
An Azure Cosmos account - # https://docs.microsoft.com/azure/cosmos-db/create-sql-api-python#create-a-database-account # -# 2. Microsoft Azure Cosmos PyPi package - +# 2. Microsoft Azure Cosmos PyPi package - # https://pypi.python.org/pypi/azure-cosmos/ # ---------------------------------------------------------------------------------------------------------- # Sample - demonstrates the basic CRUD operations on a Database resource for Azure Cosmos @@ -43,17 +43,17 @@ def find_database(client, id): print('Database with id \'{0}\' was found'.format(id)) else: print('No database with id \'{0}\' was found'. format(id)) - + def create_database(client, id): print("\n2. Create Database") - + try: client.create_database(id=id) print('Database with id \'{0}\' created'.format(id)) except exceptions.CosmosResourceExistsError: - print('A database with id \'{0}\' already exists'.format(id)) + print('A database with id \'{0}\' already exists'.format(id)) def read_database(client, id): @@ -64,26 +64,26 @@ def read_database(client, id): print('Database with id \'{0}\' was found, it\'s link is {1}'.format(id, database.database_link)) except exceptions.CosmosResourceNotFoundError: - print('A database with id \'{0}\' does not exist'.format(id)) + print('A database with id \'{0}\' does not exist'.format(id)) def list_databases(client): print("\n4. List all Databases on an account") - + print('Databases:') - + databases = list(client.list_databases()) - + if not databases: return for database in databases: - print(database['id']) + print(database['id']) def delete_database(client, id): print("\n5. Delete Database") - + try: client.delete_database(id) @@ -93,15 +93,15 @@ def delete_database(client, id): print('A database with id \'{0}\' does not exist'.format(id)) -def run_sample(): +def run_sample(): client = cosmos_client.CosmosClient(HOST, {'masterKey': MASTER_KEY} ) try: # query for a database find_database(client, DATABASE_ID) - + # create a database create_database(client, DATABASE_ID) - + # get a database using its id read_database(client, DATABASE_ID) @@ -113,7 +113,7 @@ def run_sample(): except exceptions.CosmosHttpResponseError as e: print('\nrun_sample has caught an error. {0}'.format(e.message)) - + finally: print("\nrun_sample done") diff --git a/sdk/cosmos/azure-cosmos/samples/document_management.py b/sdk/cosmos/azure-cosmos/samples/document_management.py index 82f36d40852d..b4dd6b621f7e 100644 --- a/sdk/cosmos/azure-cosmos/samples/document_management.py +++ b/sdk/cosmos/azure-cosmos/samples/document_management.py @@ -6,12 +6,12 @@ import config # ---------------------------------------------------------------------------------------------------------- -# Prerequistes - -# -# 1. An Azure Cosmos account - +# Prerequistes - +# +# 1. An Azure Cosmos account - # https:#azure.microsoft.com/en-us/documentation/articles/documentdb-create-account/ # -# 2. Microsoft Azure Cosmos PyPi package - +# 2. Microsoft Azure Cosmos PyPi package - # https://pypi.python.org/pypi/azure-cosmos/ # ---------------------------------------------------------------------------------------------------------- # Sample - demonstrates the basic CRUD operations on a Item resource for Azure Cosmos @@ -32,7 +32,7 @@ def create_items(container): sales_order = get_sales_order("SalesOrder1") container.create_item(body=sales_order) - # As your app evolves, let's say your object has a new schema. You can insert SalesOrderV2 objects without any + # As your app evolves, let's say your object has a new schema. 
You can insert SalesOrderV2 objects without any # changes to the database tier. sales_order2 = get_sales_order_v2("SalesOrder2") container.create_item(body=sales_order2) @@ -56,9 +56,9 @@ def read_items(container): # Important to handle throttles whenever you are doing operations such as this that might # result in a 429 (throttled request) item_list = list(container.read_all_items(max_item_count=10)) - + print('Found {0} items'.format(item_list.__len__())) - + for doc in item_list: print('Item Id: {0}'.format(doc.get('id'))) @@ -66,7 +66,7 @@ def read_items(container): def query_items(container, doc_id): print('\n1.4 Querying for an Item by Id\n') - # enable_cross_partition_query should be set to True as the collection is partitioned + # enable_cross_partition_query should be set to True as the container is partitioned items = list(container.query_items( query="SELECT * FROM r WHERE r.id=@id", parameters=[ @@ -144,7 +144,7 @@ def get_sales_order_v2(item_id): 'items' : [ {'order_qty' : 3, 'product_code' : 'A-123', # notice how in item details we no longer reference a ProductId - 'product_name' : 'Product 1', # instead we have decided to denormalise our schema and include + 'product_name' : 'Product 1', # instead we have decided to denormalise our schema and include 'currency_symbol' : '$', # the Product details relevant to the Order on to the Order directly 'currecny_code' : 'USD', # this is a typical refactor that happens in the course of an application 'unit_price' : 17.1, # that would have previously required schema changes and data migrations etc. @@ -191,7 +191,7 @@ def run_sample(): except exceptions.CosmosHttpResponseError as e: print('\nrun_sample has caught an error. {0}'.format(e.message)) - + finally: print("\nrun_sample done") diff --git a/sdk/cosmos/azure-cosmos/samples/index_management.py b/sdk/cosmos/azure-cosmos/samples/index_management.py index 5b2580fd556d..169b2410d7f1 100644 --- a/sdk/cosmos/azure-cosmos/samples/index_management.py +++ b/sdk/cosmos/azure-cosmos/samples/index_management.py @@ -15,23 +15,23 @@ CONTAINER_ID = "index-samples" PARTITION_KEY = PartitionKey(path='/id', kind='Hash') -# A typical collection has the following properties within it's indexingPolicy property +# A typical container has the following properties within it's indexingPolicy property # indexingMode # automatic # includedPaths # excludedPaths -# +# # We can toggle 'automatic' to eiher be True or False depending upon whether we want to have indexing over all columns by default or not. # indexingMode can be either of consistent, lazy or none -# -# We can provide options while creating documents. indexingDirective is one such, -# by which we can tell whether it should be included or excluded in the index of the parent collection. +# +# We can provide options while creating documents. indexingDirective is one such, +# by which we can tell whether it should be included or excluded in the index of the parent container. # indexingDirective can be either 'Include', 'Exclude' or 'Default' # To run this Demo, please provide your own CA certs file or download one from # http://curl.haxx.se/docs/caextract.html -# Setup the certificate file in .pem format. +# Setup the certificate file in .pem format. 
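A minimal sketch of the container-level indexing policy these comments describe, assuming the same azure-cosmos 4.x client as the rest of these samples; the account endpoint, key, database id, container id, and the excluded path are placeholders, not values from this patch.

import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import PartitionKey

client = cosmos_client.CosmosClient("https://<your-account>.documents.azure.com:443/", {'masterKey': '<your-key>'})
db = client.get_database_client("<your-database>")

indexing_policy = {
    "indexingMode": "consistent",
    "automatic": True,
    "includedPaths": [{"path": "/*"}],          # index the whole document tree by default
    "excludedPaths": [{"path": "/metaData/*"}]  # skip a subtree that is never queried
}

try:
    container = db.create_container(
        id="index_policy_sketch",
        indexing_policy=indexing_policy,
        partition_key=PartitionKey(path='/id', kind='Hash')
    )
    print(container.read()["indexingPolicy"])
except exceptions.CosmosResourceExistsError:
    print("A container with id 'index_policy_sketch' already exists")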
# If you still get an SSLError, try disabling certificate verification and suppress warnings def obtain_client(): @@ -62,7 +62,7 @@ def query_entities(parent, entity_type, id = None): else: entities = list(parent.query_databases(find_entity_by_id_query)) - elif entity_type == 'collection': + elif entity_type == 'container': if id == None: entities = list(parent.list_containers()) else: @@ -95,15 +95,15 @@ def create_database_if_not_exists(client, database_id): pass -def delete_container_if_exists(db, collection_id): +def delete_container_if_exists(db, container_id): try: - db.delete_container(collection_id) - print('Collection with id \'{0}\' was deleted'.format(collection_id)) + db.delete_container(container_id) + print('Container with id \'{0}\' was deleted'.format(container_id)) except exceptions.CosmosResourceNotFoundError: pass except exceptions.CosmosHttpResponseError as e: if e.status_code == 400: - print("Bad request for collection link", collection_id) + print("Bad request for container link", container_id) raise @@ -144,19 +144,19 @@ def query_documents_with_custom_query(container, query_with_optional_parameters, def explicitly_exclude_from_index(db): """ The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added. - There may be scenarios where you want to exclude a specific doc from the index even though all other - documents are being indexed automatically. + There may be scenarios where you want to exclude a specific doc from the index even though all other + documents are being indexed automatically. This method demonstrates how to use an index directive to control this """ try: delete_container_if_exists(db, CONTAINER_ID) - # Create a collection with default index policy (i.e. automatic = true) + # Create a container with default index policy (i.e. automatic = true) created_Container = db.create_container(id=CONTAINER_ID, partition_key=PARTITION_KEY) print(created_Container) - print("\n" + "-" * 25 + "\n1. Collection created with index policy") + print("\n" + "-" * 25 + "\n1. Container created with index policy") properties = created_Container.read() print_dictionary_items(properties["indexingPolicy"]) @@ -172,7 +172,7 @@ def explicitly_exclude_from_index(db): } query_documents_with_custom_query(created_Container, query) - # Now, create a document but this time explictly exclude it from the collection using IndexingDirective + # Now, create a document but this time explictly exclude it from the container using IndexingDirective # Then query for that document # Shoud NOT find it, because we excluded it from the index # BUT, the document is there and doing a ReadDocument by Id will prove it @@ -203,14 +203,14 @@ def explicitly_exclude_from_index(db): def use_manual_indexing(db): """The default index policy on a DocumentContainer will AUTOMATICALLY index ALL documents added. - There may be cases where you can want to turn-off automatic indexing and only selectively add only specific documents to the index. + There may be cases where you can want to turn-off automatic indexing and only selectively add only specific documents to the index. 
This method demonstrates how to control this by setting the value of automatic within indexingPolicy to False """ try: delete_container_if_exists(db, CONTAINER_ID) - # Create a collection with manual (instead of automatic) indexing + # Create a container with manual (instead of automatic) indexing created_Container = db.create_container( id=CONTAINER_ID, indexing_policy={"automatic" : False}, @@ -219,12 +219,12 @@ def use_manual_indexing(db): properties = created_Container.read() print(created_Container) - print("\n" + "-" * 25 + "\n2. Collection created with index policy") + print("\n" + "-" * 25 + "\n2. Container created with index policy") print_dictionary_items(properties["indexingPolicy"]) # Create a document # Then query for that document - # We should find nothing, because automatic indexing on the collection level is False + # We should find nothing, because automatic indexing on the container level is False # BUT, the document is there and doing a ReadDocument by Id will prove it doc = created_Container.create_item(body={ "id" : "doc1", "orderId" : "order1" }) print("\n" + "-" * 25 + "Document doc1 created with order1" + "-" * 25) @@ -266,9 +266,9 @@ def use_manual_indexing(db): def exclude_paths_from_index(db): """The default behavior is for Cosmos to index every attribute in every document automatically. There are times when a document contains large amounts of information, in deeply nested structures - that you know you will never search on. In extreme cases like this, you can exclude paths from the + that you know you will never search on. In extreme cases like this, you can exclude paths from the index to save on storage cost, improve write performance and also improve read performance because the index is smaller - + This method demonstrates how to set excludedPaths within indexingPolicy """ try: @@ -281,28 +281,28 @@ def exclude_paths_from_index(db): "subDoc" : { "searchable" : "searchable", "nonSearchable" : "value" }, "excludedNode" : { "subExcluded" : "something", "subExcludedNode" : { "someProperty" : "value" } } } - collection_to_create = { "id" : CONTAINER_ID , - "indexingPolicy" : - { + container_to_create = { "id" : CONTAINER_ID , + "indexingPolicy" : + { "includedPaths" : [ {'path' : "/*"} ], # Special mandatory path of "/*" required to denote include entire tree "excludedPaths" : [ {'path' : "/metaData/*"}, # exclude metaData node, and anything under it - {'path' : "/subDoc/nonSearchable/*"}, # exclude ONLY a part of subDoc + {'path' : "/subDoc/nonSearchable/*"}, # exclude ONLY a part of subDoc {'path' : "/\"excludedNode\"/*"} # exclude excludedNode node, and anything under it ] - } + } } - print(collection_to_create) + print(container_to_create) print(doc_with_nested_structures) - # Create a collection with the defined properties + # Create a container with the defined properties # The effect of the above IndexingPolicy is that only id, foo, and the subDoc/searchable are indexed created_Container = db.create_container( - id=collection_to_create['id'], - indexing_policy=collection_to_create['indexingPolicy'], + id=container_to_create['id'], + indexing_policy=container_to_create['indexingPolicy'], partition_key=PARTITION_KEY ) properties = created_Container.read() print(created_Container) - print("\n" + "-" * 25 + "\n4. Collection created with index policy") + print("\n" + "-" * 25 + "\n4. 
Container created with index policy") print_dictionary_items(properties["indexingPolicy"]) # The effect of the above IndexingPolicy is that only id, foo, and the subDoc/searchable are indexed @@ -337,35 +337,35 @@ def exclude_paths_from_index(db): def range_scan_on_hash_index(db): - """When a range index is not available (i.e. Only hash or no index found on the path), comparisons queries can still + """When a range index is not available (i.e. Only hash or no index found on the path), comparisons queries can still be performed as scans using Allow scan request headers passed through options This method demonstrates how to force a scan when only hash indexes exist on the path ===== Warning===== - This was made an opt-in model by design. - Scanning is an expensive operation and doing this will have a large impact + This was made an opt-in model by design. + Scanning is an expensive operation and doing this will have a large impact on RequstUnits charged for an operation and will likely result in queries being throttled sooner. """ try: delete_container_if_exists(db, CONTAINER_ID) # Force a range scan operation on a hash indexed path - collection_to_create = { "id" : CONTAINER_ID , - "indexingPolicy" : - { + container_to_create = { "id" : CONTAINER_ID , + "indexingPolicy" : + { "includedPaths" : [ {'path' : "/"} ], "excludedPaths" : [ {'path' : "/length/*"} ] # exclude length - } + } } created_Container = db.create_container( - id=collection_to_create['id'], - indexing_policy=collection_to_create['indexingPolicy'], + id=container_to_create['id'], + indexing_policy=container_to_create['indexingPolicy'], partition_key=PARTITION_KEY ) properties = created_Container.read() print(created_Container) - print("\n" + "-" * 25 + "\n5. Collection created with index policy") + print("\n" + "-" * 25 + "\n5. Container created with index policy") print_dictionary_items(properties["indexingPolicy"]) doc1 = created_Container.create_item(body={ "id" : "dyn1", "length" : 10, "width" : 5, "height" : 15 }) @@ -404,14 +404,14 @@ def use_range_indexes_on_strings(db): """ try: delete_container_if_exists(db, CONTAINER_ID) - # collections = query_entities(client, 'collection', parent_link = database_link) - # print(collections) + # containers = query_entities(client, 'container', parent_link = database_link) + # print(containers) # Use range indexes on strings - + # This is how you can specify a range index on strings (and numbers) for all properties. - # This is the recommended indexing policy for collections. i.e. precision -1 - #indexingPolicy = { + # This is the recommended indexing policy for containers. i.e. precision -1 + #indexingPolicy = { # 'indexingPolicy': { # 'includedPaths': [ # { @@ -429,7 +429,7 @@ def use_range_indexes_on_strings(db): # For demo purposes, we are going to use the default (range on numbers, hash on strings) for the whole document (/* ) # and just include a range index on strings for the "region". - collection_definition = { + container_definition = { 'id': CONTAINER_ID, 'indexingPolicy': { 'includedPaths': [ @@ -451,13 +451,13 @@ def use_range_indexes_on_strings(db): } created_Container = db.create_container( - id=collection_definition['id'], - indexing_policy=collection_definition['indexingPolicy'], + id=container_definition['id'], + indexing_policy=container_definition['indexingPolicy'], partition_key=PARTITION_KEY ) properties = created_Container.read() print(created_Container) - print("\n" + "-" * 25 + "\n6. Collection created with index policy") + print("\n" + "-" * 25 + "\n6. 
Container created with index policy") print_dictionary_items(properties["indexingPolicy"]) created_Container.create_item(body={ "id" : "doc1", "region" : "USA" }) @@ -470,7 +470,7 @@ def use_range_indexes_on_strings(db): message = "Documents ordered by region" query_documents_with_custom_query(created_Container, query, message) - # You can also perform filters against string comparison like >= 'UK'. Note that you can perform a prefix query, + # You can also perform filters against string comparison like >= 'UK'. Note that you can perform a prefix query, # the equivalent of LIKE 'U%' (is >= 'U' AND < 'U') query = { "query" : "SELECT * FROM r WHERE r.region >= 'U'" } message = "Documents with region begining with U" @@ -489,12 +489,12 @@ def perform_index_transformations(db): try: delete_container_if_exists(db, CONTAINER_ID) - # Create a collection with default indexing policy + # Create a container with default indexing policy created_Container = db.create_container(id=CONTAINER_ID, partition_key=PARTITION_KEY) properties = created_Container.read() print(created_Container) - print("\n" + "-" * 25 + "\n7. Collection created with index policy") + print("\n" + "-" * 25 + "\n7. Container created with index policy") print_dictionary_items(properties["indexingPolicy"]) # Insert some documents @@ -507,8 +507,8 @@ def perform_index_transformations(db): print("Changing to string & number range indexing with maximum precision (needed for Order By).") properties['indexingPolicy']['includedPaths'][0]['indexes'] = [{ - 'kind': documents.IndexKind.Range, - 'dataType': documents.DataType.String, + 'kind': documents.IndexKind.Range, + 'dataType': documents.DataType.String, 'precision': -1 }] @@ -520,7 +520,7 @@ def perform_index_transformations(db): properties = created_Container.read() # Check progress and wait for completion - should be instantaneous since we have only a few documents, but larger - # collections will take time. + # containers will take time. print_dictionary_items(properties["indexingPolicy"]) # Now exclude a path from indexing to save on storage space. @@ -548,7 +548,7 @@ def perform_multi_orderby_query(db): try: delete_container_if_exists(db, CONTAINER_ID) - # Create a collection with composite indexes + # Create a container with composite indexes indexing_policy = { "compositeIndexes": [ [ @@ -590,7 +590,7 @@ def perform_multi_orderby_query(db): properties = created_container.read() print(created_container) - print("\n" + "-" * 25 + "\n8. Collection created with index policy") + print("\n" + "-" * 25 + "\n8. 
Container created with index policy") print_dictionary_items(properties["indexingPolicy"]) # Insert some documents diff --git a/sdk/cosmos/azure-cosmos/samples/nonpartitioned_collection_operations.py b/sdk/cosmos/azure-cosmos/samples/nonpartitioned_container_operations.py similarity index 95% rename from sdk/cosmos/azure-cosmos/samples/nonpartitioned_collection_operations.py rename to sdk/cosmos/azure-cosmos/samples/nonpartitioned_container_operations.py index 9d57e2e83360..646a9769a8ad 100644 --- a/sdk/cosmos/azure-cosmos/samples/nonpartitioned_collection_operations.py +++ b/sdk/cosmos/azure-cosmos/samples/nonpartitioned_container_operations.py @@ -51,8 +51,8 @@ CONTAINER_ID = config.settings['container_id'] -def create_nonpartitioned_collection(db): - # Create a non partitioned collection using the rest API and older version +def create_nonpartitioned_container(db): + # Create a non partitioned container using the rest API and older version client = requests.Session() base_url_split = HOST.split(":"); resource_url = base_url_split[0] + ":" + base_url_split[1] + ":" + base_url_split[2].split("/")[ @@ -80,12 +80,12 @@ def create_nonpartitioned_collection(db): # python 3 compatible: convert data from byte to unicode string data = data.decode('utf-8') data = json.loads(data) - created_collection = db.get_container_client("mycoll") + created_container = db.get_container_client("mycoll") - # Create a document in the non partitioned collection using the rest API and older version + # Create a document in the non partitioned container using the rest API and older version resource_url = base_url_split[0] + ":" + base_url_split[1] + ":" + base_url_split[2].split("/")[0] \ - + "//dbs/" + db.id + "/colls/" + created_collection.id + "/docs/" - resource_id_or_fullname = "dbs/" + db.id + "/colls/" + created_collection.id + + "//dbs/" + db.id + "/colls/" + created_container.id + "/docs/" + resource_id_or_fullname = "dbs/" + db.id + "/colls/" + created_container.id resource_type = "docs" data = json.dumps(get_sales_order('SaledOrder0')) @@ -105,7 +105,7 @@ def create_nonpartitioned_collection(db): data = data.decode('utf-8') data = json.loads(data) created_document = data - return created_collection, "SaledOrder0" + return created_container, "SaledOrder0" def get_authorization(client, verb, resource_id_or_fullname, resource_type, headers): @@ -169,7 +169,7 @@ def read_items(container): def query_items(container, doc_id): print('\n1.4 Querying for an Item by Id\n') - # enable_cross_partition_query should be set to True as the collection is partitioned + # enable_cross_partition_query should be set to True as the container is partitioned items = list(container.query_items( query="SELECT * FROM r WHERE r.id=@id", parameters=[ @@ -273,13 +273,13 @@ def run_sample(): # setup container for this sample try: - container, document = create_nonpartitioned_collection(db) + container, document = create_nonpartitioned_container(db) print('Container with id \'{0}\' created'.format(CONTAINER_ID)) except exceptions.CosmosResourceExistsError: print('Container with id \'{0}\' was found'.format(CONTAINER_ID)) - # Read Item created in non partitioned collection using older API version + # Read Item created in non partitioned container using older API version read_item(container, document) create_items(container) read_items(container)
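To round out the renamed non-partitioned sample, a sketch of how the legacy item it creates over REST could be read back with the 4.x client; it assumes the installed SDK exposes NonePartitionKeyValue under azure.cosmos.partition_key for items that predate the container's partition key, and the database id is a placeholder.

import azure.cosmos.cosmos_client as cosmos_client
import azure.cosmos.exceptions as exceptions
from azure.cosmos.partition_key import NonePartitionKeyValue  # assumed import path

client = cosmos_client.CosmosClient("https://<your-account>.documents.azure.com:443/", {'masterKey': '<your-key>'})
container = client.get_database_client("<your-database>").get_container_client("mycoll")

try:
    # Documents created before the container had a partition key live under the "none"
    # partition, so the read is addressed with NonePartitionKeyValue instead of a key value.
    item = container.read_item(item="SaledOrder0", partition_key=NonePartitionKeyValue)
    print("Read item with id '{0}'".format(item["id"]))
except exceptions.CosmosResourceNotFoundError:
    print("Item 'SaledOrder0' was not found")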