diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_client.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_client.py index 5dff06e16136..7bcaa3cfc744 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_client.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_client.py @@ -35,7 +35,6 @@ AppendPositionAccessConditions, SequenceNumberAccessConditions, StorageErrorException, - UserDelegationKey, CpkInfo) from ._serialize import get_modify_conditions, get_source_conditions, get_cpk_scope_info, get_api_version from ._deserialize import get_page_ranges_result, deserialize_blob_properties, deserialize_blob_stream @@ -43,7 +42,7 @@ upload_block_blob, upload_append_blob, upload_page_blob) -from ._models import BlobType, BlobBlock +from ._models import BlobType, BlobBlock, BlobProperties from ._download import StorageStreamDownloader from ._lease import BlobLeaseClient, get_access_conditions @@ -51,9 +50,6 @@ from datetime import datetime from ._generated.models import BlockList from ._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobSasPermissions, ContentSettings, PremiumPageBlobTier, StandardBlobTier, @@ -94,7 +90,7 @@ class BlobClient(StorageAccountHostsMixin): # pylint: disable=too-many-public-m The hostname of the secondary endpoint. :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient @@ -788,13 +784,14 @@ def get_blob_properties(self, **kwargs): snapshot=self.snapshot, lease_access_conditions=access_conditions, modified_access_conditions=mod_conditions, - cls=deserialize_blob_properties, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, cpk_info=cpk_info, **kwargs) except StorageErrorException as error: process_storage_error(error) blob_props.name = self.blob_name - blob_props.container = self.container_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name return blob_props # type: ignore def _set_http_headers_options(self, content_settings=None, **kwargs): diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_service_client.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_service_client.py index 787089422353..7a97c82fcdeb 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_service_client.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_blob_service_client.py @@ -77,7 +77,7 @@ class BlobServiceClient(StorageAccountHostsMixin): The hostname of the secondary endpoint. :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be uploaded with only one http PUT request. 
If the blob size is larger than max_single_put_size, the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient @@ -400,7 +400,7 @@ def list_containers( :dedent: 12 :caption: Listing the containers in the blob service. """ - include = 'metadata' if include_metadata else None + include = ['metadata'] if include_metadata else None timeout = kwargs.pop('timeout', None) results_per_page = kwargs.pop('results_per_page', None) command = functools.partial( diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_container_client.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_container_client.py index 9521b9839766..ed893793aa24 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_container_client.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_container_client.py @@ -97,7 +97,7 @@ class ContainerClient(StorageAccountHostsMixin): The hostname of the secondary endpoint. :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_azure_blob_storage.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_azure_blob_storage.py index 4da98d39c392..aa2784212021 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_azure_blob_storage.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_azure_blob_storage.py @@ -55,7 +55,7 @@ def __init__(self, url, **kwargs): self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-07-07' + self.api_version = '2019-12-12' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_configuration.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_configuration.py index 24ac275f9379..5bf56719ad19 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_configuration.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/_configuration.py @@ -40,7 +40,7 @@ def __init__(self, url, **kwargs): self.generate_client_request_id = True self.url = url - self.version = "2019-07-07" + self.version = "2019-12-12" def _configure(self, **kwargs): self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_azure_blob_storage_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_azure_blob_storage_async.py index 6273538a3a10..7b1aa347f118 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_azure_blob_storage_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_azure_blob_storage_async.py 
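A minimal usage sketch of the two convenience-layer changes above, written against the public BlobServiceClient; the connection string and container/blob names are placeholders rather than anything taken from this diff:

    from azure.storage.blob import BlobServiceClient

    service = BlobServiceClient.from_connection_string("<connection-string>")

    # list_containers now builds include=['metadata'] (a list rather than a bare
    # string), so container metadata actually comes back when it is requested.
    for container in service.list_containers(include_metadata=True):
        print(container.name, container.metadata)

    # get_blob_properties forwards a caller-supplied `cls` deserializer when one
    # is given, and only stamps the `container` attribute onto results that
    # really are BlobProperties; the default call below is unchanged for users.
    props = service.get_blob_client("my-container", "my-blob").get_blob_properties()
    print(props.container, props.name)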
@@ -56,7 +56,7 @@ def __init__( self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2019-07-07' + self.api_version = '2019-12-12' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_configuration_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_configuration_async.py index e061a25e717e..a500a0cfe381 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_configuration_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/_configuration_async.py @@ -41,7 +41,7 @@ def __init__(self, url, **kwargs): self.accept_language = None self.url = url - self.version = "2019-07-07" + self.version = "2019-12-12" def _configure(self, **kwargs): self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_append_blob_operations_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_append_blob_operations_async.py index db94e0a92b36..2f765d42722f 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_append_blob_operations_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_append_blob_operations_async.py @@ -24,7 +24,6 @@ class AppendBlobOperations: :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - :ivar comp: . Constant value: "appendblock". """ models = models @@ -37,9 +36,8 @@ def __init__(self, client, config, serializer, deserializer) -> None: self._config = config self.x_ms_blob_type = "AppendBlob" - self.comp = "appendblock" - async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Create Append Blob operation creates a new append blob. :param content_length: The length of the request. @@ -63,6 +61,9 @@ async def create(self, content_length, timeout=None, metadata=None, request_id=N KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. 
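The new blob_tags_string parameter travels as a single x-ms-tags header value. A small illustrative helper (not part of the generated client) that flattens a tag dictionary into that form, assuming the usual percent-encoded key=value&key=value layout:

    from urllib.parse import quote

    def encode_blob_tags(tags):
        # {"project": "research", "owner": "alice"} -> "project=research&owner=alice"
        return "&".join(
            "{}={}".format(quote(key, safe=""), quote(value, safe=""))
            for key, value in tags.items()
        )

    blob_tags_string = encode_blob_tags({"project": "research", "owner": "alice"})
    # The resulting string can be handed to create() or the copy operations below,
    # which pass it through unchanged as the x-ms-tags header.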
+ :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -151,6 +152,8 @@ async def create(self, content_length, timeout=None, metadata=None, request_id=N header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') if blob_content_type is not None: header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') @@ -200,6 +203,7 @@ async def create(self, content_length, timeout=None, metadata=None, request_id=N 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), @@ -292,6 +296,8 @@ async def append_block(self, body, content_length, timeout=None, transactional_c if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + comp = "appendblock" + # Construct URL url = self.append_block.metadata['url'] path_format_arguments = { @@ -303,7 +309,7 @@ async def append_block(self, body, content_length, timeout=None, transactional_c query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} @@ -475,6 +481,8 @@ async def append_block_from_url(self, source_url, content_length, source_range=N if source_modified_access_conditions is not None: source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "appendblock" + # Construct URL url = self.append_block_from_url.metadata['url'] path_format_arguments = { @@ -486,7 +494,7 @@ async def append_block_from_url(self, source_url, content_length, source_range=N query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} @@ -561,3 +569,111 @@ async def append_block_from_url(self, source_url, content_length, source_range=N } return cls(response, None, response_headers) append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} + + async def seal(self, timeout=None, request_id=None, lease_access_conditions=None, 
modified_access_conditions=None, append_position_access_conditions=None, *, cls=None, **kwargs): + """The Seal operation seals the Append Blob to make it read-only. Seal is + supported only on version 2019-12-12 version or later. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + + comp = "seal" + + # Construct URL + url = self.seal.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = 
self._serialize.header("if_none_match", if_none_match, 'str') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py index 3374d597419f..a9fbfc277a57 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_blob_operations_async.py @@ -41,7 +41,7 @@ def __init__(self, client, config, serializer, deserializer) -> None: self.x_ms_copy_action = "abort" self.restype = "account" - async def download(self, snapshot=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Download operation reads or downloads a blob from the system, including its metadata and properties. You can also call Download to read a snapshot. @@ -52,6 +52,10 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob. :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting @@ -126,6 +130,8 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont query_parameters = {} if snapshot is not None: query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) @@ -175,6 +181,8 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont header_dict = { 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), @@ -198,6 +206,7 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), @@ -205,6 +214,8 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } @@ -213,6 +224,8 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont header_dict = { 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), @@ -236,6 +249,7 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), @@ -243,6 +257,8 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } @@ -253,7 +269,7 @@ async def download(self, snapshot=None, timeout=None, range=None, range_get_cont return deserialized download.metadata = {'url': '/{containerName}/{blob}'} - async def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does not return the content of the blob. @@ -264,6 +280,10 @@ async def get_properties(self, snapshot=None, timeout=None, request_id=None, lea href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob. :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting @@ -327,6 +347,8 @@ async def get_properties(self, snapshot=None, timeout=None, request_id=None, lea query_parameters = {} if snapshot is not None: query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) @@ -366,6 +388,8 @@ async def get_properties(self, snapshot=None, timeout=None, request_id=None, lea 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), @@ -400,12 +424,17 @@ async def get_properties(self, snapshot=None, timeout=None, request_id=None, lea 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) get_properties.metadata = {'url': '/{containerName}/{blob}'} - async def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob @@ -428,6 +457,10 @@ async def delete(self, snapshot=None, timeout=None, delete_snapshots=None, reque href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob. :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. 
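download, get_properties and delete now accept a version_id that is sent as the versionid query parameter. A hypothetical convenience-layer sketch, assuming the public BlobClient simply forwards a version_id keyword to these generated operations (only the generated plumbing appears in this diff); the URL and version id are placeholders:

    from azure.storage.blob import BlobClient

    blob = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/<container>/<blob>?<sas>")

    old_version = "2020-02-10T21:36:54.5388405Z"  # an x-ms-version-id value

    # Read the properties and content of that specific version (?versionid=...).
    props = blob.get_blob_properties(version_id=old_version)
    data = blob.download_blob(version_id=old_version).readall()

    # Delete only that version; the current version of the blob stays in place.
    blob.delete_blob(version_id=old_version)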
+ :type version_id: str :param timeout: The timeout parameter is expressed in seconds. For more information, see Setting @@ -487,6 +520,8 @@ async def delete(self, snapshot=None, timeout=None, delete_snapshots=None, reque query_parameters = {} if snapshot is not None: query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) @@ -1017,6 +1052,79 @@ async def undelete(self, timeout=None, request_id=None, *, cls=None, **kwargs): return cls(response, None, response_headers) undelete.metadata = {'url': '/{containerName}/{blob}'} + async def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, *, cls=None, **kwargs): + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', + 'RelativeToNow', 'Absolute' + :type expiry_options: str or + ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param expires_on: The time to set the blob to expiry + :type expires_on: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "expiry" + + # Construct URL + url = self.set_expiry.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 
'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_expiry.metadata = {'url': '/{containerName}/{blob}'} + async def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Set HTTP Headers operation sets system properties on the blob. @@ -1275,6 +1383,7 @@ async def set_metadata(self, timeout=None, metadata=None, request_id=None, lease 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), @@ -1912,6 +2021,7 @@ async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cp 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), @@ -1919,7 +2029,7 @@ async def create_snapshot(self, timeout=None, metadata=None, request_id=None, cp return cls(response, None, response_headers) create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): """The Start Copy From URL operation copies a blob or an internet resource to a new blob. @@ -1957,6 +2067,12 @@ async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, ti KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. 
+ Service version 2019-12-12 and newer. + :type seal_blob: bool :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: @@ -2029,6 +2145,10 @@ async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, ti header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') if source_if_modified_since is not None: header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') if source_if_unmodified_since is not None: @@ -2064,6 +2184,7 @@ async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, ti 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), @@ -2072,7 +2193,7 @@ async def start_copy_from_url(self, copy_source, timeout=None, metadata=None, ti return cls(response, None, response_headers) start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): + async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, *, cls=None, **kwargs): """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete. @@ -2108,6 +2229,12 @@ async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read from the copy source. :type source_content_md5: bytearray + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. + Service version 2019-12-12 and newer. 
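Both copy operations gain blob_tags_string and seal_blob, which surface as the x-ms-tags and x-ms-seal-blob request headers. A hypothetical sketch that assumes the convenience layer passes both keywords through start_copy_from_url (this diff only shows the generated layer); the URLs and tag string are placeholders:

    from azure.storage.blob import BlobClient

    source_url = "https://<account>.blob.core.windows.net/<container>/<source-blob>?<sas>"
    dest = BlobClient.from_blob_url(
        "https://<account>.blob.core.windows.net/<container>/<dest-blob>?<sas>")

    # Tag the destination as part of the copy and seal it (meaningful when the
    # destination is an append blob). Requires service version 2019-12-12 or later.
    copy = dest.start_copy_from_url(
        source_url,
        blob_tags_string="project=research&owner=alice",
        seal_blob=True,
    )
    print(copy["copy_id"], copy["copy_status"])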
+ :type seal_blob: bool :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: @@ -2180,6 +2307,10 @@ async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') if source_content_md5 is not None: header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') if source_if_modified_since is not None: header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') @@ -2216,6 +2347,7 @@ async def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), @@ -2304,7 +2436,7 @@ async def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, leas return cls(response, None, response_headers) abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - async def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): + async def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, *, cls=None, **kwargs): """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium @@ -2316,6 +2448,16 @@ async def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting @@ -2357,6 +2499,10 @@ async def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id # Construct parameters query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') @@ -2441,3 +2587,387 @@ async def get_account_info(self, *, cls=None, **kwargs): } return cls(response, None, response_headers) get_account_info.metadata = {'url': '/{containerName}/{blob}'} + + async def quick_query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + """The QuickQuery operation enables users to select/project on blob data + by providing simple query expressions. + + :param query_request: the query request + :type query_request: ~azure.storage.blob.models.QueryRequest + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + + comp = "query" + + # Construct URL + url = self.quick_query.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] 
= self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + + # Construct body + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + await response.load_body() + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': 
self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': 
self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + quick_query.metadata = {'url': '/{containerName}/{blob}'} + + async def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, *, cls=None, **kwargs): + """The Get Tags operation enables users to get the tags associated with a + blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: BlobTags or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "tags" + + # Construct URL + url = self.get_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + 
header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BlobTags', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_tags.metadata = {'url': '/{containerName}/{blob}'} + + async def set_tags(self, timeout=None, snapshot=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, *, cls=None, **kwargs): + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. + :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. 
+ :type request_id: str + :param tags: Blob tags + :type tags: ~azure.storage.blob.models.BlobTags + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "tags" + + # Construct URL + url = self.set_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct body + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags') + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_block_blob_operations_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_block_blob_operations_async.py index b5225decc55c..ab07535bd90e 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_block_blob_operations_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_block_blob_operations_async.py @@ -37,7 +37,7 @@ def __init__(self, client, config, serializer, deserializer) -> 
None: self._config = config self.x_ms_blob_type = "BlockBlob" - async def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; @@ -75,6 +75,9 @@ async def upload(self, body, content_length, timeout=None, transactional_content KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -168,6 +171,8 @@ async def upload(self, body, content_length, timeout=None, transactional_content header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') if blob_content_type is not None: header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') @@ -219,6 +224,7 @@ async def upload(self, body, content_length, timeout=None, transactional_content 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), @@ -504,7 +510,7 @@ async def stage_block_from_url(self, block_id, content_length, source_url, sourc return cls(response, None, response_headers) stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, 
tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as part of a blob, a block must have been successfully written to the server in a @@ -546,6 +552,9 @@ async def commit_block_list(self, blocks, timeout=None, transactional_content_md KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -643,6 +652,8 @@ async def commit_block_list(self, blocks, timeout=None, transactional_content_md header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') if blob_cache_control is not None: header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') if blob_content_type is not None: @@ -695,6 +706,7 @@ async def commit_block_list(self, blocks, timeout=None, transactional_content_md 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_container_operations_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_container_operations_async.py index 5f2635477d3a..b7e1eb840e75 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_container_operations_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_container_operations_async.py @@ -591,6 +591,79 @@ async def set_access_policy(self, container_acl=None, timeout=None, access=None, return cls(response, None, response_headers) set_access_policy.metadata = {'url': '/{containerName}'} + async def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, *, cls=None, **kwargs): + """Restores a previously-deleted container. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. 
+ :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param deleted_container_name: Optional. Version 2019-12-12 and + later. Specifies the name of the deleted container to restore. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and + later. Specifies the version of the deleted container to restore. + :type deleted_container_version: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "container" + comp = "undelete" + + # Construct URL + url = self.restore.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if deleted_container_name is not None: + header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') + if deleted_container_version is not None: + header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + restore.metadata = {'url': '/{containerName}'} + + async def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, *, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete operations.
The lock duration can be 15 to 60 seconds, or can be diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_page_blob_operations_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_page_blob_operations_async.py index 5ea13fa19dfb..6ab1820ec145 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_page_blob_operations_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_page_blob_operations_async.py @@ -37,7 +37,7 @@ def __init__(self, client, config, serializer, deserializer) -> None: self._config = config self.x_ms_blob_type = "PageBlob" - async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, *, cls=None, **kwargs): """The Create operation creates a new page blob. :param content_length: The length of the request. @@ -74,6 +74,9 @@ async def create(self, content_length, blob_content_length, timeout=None, tier=N KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -167,6 +170,8 @@ async def create(self, content_length, blob_content_length, timeout=None, tier=N header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') if blob_content_type is not None: header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') @@ -216,6 +221,7 @@ async def create(self, content_length, blob_content_length, timeout=None, tier=N 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), diff --git 
a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_service_operations_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_service_operations_async.py index b62063c8eb9b..e12c2b9bfb56 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_service_operations_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/aio/operations_async/_service_operations_async.py @@ -276,10 +276,9 @@ async def list_containers_segment(self, prefix=None, marker=None, maxresults=Non of 5000. :type maxresults: int :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. Possible values - include: 'metadata' - :type include: str or - ~azure.storage.blob.models.ListContainersIncludeType + metadata be returned as part of the response body. + :type include: list[str or + ~azure.storage.blob.models.ListContainersIncludeType] :param timeout: The timeout parameter is expressed in seconds. For more information, see Setting @@ -315,7 +314,7 @@ async def list_containers_segment(self, prefix=None, marker=None, maxresults=Non if maxresults is not None: query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) if include is not None: - query_parameters['include'] = self._serialize.query("include", include, 'ListContainersIncludeType') + query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') @@ -565,3 +564,101 @@ async def submit_batch(self, body, content_length, multipart_content_type, timeo return deserialized submit_batch.metadata = {'url': '/'} + + async def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, *, cls=None, **kwargs): + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param where: Filters the results to return only blobs + whose tags match the specified expression. + :type where: str + :param marker: A string value that identifies the portion of the list + of containers to be returned with the next listing operation. The + operation returns the NextMarker value within the response body if the + listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value + for the marker parameter in a subsequent call to request the next page + of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to + return. If the request does not specify maxresults, or specifies a + value greater than 5000, the server will return up to 5000 items.
Note + that if the listing operation crosses a partition boundary, then the + service will return a continuation token for retrieving the remainder + of the results. For this reason, it is possible that the service will + return fewer results than specified by maxresults, or than the default + of 5000. + :type maxresults: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: FilterBlobSegment or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "blobs" + + # Construct URL + url = self.filter_blobs.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if where is not None: + query_parameters['where'] = self._serialize.query("where", where, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FilterBlobSegment', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + filter_blobs.metadata = {'url': '/'} diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/__init__.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/__init__.py index 4cf758cfccb6..b16a559b72b9 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/__init__.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/__init__.py @@ -15,10 +15,12 @@ from ._models_py3 import BlobFlatListSegment from ._models_py3 import BlobHierarchyListSegment from ._models_py3 import BlobHTTPHeaders - from ._models_py3 import BlobItem + from 
._models_py3 import BlobItemInternal from ._models_py3 import BlobMetadata from ._models_py3 import BlobPrefix from ._models_py3 import BlobProperties + from ._models_py3 import BlobTag + from ._models_py3 import BlobTags from ._models_py3 import Block from ._models_py3 import BlockList from ._models_py3 import BlockLookupList @@ -31,8 +33,12 @@ from ._models_py3 import CpkScopeInfo from ._models_py3 import DataLakeStorageError, DataLakeStorageErrorException from ._models_py3 import DataLakeStorageErrorError + from ._models_py3 import DelimitedTextConfiguration from ._models_py3 import DirectoryHttpHeaders + from ._models_py3 import FilterBlobItem + from ._models_py3 import FilterBlobSegment from ._models_py3 import GeoReplication + from ._models_py3 import JsonTextConfiguration from ._models_py3 import KeyInfo from ._models_py3 import LeaseAccessConditions from ._models_py3 import ListBlobsFlatSegmentResponse @@ -43,6 +49,9 @@ from ._models_py3 import ModifiedAccessConditions from ._models_py3 import PageList from ._models_py3 import PageRange + from ._models_py3 import QueryRequest + from ._models_py3 import QuickQueryFormat + from ._models_py3 import QuickQuerySerialization from ._models_py3 import RetentionPolicy from ._models_py3 import SequenceNumberAccessConditions from ._models_py3 import SignedIdentifier @@ -58,10 +67,12 @@ from ._models import BlobFlatListSegment from ._models import BlobHierarchyListSegment from ._models import BlobHTTPHeaders - from ._models import BlobItem + from ._models import BlobItemInternal from ._models import BlobMetadata from ._models import BlobPrefix from ._models import BlobProperties + from ._models import BlobTag + from ._models import BlobTags from ._models import Block from ._models import BlockList from ._models import BlockLookupList @@ -74,8 +85,12 @@ from ._models import CpkScopeInfo from ._models import DataLakeStorageError, DataLakeStorageErrorException from ._models import DataLakeStorageErrorError + from ._models import DelimitedTextConfiguration from ._models import DirectoryHttpHeaders + from ._models import FilterBlobItem + from ._models import FilterBlobSegment from ._models import GeoReplication + from ._models import JsonTextConfiguration from ._models import KeyInfo from ._models import LeaseAccessConditions from ._models import ListBlobsFlatSegmentResponse @@ -86,6 +101,9 @@ from ._models import ModifiedAccessConditions from ._models import PageList from ._models import PageRange + from ._models import QueryRequest + from ._models import QuickQueryFormat + from ._models import QuickQuerySerialization from ._models import RetentionPolicy from ._models import SequenceNumberAccessConditions from ._models import SignedIdentifier @@ -101,6 +119,7 @@ AccessTierRequired, AccountKind, ArchiveStatus, + BlobExpiryOptions, BlobType, BlockListType, CopyStatusType, @@ -115,6 +134,7 @@ PathRenameMode, PremiumPageBlobAccessTier, PublicAccessType, + QuickQueryFormatType, RehydratePriority, SequenceNumberActionType, SkuName, @@ -128,10 +148,12 @@ 'BlobFlatListSegment', 'BlobHierarchyListSegment', 'BlobHTTPHeaders', - 'BlobItem', + 'BlobItemInternal', 'BlobMetadata', 'BlobPrefix', 'BlobProperties', + 'BlobTag', + 'BlobTags', 'Block', 'BlockList', 'BlockLookupList', @@ -144,8 +166,12 @@ 'CpkScopeInfo', 'DataLakeStorageError', 'DataLakeStorageErrorException', 'DataLakeStorageErrorError', + 'DelimitedTextConfiguration', 'DirectoryHttpHeaders', + 'FilterBlobItem', + 'FilterBlobSegment', 'GeoReplication', + 'JsonTextConfiguration', 'KeyInfo', 
'LeaseAccessConditions', 'ListBlobsFlatSegmentResponse', @@ -156,6 +182,9 @@ 'ModifiedAccessConditions', 'PageList', 'PageRange', + 'QueryRequest', + 'QuickQueryFormat', + 'QuickQuerySerialization', 'RetentionPolicy', 'SequenceNumberAccessConditions', 'SignedIdentifier', @@ -175,10 +204,12 @@ 'BlobType', 'StorageErrorCode', 'GeoReplicationStatusType', + 'QuickQueryFormatType', 'AccessTierRequired', 'AccessTierOptional', 'PremiumPageBlobAccessTier', 'RehydratePriority', + 'BlobExpiryOptions', 'BlockListType', 'DeleteSnapshotsOptionType', 'EncryptionAlgorithmType', diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py index 382b7cb703ba..6bdfd2a9b05b 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_azure_blob_storage_enums.py @@ -200,6 +200,12 @@ class GeoReplicationStatusType(str, Enum): unavailable = "unavailable" +class QuickQueryFormatType(str, Enum): + + delimited = "delimited" + json = "json" + + class AccessTierRequired(str, Enum): p4 = "P4" @@ -257,6 +263,14 @@ class RehydratePriority(str, Enum): standard = "Standard" +class BlobExpiryOptions(str, Enum): + + never_expire = "NeverExpire" + relative_to_creation = "RelativeToCreation" + relative_to_now = "RelativeToNow" + absolute = "Absolute" + + class BlockListType(str, Enum): committed = "committed" @@ -282,11 +296,14 @@ class ListBlobsIncludeItem(str, Enum): metadata = "metadata" snapshots = "snapshots" uncommittedblobs = "uncommittedblobs" + versions = "versions" + tags = "tags" class ListContainersIncludeType(str, Enum): metadata = "metadata" + deleted = "deleted" class PathRenameMode(str, Enum): diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models.py index 52da5f19ef71..3717803f3958 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models.py @@ -41,7 +41,7 @@ def __init__(self, **kwargs): class AppendPositionAccessConditions(Model): """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url. + AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. :param max_size: Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would cause @@ -78,7 +78,7 @@ class BlobFlatListSegment(Model): All required parameters must be populated in order to send to Azure. :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItem] + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] """ _validation = { @@ -86,7 +86,7 @@ class BlobFlatListSegment(Model): } _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, } _xml_map = { 'name': 'Blobs' @@ -105,7 +105,7 @@ class BlobHierarchyListSegment(Model): :param blob_prefixes: :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] :param blob_items: Required. 
- :type blob_items: list[~azure.storage.blob.models.BlobItem] + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] """ _validation = { @@ -114,7 +114,7 @@ class BlobHierarchyListSegment(Model): _attribute_map = { 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, } _xml_map = { 'name': 'Blobs' @@ -175,7 +175,7 @@ def __init__(self, **kwargs): self.blob_content_disposition = kwargs.get('blob_content_disposition', None) -class BlobItem(Model): +class BlobItemInternal(Model): """An Azure Storage blob. All required parameters must be populated in order to send to Azure. @@ -186,10 +186,20 @@ class BlobItem(Model): :type deleted: bool :param snapshot: Required. :type snapshot: str + :param version_id: + :type version_id: str + :param is_current_version: + :type is_current_version: bool :param properties: Required. :type properties: ~azure.storage.blob.models.BlobProperties :param metadata: :type metadata: ~azure.storage.blob.models.BlobMetadata + :param blob_tags: + :type blob_tags: ~azure.storage.blob.models.BlobTags + :param object_replication_policy_id: + :type object_replication_policy_id: str + :param object_replication_rule_status: + :type object_replication_rule_status: dict[str, str] """ _validation = { @@ -203,20 +213,30 @@ class BlobItem(Model): 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, + 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}}, 'properties': {'key': 'Properties', 'type': 'BlobProperties', 'xml': {'name': 'Properties'}}, 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, + 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, + 'object_replication_policy_id': {'key': 'ObjectReplicationPolicyId', 'type': 'str', 'xml': {'name': 'ObjectReplicationPolicyId'}}, + 'object_replication_rule_status': {'key': 'ObjectReplicationRuleStatus', 'type': '{str}', 'xml': {'name': 'ObjectReplicationRuleStatus'}}, } _xml_map = { 'name': 'Blob' } def __init__(self, **kwargs): - super(BlobItem, self).__init__(**kwargs) + super(BlobItemInternal, self).__init__(**kwargs) self.name = kwargs.get('name', None) self.deleted = kwargs.get('deleted', None) self.snapshot = kwargs.get('snapshot', None) + self.version_id = kwargs.get('version_id', None) + self.is_current_version = kwargs.get('is_current_version', None) self.properties = kwargs.get('properties', None) self.metadata = kwargs.get('metadata', None) + self.blob_tags = kwargs.get('blob_tags', None) + self.object_replication_policy_id = kwargs.get('object_replication_policy_id', None) + self.object_replication_rule_status = kwargs.get('object_replication_rule_status', None) class BlobMetadata(Model): @@ -342,6 +362,12 @@ class BlobProperties(Model): :type encryption_scope: str :param access_tier_change_time: :type access_tier_change_time: datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: datetime + :param is_sealed: + :type is_sealed: 
bool """ _validation = { @@ -382,6 +408,9 @@ class BlobProperties(Model): 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, + 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, + 'is_sealed': {'key': 'IsSealed', 'type': 'bool', 'xml': {'name': 'IsSealed'}}, } _xml_map = { 'name': 'Properties' @@ -421,6 +450,64 @@ def __init__(self, **kwargs): self.customer_provided_key_sha256 = kwargs.get('customer_provided_key_sha256', None) self.encryption_scope = kwargs.get('encryption_scope', None) self.access_tier_change_time = kwargs.get('access_tier_change_time', None) + self.tag_count = kwargs.get('tag_count', None) + self.expires_on = kwargs.get('expires_on', None) + self.is_sealed = kwargs.get('is_sealed', None) + + +class BlobTag(Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. + :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, + 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__(self, **kwargs): + super(BlobTag, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) + + +class BlobTags(Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. + :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__(self, **kwargs): + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = kwargs.get('blob_tag_set', None) class Block(Model): @@ -565,6 +652,10 @@ class ContainerItem(Model): :param name: Required. :type name: str + :param deleted: + :type deleted: bool + :param version: + :type version: str :param properties: Required. 
:type properties: ~azure.storage.blob.models.ContainerProperties :param metadata: @@ -578,6 +669,8 @@ class ContainerItem(Model): _attribute_map = { 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, } @@ -588,6 +681,8 @@ class ContainerItem(Model): def __init__(self, **kwargs): super(ContainerItem, self).__init__(**kwargs) self.name = kwargs.get('name', None) + self.deleted = kwargs.get('deleted', None) + self.version = kwargs.get('version', None) self.properties = kwargs.get('properties', None) self.metadata = kwargs.get('metadata', None) @@ -618,6 +713,10 @@ class ContainerProperties(Model): :type default_encryption_scope: str :param prevent_encryption_scope_override: :type prevent_encryption_scope_override: bool + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int """ _validation = { @@ -636,6 +735,8 @@ class ContainerProperties(Model): 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, } _xml_map = { } @@ -652,6 +753,8 @@ def __init__(self, **kwargs): self.has_legal_hold = kwargs.get('has_legal_hold', None) self.default_encryption_scope = kwargs.get('default_encryption_scope', None) self.prevent_encryption_scope_override = kwargs.get('prevent_encryption_scope_override', None) + self.deleted_time = kwargs.get('deleted_time', None) + self.remaining_retention_days = kwargs.get('remaining_retention_days', None) class CorsRule(Model): @@ -824,6 +927,51 @@ def __init__(self, **kwargs): self.message = kwargs.get('message', None) +class DelimitedTextConfiguration(Model): + """delimited text configuration. + + All required parameters must be populated in order to send to Azure. + + :param column_separator: Required. column separator + :type column_separator: str + :param field_quote: Required. field quote + :type field_quote: str + :param record_separator: Required. record separator + :type record_separator: str + :param escape_char: Required. escape char + :type escape_char: str + :param headers_present: Required. 
has headers + :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__(self, **kwargs): + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = kwargs.get('column_separator', None) + self.field_quote = kwargs.get('field_quote', None) + self.record_separator = kwargs.get('record_separator', None) + self.escape_char = kwargs.get('escape_char', None) + self.headers_present = kwargs.get('headers_present', None) + + class DirectoryHttpHeaders(Model): """Additional parameters for a set of operations, such as: Directory_create, Directory_rename, Blob_rename. @@ -859,6 +1007,80 @@ def __init__(self, **kwargs): self.content_disposition = kwargs.get('content_disposition', None) +class FilterBlobItem(Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tag_value: Required. + :type tag_value: str + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + 'tag_value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, + 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__(self, **kwargs): + super(FilterBlobItem, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.container_name = kwargs.get('container_name', None) + self.tag_value = kwargs.get('tag_value', None) + + +class FilterBlobSegment(Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. 
+ :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, **kwargs): + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = kwargs.get('service_endpoint', None) + self.where = kwargs.get('where', None) + self.blobs = kwargs.get('blobs', None) + self.next_marker = kwargs.get('next_marker', None) + + class GeoReplication(Model): """Geo-Replication information for the Secondary Storage Service. @@ -892,6 +1114,31 @@ def __init__(self, **kwargs): self.last_sync_time = kwargs.get('last_sync_time', None) +class JsonTextConfiguration(Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. record separator + :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__(self, **kwargs): + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = kwargs.get('record_separator', None) + + class KeyInfo(Model): """Key information. @@ -1266,6 +1513,102 @@ def __init__(self, **kwargs): self.end = kwargs.get('end', None) +class QueryRequest(Model): + """the quick query body. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar query_type: Required. the query type. Default value: "SQL" . + :vartype query_type: str + :param expression: Required. 
a query statement + :type expression: str + :param input_serialization: + :type input_serialization: + ~azure.storage.blob.models.QuickQuerySerialization + :param output_serialization: + :type output_serialization: + ~azure.storage.blob.models.QuickQuerySerialization + """ + + _validation = { + 'query_type': {'required': True, 'constant': True}, + 'expression': {'required': True}, + } + + _attribute_map = { + 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, + 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, + 'input_serialization': {'key': 'InputSerialization', 'type': 'QuickQuerySerialization', 'xml': {'name': 'InputSerialization'}}, + 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuickQuerySerialization', 'xml': {'name': 'OutputSerialization'}}, + } + _xml_map = { + 'name': 'QueryRequest' + } + + query_type = "SQL" + + def __init__(self, **kwargs): + super(QueryRequest, self).__init__(**kwargs) + self.expression = kwargs.get('expression', None) + self.input_serialization = kwargs.get('input_serialization', None) + self.output_serialization = kwargs.get('output_serialization', None) + + +class QuickQueryFormat(Model): + """QuickQueryFormat. + + :param type: Possible values include: 'delimited', 'json' + :type type: str or ~azure.storage.blob.models.QuickQueryFormatType + :param delimited_text_configuration: + :type delimited_text_configuration: + ~azure.storage.blob.models.DelimitedTextConfiguration + :param json_text_configuration: + :type json_text_configuration: + ~azure.storage.blob.models.JsonTextConfiguration + """ + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'QuickQueryFormatType', 'xml': {'name': 'Type'}}, + 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}}, + 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(QuickQueryFormat, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.delimited_text_configuration = kwargs.get('delimited_text_configuration', None) + self.json_text_configuration = kwargs.get('json_text_configuration', None) + + +class QuickQuerySerialization(Model): + """QuickQuerySerialization. + + All required parameters must be populated in order to send to Azure. + + :param format: Required. + :type format: ~azure.storage.blob.models.QuickQueryFormat + """ + + _validation = { + 'format': {'required': True}, + } + + _attribute_map = { + 'format': {'key': 'Format', 'type': 'QuickQueryFormat', 'xml': {'name': 'Format'}}, + } + _xml_map = { + } + + def __init__(self, **kwargs): + super(QuickQuerySerialization, self).__init__(**kwargs) + self.format = kwargs.get('format', None) + + class RetentionPolicy(Model): """the retention policy which determines how long the associated data should persist. 
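The hunks above add per-blob tag support end to end: the BlobTag/BlobTags models, the generated async get_tags/set_tags operations, and the x-ms-tags header on uploads. A minimal sketch of the tag round trip through the generated layer, assuming blob_ops is an already-wired async BlobOperations instance (client construction is not part of this diff; only the model constructors and operation signatures below come from it):

from azure.storage.blob._generated.models import BlobTag, BlobTags

async def roundtrip_tags(blob_ops):
    # Both BlobTag fields are required; BlobTags wraps the tag set.
    tags = BlobTags(blob_tag_set=[
        BlobTag(key="project", value="alpha"),
        BlobTag(key="tier", value="archive"),
    ])
    # PUT {container}/{blob}?comp=tags with the XML-serialized BlobTags body.
    await blob_ops.set_tags(tags=tags)
    # GET {container}/{blob}?comp=tags returns a deserialized BlobTags.
    fetched = await blob_ops.get_tags()
    return {tag.key: tag.value for tag in fetched.blob_tag_set}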
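The blob_tags_string parameter added to upload, commit_block_list and create maps straight onto the new x-ms-tags request header. A sketch of passing it on a block blob upload; the query-string-style key=value&key=value encoding used below is an assumption about the header format rather than something this diff defines, and block_blob_ops is assumed to be a wired BlockBlobOperations instance:

from urllib.parse import urlencode

async def upload_block_blob_with_tags(block_blob_ops, data: bytes):
    # Assumed encoding of the tag set for the header: "key1=value1&key2=value2".
    tags_header = urlencode({"project": "alpha", "status": "raw"})
    await block_blob_ops.upload(
        body=data,
        content_length=len(data),
        blob_tags_string=tags_header,  # sent as the new x-ms-tags header
    )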
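filter_blobs is the service-level entry point into the tag index: it returns a FilterBlobSegment whose blobs list holds FilterBlobItem(name, container_name, tag_value) entries plus a next_marker for paging. A sketch, assuming service_ops is a wired ServiceOperations instance; the "tagname" = 'value' where-clause syntax is an assumption about the service's filter grammar, not defined by this diff:

async def find_blobs_by_tag(service_ops):
    segment = await service_ops.filter_blobs(
        where="\"project\" = 'alpha'",  # assumed filter grammar
        maxresults=100,
    )
    for item in segment.blobs:  # FilterBlobItem entries
        print(item.container_name, item.name, item.tag_value)
    # Feed next_marker back in as marker= to fetch the next page, if any.
    return segment.next_marker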
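BlobItemInternal (renamed from BlobItem) and BlobProperties now carry versioning and tag metadata (version_id, is_current_version, tag_count, expires_on, is_sealed), surfaced when a listing asks for the new 'versions'/'tags' include values. A sketch that walks a BlobFlatListSegment and reads the new fields:

def summarize_blob_listing(flat_segment):
    # flat_segment: BlobFlatListSegment; blob_items is now [BlobItemInternal].
    for blob in flat_segment.blob_items:
        props = blob.properties  # BlobProperties with the new fields
        print(
            blob.name,
            blob.version_id,
            blob.is_current_version,
            props.tag_count,
            props.is_sealed,
        )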
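The new ContainerOperations.restore operation (restype=container, comp=undelete) undeletes a soft-deleted container by name and version; those values would normally come from a container listing that uses the new 'deleted' value of ListContainersIncludeType and the deleted/version fields added to ContainerItem. A sketch with placeholder literals, assuming container_ops is a wired ContainerOperations instance:

async def undelete_container(container_ops):
    # Name and version of the soft-deleted container; placeholders shown here.
    await container_ops.restore(
        deleted_container_name="my-deleted-container",
        deleted_container_version="01D60F8BB59A4652",
    )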
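The quick-query models (QueryRequest, QuickQuerySerialization, QuickQueryFormat and the delimited/JSON text configurations) compose into the request body that the new quick_query operation serializes as 'QueryRequest'. A sketch that builds a CSV-in/JSON-out request from those models; how the resulting body is then handed to quick_query is assumed rather than shown by these hunks:

from azure.storage.blob._generated.models import (
    DelimitedTextConfiguration,
    JsonTextConfiguration,
    QueryRequest,
    QuickQueryFormat,
    QuickQueryFormatType,
    QuickQuerySerialization,
)

def build_query_request(expression: str) -> QueryRequest:
    # Input: comma-delimited text with a header row.
    csv_input = QuickQuerySerialization(format=QuickQueryFormat(
        type=QuickQueryFormatType.delimited,
        delimited_text_configuration=DelimitedTextConfiguration(
            column_separator=",",
            field_quote='"',
            record_separator="\n",
            escape_char="\\",
            headers_present=True,
        ),
    ))
    # Output: newline-delimited JSON records.
    json_output = QuickQuerySerialization(format=QuickQueryFormat(
        type=QuickQueryFormatType.json,
        json_text_configuration=JsonTextConfiguration(record_separator="\n"),
    ))
    # query_type is a constant ("SQL") on the model; callers supply only the
    # expression and the input/output serializations.
    return QueryRequest(
        expression=expression,
        input_serialization=csv_input,
        output_serialization=json_output,
    )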
diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models_py3.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models_py3.py index e7c30810cc86..2ce184a8c734 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models_py3.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/models/_models_py3.py @@ -41,7 +41,7 @@ def __init__(self, *, start: str=None, expiry: str=None, permission: str=None, * class AppendPositionAccessConditions(Model): """Additional parameters for a set of operations, such as: - AppendBlob_append_block, AppendBlob_append_block_from_url. + AppendBlob_append_block, AppendBlob_append_block_from_url, AppendBlob_seal. :param max_size: Optional conditional header. The max length in bytes permitted for the append blob. If the Append Block operation would cause @@ -78,7 +78,7 @@ class BlobFlatListSegment(Model): All required parameters must be populated in order to send to Azure. :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItem] + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] """ _validation = { @@ -86,7 +86,7 @@ class BlobFlatListSegment(Model): } _attribute_map = { - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'BlobItems', 'itemsName': 'Blob'}}, } _xml_map = { 'name': 'Blobs' @@ -105,7 +105,7 @@ class BlobHierarchyListSegment(Model): :param blob_prefixes: :type blob_prefixes: list[~azure.storage.blob.models.BlobPrefix] :param blob_items: Required. - :type blob_items: list[~azure.storage.blob.models.BlobItem] + :type blob_items: list[~azure.storage.blob.models.BlobItemInternal] """ _validation = { @@ -114,7 +114,7 @@ class BlobHierarchyListSegment(Model): _attribute_map = { 'blob_prefixes': {'key': 'BlobPrefixes', 'type': '[BlobPrefix]', 'xml': {'name': 'BlobPrefix', 'itemsName': 'BlobPrefix'}}, - 'blob_items': {'key': 'BlobItems', 'type': '[BlobItem]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, + 'blob_items': {'key': 'BlobItems', 'type': '[BlobItemInternal]', 'xml': {'name': 'Blob', 'itemsName': 'Blob'}}, } _xml_map = { 'name': 'Blobs' @@ -175,7 +175,7 @@ def __init__(self, *, blob_cache_control: str=None, blob_content_type: str=None, self.blob_content_disposition = blob_content_disposition -class BlobItem(Model): +class BlobItemInternal(Model): """An Azure Storage blob. All required parameters must be populated in order to send to Azure. @@ -186,10 +186,20 @@ class BlobItem(Model): :type deleted: bool :param snapshot: Required. :type snapshot: str + :param version_id: + :type version_id: str + :param is_current_version: + :type is_current_version: bool :param properties: Required. 
:type properties: ~azure.storage.blob.models.BlobProperties :param metadata: :type metadata: ~azure.storage.blob.models.BlobMetadata + :param blob_tags: + :type blob_tags: ~azure.storage.blob.models.BlobTags + :param object_replication_policy_id: + :type object_replication_policy_id: str + :param object_replication_rule_status: + :type object_replication_rule_status: dict[str, str] """ _validation = { @@ -203,20 +213,30 @@ class BlobItem(Model): 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, 'snapshot': {'key': 'Snapshot', 'type': 'str', 'xml': {'name': 'Snapshot'}}, + 'version_id': {'key': 'VersionId', 'type': 'str', 'xml': {'name': 'VersionId'}}, + 'is_current_version': {'key': 'IsCurrentVersion', 'type': 'bool', 'xml': {'name': 'IsCurrentVersion'}}, 'properties': {'key': 'Properties', 'type': 'BlobProperties', 'xml': {'name': 'Properties'}}, 'metadata': {'key': 'Metadata', 'type': 'BlobMetadata', 'xml': {'name': 'Metadata'}}, + 'blob_tags': {'key': 'BlobTags', 'type': 'BlobTags', 'xml': {'name': 'BlobTags'}}, + 'object_replication_policy_id': {'key': 'ObjectReplicationPolicyId', 'type': 'str', 'xml': {'name': 'ObjectReplicationPolicyId'}}, + 'object_replication_rule_status': {'key': 'ObjectReplicationRuleStatus', 'type': '{str}', 'xml': {'name': 'ObjectReplicationRuleStatus'}}, } _xml_map = { 'name': 'Blob' } - def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, metadata=None, **kwargs) -> None: - super(BlobItem, self).__init__(**kwargs) + def __init__(self, *, name: str, deleted: bool, snapshot: str, properties, version_id: str=None, is_current_version: bool=None, metadata=None, blob_tags=None, object_replication_policy_id: str=None, object_replication_rule_status=None, **kwargs) -> None: + super(BlobItemInternal, self).__init__(**kwargs) self.name = name self.deleted = deleted self.snapshot = snapshot + self.version_id = version_id + self.is_current_version = is_current_version self.properties = properties self.metadata = metadata + self.blob_tags = blob_tags + self.object_replication_policy_id = object_replication_policy_id + self.object_replication_rule_status = object_replication_rule_status class BlobMetadata(Model): @@ -342,6 +362,12 @@ class BlobProperties(Model): :type encryption_scope: str :param access_tier_change_time: :type access_tier_change_time: datetime + :param tag_count: + :type tag_count: int + :param expires_on: + :type expires_on: datetime + :param is_sealed: + :type is_sealed: bool """ _validation = { @@ -382,12 +408,15 @@ class BlobProperties(Model): 'customer_provided_key_sha256': {'key': 'CustomerProvidedKeySha256', 'type': 'str', 'xml': {'name': 'CustomerProvidedKeySha256'}}, 'encryption_scope': {'key': 'EncryptionScope', 'type': 'str', 'xml': {'name': 'EncryptionScope'}}, 'access_tier_change_time': {'key': 'AccessTierChangeTime', 'type': 'rfc-1123', 'xml': {'name': 'AccessTierChangeTime'}}, + 'tag_count': {'key': 'TagCount', 'type': 'int', 'xml': {'name': 'TagCount'}}, + 'expires_on': {'key': 'Expiry-Time', 'type': 'rfc-1123', 'xml': {'name': 'Expiry-Time'}}, + 'is_sealed': {'key': 'IsSealed', 'type': 'bool', 'xml': {'name': 'IsSealed'}}, } _xml_map = { 'name': 'Properties' } - def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, 
blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, **kwargs) -> None: + def __init__(self, *, last_modified, etag: str, creation_time=None, content_length: int=None, content_type: str=None, content_encoding: str=None, content_language: str=None, content_md5: bytearray=None, content_disposition: str=None, cache_control: str=None, blob_sequence_number: int=None, blob_type=None, lease_status=None, lease_state=None, lease_duration=None, copy_id: str=None, copy_status=None, copy_source: str=None, copy_progress: str=None, copy_completion_time=None, copy_status_description: str=None, server_encrypted: bool=None, incremental_copy: bool=None, destination_snapshot: str=None, deleted_time=None, remaining_retention_days: int=None, access_tier=None, access_tier_inferred: bool=None, archive_status=None, customer_provided_key_sha256: str=None, encryption_scope: str=None, access_tier_change_time=None, tag_count: int=None, expires_on=None, is_sealed: bool=None, **kwargs) -> None: super(BlobProperties, self).__init__(**kwargs) self.creation_time = creation_time self.last_modified = last_modified @@ -421,6 +450,64 @@ def __init__(self, *, last_modified, etag: str, creation_time=None, content_leng self.customer_provided_key_sha256 = customer_provided_key_sha256 self.encryption_scope = encryption_scope self.access_tier_change_time = access_tier_change_time + self.tag_count = tag_count + self.expires_on = expires_on + self.is_sealed = is_sealed + + +class BlobTag(Model): + """BlobTag. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. + :type key: str + :param value: Required. + :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str', 'xml': {'name': 'Key'}}, + 'value': {'key': 'Value', 'type': 'str', 'xml': {'name': 'Value'}}, + } + _xml_map = { + 'name': 'Tag' + } + + def __init__(self, *, key: str, value: str, **kwargs) -> None: + super(BlobTag, self).__init__(**kwargs) + self.key = key + self.value = value + + +class BlobTags(Model): + """Blob tags. + + All required parameters must be populated in order to send to Azure. + + :param blob_tag_set: Required. + :type blob_tag_set: list[~azure.storage.blob.models.BlobTag] + """ + + _validation = { + 'blob_tag_set': {'required': True}, + } + + _attribute_map = { + 'blob_tag_set': {'key': 'BlobTagSet', 'type': '[BlobTag]', 'xml': {'name': 'TagSet', 'itemsName': 'TagSet', 'wrapped': True}}, + } + _xml_map = { + 'name': 'Tags' + } + + def __init__(self, *, blob_tag_set, **kwargs) -> None: + super(BlobTags, self).__init__(**kwargs) + self.blob_tag_set = blob_tag_set class Block(Model): @@ -565,6 +652,10 @@ class ContainerItem(Model): :param name: Required. :type name: str + :param deleted: + :type deleted: bool + :param version: + :type version: str :param properties: Required. 
:type properties: ~azure.storage.blob.models.ContainerProperties :param metadata: @@ -578,6 +669,8 @@ class ContainerItem(Model): _attribute_map = { 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'deleted': {'key': 'Deleted', 'type': 'bool', 'xml': {'name': 'Deleted'}}, + 'version': {'key': 'Version', 'type': 'str', 'xml': {'name': 'Version'}}, 'properties': {'key': 'Properties', 'type': 'ContainerProperties', 'xml': {'name': 'Properties'}}, 'metadata': {'key': 'Metadata', 'type': '{str}', 'xml': {'name': 'Metadata'}}, } @@ -585,9 +678,11 @@ class ContainerItem(Model): 'name': 'Container' } - def __init__(self, *, name: str, properties, metadata=None, **kwargs) -> None: + def __init__(self, *, name: str, properties, deleted: bool=None, version: str=None, metadata=None, **kwargs) -> None: super(ContainerItem, self).__init__(**kwargs) self.name = name + self.deleted = deleted + self.version = version self.properties = properties self.metadata = metadata @@ -618,6 +713,10 @@ class ContainerProperties(Model): :type default_encryption_scope: str :param prevent_encryption_scope_override: :type prevent_encryption_scope_override: bool + :param deleted_time: + :type deleted_time: datetime + :param remaining_retention_days: + :type remaining_retention_days: int """ _validation = { @@ -636,11 +735,13 @@ class ContainerProperties(Model): 'has_legal_hold': {'key': 'HasLegalHold', 'type': 'bool', 'xml': {'name': 'HasLegalHold'}}, 'default_encryption_scope': {'key': 'DefaultEncryptionScope', 'type': 'str', 'xml': {'name': 'DefaultEncryptionScope'}}, 'prevent_encryption_scope_override': {'key': 'DenyEncryptionScopeOverride', 'type': 'bool', 'xml': {'name': 'DenyEncryptionScopeOverride'}}, + 'deleted_time': {'key': 'DeletedTime', 'type': 'rfc-1123', 'xml': {'name': 'DeletedTime'}}, + 'remaining_retention_days': {'key': 'RemainingRetentionDays', 'type': 'int', 'xml': {'name': 'RemainingRetentionDays'}}, } _xml_map = { } - def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, **kwargs) -> None: + def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=None, lease_duration=None, public_access=None, has_immutability_policy: bool=None, has_legal_hold: bool=None, default_encryption_scope: str=None, prevent_encryption_scope_override: bool=None, deleted_time=None, remaining_retention_days: int=None, **kwargs) -> None: super(ContainerProperties, self).__init__(**kwargs) self.last_modified = last_modified self.etag = etag @@ -652,6 +753,8 @@ def __init__(self, *, last_modified, etag: str, lease_status=None, lease_state=N self.has_legal_hold = has_legal_hold self.default_encryption_scope = default_encryption_scope self.prevent_encryption_scope_override = prevent_encryption_scope_override + self.deleted_time = deleted_time + self.remaining_retention_days = remaining_retention_days class CorsRule(Model): @@ -824,6 +927,51 @@ def __init__(self, *, code: str=None, message: str=None, **kwargs) -> None: self.message = message +class DelimitedTextConfiguration(Model): + """delimited text configuration. + + All required parameters must be populated in order to send to Azure. + + :param column_separator: Required. column separator + :type column_separator: str + :param field_quote: Required. field quote + :type field_quote: str + :param record_separator: Required. 
record separator + :type record_separator: str + :param escape_char: Required. escape char + :type escape_char: str + :param headers_present: Required. has headers + :type headers_present: bool + """ + + _validation = { + 'column_separator': {'required': True}, + 'field_quote': {'required': True}, + 'record_separator': {'required': True}, + 'escape_char': {'required': True}, + 'headers_present': {'required': True}, + } + + _attribute_map = { + 'column_separator': {'key': 'ColumnSeparator', 'type': 'str', 'xml': {'name': 'ColumnSeparator'}}, + 'field_quote': {'key': 'FieldQuote', 'type': 'str', 'xml': {'name': 'FieldQuote'}}, + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + 'escape_char': {'key': 'EscapeChar', 'type': 'str', 'xml': {'name': 'EscapeChar'}}, + 'headers_present': {'key': 'HeadersPresent', 'type': 'bool', 'xml': {'name': 'HasHeaders'}}, + } + _xml_map = { + 'name': 'DelimitedTextConfiguration' + } + + def __init__(self, *, column_separator: str, field_quote: str, record_separator: str, escape_char: str, headers_present: bool, **kwargs) -> None: + super(DelimitedTextConfiguration, self).__init__(**kwargs) + self.column_separator = column_separator + self.field_quote = field_quote + self.record_separator = record_separator + self.escape_char = escape_char + self.headers_present = headers_present + + class DirectoryHttpHeaders(Model): """Additional parameters for a set of operations, such as: Directory_create, Directory_rename, Blob_rename. @@ -859,6 +1007,80 @@ def __init__(self, *, cache_control: str=None, content_type: str=None, content_e self.content_disposition = content_disposition +class FilterBlobItem(Model): + """Blob info from a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. + :type name: str + :param container_name: Required. + :type container_name: str + :param tag_value: Required. + :type tag_value: str + """ + + _validation = { + 'name': {'required': True}, + 'container_name': {'required': True}, + 'tag_value': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str', 'xml': {'name': 'Name'}}, + 'container_name': {'key': 'ContainerName', 'type': 'str', 'xml': {'name': 'ContainerName'}}, + 'tag_value': {'key': 'TagValue', 'type': 'str', 'xml': {'name': 'TagValue'}}, + } + _xml_map = { + 'name': 'Blob' + } + + def __init__(self, *, name: str, container_name: str, tag_value: str, **kwargs) -> None: + super(FilterBlobItem, self).__init__(**kwargs) + self.name = name + self.container_name = container_name + self.tag_value = tag_value + + +class FilterBlobSegment(Model): + """The result of a Filter Blobs API call. + + All required parameters must be populated in order to send to Azure. + + :param service_endpoint: Required. + :type service_endpoint: str + :param where: Required. + :type where: str + :param blobs: Required. 
+ :type blobs: list[~azure.storage.blob.models.FilterBlobItem] + :param next_marker: + :type next_marker: str + """ + + _validation = { + 'service_endpoint': {'required': True}, + 'where': {'required': True}, + 'blobs': {'required': True}, + } + + _attribute_map = { + 'service_endpoint': {'key': 'ServiceEndpoint', 'type': 'str', 'xml': {'name': 'ServiceEndpoint', 'attr': True}}, + 'where': {'key': 'Where', 'type': 'str', 'xml': {'name': 'Where'}}, + 'blobs': {'key': 'Blobs', 'type': '[FilterBlobItem]', 'xml': {'name': 'Blobs', 'itemsName': 'Blobs', 'wrapped': True}}, + 'next_marker': {'key': 'NextMarker', 'type': 'str', 'xml': {'name': 'NextMarker'}}, + } + _xml_map = { + 'name': 'EnumerationResults' + } + + def __init__(self, *, service_endpoint: str, where: str, blobs, next_marker: str=None, **kwargs) -> None: + super(FilterBlobSegment, self).__init__(**kwargs) + self.service_endpoint = service_endpoint + self.where = where + self.blobs = blobs + self.next_marker = next_marker + + class GeoReplication(Model): """Geo-Replication information for the Secondary Storage Service. @@ -892,6 +1114,31 @@ def __init__(self, *, status, last_sync_time, **kwargs) -> None: self.last_sync_time = last_sync_time +class JsonTextConfiguration(Model): + """json text configuration. + + All required parameters must be populated in order to send to Azure. + + :param record_separator: Required. record separator + :type record_separator: str + """ + + _validation = { + 'record_separator': {'required': True}, + } + + _attribute_map = { + 'record_separator': {'key': 'RecordSeparator', 'type': 'str', 'xml': {'name': 'RecordSeparator'}}, + } + _xml_map = { + 'name': 'JsonTextConfiguration' + } + + def __init__(self, *, record_separator: str, **kwargs) -> None: + super(JsonTextConfiguration, self).__init__(**kwargs) + self.record_separator = record_separator + + class KeyInfo(Model): """Key information. @@ -1266,6 +1513,102 @@ def __init__(self, *, start: int, end: int, **kwargs) -> None: self.end = end +class QueryRequest(Model): + """the quick query body. + + Variables are only populated by the server, and will be ignored when + sending a request. + + All required parameters must be populated in order to send to Azure. + + :ivar query_type: Required. the query type. Default value: "SQL" . + :vartype query_type: str + :param expression: Required. 
a query statement + :type expression: str + :param input_serialization: + :type input_serialization: + ~azure.storage.blob.models.QuickQuerySerialization + :param output_serialization: + :type output_serialization: + ~azure.storage.blob.models.QuickQuerySerialization + """ + + _validation = { + 'query_type': {'required': True, 'constant': True}, + 'expression': {'required': True}, + } + + _attribute_map = { + 'query_type': {'key': 'QueryType', 'type': 'str', 'xml': {'name': 'QueryType'}}, + 'expression': {'key': 'Expression', 'type': 'str', 'xml': {'name': 'Expression'}}, + 'input_serialization': {'key': 'InputSerialization', 'type': 'QuickQuerySerialization', 'xml': {'name': 'InputSerialization'}}, + 'output_serialization': {'key': 'OutputSerialization', 'type': 'QuickQuerySerialization', 'xml': {'name': 'OutputSerialization'}}, + } + _xml_map = { + 'name': 'QueryRequest' + } + + query_type = "SQL" + + def __init__(self, *, expression: str, input_serialization=None, output_serialization=None, **kwargs) -> None: + super(QueryRequest, self).__init__(**kwargs) + self.expression = expression + self.input_serialization = input_serialization + self.output_serialization = output_serialization + + +class QuickQueryFormat(Model): + """QuickQueryFormat. + + :param type: Possible values include: 'delimited', 'json' + :type type: str or ~azure.storage.blob.models.QuickQueryFormatType + :param delimited_text_configuration: + :type delimited_text_configuration: + ~azure.storage.blob.models.DelimitedTextConfiguration + :param json_text_configuration: + :type json_text_configuration: + ~azure.storage.blob.models.JsonTextConfiguration + """ + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'QuickQueryFormatType', 'xml': {'name': 'Type'}}, + 'delimited_text_configuration': {'key': 'DelimitedTextConfiguration', 'type': 'DelimitedTextConfiguration', 'xml': {'name': 'DelimitedTextConfiguration'}}, + 'json_text_configuration': {'key': 'JsonTextConfiguration', 'type': 'JsonTextConfiguration', 'xml': {'name': 'JsonTextConfiguration'}}, + } + _xml_map = { + } + + def __init__(self, *, type=None, delimited_text_configuration=None, json_text_configuration=None, **kwargs) -> None: + super(QuickQueryFormat, self).__init__(**kwargs) + self.type = type + self.delimited_text_configuration = delimited_text_configuration + self.json_text_configuration = json_text_configuration + + +class QuickQuerySerialization(Model): + """QuickQuerySerialization. + + All required parameters must be populated in order to send to Azure. + + :param format: Required. + :type format: ~azure.storage.blob.models.QuickQueryFormat + """ + + _validation = { + 'format': {'required': True}, + } + + _attribute_map = { + 'format': {'key': 'Format', 'type': 'QuickQueryFormat', 'xml': {'name': 'Format'}}, + } + _xml_map = { + } + + def __init__(self, *, format, **kwargs) -> None: + super(QuickQuerySerialization, self).__init__(**kwargs) + self.format = format + + class RetentionPolicy(Model): """the retention policy which determines how long the associated data should persist. 
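# --- Illustrative sketch (not part of the diff above) ----------------------
# Why: the quick-query models introduced in _models_py3.py above are easier to
# follow with a concrete composition. This shows how DelimitedTextConfiguration,
# JsonTextConfiguration, QuickQueryFormat, QuickQuerySerialization and
# QueryRequest might be combined into the body for the new "query" operation.
# Assumption: the generated models package re-exports these classes at the
# import path below; that path is not shown in this diff.
from azure.storage.blob._generated.models import (
    DelimitedTextConfiguration,
    JsonTextConfiguration,
    QueryRequest,
    QuickQueryFormat,
    QuickQuerySerialization,
)

# Input: CSV with a header row; output: newline-separated JSON records.
input_format = QuickQueryFormat(
    type='delimited',
    delimited_text_configuration=DelimitedTextConfiguration(
        column_separator=',',
        field_quote='"',
        record_separator='\n',
        escape_char='',
        headers_present=True,
    ),
)
output_format = QuickQueryFormat(
    type='json',
    json_text_configuration=JsonTextConfiguration(record_separator='\n'),
)

query_request = QueryRequest(
    expression="SELECT * from BlobStorage",
    input_serialization=QuickQuerySerialization(format=input_format),
    output_serialization=QuickQuerySerialization(format=output_format),
)
# query_request would then be passed as the query_request argument of the
# generated quick_query operation added later in this change.
# ----------------------------------------------------------------------------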
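# --- Illustrative sketch (not part of the diff above) ----------------------
# Why: the BlobTag/BlobTags models added above describe the <Tags>/<TagSet>
# XML body, while the create/copy operations later in this change accept the
# same tags as a single blob_tags_string header value (x-ms-tags) instead.
# Assumptions: the import path below, and the query-string style encoding
# ("key=value&key=value") of the header, are not spelled out in this diff.
from urllib.parse import urlencode

from azure.storage.blob._generated.models import BlobTag, BlobTags

tags = {'project': 'alpha', 'status': 'draft'}

# XML body form, e.g. for a tags round trip.
blob_tags = BlobTags(
    blob_tag_set=[BlobTag(key=k, value=v) for k, v in tags.items()]
)

# Header form for the blob_tags_string parameter (sent as x-ms-tags).
blob_tags_string = urlencode(tags)
# ----------------------------------------------------------------------------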
diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_append_blob_operations.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_append_blob_operations.py index 58948c64d126..d30cedda2fb9 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_append_blob_operations.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_append_blob_operations.py @@ -24,7 +24,6 @@ class AppendBlobOperations(object): :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar x_ms_blob_type: Specifies the type of blob to create: block blob, page blob, or append blob. Constant value: "AppendBlob". - :ivar comp: . Constant value: "appendblock". """ models = models @@ -37,9 +36,8 @@ def __init__(self, client, config, serializer, deserializer): self._config = config self.x_ms_blob_type = "AppendBlob" - self.comp = "appendblock" - def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + def create(self, content_length, timeout=None, metadata=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): """The Create Append Blob operation creates a new append blob. :param content_length: The length of the request. @@ -63,6 +61,9 @@ def create(self, content_length, timeout=None, metadata=None, request_id=None, b KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. 
+ :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -151,6 +152,8 @@ def create(self, content_length, timeout=None, metadata=None, request_id=None, b header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') if blob_content_type is not None: header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') @@ -200,6 +203,7 @@ def create(self, content_length, timeout=None, metadata=None, request_id=None, b 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), @@ -292,6 +296,8 @@ def append_block(self, body, content_length, timeout=None, transactional_content if modified_access_conditions is not None: if_none_match = modified_access_conditions.if_none_match + comp = "appendblock" + # Construct URL url = self.append_block.metadata['url'] path_format_arguments = { @@ -303,7 +309,7 @@ def append_block(self, body, content_length, timeout=None, transactional_content query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} @@ -475,6 +481,8 @@ def append_block_from_url(self, source_url, content_length, source_range=None, s if source_modified_access_conditions is not None: source_if_none_match = source_modified_access_conditions.source_if_none_match + comp = "appendblock" + # Construct URL url = self.append_block_from_url.metadata['url'] path_format_arguments = { @@ -486,7 +494,7 @@ def append_block_from_url(self, source_url, content_length, source_range=None, s query_parameters = {} if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) - query_parameters['comp'] = self._serialize.query("self.comp", self.comp, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') # Construct headers header_parameters = {} @@ -561,3 +569,111 @@ def append_block_from_url(self, source_url, content_length, source_range=None, s } return cls(response, None, response_headers) append_block_from_url.metadata = {'url': '/{containerName}/{blob}'} + + def seal(self, timeout=None, request_id=None, lease_access_conditions=None, 
modified_access_conditions=None, append_position_access_conditions=None, cls=None, **kwargs): + """The Seal operation seals the Append Blob to make it read-only. Seal is + supported only on version 2019-12-12 version or later. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param append_position_access_conditions: Additional parameters for + the operation + :type append_position_access_conditions: + ~azure.storage.blob.models.AppendPositionAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + append_position = None + if append_position_access_conditions is not None: + append_position = append_position_access_conditions.append_position + + comp = "seal" + + # Construct URL + url = self.seal.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = 
self._serialize.header("if_none_match", if_none_match, 'str') + if append_position is not None: + header_parameters['x-ms-blob-condition-appendpos'] = self._serialize.header("append_position", append_position, 'long') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + seal.metadata = {'url': '/{containerName}/{blob}'} diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_blob_operations.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_blob_operations.py index 3927498cf07a..2938560a4dda 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_blob_operations.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_blob_operations.py @@ -41,7 +41,7 @@ def __init__(self, client, config, serializer, deserializer): self.x_ms_copy_action = "abort" self.restype = "account" - def download(self, snapshot=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): + def download(self, snapshot=None, version_id=None, timeout=None, range=None, range_get_content_md5=None, range_get_content_crc64=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): """The Download operation reads or downloads a blob from the system, including its metadata and properties. You can also call Download to read a snapshot. @@ -52,6 +52,10 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob. :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting @@ -126,6 +130,8 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md query_parameters = {} if snapshot is not None: query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) @@ -174,6 +180,8 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md header_dict = { 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), @@ -197,6 +205,7 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), @@ -204,6 +213,8 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } @@ -212,6 +223,8 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md header_dict = { 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), @@ -235,6 +248,7 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md 'x-ms-client-request-id': self._deserialize('str', 
response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), @@ -242,6 +256,8 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } @@ -252,7 +268,7 @@ def download(self, snapshot=None, timeout=None, range=None, range_get_content_md return deserialized download.metadata = {'url': '/{containerName}/{blob}'} - def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): + def get_properties(self, snapshot=None, version_id=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): """The Get Properties operation returns all user-defined metadata, standard HTTP properties, and system properties for the blob. It does not return the content of the blob. @@ -263,6 +279,10 @@ def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_acc href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob. :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting @@ -326,6 +346,8 @@ def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_acc query_parameters = {} if snapshot is not None: query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) @@ -365,6 +387,8 @@ def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_acc 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), 'x-ms-creation-time': self._deserialize('rfc-1123', response.headers.get('x-ms-creation-time')), 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'x-ms-or-policy-id': self._deserialize('str', response.headers.get('x-ms-or-policy-id')), + 'x-ms-or': self._deserialize('{str}', response.headers.get('x-ms-or')), 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), @@ -399,12 +423,17 @@ def get_properties(self, snapshot=None, timeout=None, request_id=None, lease_acc 'x-ms-access-tier-inferred': self._deserialize('bool', response.headers.get('x-ms-access-tier-inferred')), 'x-ms-archive-status': self._deserialize('str', response.headers.get('x-ms-archive-status')), 'x-ms-access-tier-change-time': self._deserialize('rfc-1123', response.headers.get('x-ms-access-tier-change-time')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), + 'x-ms-is-current-version': self._deserialize('bool', response.headers.get('x-ms-is-current-version')), + 'x-ms-tag-count': self._deserialize('long', response.headers.get('x-ms-tag-count')), + 'x-ms-expiry-time': self._deserialize('rfc-1123', response.headers.get('x-ms-expiry-time')), + 'x-ms-blob-sealed': self._deserialize('bool', response.headers.get('x-ms-blob-sealed')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } return cls(response, None, response_headers) get_properties.metadata = {'url': '/{containerName}/{blob}'} - def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + def delete(self, snapshot=None, version_id=None, timeout=None, delete_snapshots=None, request_id=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): """If the storage account's soft delete feature is disabled then, when a blob is deleted, it is permanently removed from the storage account. If the storage account's soft delete feature is enabled, then, when a blob @@ -427,6 +456,10 @@ def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id= href="https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/creating-a-snapshot-of-a-blob">Creating a Snapshot of a Blob. :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting @@ -486,6 +519,8 @@ def delete(self, snapshot=None, timeout=None, delete_snapshots=None, request_id= query_parameters = {} if snapshot is not None: query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) @@ -1016,6 +1051,79 @@ def undelete(self, timeout=None, request_id=None, cls=None, **kwargs): return cls(response, None, response_headers) undelete.metadata = {'url': '/{containerName}/{blob}'} + def set_expiry(self, expiry_options, timeout=None, request_id=None, expires_on=None, cls=None, **kwargs): + """Sets the time a blob will expire and be deleted. + + :param expiry_options: Required. Indicates mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', + 'RelativeToNow', 'Absolute' + :type expiry_options: str or + ~azure.storage.blob.models.BlobExpiryOptions + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param expires_on: The time to set the blob to expiry + :type expires_on: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "expiry" + + # Construct URL + url = self.set_expiry.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-expiry-option'] = self._serialize.header("expiry_options", expiry_options, 'str') + if expires_on is not None: + header_parameters['x-ms-expiry-time'] = self._serialize.header("expires_on", expires_on, 'rfc-1123') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 
'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_expiry.metadata = {'url': '/{containerName}/{blob}'} + def set_http_headers(self, timeout=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): """The Set HTTP Headers operation sets system properties on the blob. @@ -1274,6 +1382,7 @@ def set_metadata(self, timeout=None, metadata=None, request_id=None, lease_acces 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), @@ -1911,6 +2020,7 @@ def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), @@ -1918,7 +2028,7 @@ def create_snapshot(self, timeout=None, metadata=None, request_id=None, cpk_info return cls(response, None, response_headers) create_snapshot.metadata = {'url': '/{containerName}/{blob}'} - def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): + def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, rehydrate_priority=None, request_id=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): """The Start Copy From URL operation copies a blob or an internet resource to a new blob. @@ -1956,6 +2066,12 @@ def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. + Service version 2019-12-12 and newer. 
+ :type seal_blob: bool :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: @@ -2028,6 +2144,10 @@ def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') if source_if_modified_since is not None: header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') if source_if_unmodified_since is not None: @@ -2063,6 +2183,7 @@ def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), @@ -2071,7 +2192,7 @@ def start_copy_from_url(self, copy_source, timeout=None, metadata=None, tier=Non return cls(response, None, response_headers) start_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): + def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, request_id=None, source_content_md5=None, blob_tags_string=None, seal_blob=None, source_modified_access_conditions=None, modified_access_conditions=None, lease_access_conditions=None, cls=None, **kwargs): """The Copy From URL operation copies a blob or an internet resource to a new blob. It will not return a response until the copy is complete. @@ -2107,6 +2228,12 @@ def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, req :param source_content_md5: Specify the md5 calculated for the range of bytes that must be read from the copy source. :type source_content_md5: bytearray + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str + :param seal_blob: Overrides the sealed state of the destination blob. + Service version 2019-12-12 and newer. 
+ :type seal_blob: bool :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: @@ -2179,6 +2306,10 @@ def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, req header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') if source_content_md5 is not None: header_parameters['x-ms-source-content-md5'] = self._serialize.header("source_content_md5", source_content_md5, 'bytearray') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') + if seal_blob is not None: + header_parameters['x-ms-seal-blob'] = self._serialize.header("seal_blob", seal_blob, 'bool') header_parameters['x-ms-requires-sync'] = self._serialize.header("self.x_ms_requires_sync", self.x_ms_requires_sync, 'str') if source_if_modified_since is not None: header_parameters['x-ms-source-if-modified-since'] = self._serialize.header("source_if_modified_since", source_if_modified_since, 'rfc-1123') @@ -2215,6 +2346,7 @@ def copy_from_url(self, copy_source, timeout=None, metadata=None, tier=None, req 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), 'x-ms-copy-status': self._deserialize(models.SyncCopyStatusType, response.headers.get('x-ms-copy-status')), @@ -2303,7 +2435,7 @@ def abort_copy_from_url(self, copy_id, timeout=None, request_id=None, lease_acce return cls(response, None, response_headers) abort_copy_from_url.metadata = {'url': '/{containerName}/{blob}'} - def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): + def set_tier(self, tier, snapshot=None, version_id=None, timeout=None, rehydrate_priority=None, request_id=None, lease_access_conditions=None, cls=None, **kwargs): """The Set Tier operation sets the tier on a blob. The operation is allowed on a page blob in a premium storage account and on a block blob in a blob storage account (locally redundant storage only). A premium @@ -2315,6 +2447,16 @@ def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, include: 'P4', 'P6', 'P10', 'P15', 'P20', 'P30', 'P40', 'P50', 'P60', 'P70', 'P80', 'Hot', 'Cool', 'Archive' :type tier: str or ~azure.storage.blob.models.AccessTierRequired + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str :param timeout: The timeout parameter is expressed in seconds. 
For more information, see Setting @@ -2356,6 +2498,10 @@ def set_tier(self, tier, timeout=None, rehydrate_priority=None, request_id=None, # Construct parameters query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') @@ -2440,3 +2586,386 @@ def get_account_info(self, cls=None, **kwargs): } return cls(response, None, response_headers) get_account_info.metadata = {'url': '/{containerName}/{blob}'} + + def quick_query(self, query_request=None, snapshot=None, timeout=None, request_id=None, lease_access_conditions=None, cpk_info=None, modified_access_conditions=None, cls=None, **kwargs): + """The QuickQuery operation enables users to select/project on blob data + by providing simple query expressions. + + :param query_request: the query request + :type query_request: ~azure.storage.blob.models.QueryRequest + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param lease_access_conditions: Additional parameters for the + operation + :type lease_access_conditions: + ~azure.storage.blob.models.LeaseAccessConditions + :param cpk_info: Additional parameters for the operation + :type cpk_info: ~azure.storage.blob.models.CpkInfo + :param modified_access_conditions: Additional parameters for the + operation + :type modified_access_conditions: + ~azure.storage.blob.models.ModifiedAccessConditions + :param callable cls: A custom type or function that will be passed the + direct response + :return: object or the result of cls(response) + :rtype: Generator + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + lease_id = None + if lease_access_conditions is not None: + lease_id = lease_access_conditions.lease_id + encryption_key = None + if cpk_info is not None: + encryption_key = cpk_info.encryption_key + encryption_key_sha256 = None + if cpk_info is not None: + encryption_key_sha256 = cpk_info.encryption_key_sha256 + encryption_algorithm = None + if cpk_info is not None: + encryption_algorithm = cpk_info.encryption_algorithm + if_modified_since = None + if modified_access_conditions is not None: + if_modified_since = modified_access_conditions.if_modified_since + if_unmodified_since = None + if modified_access_conditions is not None: + if_unmodified_since = modified_access_conditions.if_unmodified_since + if_match = None + if modified_access_conditions is not None: + if_match = modified_access_conditions.if_match + if_none_match = None + if modified_access_conditions is not None: + if_none_match = modified_access_conditions.if_none_match + + comp = "query" + + # Construct URL + url = self.quick_query.metadata['url'] + path_format_arguments = { 
+ 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if lease_id is not None: + header_parameters['x-ms-lease-id'] = self._serialize.header("lease_id", lease_id, 'str') + if encryption_key is not None: + header_parameters['x-ms-encryption-key'] = self._serialize.header("encryption_key", encryption_key, 'str') + if encryption_key_sha256 is not None: + header_parameters['x-ms-encryption-key-sha256'] = self._serialize.header("encryption_key_sha256", encryption_key_sha256, 'str') + if encryption_algorithm is not None: + header_parameters['x-ms-encryption-algorithm'] = self._serialize.header("encryption_algorithm", encryption_algorithm, 'EncryptionAlgorithmType') + if if_modified_since is not None: + header_parameters['If-Modified-Since'] = self._serialize.header("if_modified_since", if_modified_since, 'rfc-1123') + if if_unmodified_since is not None: + header_parameters['If-Unmodified-Since'] = self._serialize.header("if_unmodified_since", if_unmodified_since, 'rfc-1123') + if if_match is not None: + header_parameters['If-Match'] = self._serialize.header("if_match", if_match, 'str') + if if_none_match is not None: + header_parameters['If-None-Match'] = self._serialize.header("if_none_match", if_none_match, 'str') + + # Construct body + if query_request is not None: + body_content = self._serialize.body(query_request, 'QueryRequest') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=True, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200, 206]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': 
self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + if response.status_code == 206: + deserialized = response.stream_download(self._client._pipeline) + header_dict = { + 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), + 'x-ms-meta': self._deserialize('{str}', response.headers.get('x-ms-meta')), + 'Content-Length': self._deserialize('long', response.headers.get('Content-Length')), + 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), + 'Content-Range': self._deserialize('str', response.headers.get('Content-Range')), + 'ETag': self._deserialize('str', response.headers.get('ETag')), + 'Content-MD5': self._deserialize('bytearray', response.headers.get('Content-MD5')), + 'Content-Encoding': self._deserialize('str', response.headers.get('Content-Encoding')), + 'Cache-Control': self._deserialize('str', response.headers.get('Cache-Control')), + 'Content-Disposition': self._deserialize('str', response.headers.get('Content-Disposition')), + 'Content-Language': self._deserialize('str', 
response.headers.get('Content-Language')), + 'x-ms-blob-sequence-number': self._deserialize('long', response.headers.get('x-ms-blob-sequence-number')), + 'x-ms-blob-type': self._deserialize(models.BlobType, response.headers.get('x-ms-blob-type')), + 'x-ms-copy-completion-time': self._deserialize('rfc-1123', response.headers.get('x-ms-copy-completion-time')), + 'x-ms-copy-status-description': self._deserialize('str', response.headers.get('x-ms-copy-status-description')), + 'x-ms-copy-id': self._deserialize('str', response.headers.get('x-ms-copy-id')), + 'x-ms-copy-progress': self._deserialize('str', response.headers.get('x-ms-copy-progress')), + 'x-ms-copy-source': self._deserialize('str', response.headers.get('x-ms-copy-source')), + 'x-ms-copy-status': self._deserialize(models.CopyStatusType, response.headers.get('x-ms-copy-status')), + 'x-ms-lease-duration': self._deserialize(models.LeaseDurationType, response.headers.get('x-ms-lease-duration')), + 'x-ms-lease-state': self._deserialize(models.LeaseStateType, response.headers.get('x-ms-lease-state')), + 'x-ms-lease-status': self._deserialize(models.LeaseStatusType, response.headers.get('x-ms-lease-status')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Accept-Ranges': self._deserialize('str', response.headers.get('Accept-Ranges')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-blob-committed-block-count': self._deserialize('int', response.headers.get('x-ms-blob-committed-block-count')), + 'x-ms-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-server-encrypted')), + 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), + 'x-ms-encryption-scope': self._deserialize('str', response.headers.get('x-ms-encryption-scope')), + 'x-ms-blob-content-md5': self._deserialize('bytearray', response.headers.get('x-ms-blob-content-md5')), + 'x-ms-content-crc64': self._deserialize('bytearray', response.headers.get('x-ms-content-crc64')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + quick_query.metadata = {'url': '/{containerName}/{blob}'} + + def get_tags(self, timeout=None, request_id=None, snapshot=None, version_id=None, cls=None, **kwargs): + """The Get Tags operation enables users to get the tags associated with a + blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. 
+ :type version_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: BlobTags or the result of cls(response) + :rtype: ~azure.storage.blob.models.BlobTags + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "tags" + + # Construct URL + url = self.get_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('BlobTags', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + get_tags.metadata = {'url': '/{containerName}/{blob}'} + + def set_tags(self, timeout=None, snapshot=None, version_id=None, transactional_content_md5=None, transactional_content_crc64=None, request_id=None, tags=None, cls=None, **kwargs): + """The Set Tags operation enables users to set tags on a blob. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param snapshot: The snapshot parameter is an opaque DateTime value + that, when present, specifies the blob snapshot to retrieve. For more + information on working with blob snapshots, see Creating + a Snapshot of a Blob. + :type snapshot: str + :param version_id: The version id parameter is an opaque DateTime + value that, when present, specifies the version of the blob to operate + on. It's for service version 2019-10-10 and newer. + :type version_id: str + :param transactional_content_md5: Specify the transactional md5 for + the body, to be validated by the service. 
+ :type transactional_content_md5: bytearray + :param transactional_content_crc64: Specify the transactional crc64 + for the body, to be validated by the service. + :type transactional_content_crc64: bytearray + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param tags: Blob tags + :type tags: ~azure.storage.blob.models.BlobTags + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "tags" + + # Construct URL + url = self.set_tags.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if snapshot is not None: + query_parameters['snapshot'] = self._serialize.query("snapshot", snapshot, 'str') + if version_id is not None: + query_parameters['versionid'] = self._serialize.query("version_id", version_id, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/xml; charset=utf-8' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if transactional_content_md5 is not None: + header_parameters['Content-MD5'] = self._serialize.header("transactional_content_md5", transactional_content_md5, 'bytearray') + if transactional_content_crc64 is not None: + header_parameters['x-ms-content-crc64'] = self._serialize.header("transactional_content_crc64", transactional_content_crc64, 'bytearray') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct body + if tags is not None: + body_content = self._serialize.body(tags, 'BlobTags') + else: + body_content = None + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters, body_content) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [204]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + set_tags.metadata = {'url': '/{containerName}/{blob}'} diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_block_blob_operations.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_block_blob_operations.py index 
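As an illustrative aside (not part of this patch), the new get_tags/set_tags operations above could be exercised directly through the generated client roughly as sketched below. Everything concrete in the sketch is an assumption rather than something stated by the diff: the import path of the generated AzureBlobStorage client, the client.blob attribute, the BlobTags/BlobTag field names (blob_tag_set, key, value), the account/container/blob names, and the SAS token used for authentication.

    # Minimal sketch, assuming the generated client and model names guessed above exist.
    from azure.storage.blob._generated import AzureBlobStorage
    from azure.storage.blob._generated.models import BlobTags, BlobTag

    sas = "?sv=..."  # placeholder SAS query string supplying authentication
    client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/mycontainer/myblob" + sas)

    # PUT <blob-url>?comp=tags with an XML BlobTags body
    client.blob.set_tags(tags=BlobTags(blob_tag_set=[BlobTag(key="project", value="contoso")]))

    # GET <blob-url>?comp=tags, deserialized into a BlobTags instance
    tags = client.blob.get_tags()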
034e32f8211f..022b2a5cce17 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_block_blob_operations.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_block_blob_operations.py @@ -37,7 +37,7 @@ def __init__(self, client, config, serializer, deserializer): self._config = config self.x_ms_blob_type = "BlockBlob" - def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + def upload(self, body, content_length, timeout=None, transactional_content_md5=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): """The Upload Block Blob operation updates the content of an existing block blob. Updating an existing block blob overwrites any existing metadata on the blob. Partial updates are not supported with Put Blob; @@ -75,6 +75,9 @@ def upload(self, body, content_length, timeout=None, transactional_content_md5=N KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -168,6 +171,8 @@ def upload(self, body, content_length, timeout=None, transactional_content_md5=N header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') if blob_content_type is not None: header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') @@ -219,6 +224,7 @@ def upload(self, body, content_length, timeout=None, transactional_content_md5=N 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), @@ -504,7 +510,7 @@ def stage_block_from_url(self, block_id, content_length, source_url, source_rang return cls(response, None, response_headers) stage_block_from_url.metadata = {'url': '/{containerName}/{blob}'} - def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, 
request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None, transactional_content_crc64=None, metadata=None, tier=None, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): """The Commit Block List operation writes a blob by specifying the list of block IDs that make up the blob. In order to be written as part of a blob, a block must have been successfully written to the server in a @@ -546,6 +552,9 @@ def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -643,6 +652,8 @@ def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') if blob_cache_control is not None: header_parameters['x-ms-blob-cache-control'] = self._serialize.header("blob_cache_control", blob_cache_control, 'str') if blob_content_type is not None: @@ -695,6 +706,7 @@ def commit_block_list(self, blocks, timeout=None, transactional_content_md5=None 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_container_operations.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_container_operations.py index ee777cf97a29..5730483519a4 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_container_operations.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_container_operations.py @@ -591,6 +591,79 @@ def set_access_policy(self, container_acl=None, timeout=None, access=None, reque return cls(response, None, response_headers) set_access_policy.metadata = {'url': '/{containerName}'} + def restore(self, timeout=None, request_id=None, deleted_container_name=None, deleted_container_version=None, cls=None, **kwargs): + """Restores a previously-deleted container. 
+ + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param deleted_container_name: Optional. Version 2019-12-12 and + later. Specifies the name of the deleted container to restore. + :type deleted_container_name: str + :param deleted_container_version: Optional. Version 2019-12-12 and + later. Specifies the version of the deleted container to restore. + :type deleted_container_version: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: None or the result of cls(response) + :rtype: None + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + restype = "container" + comp = "undelete" + + # Construct URL + url = self.restore.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + query_parameters['restype'] = self._serialize.query("restype", restype, 'str') + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if deleted_container_name is not None: + header_parameters['x-ms-deleted-container-name'] = self._serialize.header("deleted_container_name", deleted_container_name, 'str') + if deleted_container_version is not None: + header_parameters['x-ms-deleted-container-version'] = self._serialize.header("deleted_container_version", deleted_container_version, 'str') + + # Construct and send request + request = self._client.put(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [201]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + if cls: + response_headers = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + return cls(response, None, response_headers) + restore.metadata = {'url': '/{containerName}'} + def acquire_lease(self, timeout=None, duration=None, proposed_lease_id=None, request_id=None, modified_access_conditions=None, cls=None, **kwargs): + """[Update] establishes and manages a lock on a container for delete + operations.
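A similarly hypothetical sketch for the restore operation just added: the client attribute name (container), the account and container names, and the deleted-container version string are all placeholders, and in practice the version value would come from a container listing that includes soft-deleted containers.

    # Minimal sketch, assuming the generated AzureBlobStorage client exposes container
    # operations as client.container; the literals below are placeholders, not real values.
    from azure.storage.blob._generated import AzureBlobStorage

    client = AzureBlobStorage(url="https://myaccount.blob.core.windows.net/mycontainer?sv=...")
    client.container.restore(
        deleted_container_name="mycontainer",
        deleted_container_version="01D60F8BB59A4652",  # placeholder version of the soft-deleted container
    )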
The lock duration can be 15 to 60 seconds, or can be diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_page_blob_operations.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_page_blob_operations.py index 70e8048a6803..7c9ec9dc904c 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_page_blob_operations.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_page_blob_operations.py @@ -37,7 +37,7 @@ def __init__(self, client, config, serializer, deserializer): self._config = config self.x_ms_blob_type = "PageBlob" - def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): + def create(self, content_length, blob_content_length, timeout=None, tier=None, metadata=None, blob_sequence_number=0, request_id=None, blob_tags_string=None, blob_http_headers=None, lease_access_conditions=None, cpk_info=None, cpk_scope_info=None, modified_access_conditions=None, cls=None, **kwargs): """The Create operation creates a new page blob. :param content_length: The length of the request. @@ -74,6 +74,9 @@ def create(self, content_length, blob_content_length, timeout=None, tier=None, m KB character limit that is recorded in the analytics logs when storage analytics logging is enabled. :type request_id: str + :param blob_tags_string: Optional. Used to set blob tags in various + blob operations. + :type blob_tags_string: str :param blob_http_headers: Additional parameters for the operation :type blob_http_headers: ~azure.storage.blob.models.BlobHTTPHeaders :param lease_access_conditions: Additional parameters for the @@ -167,6 +170,8 @@ def create(self, content_length, blob_content_length, timeout=None, tier=None, m header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') if request_id is not None: header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + if blob_tags_string is not None: + header_parameters['x-ms-tags'] = self._serialize.header("blob_tags_string", blob_tags_string, 'str') header_parameters['x-ms-blob-type'] = self._serialize.header("self.x_ms_blob_type", self.x_ms_blob_type, 'str') if blob_content_type is not None: header_parameters['x-ms-blob-content-type'] = self._serialize.header("blob_content_type", blob_content_type, 'str') @@ -216,6 +221,7 @@ def create(self, content_length, blob_content_length, timeout=None, tier=None, m 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'x-ms-version-id': self._deserialize('str', response.headers.get('x-ms-version-id')), 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'x-ms-request-server-encrypted': self._deserialize('bool', response.headers.get('x-ms-request-server-encrypted')), 'x-ms-encryption-key-sha256': self._deserialize('str', response.headers.get('x-ms-encryption-key-sha256')), diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_service_operations.py 
b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_service_operations.py index b8f4f8e42323..0a49915e1dd5 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_service_operations.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/operations/_service_operations.py @@ -276,10 +276,9 @@ def list_containers_segment(self, prefix=None, marker=None, maxresults=None, inc of 5000. :type maxresults: int :param include: Include this parameter to specify that the container's - metadata be returned as part of the response body. Possible values - include: 'metadata' - :type include: str or - ~azure.storage.blob.models.ListContainersIncludeType + metadata be returned as part of the response body. + :type include: list[str or + ~azure.storage.blob.models.ListContainersIncludeType] :param timeout: The timeout parameter is expressed in seconds. For more information, see Setting @@ -315,7 +314,7 @@ def list_containers_segment(self, prefix=None, marker=None, maxresults=None, inc if maxresults is not None: query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) if include is not None: - query_parameters['include'] = self._serialize.query("include", include, 'ListContainersIncludeType') + query_parameters['include'] = self._serialize.query("include", include, '[ListContainersIncludeType]', div=',') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) query_parameters['comp'] = self._serialize.query("comp", comp, 'str') @@ -564,3 +563,101 @@ def submit_batch(self, body, content_length, multipart_content_type, timeout=Non return deserialized submit_batch.metadata = {'url': '/'} + + def filter_blobs(self, timeout=None, request_id=None, where=None, marker=None, maxresults=None, cls=None, **kwargs): + """The Filter Blobs operation enables callers to list blobs across all + containers whose tags match a given search expression. Filter blobs + searches across all containers within a storage account but can be + scoped within the expression to a single container. + + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param where: Filters the results to return only blobs + whose tags match the specified expression. + :type where: str + :param marker: A string value that identifies the portion of the list + of containers to be returned with the next listing operation. The + operation returns the NextMarker value within the response body if the + listing operation did not return all containers remaining to be listed + with the current page. The NextMarker value can be used as the value + for the marker parameter in a subsequent call to request the next page + of list items. The marker value is opaque to the client. + :type marker: str + :param maxresults: Specifies the maximum number of containers to + return. If the request does not specify maxresults, or specifies a + value greater than 5000, the server will return up to 5000 items. Note + that if the listing operation crosses a partition boundary, then the + service will return a continuation token for retrieving the remainder + of the results.
For this reason, it is possible that the service will + return fewer results than specified by maxresults, or than the default + of 5000. + :type maxresults: int + :param callable cls: A custom type or function that will be passed the + direct response + :return: FilterBlobSegment or the result of cls(response) + :rtype: ~azure.storage.blob.models.FilterBlobSegment + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + comp = "blobs" + + # Construct URL + url = self.filter_blobs.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if where is not None: + query_parameters['where'] = self._serialize.query("where", where, 'str') + if marker is not None: + query_parameters['marker'] = self._serialize.query("marker", marker, 'str') + if maxresults is not None: + query_parameters['maxresults'] = self._serialize.query("maxresults", maxresults, 'int', minimum=1) + query_parameters['comp'] = self._serialize.query("comp", comp, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/xml' + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + + # Construct and send request + request = self._client.get(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('FilterBlobSegment', response) + header_dict = { + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + filter_blobs.metadata = {'url': '/'} diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/version.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/version.py index 629812170000..be045899fa00 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/version.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_generated/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "2019-07-07" +VERSION = "2019-12-12" diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_models.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_models.py index bd92eff4683f..e59fb4bec9eb 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_models.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_models.py @@ -21,7 +21,7 @@ from ._generated.models import AccessPolicy as GenAccessPolicy from ._generated.models import StorageErrorException from ._generated.models import BlobPrefix as GenBlobPrefix -from ._generated.models import BlobItem +from ._generated.models import BlobItemInternal class BlobType(str, Enum): @@ -615,7 +615,7 @@ def _extract_data_cb(self, get_next_return): def _build_item(self, item): if isinstance(item, BlobProperties): return item - if isinstance(item, BlobItem): + if isinstance(item, BlobItemInternal): blob = BlobProperties._from_generated(item) # pylint: disable=protected-access blob.container = self.container return blob diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_serialize.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_serialize.py index 4d66fd915c37..6ca870354c38 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_serialize.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_serialize.py @@ -18,7 +18,9 @@ _SUPPORTED_API_VERSIONS = [ '2019-02-02', - '2019-07-07' + '2019-07-07', + '2019-10-10', + '2019-12-12', ] diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/__init__.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/__init__.py new file mode 100644 index 000000000000..5b396cd202e8 --- /dev/null +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/__init__.py @@ -0,0 +1,5 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/avro_io.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/avro_io.py new file mode 100644 index 000000000000..93a5c134849a --- /dev/null +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/avro_io.py @@ -0,0 +1,464 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. + - Schema ints are implemented as int. + - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. 
+""" + +import json +import logging +import struct +import sys + +from ..avro import schema + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +STRUCT_FLOAT = struct.Struct('= 0), n + input_bytes = self.reader.read(n) + if n > 0 and not input_bytes: + raise StopIteration + assert (len(input_bytes) == n), input_bytes + return input_bytes + + @staticmethod + def read_null(): + """ + null is written as zero bytes + """ + return None + + def read_boolean(self): + """ + a boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + b = ord(self.read(1)) + if b == 1: + return True + if b == 0: + return False + fail_msg = "Invalid value for boolean: %s" % b + raise schema.AvroException(fail_msg) + + def read_int(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + return self.read_long() + + def read_long(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + b = ord(self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + def read_float(self): + """ + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return STRUCT_FLOAT.unpack(self.read(4))[0] + + def read_double(self): + """ + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return STRUCT_DOUBLE.unpack(self.read(8))[0] + + def read_bytes(self): + """ + Bytes are encoded as a long followed by that many bytes of data. + """ + nbytes = self.read_long() + assert (nbytes >= 0), nbytes + return self.read(nbytes) + + def read_utf8(self): + """ + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. + """ + input_bytes = self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + def skip_boolean(self): + self.skip(1) + + def skip_int(self): + self.skip_long() + + def skip_long(self): + b = ord(self.read(1)) + while (b & 0x80) != 0: + b = ord(self.read(1)) + + def skip_float(self): + self.skip(4) + + def skip_double(self): + self.skip(8) + + def skip_bytes(self): + self.skip(self.read_long()) + + def skip_utf8(self): + self.skip_bytes() + + def skip(self, n): + self.reader.seek(self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class DatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema". 
+ """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + def read(self, decoder): + return self.read_data(self.writer_schema, decoder) + + def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = decoder.read_boolean() + elif writer_schema.type == 'string': + result = decoder.read_utf8() + elif writer_schema.type == 'int': + result = decoder.read_int() + elif writer_schema.type == 'long': + result = decoder.read_long() + elif writer_schema.type == 'float': + result = decoder.read_float() + elif writer_schema.type == 'double': + result = decoder.read_double() + elif writer_schema.type == 'bytes': + result = decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = decoder.skip_boolean() + elif writer_schema.type == 'string': + result = decoder.skip_utf8() + elif writer_schema.type == 'int': + result = decoder.skip_int() + elif writer_schema.type == 'long': + result = decoder.skip_long() + elif writer_schema.type == 'float': + result = decoder.skip_float() + elif writer_schema.type == 'double': + result = decoder.skip_double() + elif writer_schema.type == 'bytes': + result = decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = self.skip_enum(decoder) + elif writer_schema.type == 'array': + self.skip_array(writer_schema, decoder) + result = None + elif writer_schema.type == 'map': + self.skip_map(writer_schema, decoder) + result = None + elif writer_schema.type in ['union', 'error_union']: + result = self.skip_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + self.skip_record(writer_schema, decoder) + result = None + else: + fail_msg = "Unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + @staticmethod + def read_fixed(writer_schema, decoder): + """ + Fixed instances are encoded using the number of bytes declared + in the schema. + """ + return decoder.read(writer_schema.size) + + @staticmethod + def skip_fixed(writer_schema, decoder): + return decoder.skip(writer_schema.size) + + @staticmethod + def read_enum(writer_schema, decoder): + """ + An enum is encoded by a int, representing the zero-based position + of the symbol in the schema. 
+ """ + # read data + index_of_symbol = decoder.read_int() + if index_of_symbol >= len(writer_schema.symbols): + fail_msg = "Can't access enum index %d for enum with %d symbols" \ + % (index_of_symbol, len(writer_schema.symbols)) + raise SchemaResolutionException(fail_msg, writer_schema) + read_symbol = writer_schema.symbols[index_of_symbol] + return read_symbol + + @staticmethod + def skip_enum(decoder): + return decoder.skip_int() + + def read_array(self, writer_schema, decoder): + """ + Arrays are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many array items. + A block with count zero indicates the end of the array. + Each item is encoded per the array's item schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = [] + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + read_items.append(self.read_data(writer_schema.items, decoder)) + block_count = decoder.read_long() + return read_items + + def skip_array(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + self.skip_data(writer_schema.items, decoder) + block_count = decoder.read_long() + + def read_map(self, writer_schema, decoder): + """ + Maps are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many key/value pairs. + A block with count zero indicates the end of the map. + Each item is encoded per the map's value schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = {} + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + decoder.read_long() + for _ in range(block_count): + key = decoder.read_utf8() + read_items[key] = self.read_data(writer_schema.values, decoder) + block_count = decoder.read_long() + return read_items + + def skip_map(self, writer_schema, decoder): + block_count = decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = decoder.read_long() + decoder.skip(block_size) + else: + for _ in range(block_count): + decoder.skip_utf8() + self.skip_data(writer_schema.values, decoder) + block_count = decoder.read_long() + + def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. 
+ """ + # schema resolution + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return self.read_data(selected_writer_schema, decoder) + + def skip_union(self, writer_schema, decoder): + index_of_schema = int(decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. + """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/avro_io_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/avro_io_async.py new file mode 100644 index 000000000000..e9812163795f --- /dev/null +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/avro_io_async.py @@ -0,0 +1,448 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Input/output utilities. + +Includes: + - i/o-specific constants + - i/o-specific exceptions + - schema validation + - leaf value encoding and decoding + - datum reader/writer stuff (?) + +Also includes a generic representation for data, which uses the +following mapping: + - Schema records are implemented as dict. + - Schema arrays are implemented as list. + - Schema maps are implemented as dict. + - Schema strings are implemented as unicode. + - Schema bytes are implemented as str. + - Schema ints are implemented as int. 
+ - Schema longs are implemented as long. + - Schema floats are implemented as float. + - Schema doubles are implemented as float. + - Schema booleans are implemented as bool. +""" + +import logging +import sys + +from ..avro import schema + +from .avro_io import STRUCT_FLOAT, STRUCT_DOUBLE, SchemaResolutionException + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Decoder + + +class AsyncBinaryDecoder(object): + """Read leaf values.""" + + def __init__(self, reader): + """ + reader is a Python object on which we can call read, seek, and tell. + """ + self._reader = reader + + @property + def reader(self): + """Reports the reader used by this decoder.""" + return self._reader + + async def read(self, n): + """Read n bytes. + + Args: + n: Number of bytes to read. + Returns: + The next n bytes from the input. + """ + assert (n >= 0), n + input_bytes = await self.reader.read(n) + if n > 0 and not input_bytes: + raise StopAsyncIteration + assert (len(input_bytes) == n), input_bytes + return input_bytes + + @staticmethod + def read_null(): + """ + null is written as zero bytes + """ + return None + + async def read_boolean(self): + """ + a boolean is written as a single byte + whose value is either 0 (false) or 1 (true). + """ + b = ord(await self.read(1)) + if b == 1: + return True + if b == 0: + return False + fail_msg = "Invalid value for boolean: %s" % b + raise schema.AvroException(fail_msg) + + async def read_int(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + return await self.read_long() + + async def read_long(self): + """ + int and long values are written using variable-length, zig-zag coding. + """ + b = ord(await self.read(1)) + n = b & 0x7F + shift = 7 + while (b & 0x80) != 0: + b = ord(await self.read(1)) + n |= (b & 0x7F) << shift + shift += 7 + datum = (n >> 1) ^ -(n & 1) + return datum + + async def read_float(self): + """ + A float is written as 4 bytes. + The float is converted into a 32-bit integer using a method equivalent to + Java's floatToIntBits and then encoded in little-endian format. + """ + return STRUCT_FLOAT.unpack(await self.read(4))[0] + + async def read_double(self): + """ + A double is written as 8 bytes. + The double is converted into a 64-bit integer using a method equivalent to + Java's doubleToLongBits and then encoded in little-endian format. + """ + return STRUCT_DOUBLE.unpack(await self.read(8))[0] + + async def read_bytes(self): + """ + Bytes are encoded as a long followed by that many bytes of data. + """ + nbytes = await self.read_long() + assert (nbytes >= 0), nbytes + return await self.read(nbytes) + + async def read_utf8(self): + """ + A string is encoded as a long followed by + that many bytes of UTF-8 encoded character data. 
+ """ + input_bytes = await self.read_bytes() + if PY3: + try: + return input_bytes.decode('utf-8') + except UnicodeDecodeError as exn: + logger.error('Invalid UTF-8 input bytes: %r', input_bytes) + raise exn + else: + # PY2 + return unicode(input_bytes, "utf-8") # pylint: disable=undefined-variable + + def skip_null(self): + pass + + async def skip_boolean(self): + await self.skip(1) + + async def skip_int(self): + await self.skip_long() + + async def skip_long(self): + b = ord(await self.read(1)) + while (b & 0x80) != 0: + b = ord(await self.read(1)) + + async def skip_float(self): + await self.skip(4) + + async def skip_double(self): + await self.skip(8) + + async def skip_bytes(self): + await self.skip(await self.read_long()) + + async def skip_utf8(self): + await self.skip_bytes() + + async def skip(self, n): + await self.reader.seek(await self.reader.tell() + n) + + +# ------------------------------------------------------------------------------ +# DatumReader + + +class AsyncDatumReader(object): + """Deserialize Avro-encoded data into a Python data structure.""" + + def __init__(self, writer_schema=None): + """ + As defined in the Avro specification, we call the schema encoded + in the data the "writer's schema", and the schema expected by the + reader the "reader's schema". + """ + self._writer_schema = writer_schema + + # read/write properties + def set_writer_schema(self, writer_schema): + self._writer_schema = writer_schema + + writer_schema = property(lambda self: self._writer_schema, + set_writer_schema) + + async def read(self, decoder): + return await self.read_data(self.writer_schema, decoder) + + async def read_data(self, writer_schema, decoder): + # function dispatch for reading data based on type of writer's schema + if writer_schema.type == 'null': + result = decoder.read_null() + elif writer_schema.type == 'boolean': + result = await decoder.read_boolean() + elif writer_schema.type == 'string': + result = await decoder.read_utf8() + elif writer_schema.type == 'int': + result = await decoder.read_int() + elif writer_schema.type == 'long': + result = await decoder.read_long() + elif writer_schema.type == 'float': + result = await decoder.read_float() + elif writer_schema.type == 'double': + result = await decoder.read_double() + elif writer_schema.type == 'bytes': + result = await decoder.read_bytes() + elif writer_schema.type == 'fixed': + result = await self.read_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.read_enum(writer_schema, decoder) + elif writer_schema.type == 'array': + result = await self.read_array(writer_schema, decoder) + elif writer_schema.type == 'map': + result = await self.read_map(writer_schema, decoder) + elif writer_schema.type in ['union', 'error_union']: + result = await self.read_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + result = await self.read_record(writer_schema, decoder) + else: + fail_msg = "Cannot read unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + async def skip_data(self, writer_schema, decoder): + if writer_schema.type == 'null': + result = decoder.skip_null() + elif writer_schema.type == 'boolean': + result = await decoder.skip_boolean() + elif writer_schema.type == 'string': + result = await decoder.skip_utf8() + elif writer_schema.type == 'int': + result = await decoder.skip_int() + elif writer_schema.type == 'long': + result = await decoder.skip_long() + elif writer_schema.type == 
'float': + result = await decoder.skip_float() + elif writer_schema.type == 'double': + result = await decoder.skip_double() + elif writer_schema.type == 'bytes': + result = await decoder.skip_bytes() + elif writer_schema.type == 'fixed': + result = await self.skip_fixed(writer_schema, decoder) + elif writer_schema.type == 'enum': + result = await self.skip_enum(decoder) + elif writer_schema.type == 'array': + await self.skip_array(writer_schema, decoder) + result = None + elif writer_schema.type == 'map': + await self.skip_map(writer_schema, decoder) + result = None + elif writer_schema.type in ['union', 'error_union']: + result = await self.skip_union(writer_schema, decoder) + elif writer_schema.type in ['record', 'error', 'request']: + await self.skip_record(writer_schema, decoder) + result = None + else: + fail_msg = "Unknown schema type: %s" % writer_schema.type + raise schema.AvroException(fail_msg) + return result + + @staticmethod + async def read_fixed(writer_schema, decoder): + """ + Fixed instances are encoded using the number of bytes declared + in the schema. + """ + return await decoder.read(writer_schema.size) + + @staticmethod + async def skip_fixed(writer_schema, decoder): + return await decoder.skip(writer_schema.size) + + @staticmethod + async def read_enum(writer_schema, decoder): + """ + An enum is encoded by a int, representing the zero-based position + of the symbol in the schema. + """ + # read data + index_of_symbol = await decoder.read_int() + if index_of_symbol >= len(writer_schema.symbols): + fail_msg = "Can't access enum index %d for enum with %d symbols" \ + % (index_of_symbol, len(writer_schema.symbols)) + raise SchemaResolutionException(fail_msg, writer_schema) + read_symbol = writer_schema.symbols[index_of_symbol] + return read_symbol + + @staticmethod + async def skip_enum(decoder): + return await decoder.skip_int() + + async def read_array(self, writer_schema, decoder): + """ + Arrays are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many array items. + A block with count zero indicates the end of the array. + Each item is encoded per the array's item schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. + The actual count in this case + is the absolute value of the count written. + """ + read_items = [] + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + await decoder.read_long() + for _ in range(block_count): + read_items.append(await self.read_data(writer_schema.items, decoder)) + block_count = await decoder.read_long() + return read_items + + async def skip_array(self, writer_schema, decoder): + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = await decoder.read_long() + await decoder.skip(block_size) + else: + for _ in range(block_count): + await self.skip_data(writer_schema.items, decoder) + block_count = await decoder.read_long() + + async def read_map(self, writer_schema, decoder): + """ + Maps are encoded as a series of blocks. + + Each block consists of a long count value, + followed by that many key/value pairs. + A block with count zero indicates the end of the map. + Each item is encoded per the map's value schema. + + If a block's count is negative, + then the count is followed immediately by a long block size, + indicating the number of bytes in the block. 
+ The actual count in this case + is the absolute value of the count written. + """ + read_items = {} + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_count = -block_count + await decoder.read_long() + for _ in range(block_count): + key = await decoder.read_utf8() + read_items[key] = await self.read_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + return read_items + + async def skip_map(self, writer_schema, decoder): + block_count = await decoder.read_long() + while block_count != 0: + if block_count < 0: + block_size = await decoder.read_long() + await decoder.skip(block_size) + else: + for _ in range(block_count): + await decoder.skip_utf8() + await self.skip_data(writer_schema.values, decoder) + block_count = await decoder.read_long() + + async def read_union(self, writer_schema, decoder): + """ + A union is encoded by first writing a long value indicating + the zero-based position within the union of the schema of its value. + The value is then encoded per the indicated schema within the union. + """ + # schema resolution + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + selected_writer_schema = writer_schema.schemas[index_of_schema] + + # read data + return await self.read_data(selected_writer_schema, decoder) + + async def skip_union(self, writer_schema, decoder): + index_of_schema = int(await decoder.read_long()) + if index_of_schema >= len(writer_schema.schemas): + fail_msg = "Can't access branch index %d for union with %d branches" \ + % (index_of_schema, len(writer_schema.schemas)) + raise SchemaResolutionException(fail_msg, writer_schema) + return await self.skip_data(writer_schema.schemas[index_of_schema], decoder) + + async def read_record(self, writer_schema, decoder): + """ + A record is encoded by encoding the values of its fields + in the order that they are declared. In other words, a record + is encoded as just the concatenation of the encodings of its fields. + Field values are encoded per their schema. + + Schema Resolution: + * the ordering of fields may be different: fields are matched by name. + * schemas for fields with the same name in both records are resolved + recursively. + * if the writer's record contains a field with a name not present in the + reader's record, the writer's value for that field is ignored. + * if the reader's record schema has a field that contains a default value, + and writer's schema does not have a field with the same name, then the + reader should use the default value from its field. + * if the reader's record schema has a field with no default value, and + writer's schema does not have a field with the same name, then the + field's value is unset. 
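The union, array, and record readers above all sit on the same primitive wire format: every int/long is a zigzag-encoded varint, strings and bytes are length-prefixed, and a union is just a long branch index followed by the value encoded per that branch. A minimal, self-contained sketch of that layout (independent of the classes in this PR; byte values chosen by hand for illustration):

```python
# Standalone sketch of the Avro wire primitives consumed by the readers above:
# zigzag/varint longs, length-prefixed UTF-8 strings, union = branch index + value.
import io

def read_varint(buf):
    """Little-endian base-128 varint: 7 payload bits per byte, MSB is the continuation bit."""
    shift, result = 0, 0
    while True:
        byte = buf.read(1)[0]
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result
        shift += 7

def read_long(buf):
    """Avro int/long: zigzag-decode the varint so small negative values stay small on the wire."""
    n = read_varint(buf)
    return (n >> 1) ^ -(n & 1)

def read_utf8(buf):
    """Avro string: a long byte length followed by that many UTF-8 bytes."""
    return buf.read(read_long(buf)).decode('utf-8')

# A value of the union ["null", "string"] holding "hi":
# branch index 1 (zigzag -> 0x02), then the string encoding (length 2 -> 0x04, b"hi").
payload = io.BytesIO(bytes([0x02, 0x04]) + b"hi")
assert read_long(payload) == 1      # union branch index selects "string"
assert read_utf8(payload) == "hi"   # value decoded per the selected branch
```

Arrays and maps reuse the same primitives: each block opens with a long count (a negative count means a long byte size follows, which is what lets skip_array/skip_map seek past a block without decoding its items), and a zero count terminates the sequence.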
+ """ + # schema resolution + read_record = {} + for field in writer_schema.fields: + field_val = await self.read_data(field.type, decoder) + read_record[field.name] = field_val + return read_record + + async def skip_record(self, writer_schema, decoder): + for field in writer_schema.fields: + await self.skip_data(field.type, decoder) + + +# ------------------------------------------------------------------------------ + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/datafile.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/datafile.py new file mode 100644 index 000000000000..85b7dadb9854 --- /dev/null +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/datafile.py @@ -0,0 +1,224 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import io +import logging +import sys +import zlib + +from ..avro import avro_io +from ..avro import schema + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Version of the container file: +VERSION = 1 + +if PY3: + MAGIC = b'Obj' + bytes([VERSION]) + MAGIC_SIZE = len(MAGIC) +else: + MAGIC = 'Obj' + chr(VERSION) + MAGIC_SIZE = len(MAGIC) + +# Size of the synchronization marker, in number of bytes: +SYNC_SIZE = 16 + +# Schema of the container header: +META_SCHEMA = schema.parse(""" +{ + "type": "record", "name": "org.apache.avro.file.Header", + "fields": [{ + "name": "magic", + "type": {"type": "fixed", "name": "magic", "size": %(magic_size)d} + }, { + "name": "meta", + "type": {"type": "map", "values": "bytes"} + }, { + "name": "sync", + "type": {"type": "fixed", "name": "sync", "size": %(sync_size)d} + }] +} +""" % { + 'magic_size': MAGIC_SIZE, + 'sync_size': SYNC_SIZE, +}) + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null', 'deflate']) + +# Metadata key associated to the schema: +SCHEMA_KEY = "avro.schema" + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class DataFileException(schema.AvroException): + """Problem reading or writing file object containers.""" + +# ------------------------------------------------------------------------------ + + +class DataFileReader(object): + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io.BinaryDecoder(reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + + # read the header: magic, meta, sync + self._read_header() + + # ensure codec is valid + avro_codec_raw = self.get_meta('avro.codec') + if avro_codec_raw is None: + self.codec = "null" + else: + self.codec = avro_codec_raw.decode('utf-8') + if self.codec not in VALID_CODECS: + raise DataFileException('Unknown codec: %s.' 
% self.codec) + + # get ready to read + self._block_count = 0 + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + + def __enter__(self): + return self + + def __exit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __iter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + def _read_header(self): + # seek to the beginning of the file to get magic block + self.reader.seek(0, 0) + + # read header into a dict + header = self.datum_reader.read_data(META_SCHEMA, self.raw_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + def _read_block_header(self): + self._block_count = self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + elif self.codec == 'deflate': + # Compressed data is stored as (length, data), which + # corresponds to how the "bytes" type is encoded. + data = self.raw_decoder.read_bytes() + # -15 is the log of the window size; negative indicates + # "raw" (no zlib headers) decompression. See zlib.h. + uncompressed = zlib.decompress(data, -15) + self._datum_decoder = avro_io.BinaryDecoder(io.BytesIO(uncompressed)) + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. 
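For the 'deflate' branch of _read_block_header above, each data block stores a raw DEFLATE stream with no zlib header or checksum, which is why it is inflated with wbits=-15. A small standalone round trip under that assumption:

```python
# Illustrative only: "deflate"-coded container blocks are raw DEFLATE streams,
# so they round-trip with a raw compressor and zlib.decompress(data, -15),
# matching the call in _read_block_header.
import zlib

original = b"avro block payload"
compressor = zlib.compressobj(9, zlib.DEFLATED, -15)   # wbits=-15 => no zlib header/trailer
raw_deflate = compressor.compress(original) + compressor.flush()

assert zlib.decompress(raw_deflate, -15) == original
```

Note that the async reader added later in this PR restricts VALID_CODECS to 'null' only, so this decompression path exists only in the synchronous reader.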
+ """ + proposed_sync_marker = self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopIteration + if proposed_sync_marker != self.sync_marker: + self.reader.seek(-SYNC_SIZE, 1) + + def __next__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + self._skip_sync() + self._read_block_header() + + datum = self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + return datum + + # PY2 + def next(self): + return self.__next__() + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/datafile_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/datafile_async.py new file mode 100644 index 000000000000..75f0539aa1f7 --- /dev/null +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/datafile_async.py @@ -0,0 +1,173 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +"""Read/Write Avro File Object Containers.""" + +import logging +import sys + +from ..avro import avro_io_async +from ..avro import schema +from .datafile import DataFileException +from .datafile import MAGIC, SYNC_SIZE, META_SCHEMA, SCHEMA_KEY + + +PY3 = sys.version_info[0] == 3 + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Codecs supported by container files: +VALID_CODECS = frozenset(['null']) + + +class AsyncDataFileReader(object): + """Read files written by DataFileWriter.""" + + def __init__(self, reader, datum_reader): + """Initializes a new data file reader. + + Args: + reader: Open file to read from. + datum_reader: Avro datum reader. + """ + self._reader = reader + self._raw_decoder = avro_io_async.AsyncBinaryDecoder(reader) + self._datum_decoder = None # Maybe reset at every block. + self._datum_reader = datum_reader + self.codec = "null" + self._block_count = 0 + self._meta = None + self._sync_marker = None + + async def init(self): + # read the header: magic, meta, sync + await self._read_header() + + # ensure codec is valid + avro_codec_raw = self.get_meta('avro.codec') + if avro_codec_raw is None: + self.codec = "null" + else: + self.codec = avro_codec_raw.decode('utf-8') + if self.codec not in VALID_CODECS: + raise DataFileException('Unknown codec: %s.' 
% self.codec) + + # get ready to read + self._block_count = 0 + self.datum_reader.writer_schema = ( + schema.parse(self.get_meta(SCHEMA_KEY).decode('utf-8'))) + return self + + async def __aenter__(self): + return self + + async def __aexit__(self, data_type, value, traceback): + # Perform a close if there's no exception + if data_type is None: + self.close() + + def __aiter__(self): + return self + + # read-only properties + @property + def reader(self): + return self._reader + + @property + def raw_decoder(self): + return self._raw_decoder + + @property + def datum_decoder(self): + return self._datum_decoder + + @property + def datum_reader(self): + return self._datum_reader + + @property + def sync_marker(self): + return self._sync_marker + + @property + def meta(self): + return self._meta + + # read/write properties + @property + def block_count(self): + return self._block_count + + def get_meta(self, key): + """Reports the value of a given metadata key. + + Args: + key: Metadata key (string) to report the value of. + Returns: + Value associated to the metadata key, as bytes. + """ + return self._meta.get(key) + + async def _read_header(self): + # seek to the beginning of the file to get magic block + await self.reader.seek(0, 0) + + # read header into a dict + header = await self.datum_reader.read_data(META_SCHEMA, self.raw_decoder) + + # check magic number + if header.get('magic') != MAGIC: + fail_msg = "Not an Avro data file: %s doesn't match %s." \ + % (header.get('magic'), MAGIC) + raise schema.AvroException(fail_msg) + + # set metadata + self._meta = header['meta'] + + # set sync marker + self._sync_marker = header['sync'] + + async def _read_block_header(self): + self._block_count = await self.raw_decoder.read_long() + if self.codec == "null": + # Skip a long; we don't need to use the length. + await self.raw_decoder.skip_long() + self._datum_decoder = self._raw_decoder + else: + raise DataFileException("Unknown codec: %r" % self.codec) + + async def _skip_sync(self): + """ + Read the length of the sync marker; if it matches the sync marker, + return True. Otherwise, seek back to where we started and return False. + """ + proposed_sync_marker = await self.reader.read(SYNC_SIZE) + if SYNC_SIZE > 0 and not proposed_sync_marker: + raise StopAsyncIteration + if proposed_sync_marker != self.sync_marker: + await self.reader.seek(-SYNC_SIZE, 1) + + async def __anext__(self): + """Return the next datum in the file.""" + if self.block_count == 0: + await self._skip_sync() + await self._read_block_header() + + datum = await self.datum_reader.read(self.datum_decoder) + self._block_count -= 1 + return datum + + def close(self): + """Close this reader.""" + self.reader.close() + + +if __name__ == '__main__': + raise Exception('Not a standalone module') diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/schema.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/schema.py new file mode 100644 index 000000000000..6832ab4fd2d5 --- /dev/null +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/avro/schema.py @@ -0,0 +1,1221 @@ +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +# pylint: disable=too-many-lines + +"""Representation of Avro schemas. 
+ +A schema may be one of: + - A record, mapping field names to field value data; + - An error, equivalent to a record; + - An enum, containing one of a small set of symbols; + - An array of values, all of the same schema; + - A map containing string/value pairs, each of a declared schema; + - A union of other schemas; + - A fixed sized binary object; + - A unicode string; + - A sequence of bytes; + - A 32-bit signed int; + - A 64-bit signed long; + - A 32-bit floating-point float; + - A 64-bit floating-point double; + - A boolean; + - Null. +""" + +import abc +import json +import logging +import re +import sys +from six import with_metaclass + +PY2 = sys.version_info[0] == 2 + +if PY2: + _str = unicode # pylint: disable=undefined-variable +else: + _str = str + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------------------ +# Constants + +# Log level more verbose than DEBUG=10, INFO=20, etc. +DEBUG_VERBOSE = 5 + +NULL = 'null' +BOOLEAN = 'boolean' +STRING = 'string' +BYTES = 'bytes' +INT = 'int' +LONG = 'long' +FLOAT = 'float' +DOUBLE = 'double' +FIXED = 'fixed' +ENUM = 'enum' +RECORD = 'record' +ERROR = 'error' +ARRAY = 'array' +MAP = 'map' +UNION = 'union' + +# Request and error unions are part of Avro protocols: +REQUEST = 'request' +ERROR_UNION = 'error_union' + +PRIMITIVE_TYPES = frozenset([ + NULL, + BOOLEAN, + STRING, + BYTES, + INT, + LONG, + FLOAT, + DOUBLE, +]) + +NAMED_TYPES = frozenset([ + FIXED, + ENUM, + RECORD, + ERROR, +]) + +VALID_TYPES = frozenset.union( + PRIMITIVE_TYPES, + NAMED_TYPES, + [ + ARRAY, + MAP, + UNION, + REQUEST, + ERROR_UNION, + ], +) + +SCHEMA_RESERVED_PROPS = frozenset([ + 'type', + 'name', + 'namespace', + 'fields', # Record + 'items', # Array + 'size', # Fixed + 'symbols', # Enum + 'values', # Map + 'doc', +]) + +FIELD_RESERVED_PROPS = frozenset([ + 'default', + 'name', + 'doc', + 'order', + 'type', +]) + +VALID_FIELD_SORT_ORDERS = frozenset([ + 'ascending', + 'descending', + 'ignore', +]) + + +# ------------------------------------------------------------------------------ +# Exceptions + + +class Error(Exception): + """Base class for errors in this module.""" + + +class AvroException(Error): + """Generic Avro schema error.""" + + +class SchemaParseException(AvroException): + """Error while parsing a JSON schema descriptor.""" + + +class Schema(with_metaclass(abc.ABCMeta, object)): + """Abstract base class for all Schema classes.""" + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object. + + Args: + data_type: Type of the schema to initialize. + other_props: Optional dictionary of additional properties. + """ + if data_type not in VALID_TYPES: + raise SchemaParseException('%r is not a valid Avro type.' % data_type) + + # All properties of this schema, as a map: property name -> property value + self._props = {} + + self._props['type'] = data_type + self._type = data_type + + if other_props: + self._props.update(other_props) + + @property + def namespace(self): + """Returns: the namespace this schema belongs to, if any, or None.""" + return self._props.get('namespace', None) + + @property + def type(self): + """Returns: the type of this schema.""" + return self._type + + @property + def doc(self): + """Returns: the documentation associated to this schema, if any, or None.""" + return self._props.get('doc', None) + + @property + def props(self): + """Reports all the properties of this schema. + + Includes all properties, reserved and non reserved. 
+ JSON properties of this schema are directly generated from this dict. + + Returns: + A dictionary of properties associated to this schema. + """ + return self._props + + @property + def other_props(self): + """Returns: the dictionary of non-reserved properties.""" + return dict(filter_keys_out(items=self._props, keys=SCHEMA_RESERVED_PROPS)) + + def __str__(self): + """Returns: the JSON representation of this schema.""" + return json.dumps(self.to_json(names=None)) + + @abc.abstractmethod + def to_json(self, names): + """Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. + """ + raise Exception('Cannot run abstract method.') + + +# ------------------------------------------------------------------------------ + + +_RE_NAME = re.compile(r'[A-Za-z_][A-Za-z0-9_]*') + +_RE_FULL_NAME = re.compile( + r'^' + r'[.]?(?:[A-Za-z_][A-Za-z0-9_]*[.])*' # optional namespace + r'([A-Za-z_][A-Za-z0-9_]*)' # name + r'$' +) + + +class Name(object): + """Representation of an Avro name.""" + + def __init__(self, name, namespace=None): + """Parses an Avro name. + + Args: + name: Avro name to parse (relative or absolute). + namespace: Optional explicit namespace if the name is relative. + """ + # Normalize: namespace is always defined as a string, possibly empty. + if namespace is None: + namespace = '' + + if '.' in name: + # name is absolute, namespace is ignored: + self._fullname = name + + match = _RE_FULL_NAME.match(self._fullname) + if match is None: + raise SchemaParseException( + 'Invalid absolute schema name: %r.' % self._fullname) + + self._name = match.group(1) + self._namespace = self._fullname[:-(len(self._name) + 1)] + + else: + # name is relative, combine with explicit namespace: + self._name = name + self._namespace = namespace + self._fullname = (self._name + if (not self._namespace) else + '%s.%s' % (self._namespace, self._name)) + + # Validate the fullname: + if _RE_FULL_NAME.match(self._fullname) is None: + raise SchemaParseException( + 'Invalid schema name %r infered from name %r and namespace %r.' + % (self._fullname, self._name, self._namespace)) + + def __eq__(self, other): + if not isinstance(other, Name): + return NotImplemented + return self.fullname == other.fullname + + @property + def simple_name(self): + """Returns: the simple name part of this name.""" + return self._name + + @property + def namespace(self): + """Returns: this name's namespace, possible the empty string.""" + return self._namespace + + @property + def fullname(self): + """Returns: the full name.""" + return self._fullname + + +# ------------------------------------------------------------------------------ + + +class Names(object): + """Tracks Avro named schemas and default namespace during parsing.""" + + def __init__(self, default_namespace=None, names=None): + """Initializes a new name tracker. + + Args: + default_namespace: Optional default namespace. + names: Optional initial mapping of known named schemas. 
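The Name class above implements the resolution rules the rest of the module relies on: a relative name is combined with the supplied namespace, while a dotted name is taken as absolute and its namespace is derived from the prefix. A hedged usage sketch, assuming the module is importable as azure.storage.blob._shared.avro.schema as added by this PR:

```python
# Hedged sketch of Name resolution: relative name + namespace vs. absolute dotted name.
from azure.storage.blob._shared.avro.schema import Name

relative = Name(name="Header", namespace="org.apache.avro.file")
absolute = Name(name="org.apache.avro.file.Header")

assert relative.fullname == "org.apache.avro.file.Header"
assert absolute.fullname == relative.fullname
assert absolute.simple_name == "Header"
assert absolute.namespace == "org.apache.avro.file"
```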
+ """ + if names is None: + names = {} + self._names = names + self._default_namespace = default_namespace + + @property + def names(self): + """Returns: the mapping of known named schemas.""" + return self._names + + @property + def default_namespace(self): + """Returns: the default namespace, if any, or None.""" + return self._default_namespace + + def new_with_default_namespace(self, namespace): + """Creates a new name tracker from this tracker, but with a new default ns. + + Args: + namespace: New default namespace to use. + Returns: + New name tracker with the specified default namespace. + """ + return Names(names=self._names, default_namespace=namespace) + + def get_name(self, name, namespace=None): + """Resolves the Avro name according to this name tracker's state. + + Args: + name: Name to resolve (absolute or relative). + namespace: Optional explicit namespace. + Returns: + The specified name, resolved according to this tracker. + """ + if namespace is None: + namespace = self._default_namespace + return Name(name=name, namespace=namespace) + + def get_schema(self, name, namespace=None): + """Resolves an Avro schema by name. + + Args: + name: Name (relative or absolute) of the Avro schema to look up. + namespace: Optional explicit namespace. + Returns: + The schema with the specified name, if any, or None. + """ + avro_name = self.get_name(name=name, namespace=namespace) + return self._names.get(avro_name.fullname, None) + + def prune_namespace(self, properties): + """given a properties, return properties with namespace removed if + it matches the own default namespace + """ + if self.default_namespace is None: + # I have no default -- no change + return properties + if 'namespace' not in properties: + # he has no namespace - no change + return properties + if properties['namespace'] != self.default_namespace: + # we're different - leave his stuff alone + return properties + # we each have a namespace and it's redundant. delete his. + prunable = properties.copy() + del prunable['namespace'] + return prunable + + def register(self, schema): + """Registers a new named schema in this tracker. + + Args: + schema: Named Avro schema to register in this tracker. + """ + if schema.fullname in VALID_TYPES: + raise SchemaParseException( + '%s is a reserved type name.' % schema.fullname) + if schema.fullname in self.names: + raise SchemaParseException( + 'Avro name %r already exists.' % schema.fullname) + + logger.log(DEBUG_VERBOSE, 'Register new name for %r', schema.fullname) + self._names[schema.fullname] = schema + + +# ------------------------------------------------------------------------------ + + +class NamedSchema(Schema): + """Abstract base class for named schemas. + + Named schemas are enumerated in NAMED_TYPES. + """ + + def __init__( + self, + data_type, + name, + namespace=None, + names=None, + other_props=None, + ): + """Initializes a new named schema object. + + Args: + data_type: Type of the named schema. + name: Name (absolute or relative) of the schema. + namespace: Optional explicit namespace if name is relative. + names: Tracker to resolve and register Avro names. + other_props: Optional map of additional properties of the schema. 
+ """ + assert (data_type in NAMED_TYPES), ('Invalid named type: %r' % data_type) + self._avro_name = names.get_name(name=name, namespace=namespace) + + super(NamedSchema, self).__init__(data_type, other_props) + + names.register(self) + + self._props['name'] = self.name + if self.namespace: + self._props['namespace'] = self.namespace + + @property + def avro_name(self): + """Returns: the Name object describing this schema's name.""" + return self._avro_name + + @property + def name(self): + return self._avro_name.simple_name + + @property + def namespace(self): + return self._avro_name.namespace + + @property + def fullname(self): + return self._avro_name.fullname + + def name_ref(self, names): + """Reports this schema name relative to the specified name tracker. + + Args: + names: Avro name tracker to relativise this schema name against. + Returns: + This schema name, relativised against the specified name tracker. + """ + if self.namespace == names.default_namespace: + return self.name + return self.fullname + + @abc.abstractmethod + def to_json(self, names): + """Converts the schema object into its AVRO specification representation. + + Schema types that have names (records, enums, and fixed) must + be aware of not re-defining schemas that are already listed + in the parameter names. + """ + raise Exception('Cannot run abstract method.') + +# ------------------------------------------------------------------------------ + + +_NO_DEFAULT = object() + + +class Field(object): + """Representation of the schema of a field in a record.""" + + def __init__( + self, + data_type, + name, + index, + has_default, + default=_NO_DEFAULT, + order=None, + doc=None, + other_props=None + ): + """Initializes a new Field object. + + Args: + data_type: Avro schema of the field. + name: Name of the field. + index: 0-based position of the field. + has_default: + default: + order: + doc: + other_props: + """ + if (not isinstance(name, _str)) or (not name): + raise SchemaParseException('Invalid record field name: %r.' % name) + if (order is not None) and (order not in VALID_FIELD_SORT_ORDERS): + raise SchemaParseException('Invalid record field order: %r.' 
% order) + + # All properties of this record field: + self._props = {} + + self._has_default = has_default + if other_props: + self._props.update(other_props) + + self._index = index + self._type = self._props['type'] = data_type + self._name = self._props['name'] = name + + if has_default: + self._props['default'] = default + + if order is not None: + self._props['order'] = order + + if doc is not None: + self._props['doc'] = doc + + @property + def type(self): + """Returns: the schema of this field.""" + return self._type + + @property + def name(self): + """Returns: this field name.""" + return self._name + + @property + def index(self): + """Returns: the 0-based index of this field in the record.""" + return self._index + + @property + def default(self): + return self._props['default'] + + @property + def has_default(self): + return self._has_default + + @property + def order(self): + return self._props.get('order', None) + + @property + def doc(self): + return self._props.get('doc', None) + + @property + def props(self): + return self._props + + @property + def other_props(self): + return filter_keys_out(items=self._props, keys=FIELD_RESERVED_PROPS) + + def __str__(self): + return json.dumps(self.to_json()) + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['type'] = self.type.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Primitive Types + + +class PrimitiveSchema(Schema): + """Schema of a primitive Avro type. + + Valid primitive types are defined in PRIMITIVE_TYPES. + """ + + def __init__(self, data_type, other_props=None): + """Initializes a new schema object for the specified primitive type. + + Args: + data_type: Type of the schema to construct. Must be primitive. + """ + if data_type not in PRIMITIVE_TYPES: + raise AvroException('%r is not a valid primitive type.' % data_type) + super(PrimitiveSchema, self).__init__(data_type, other_props=other_props) + + @property + def name(self): + """Returns: the simple name of this schema.""" + # The name of a primitive type is the type itself. + return self.type + + @property + def fullname(self): + """Returns: the fully qualified name of this schema.""" + # The full name is the simple name for primitive schema. + return self.name + + def to_json(self, names=None): + if len(self.props) == 1: + return self.fullname + return self.props + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (non-recursive) + + +class FixedSchema(NamedSchema): + def __init__( + self, + name, + namespace, + size, + names=None, + other_props=None, + ): + # Ensure valid ctor args + if not isinstance(size, int): + fail_msg = 'Fixed Schema requires a valid integer for size property.' 
+ raise AvroException(fail_msg) + + super(FixedSchema, self).__init__( + data_type=FIXED, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + self._props['size'] = size + + @property + def size(self): + """Returns: the size of this fixed schema, in bytes.""" + return self._props['size'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ + + +class EnumSchema(NamedSchema): + def __init__( + self, + name, + namespace, + symbols, + names=None, + doc=None, + other_props=None, + ): + """Initializes a new enumeration schema object. + + Args: + name: Simple name of this enumeration. + namespace: Optional namespace. + symbols: Ordered list of symbols defined in this enumeration. + names: + doc: + other_props: + """ + symbols = tuple(symbols) + symbol_set = frozenset(symbols) + if (len(symbol_set) != len(symbols) + or not all(map(lambda symbol: isinstance(symbol, _str), symbols))): + raise AvroException( + 'Invalid symbols for enum schema: %r.' % (symbols,)) + + super(EnumSchema, self).__init__( + data_type=ENUM, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + + self._props['symbols'] = symbols + if doc is not None: + self._props['doc'] = doc + + @property + def symbols(self): + """Returns: the symbols defined in this enum.""" + return self._props['symbols'] + + def to_json(self, names=None): + if names is None: + names = Names() + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + return names.prune_namespace(self.props) + + def __eq__(self, that): + return self.props == that.props + + +# ------------------------------------------------------------------------------ +# Complex Types (recursive) + + +class ArraySchema(Schema): + """Schema of an array.""" + + def __init__(self, items, other_props=None): + """Initializes a new array schema object. + + Args: + items: Avro schema of the array items. + other_props: + """ + super(ArraySchema, self).__init__( + data_type=ARRAY, + other_props=other_props, + ) + self._items_schema = items + self._props['items'] = items + + @property + def items(self): + """Returns: the schema of the items in this array.""" + return self._items_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + item_schema = self.items + to_dump['items'] = item_schema.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class MapSchema(Schema): + """Schema of a map.""" + + def __init__(self, values, other_props=None): + """Initializes a new map schema object. + + Args: + values: Avro schema of the map values. 
+ other_props: + """ + super(MapSchema, self).__init__( + data_type=MAP, + other_props=other_props, + ) + self._values_schema = values + self._props['values'] = values + + @property + def values(self): + """Returns: the schema of the values in this map.""" + return self._values_schema + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = self.props.copy() + to_dump['values'] = self.values.to_json(names) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class UnionSchema(Schema): + """Schema of a union.""" + + def __init__(self, schemas): + """Initializes a new union schema object. + + Args: + schemas: Ordered collection of schema branches in the union. + """ + super(UnionSchema, self).__init__(data_type=UNION) + self._schemas = tuple(schemas) + + # Validate the schema branches: + + # All named schema names are unique: + named_branches = tuple( + filter(lambda schema: schema.type in NAMED_TYPES, self._schemas)) + unique_names = frozenset(map(lambda schema: schema.fullname, named_branches)) + if len(unique_names) != len(named_branches): + raise AvroException( + 'Invalid union branches with duplicate schema name:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + # Types are unique within unnamed schemas, and union is not allowed: + unnamed_branches = tuple( + filter(lambda schema: schema.type not in NAMED_TYPES, self._schemas)) + unique_types = frozenset(map(lambda schema: schema.type, unnamed_branches)) + if UNION in unique_types: + raise AvroException( + 'Invalid union branches contain other unions:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + if len(unique_types) != len(unnamed_branches): + raise AvroException( + 'Invalid union branches with duplicate type:%s' + % ''.join(map(lambda schema: ('\n\t - %s' % schema), self._schemas))) + + @property + def schemas(self): + """Returns: the ordered list of schema branches in the union.""" + return self._schemas + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = [] + for schema in self.schemas: + to_dump.append(schema.to_json(names)) + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ + + +class ErrorUnionSchema(UnionSchema): + """Schema representing the declared errors of a protocol message.""" + + def __init__(self, schemas): + """Initializes an error-union schema. + + Args: + schema: collection of error schema. + """ + # Prepend "string" to handle system errors + schemas = [PrimitiveSchema(data_type=STRING)] + list(schemas) + super(ErrorUnionSchema, self).__init__(schemas=schemas) + + def to_json(self, names=None): + if names is None: + names = Names() + to_dump = [] + for schema in self.schemas: + # Don't print the system error schema + if schema.type == STRING: + continue + to_dump.append(schema.to_json(names)) + return to_dump + + +# ------------------------------------------------------------------------------ + + +class RecordSchema(NamedSchema): + """Schema of a record.""" + + @staticmethod + def _make_field(index, field_desc, names): + """Builds field schemas from a list of field JSON descriptors. + + Args: + index: 0-based index of the field in the record. 
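Record field descriptors like the ones RecordSchema._make_field handles here usually arrive through the module-level parse() helper defined at the end of this file. A hedged end-to-end sketch (the "ChangeEvent" schema below is made up for illustration; the import path is assumed from this PR's layout):

```python
# Hedged sketch: parse a record schema and walk its fields.
from azure.storage.blob._shared.avro.schema import parse

record = parse('''
{
  "type": "record", "name": "ChangeEvent",
  "fields": [
    {"name": "eventType", "type": "string"},
    {"name": "contentLength", "type": ["null", "long"], "default": null}
  ]
}
''')

assert record.type == "record"
assert [f.name for f in record.fields] == ["eventType", "contentLength"]
assert record.fields[1].type.type == "union"   # a nullable long is a two-branch union
assert record.fields[1].has_default
```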
+ field_desc: JSON descriptors of a record field. + Return: + The field schema. + """ + field_schema = schema_from_json_data( + json_data=field_desc['type'], + names=names, + ) + other_props = ( + dict(filter_keys_out(items=field_desc, keys=FIELD_RESERVED_PROPS))) + return Field( + data_type=field_schema, + name=field_desc['name'], + index=index, + has_default=('default' in field_desc), + default=field_desc.get('default', _NO_DEFAULT), + order=field_desc.get('order', None), + doc=field_desc.get('doc', None), + other_props=other_props, + ) + + @staticmethod + def make_field_list(field_desc_list, names): + """Builds field schemas from a list of field JSON descriptors. + + Guarantees field name unicity. + + Args: + field_desc_list: collection of field JSON descriptors. + names: Avro schema tracker. + Yields + Field schemas. + """ + for index, field_desc in enumerate(field_desc_list): + yield RecordSchema._make_field(index, field_desc, names) + + @staticmethod + def _make_field_map(fields): + """Builds the field map. + + Guarantees field name unicity. + + Args: + fields: iterable of field schema. + Returns: + A map of field schemas, indexed by name. + """ + field_map = {} + for field in fields: + if field.name in field_map: + raise SchemaParseException( + 'Duplicate record field name %r.' % field.name) + field_map[field.name] = field + return field_map + + def __init__( + self, + name, + namespace, + fields=None, + make_fields=None, + names=None, + record_type=RECORD, + doc=None, + other_props=None + ): + """Initializes a new record schema object. + + Args: + name: Name of the record (absolute or relative). + namespace: Optional namespace the record belongs to, if name is relative. + fields: collection of fields to add to this record. + Exactly one of fields or make_fields must be specified. + make_fields: function creating the fields that belong to the record. + The function signature is: make_fields(names) -> ordered field list. + Exactly one of fields or make_fields must be specified. + names: + record_type: Type of the record: one of RECORD, ERROR or REQUEST. + Protocol requests are not named. + doc: + other_props: + """ + if record_type == REQUEST: + # Protocol requests are not named: + super(RecordSchema, self).__init__( + data_type=REQUEST, + other_props=other_props, + ) + elif record_type in [RECORD, ERROR]: + # Register this record name in the tracker: + super(RecordSchema, self).__init__( + data_type=record_type, + name=name, + namespace=namespace, + names=names, + other_props=other_props, + ) + else: + raise SchemaParseException( + 'Invalid record type: %r.' 
% record_type) + + if record_type in [RECORD, ERROR]: + avro_name = names.get_name(name=name, namespace=namespace) + nested_names = names.new_with_default_namespace(namespace=avro_name.namespace) + elif record_type == REQUEST: + # Protocol request has no name: no need to change default namespace: + nested_names = names + + if fields is None: + fields = make_fields(names=nested_names) + else: + assert make_fields is None + self._fields = tuple(fields) + + self._field_map = RecordSchema._make_field_map(self._fields) + + self._props['fields'] = fields + if doc is not None: + self._props['doc'] = doc + + @property + def fields(self): + """Returns: the field schemas, as an ordered tuple.""" + return self._fields + + @property + def field_map(self): + """Returns: a read-only map of the field schemas index by field names.""" + return self._field_map + + def to_json(self, names=None): + if names is None: + names = Names() + # Request records don't have names + if self.type == REQUEST: + return [f.to_json(names) for f in self.fields] + + if self.fullname in names.names: + return self.name_ref(names) + names.names[self.fullname] = self + + to_dump = names.prune_namespace(self.props.copy()) + to_dump['fields'] = [f.to_json(names) for f in self.fields] + return to_dump + + def __eq__(self, that): + to_cmp = json.loads(_str(self)) + return to_cmp == json.loads(_str(that)) + + +# ------------------------------------------------------------------------------ +# Module functions + + +def filter_keys_out(items, keys): + """Filters a collection of (key, value) items. + + Exclude any item whose key belongs to keys. + + Args: + items: Dictionary of items to filter the keys out of. + keys: Keys to filter out. + Yields: + Filtered items. + """ + for key, value in items.items(): + if key in keys: + continue + yield key, value + + +# ------------------------------------------------------------------------------ + + +def _schema_from_json_string(json_string, names): + if json_string in PRIMITIVE_TYPES: + return PrimitiveSchema(data_type=json_string) + + # Look for a known named schema: + schema = names.get_schema(name=json_string) + if schema is None: + raise SchemaParseException( + 'Unknown named schema %r, known names: %r.' 
+ % (json_string, sorted(names.names))) + return schema + + +def _schema_from_json_array(json_array, names): + def MakeSchema(desc): + return schema_from_json_data(json_data=desc, names=names) + + return UnionSchema(map(MakeSchema, json_array)) + + +def _schema_from_json_object(json_object, names): + data_type = json_object.get('type') + if data_type is None: + raise SchemaParseException( + 'Avro schema JSON descriptor has no "type" property: %r' % json_object) + + other_props = dict( + filter_keys_out(items=json_object, keys=SCHEMA_RESERVED_PROPS)) + + if data_type in PRIMITIVE_TYPES: + # FIXME should not ignore other properties + result = PrimitiveSchema(data_type, other_props=other_props) + + elif data_type in NAMED_TYPES: + name = json_object.get('name') + namespace = json_object.get('namespace', names.default_namespace) + if data_type == FIXED: + size = json_object.get('size') + result = FixedSchema(name, namespace, size, names, other_props) + elif data_type == ENUM: + symbols = json_object.get('symbols') + doc = json_object.get('doc') + result = EnumSchema(name, namespace, symbols, names, doc, other_props) + + elif data_type in [RECORD, ERROR]: + field_desc_list = json_object.get('fields', ()) + + def MakeFields(names): + return tuple(RecordSchema.make_field_list(field_desc_list, names)) + + result = RecordSchema( + name=name, + namespace=namespace, + make_fields=MakeFields, + names=names, + record_type=data_type, + doc=json_object.get('doc'), + other_props=other_props, + ) + else: + raise Exception('Internal error: unknown type %r.' % data_type) + + elif data_type in VALID_TYPES: + # Unnamed, non-primitive Avro type: + + if data_type == ARRAY: + items_desc = json_object.get('items') + if items_desc is None: + raise SchemaParseException( + 'Invalid array schema descriptor with no "items" : %r.' + % json_object) + result = ArraySchema( + items=schema_from_json_data(items_desc, names), + other_props=other_props, + ) + + elif data_type == MAP: + values_desc = json_object.get('values') + if values_desc is None: + raise SchemaParseException( + 'Invalid map schema descriptor with no "values" : %r.' + % json_object) + result = MapSchema( + values=schema_from_json_data(values_desc, names=names), + other_props=other_props, + ) + + elif data_type == ERROR_UNION: + error_desc_list = json_object.get('declared_errors') + assert error_desc_list is not None + error_schemas = map( + lambda desc: schema_from_json_data(desc, names=names), + error_desc_list) + result = ErrorUnionSchema(schemas=error_schemas) + + else: + raise Exception('Internal error: unknown type %r.' % data_type) + else: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r' % json_object) + return result + + +# Parsers for the JSON data types: +_JSONDataParserTypeMap = { + _str: _schema_from_json_string, + list: _schema_from_json_array, + dict: _schema_from_json_object, +} + + +def schema_from_json_data(json_data, names=None): + """Builds an Avro Schema from its JSON descriptor. + + Args: + json_data: JSON data representing the descriptor of the Avro schema. + names: Optional tracker for Avro named schemas. + Returns: + The Avro schema parsed from the JSON descriptor. + Raises: + SchemaParseException: if the descriptor is invalid. + """ + if names is None: + names = Names() + + # Select the appropriate parser based on the JSON data type: + parser = _JSONDataParserTypeMap.get(type(json_data)) + if parser is None: + raise SchemaParseException( + 'Invalid JSON descriptor for an Avro schema: %r.' 
% json_data) + return parser(json_data, names=names) + + +# ------------------------------------------------------------------------------ + + +def parse(json_string): + """Constructs a Schema from its JSON descriptor in text form. + + Args: + json_string: String representation of the JSON descriptor of the schema. + Returns: + The parsed schema. + Raises: + SchemaParseException: on JSON parsing error, + or if the JSON descriptor is invalid. + """ + try: + json_data = json.loads(json_string) + except Exception as exn: + raise SchemaParseException( + 'Error parsing schema from JSON: %r. ' + 'Error message: %r.' + % (json_string, exn)) + + # Initialize the names object + names = Names() + + # construct the Avro Schema object + return schema_from_json_data(json_data, names) diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client.py index d5aa27fab499..53348f8ecc22 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client.py @@ -246,6 +246,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs) ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, Pipeline(config.transport, policies=policies) def _batch_send( diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client_async.py index 177225191739..d252ad063fb6 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/base_client_async.py @@ -102,6 +102,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs), ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, AsyncPipeline(config.transport, policies=policies) async def _batch_send( diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/constants.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/constants.py index 7fb05b559850..f67ea29cc137 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/constants.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_shared/constants.py @@ -17,9 +17,10 @@ # for python 3.5+, there was a change to the definition of the socket timeout (as far as socket.sendall is concerned) # The socket timeout is now the maximum total duration to send all data. 
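The _create_pipeline changes above (sync and async base clients) add a private hook: any policies passed as _additional_pipeline_policies are appended to the default policy list. A hedged sketch of how a caller could use it; the policy class, account URL, and credential below are placeholders, and the underscore prefix marks the keyword as internal and subject to change:

```python
# Hedged sketch: appending a custom policy via the new _additional_pipeline_policies hook.
from azure.core.pipeline.policies import SansIOHTTPPolicy
from azure.storage.blob import BlobServiceClient

class RequestCounterPolicy(SansIOHTTPPolicy):
    """Counts outgoing requests; purely illustrative."""
    def __init__(self):
        super().__init__()
        self.count = 0

    def on_request(self, request):
        self.count += 1

counter = RequestCounterPolicy()
service = BlobServiceClient(
    account_url="https://myaccount.blob.core.windows.net",  # placeholder account
    credential="<account-key>",                             # placeholder credential
    _additional_pipeline_policies=[counter],                # appended after the default policies
)
```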
if sys.version_info >= (3, 5): - # the timeout to connect is 20 seconds, and the read timeout is 2000 seconds - # the 2000 seconds was calculated with: 100MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) - READ_TIMEOUT = 2000 + # the timeout to connect is 20 seconds, and the read timeout is 80000 seconds + # the 80000 seconds was calculated with: + # 4000MB (max block size)/ 50KB/s (an arbitrarily chosen minimum upload speed) + READ_TIMEOUT = 80000 STORAGE_OAUTH_SCOPE = "https://storage.azure.com/.default" diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/_upload_helpers.py b/sdk/storage/azure-storage-blob/azure/storage/blob/_upload_helpers.py index 85d40698864f..d3862eb72646 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/_upload_helpers.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/_upload_helpers.py @@ -80,8 +80,8 @@ def upload_block_blob( # pylint: disable=too-many-locals blob_headers = kwargs.pop('blob_headers', None) tier = kwargs.pop('standard_blob_tier', None) - # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count < blob_settings.max_single_put_size): + # Do single put if the size is smaller than or equal config.max_single_put_size + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): try: data = data.read(length) if not isinstance(data, six.binary_type): diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_client_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_client_async.py index 5861ca03fd97..1115152417bc 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_client_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_client_async.py @@ -26,7 +26,7 @@ upload_block_blob, upload_append_blob, upload_page_blob) -from .._models import BlobType, BlobBlock +from .._models import BlobType, BlobBlock, BlobProperties from .._lease import get_access_conditions from ._lease_async import BlobLeaseClient from ._download_async import StorageStreamDownloader @@ -35,9 +35,6 @@ from datetime import datetime from azure.core.pipeline.policies import HTTPPolicy from .._models import ( # pylint: disable=unused-import - ContainerProperties, - BlobProperties, - BlobSasPermissions, ContentSettings, PremiumPageBlobTier, StandardBlobTier, @@ -74,7 +71,7 @@ class BlobClient(AsyncStorageAccountHostsMixin, BlobClientBase): # pylint: disa The hostname of the secondary endpoint. :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. 
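With the comparison in upload_block_blob above relaxed from < to <=, a payload whose size exactly equals max_single_put_size is still sent as a single Put Blob call rather than being split into blocks. A hedged sketch (URL and credential are placeholders):

```python
# Hedged sketch: a payload of exactly max_single_put_size bytes now takes the
# single-put path (adjusted_count <= max_single_put_size) instead of chunked upload.
from azure.storage.blob import BlobClient

blob = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/container/blob",  # placeholder URL
    credential="<sas-token>",                                  # placeholder credential
    max_single_put_size=4 * 1024 * 1024,
)
data = b"x" * (4 * 1024 * 1024)          # exactly max_single_put_size bytes
# blob.upload_blob(data, overwrite=True)  # would go out as one HTTP PUT
```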
:keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient @@ -483,13 +480,14 @@ async def get_blob_properties(self, **kwargs): snapshot=self.snapshot, lease_access_conditions=access_conditions, modified_access_conditions=mod_conditions, - cls=deserialize_blob_properties, + cls=kwargs.pop('cls', None) or deserialize_blob_properties, cpk_info=cpk_info, **kwargs) except StorageErrorException as error: process_storage_error(error) blob_props.name = self.blob_name - blob_props.container = self.container_name + if isinstance(blob_props, BlobProperties): + blob_props.container = self.container_name return blob_props # type: ignore @distributed_trace_async diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_service_client_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_service_client_async.py index 77bc5657e7cd..4d45dc336e81 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_service_client_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_blob_service_client_async.py @@ -76,7 +76,7 @@ class BlobServiceClient(AsyncStorageAccountHostsMixin, BlobServiceClientBase): The hostname of the secondary endpoint. :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient @@ -354,7 +354,7 @@ def list_containers( :dedent: 16 :caption: Listing the containers in the blob service. """ - include = 'metadata' if include_metadata else None + include = ['metadata'] if include_metadata else None timeout = kwargs.pop('timeout', None) results_per_page = kwargs.pop('results_per_page', None) command = functools.partial( diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_container_client_async.py b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_container_client_async.py index a0133468b2a0..c881e68bd286 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_container_client_async.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_container_client_async.py @@ -79,7 +79,7 @@ class ContainerClient(AsyncStorageAccountHostsMixin, ContainerClientBase): The hostname of the secondary endpoint. :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks. Defaults to 4*1024*1024, or 4MB. - :keyword int max_single_put_size: If the blob size is less than max_single_put_size, then the blob will be + :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be uploaded with only one http PUT request. If the blob size is larger than max_single_put_size, the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB. 
:keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_models.py b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_models.py index 312802255d70..e519a8ce9783 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_models.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_models.py @@ -16,7 +16,7 @@ from .._generated.models import StorageErrorException from .._generated.models import BlobPrefix as GenBlobPrefix -from .._generated.models import BlobItem +from .._generated.models import BlobItemInternal class ContainerPropertiesPaged(AsyncPageIterator): @@ -154,7 +154,7 @@ async def _extract_data_cb(self, get_next_return): def _build_item(self, item): if isinstance(item, BlobProperties): return item - if isinstance(item, BlobItem): + if isinstance(item, BlobItemInternal): blob = BlobProperties._from_generated(item) # pylint: disable=protected-access blob.container = self.container return blob diff --git a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_upload_helpers.py b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_upload_helpers.py index b936ee6cfc59..7842e03f9d50 100644 --- a/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_upload_helpers.py +++ b/sdk/storage/azure-storage-blob/azure/storage/blob/aio/_upload_helpers.py @@ -56,7 +56,7 @@ async def upload_block_blob( # pylint: disable=too-many-locals tier = kwargs.pop('standard_blob_tier', None) # Do single put if the size is smaller than config.max_single_put_size - if adjusted_count is not None and (adjusted_count < blob_settings.max_single_put_size): + if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size): try: data = data.read(length) if not isinstance(data, six.binary_type): diff --git a/sdk/storage/azure-storage-blob/swagger/README.md b/sdk/storage/azure-storage-blob/swagger/README.md index d0258953233a..3332c55b5690 100644 --- a/sdk/storage/azure-storage-blob/swagger/README.md +++ b/sdk/storage/azure-storage-blob/swagger/README.md @@ -19,7 +19,7 @@ autorest --use=C:/work/autorest.python --version=2.0.4280 ### Settings ``` yaml -input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/storage-dataplane-preview/specification/storage/data-plane/Microsoft.BlobStorage/preview/2019-07-07/blob.json +input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/storage-dataplane-preview/specification/storage/data-plane/Microsoft.BlobStorage/preview/2019-12-12/blob.json output-folder: ../azure/storage/blob/_generated namespace: azure.storage.blob no-namespace-folders: true diff --git a/sdk/storage/azure-storage-blob/tests/avro/__init__.py b/sdk/storage/azure-storage-blob/tests/avro/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/changeFeed.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/changeFeed.avro new file mode 100644 index 000000000000..67679fbbdb19 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/changeFeed.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_0.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_0.avro new file mode 100644 index 000000000000..0f7fe1250eaf Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_0.avro differ diff --git 
a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_1.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_1.avro new file mode 100644 index 000000000000..d30ad64f85b5 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_1.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_10.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_10.avro new file mode 100644 index 000000000000..73f1bbec8e13 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_10.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_11.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_11.avro new file mode 100644 index 000000000000..ec48cd5280af Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_11.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_12.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_12.avro new file mode 100644 index 000000000000..11abb089d3d3 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_12.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_13.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_13.avro new file mode 100644 index 000000000000..4436b93f1f5b Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_13.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_14.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_14.avro new file mode 100644 index 000000000000..29d1247bf8e2 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_14.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_2.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_2.avro new file mode 100644 index 000000000000..737f6a22740b Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_2.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_3.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_3.avro new file mode 100644 index 000000000000..47b4de9b7d64 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_3.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_4.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_4.avro new file mode 100644 index 000000000000..8559ed0ef66e Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_4.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_5.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_5.avro new file mode 100644 index 000000000000..1143e4b7446c Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_5.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_6.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_6.avro new file mode 100644 index 000000000000..99c6f2686933 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_6.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_7.avro 
b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_7.avro new file mode 100644 index 000000000000..5a28fa77dc87 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_7.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_8.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_8.avro new file mode 100644 index 000000000000..4a1bbe3bd41e Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_8.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_9.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_9.avro new file mode 100644 index 000000000000..0c7da2b4c705 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_deflate_9.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_0.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_0.avro new file mode 100644 index 000000000000..91c2b2469e54 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_0.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_1.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_1.avro new file mode 100644 index 000000000000..01371934eba3 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_1.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_10.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_10.avro new file mode 100644 index 000000000000..97aaaa0bb91a Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_10.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_11.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_11.avro new file mode 100644 index 000000000000..7dcc7b48f774 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_11.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_12.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_12.avro new file mode 100644 index 000000000000..ddf42625f4f3 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_12.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_13.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_13.avro new file mode 100644 index 000000000000..277376ae1aa5 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_13.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_14.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_14.avro new file mode 100644 index 000000000000..3c34ec843837 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_14.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_2.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_2.avro new file mode 100644 index 000000000000..bf119d9e16f5 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_2.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_3.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_3.avro new file mode 100644 index 000000000000..d542117f7f6e Binary files /dev/null and 
b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_3.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_4.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_4.avro new file mode 100644 index 000000000000..b514fd821841 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_4.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_5.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_5.avro new file mode 100644 index 000000000000..29e8ca4d5f35 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_5.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_6.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_6.avro new file mode 100644 index 000000000000..df22b0f901a3 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_6.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_7.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_7.avro new file mode 100644 index 000000000000..1168f99d0d19 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_7.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_8.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_8.avro new file mode 100644 index 000000000000..b4136af69b60 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_8.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_9.avro b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_9.avro new file mode 100644 index 000000000000..90abc0622404 Binary files /dev/null and b/sdk/storage/azure-storage-blob/tests/avro/samples/test_null_9.avro differ diff --git a/sdk/storage/azure-storage-blob/tests/avro/test_avro.py b/sdk/storage/azure-storage-blob/tests/avro/test_avro.py new file mode 100644 index 000000000000..4ef89f752eb9 --- /dev/null +++ b/sdk/storage/azure-storage-blob/tests/avro/test_avro.py @@ -0,0 +1,134 @@ + +# coding: utf-8 +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import inspect +import os +import unittest +from io import BytesIO, open +from azure.storage.blob._shared.avro.datafile import DataFileReader +from azure.storage.blob._shared.avro.avro_io import DatumReader + +SCHEMAS_TO_VALIDATE = ( + ('"null"', None), + ('"boolean"', True), + ('"string"', 'adsfasdf09809dsf-=adsf'), + ('"bytes"', b'12345abcd'), + ('"int"', 1234), + ('"long"', 1234), + ('"float"', 1234.0), + ('"double"', 1234.0), + ('{"type": "fixed", "name": "Test", "size": 1}', b'B'), + ('{"type": "enum", "name": "Test", "symbols": ["A", "B"]}', 'B'), + ('{"type": "array", "items": "long"}', [1, 3, 2]), + ('{"type": "map", "values": "long"}', {'a': 1, 'b': 3, 'c': 2}), + ('["string", "null", "long"]', None), + + (""" + { + "type": "record", + "name": "Test", + "fields": [{"name": "f", "type": "long"}] + } + """, + {'f': 5}), + + (""" + { + "type": "record", + "name": "Lisp", + "fields": [{ + "name": "value", + "type": [ + "null", + "string", + { + "type": "record", + "name": "Cons", + "fields": [{"name": "car", "type": "Lisp"}, + {"name": "cdr", "type": "Lisp"}] + } + ] + }] + } + """, + {'value': {'car': {'value': 'head'}, 'cdr': {'value': None}}}), +) + +CODECS_TO_VALIDATE = ('null', 'deflate') + + +class AvroReaderTests(unittest.TestCase): + @classmethod + def setUpClass(cls): + test_file_path = inspect.getfile(cls) + cls._samples_dir_root = os.path.join(os.path.dirname(test_file_path), 'samples') + + def test_reader(self): + correct = 0 + nitems = 10 + for iexample, (writer_schema, datum) in enumerate(SCHEMAS_TO_VALIDATE): + for codec in CODECS_TO_VALIDATE: + file_path = os.path.join(AvroReaderTests._samples_dir_root, 'test_' + codec + '_' + str(iexample) + '.avro') + with open(file_path, 'rb') as reader: + datum_reader = DatumReader() + with DataFileReader(reader, datum_reader) as dfr: + round_trip_data = list(dfr) + if ([datum] * nitems) == round_trip_data: + correct += 1 + self.assertEqual( + correct, + len(CODECS_TO_VALIDATE) * len(SCHEMAS_TO_VALIDATE)) + + def test_reader_with_bytes_io(self): + correct = 0 + nitems = 10 + for iexample, (writer_schema, datum) in enumerate(SCHEMAS_TO_VALIDATE): + for codec in CODECS_TO_VALIDATE: + file_path = os.path.join(AvroReaderTests._samples_dir_root, 'test_' + codec + '_' + str(iexample) + '.avro') + with open(file_path, 'rb') as reader: + data = BytesIO(reader.read()) + datum_reader = DatumReader() + with DataFileReader(data, datum_reader) as dfr: + round_trip_data = list(dfr) + if ([datum] * nitems) == round_trip_data: + correct += 1 + self.assertEqual( + correct, + len(CODECS_TO_VALIDATE) * len(SCHEMAS_TO_VALIDATE)) + + def test_change_feed(self): + file_path = os.path.join(AvroReaderTests._samples_dir_root, 'changeFeed.avro') + with open(file_path, 'rb') as reader: + datum_reader = DatumReader() + with DataFileReader(reader, datum_reader) as dfr: + data = list(dfr) + self.assertEqual(1, len(data)) + expectedRecord = { + 'data': { + 'api': 'PutBlob', + 'blobPropertiesUpdated': None, + 'blobType': 'BlockBlob', + 'clientRequestId': '75b6c460-fcd0-11e9-87e2-85def057dae9', + 'contentLength': 12, + 'contentType': 'text/plain', + 'etag': '0x8D75EF45A3B8617', + 'previousInfo': None, + 'requestId': 'bb219c8e-401e-0028-1fdd-90f393000000', + 'sequencer': '000000000000000000000000000017140000000000000fcc', + 'snapshot': None, + 'storageDiagnostics': {'bid': 'd3053fa1-a006-0042-00dd-902bbb000000', + 'seq': '(5908,134,4044,0)', + 'sid': 
'5aaf98bf-f1d8-dd76-2dd2-9b60c689538d'}, + 'url': ''}, + 'eventTime': '2019-11-01T17:53:07.5106080Z', + 'eventType': 'BlobCreated', + 'id': 'bb219c8e-401e-0028-1fdd-90f393069ae4', + 'schemaVersion': 3, + 'subject': '/blobServices/default/containers/test/blobs/sdf.txt', + 'topic': '/subscriptions/ba45b233-e2ef-4169-8808-49eb0d8eba0d/resourceGroups/XClient/providers/Microsoft.Storage/storageAccounts/seanchangefeedstage'} + self.assertEqual(expectedRecord, data[0]) \ No newline at end of file diff --git a/sdk/storage/azure-storage-blob/tests/avro/test_avro_async.py b/sdk/storage/azure-storage-blob/tests/avro/test_avro_async.py new file mode 100644 index 000000000000..eeba0850bcaf --- /dev/null +++ b/sdk/storage/azure-storage-blob/tests/avro/test_avro_async.py @@ -0,0 +1,95 @@ + +# coding: utf-8 +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- + +import inspect +import os +import pytest +import unittest +from azure.storage.blob._shared.avro.avro_io_async import AsyncDatumReader +from azure.storage.blob._shared.avro.datafile_async import AsyncDataFileReader + +from .test_avro import SCHEMAS_TO_VALIDATE + +CODECS_TO_VALIDATE = ['null'] + + +class AsyncBufferedReaderWrapper: + def __init__(self, reader): + self._reader = reader + + async def seek(self, offset, whence=0): + self._reader.seek(offset, whence) + + async def read(self, size=None): + return self._reader.read(size) + + def close(self): + self._reader.close() + + +class AvroReaderTestsAsync(unittest.TestCase): + @classmethod + def setUpClass(cls): + test_file_path = inspect.getfile(cls) + cls._samples_dir_root = os.path.join(os.path.dirname(test_file_path), 'samples') + + @pytest.mark.asyncio + async def test_reader(self): + correct = 0 + nitems = 10 + for iexample, (writer_schema, datum) in enumerate(SCHEMAS_TO_VALIDATE): + for codec in CODECS_TO_VALIDATE: + file_path = os.path.join(AvroReaderTestsAsync._samples_dir_root, 'test_' + codec + '_' + str(iexample) + '.avro') + with open(file_path, 'rb') as reader: + datum_reader = AsyncDatumReader() + async_reader = AsyncBufferedReaderWrapper(reader) + async with await AsyncDataFileReader(async_reader, datum_reader).init() as dfr: + round_trip_data = [] + async for x in dfr: + round_trip_data.append(x) + if ([datum] * nitems) == round_trip_data: + correct += 1 + self.assertEqual( + correct, + len(CODECS_TO_VALIDATE) * len(SCHEMAS_TO_VALIDATE)) + + @pytest.mark.asyncio + async def test_change_feed(self): + file_path = os.path.join(AvroReaderTestsAsync._samples_dir_root, 'changeFeed.avro') + with open(file_path, 'rb') as reader: + datum_reader = AsyncDatumReader() + async_reader = AsyncBufferedReaderWrapper(reader) + async with await AsyncDataFileReader(async_reader, datum_reader).init() as dfr: + data = [] + async for x in dfr: + data.append(x) + self.assertEqual(1, len(data)) + expectedRecord = { + 'data': { + 'api': 'PutBlob', + 'blobPropertiesUpdated': None, + 'blobType': 'BlockBlob', + 'clientRequestId': '75b6c460-fcd0-11e9-87e2-85def057dae9', + 'contentLength': 12, + 'contentType': 'text/plain', + 'etag': '0x8D75EF45A3B8617', + 'previousInfo': None, + 'requestId': 'bb219c8e-401e-0028-1fdd-90f393000000', + 'sequencer': '000000000000000000000000000017140000000000000fcc', + 'snapshot': None, + 'storageDiagnostics': {'bid': 
'd3053fa1-a006-0042-00dd-902bbb000000', + 'seq': '(5908,134,4044,0)', + 'sid': '5aaf98bf-f1d8-dd76-2dd2-9b60c689538d'}, + 'url': ''}, + 'eventTime': '2019-11-01T17:53:07.5106080Z', + 'eventType': 'BlobCreated', + 'id': 'bb219c8e-401e-0028-1fdd-90f393069ae4', + 'schemaVersion': 3, + 'subject': '/blobServices/default/containers/test/blobs/sdf.txt', + 'topic': '/subscriptions/ba45b233-e2ef-4169-8808-49eb0d8eba0d/resourceGroups/XClient/providers/Microsoft.Storage/storageAccounts/seanchangefeedstage'} + self.assertEqual(expectedRecord, data[0]) \ No newline at end of file diff --git a/sdk/storage/azure-storage-blob/tests/test_blob_api_version.py b/sdk/storage/azure-storage-blob/tests/test_blob_api_version.py index 5ed02a49eb24..ae0dc65e133d 100644 --- a/sdk/storage/azure-storage-blob/tests/test_blob_api_version.py +++ b/sdk/storage/azure-storage-blob/tests/test_blob_api_version.py @@ -17,6 +17,7 @@ BlobClient, BlobSasPermissions ) +from azure.storage.blob._generated.version import VERSION from devtools_testutils import ResourceGroupPreparer, StorageAccountPreparer from _shared.testcase import StorageTestCase, GlobalStorageAccountPreparer @@ -28,7 +29,7 @@ class StorageClientTest(StorageTestCase): def setUp(self): super(StorageClientTest, self).setUp() self.api_version_1 = "2019-02-02" - self.api_version_2 = "2019-07-07" + self.api_version_2 = VERSION self.container_name = self.get_resource_name('utcontainer') # --Helpers----------------------------------------------------------------- diff --git a/sdk/storage/azure-storage-blob/tests/test_blob_api_version_async.py b/sdk/storage/azure-storage-blob/tests/test_blob_api_version_async.py index b3583db30bef..565819cc144b 100644 --- a/sdk/storage/azure-storage-blob/tests/test_blob_api_version_async.py +++ b/sdk/storage/azure-storage-blob/tests/test_blob_api_version_async.py @@ -15,6 +15,7 @@ ContainerClient, BlobClient, ) +from azure.storage.blob._generated.version import VERSION from _shared.testcase import GlobalStorageAccountPreparer from _shared.asynctestcase import AsyncStorageTestCase @@ -26,7 +27,7 @@ class StorageClientTest(AsyncStorageTestCase): def setUp(self): super(StorageClientTest, self).setUp() self.api_version_1 = "2019-02-02" - self.api_version_2 = "2019-07-07" + self.api_version_2 = VERSION self.container_name = self.get_resource_name('utcontainer') # --Helpers----------------------------------------------------------------- diff --git a/sdk/storage/azure-storage-blob/tests/test_largest_block_blob.py b/sdk/storage/azure-storage-blob/tests/test_largest_block_blob.py new file mode 100644 index 000000000000..c409f29c4a7d --- /dev/null +++ b/sdk/storage/azure-storage-blob/tests/test_largest_block_blob.py @@ -0,0 +1,348 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- + +import pytest + +from os import path, remove, sys, urandom +import platform +import uuid + +from azure.core.pipeline.policies import HTTPPolicy +from azure.storage.blob import ( + BlobServiceClient, + BlobBlock +) +from azure.storage.blob._shared.base_client import format_shared_key_credential + +from _shared.testcase import StorageTestCase, GlobalStorageAccountPreparer + +# ------------------------------------------------------------------------------ +TEST_BLOB_PREFIX = 'largestblob' +LARGEST_BLOCK_SIZE = 4000 * 1024 * 1024 +LARGEST_SINGLE_UPLOAD_SIZE = 5000 * 1024 * 1024 + +# ------------------------------------------------------------------------------ +if platform.python_implementation() == 'PyPy': + pytest.skip("Skip tests for Pypy", allow_module_level=True) + +class StorageLargestBlockBlobTest(StorageTestCase): + def _setup(self, storage_account, key, additional_policies=None, min_large_block_upload_threshold=1 * 1024 * 1024, + max_single_put_size=32 * 1024): + self.bsc = BlobServiceClient( + self.account_url(storage_account, "blob"), + credential=key, + max_single_put_size=max_single_put_size, + max_block_size=LARGEST_BLOCK_SIZE, + min_large_block_upload_threshold=min_large_block_upload_threshold, + _additional_pipeline_policies=additional_policies) + self.config = self.bsc._config + self.container_name = self.get_resource_name('utcontainer') + self.container_name = self.container_name + str(uuid.uuid4()) + + if self.is_live: + self.bsc.create_container(self.container_name) + + def _teardown(self, file_name): + if path.isfile(file_name): + try: + remove(file_name) + except: + pass + + # --Helpers----------------------------------------------------------------- + def _get_blob_reference(self): + return self.get_resource_name(TEST_BLOB_PREFIX) + + def _create_blob(self): + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + blob.upload_blob(b'') + return blob + + # --Test cases for block blobs -------------------------------------------- + @pytest.mark.live_test_only + @pytest.mark.skip(reason="This takes really long time") + @GlobalStorageAccountPreparer() + def test_put_block_bytes_largest(self, resource_group, location, storage_account, storage_account_key): + self._setup(storage_account, storage_account_key) + blob = self._create_blob() + + # Act + data = urandom(LARGEST_BLOCK_SIZE) + blockId = str(uuid.uuid4()).encode('utf-8') + resp = blob.stage_block( + blockId, + data, + length=LARGEST_BLOCK_SIZE) + blob.commit_block_list([BlobBlock(blockId)]) + block_list = blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(block_list[0][0].size, LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + def test_put_block_bytes_largest_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob = self._create_blob() + + # Act + data = urandom(LARGEST_BLOCK_SIZE) + 
blockId = str(uuid.uuid4()).encode('utf-8') + resp = blob.stage_block( + blockId, + data, + length=LARGEST_BLOCK_SIZE) + blob.commit_block_list([BlobBlock(blockId)]) + block_list = blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(payload_dropping_policy.put_block_counter, 1) + self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @pytest.mark.skip(reason="This takes really long time") + @GlobalStorageAccountPreparer() + def test_put_block_stream_largest(self, resource_group, location, storage_account, storage_account_key): + self._setup(storage_account, storage_account_key) + blob = self._create_blob() + + # Act + stream = LargeStream(LARGEST_BLOCK_SIZE) + blockId = str(uuid.uuid4()) + requestId = str(uuid.uuid4()) + resp = blob.stage_block( + blockId, + stream, + length=LARGEST_BLOCK_SIZE, + client_request_id=requestId) + blob.commit_block_list([BlobBlock(blockId)]) + block_list = blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(block_list[0][0].size, LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + def test_put_block_stream_largest_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob = self._create_blob() + + # Act + stream = LargeStream(LARGEST_BLOCK_SIZE) + blockId = str(uuid.uuid4()) + requestId = str(uuid.uuid4()) + resp = blob.stage_block( + blockId, + stream, + length=LARGEST_BLOCK_SIZE, + client_request_id=requestId) + blob.commit_block_list([BlobBlock(blockId)]) + block_list = blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(payload_dropping_policy.put_block_counter, 1) + self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @pytest.mark.skip(reason="This takes really long time") + @GlobalStorageAccountPreparer() + def test_create_largest_blob_from_path(self, resource_group, location, storage_account, storage_account_key): + self._setup(storage_account, storage_account_key) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + FILE_PATH = 'largest_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4())) + with open(FILE_PATH, 'wb') as stream: + largeStream = LargeStream(LARGEST_BLOCK_SIZE, 100 * 1024 * 1024) + chunk = largeStream.read() + while chunk: + stream.write(chunk) + chunk = largeStream.read() + + # Act + with open(FILE_PATH, 'rb') 
as stream: + blob.upload_blob(stream, max_concurrency=2) + + # Assert + self._teardown(FILE_PATH) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + def test_create_largest_blob_from_path_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + FILE_PATH = 'largest_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4())) + with open(FILE_PATH, 'wb') as stream: + largeStream = LargeStream(LARGEST_BLOCK_SIZE, 100 * 1024 * 1024) + chunk = largeStream.read() + while chunk: + stream.write(chunk) + chunk = largeStream.read() + + # Act + with open(FILE_PATH, 'rb') as stream: + blob.upload_blob(stream, max_concurrency=2) + + # Assert + self._teardown(FILE_PATH) + self.assertEqual(payload_dropping_policy.put_block_counter, 1) + self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.skip(reason="This takes really long time") + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + def test_create_largest_blob_from_stream_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + + number_of_blocks = 50000 + + stream = LargeStream(LARGEST_BLOCK_SIZE*number_of_blocks) + + # Act + blob.upload_blob(stream, max_concurrency=1) + + # Assert + self.assertEqual(payload_dropping_policy.put_block_counter, number_of_blocks) + self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + def test_create_largest_blob_from_stream_single_upload_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy], + max_single_put_size=LARGEST_SINGLE_UPLOAD_SIZE) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + + stream = LargeStream(LARGEST_SINGLE_UPLOAD_SIZE) + + # Act + blob.upload_blob(stream, length=LARGEST_SINGLE_UPLOAD_SIZE, max_concurrency=1) + + # Assert + self.assertEqual(payload_dropping_policy.put_block_counter, 0) + self.assertEqual(payload_dropping_policy.put_blob_counter, 1) + + +class LargeStream: + def __init__(self, length, initial_buffer_length=1024*1024): + self._base_data = urandom(initial_buffer_length) + self._base_data_length = initial_buffer_length + self._position = 0 + self._remaining = length + + def read(self, size=None): + if self._remaining == 0: + return b"" + + if size is None: + e = self._base_data_length + else: + e = size + e = min(e, self._remaining) + if e > self._base_data_length: + self._base_data = urandom(e) + 
self._base_data_length = e + self._remaining = self._remaining - e + return self._base_data[:e] + + def remaining(self): + return self._remaining + + +class PayloadDroppingPolicy(HTTPPolicy): + def __init__(self): + super().__init__() + self.put_block_counter = 0 + self.put_block_sizes = [] + self.put_blob_counter = 0 + self.put_blob_sizes = [] + + def send(self, request): # type: (PipelineRequest) -> PipelineResponse + if _is_put_block_request(request): + if request.http_request.body: + self.put_block_counter = self.put_block_counter + 1 + self.put_block_sizes.append(_get_body_length(request)) + replacement = "dummy_body" + request.http_request.body = replacement + request.http_request.headers["Content-Length"] = str(len(replacement)) + elif _is_put_blob_request(request): + if request.http_request.body: + self.put_blob_counter = self.put_blob_counter + 1 + self.put_blob_sizes.append(_get_body_length(request)) + replacement = "dummy_body" + request.http_request.body = replacement + request.http_request.headers["Content-Length"] = str(len(replacement)) + return self.next.send(request) + + +def _is_put_block_request(request): + query = request.http_request.query + return query and "comp" in query and query["comp"] == "block" + +def _is_put_blob_request(request): + query = request.http_request.query + return request.http_request.method == "PUT" and not query + +def _get_body_length(request): + body = request.http_request.body + length = 0 + if hasattr(body, "read"): + chunk = body.read(10*1024*1024) + while chunk: + length = length + len(chunk) + chunk = body.read(10 * 1024 * 1024) + else: + length = len(body) + return length + +# ------------------------------------------------------------------------------ diff --git a/sdk/storage/azure-storage-blob/tests/test_largest_block_blob_async.py b/sdk/storage/azure-storage-blob/tests/test_largest_block_blob_async.py new file mode 100644 index 000000000000..6ba7b698c863 --- /dev/null +++ b/sdk/storage/azure-storage-blob/tests/test_largest_block_blob_async.py @@ -0,0 +1,384 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# -------------------------------------------------------------------------- +from io import BytesIO + +import pytest + +from os import path, remove, urandom +import platform +import uuid + +from azure.core.pipeline.policies import SansIOHTTPPolicy +from azure.core.pipeline.transport import AioHttpTransport +from multidict import CIMultiDict, CIMultiDictProxy + +from azure.storage.blob.aio import ( + BlobServiceClient +) +from azure.storage.blob import ( + BlobBlock +) +from azure.storage.blob._shared.base_client import format_shared_key_credential +from azure.storage.blob._shared.constants import CONNECTION_TIMEOUT, READ_TIMEOUT + +from _shared.asynctestcase import AsyncStorageTestCase +from _shared.testcase import GlobalStorageAccountPreparer + +# ------------------------------------------------------------------------------ +TEST_BLOB_PREFIX = 'largestblob' +LARGEST_BLOCK_SIZE = 4000 * 1024 * 1024 +LARGEST_SINGLE_UPLOAD_SIZE = 5000 * 1024 * 1024 + +# ------------------------------------------------------------------------------ +if platform.python_implementation() == 'PyPy': + pytest.skip("Skip tests for Pypy", allow_module_level=True) + + +class AiohttpTestTransport(AioHttpTransport): + """Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461 + """ + async def send(self, request, **config): + response = await super(AiohttpTestTransport, self).send(request, **config) + if not isinstance(response.headers, CIMultiDictProxy): + response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers)) + response.content_type = response.headers.get("content-type") + return response + + +class StorageLargestBlockBlobTestAsync(AsyncStorageTestCase): + async def _setup(self, storage_account, key, additional_policies=None, min_large_block_upload_threshold=1 * 1024 * 1024, + max_single_put_size=32 * 1024): + self.bsc = BlobServiceClient( + self.account_url(storage_account, "blob"), + credential=key, + max_single_put_size=max_single_put_size, + max_block_size=LARGEST_BLOCK_SIZE, + min_large_block_upload_threshold=min_large_block_upload_threshold, + _additional_pipeline_policies=additional_policies, + transport=AiohttpTestTransport( + connection_timeout=CONNECTION_TIMEOUT, + read_timeout=READ_TIMEOUT + )) + self.config = self.bsc._config + self.container_name = self.get_resource_name('utcontainer') + self.container_name = self.container_name + str(uuid.uuid4()) + + if self.is_live: + await self.bsc.create_container(self.container_name) + + def _teardown(self, file_name): + if path.isfile(file_name): + try: + remove(file_name) + except: + pass + + # --Helpers----------------------------------------------------------------- + def _get_blob_reference(self): + return self.get_resource_name(TEST_BLOB_PREFIX) + + async def _create_blob(self): + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + await blob.upload_blob(b'') + return blob + + # --Test cases for block blobs -------------------------------------------- + @pytest.mark.live_test_only + @pytest.mark.skip(reason="This takes really long time") + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_put_block_bytes_largest(self, resource_group, location, storage_account, storage_account_key): + await self._setup(storage_account, storage_account_key) + blob = await self._create_blob() + + # Act + data = urandom(LARGEST_BLOCK_SIZE) + blockId = str(uuid.uuid4()).encode('utf-8') + resp = await blob.stage_block( + 
blockId, + data, + length=LARGEST_BLOCK_SIZE) + await blob.commit_block_list([BlobBlock(blockId)]) + block_list = await blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(block_list[0][0].size, LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_put_block_bytes_largest_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + await self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob = await self._create_blob() + + # Act + data = urandom(LARGEST_BLOCK_SIZE) + blockId = str(uuid.uuid4()).encode('utf-8') + resp = await blob.stage_block( + blockId, + data, + length=LARGEST_BLOCK_SIZE) + await blob.commit_block_list([BlobBlock(blockId)]) + block_list = await blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(payload_dropping_policy.put_block_counter, 1) + self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @pytest.mark.skip(reason="This takes really long time") + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_put_block_stream_largest(self, resource_group, location, storage_account, storage_account_key): + await self._setup(storage_account, storage_account_key) + blob = await self._create_blob() + + # Act + stream = LargeStream(LARGEST_BLOCK_SIZE) + blockId = str(uuid.uuid4()) + requestId = str(uuid.uuid4()) + resp = await blob.stage_block( + blockId, + stream, + length=LARGEST_BLOCK_SIZE, + client_request_id=requestId) + await blob.commit_block_list([BlobBlock(blockId)]) + block_list = await blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(block_list[0][0].size, LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_put_block_stream_largest_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + await self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob = await self._create_blob() + + # Act + stream = LargeStream(LARGEST_BLOCK_SIZE) + blockId = str(uuid.uuid4()) + requestId = str(uuid.uuid4()) + resp = await blob.stage_block( + blockId, + stream, + length=LARGEST_BLOCK_SIZE, + 
client_request_id=requestId) + await blob.commit_block_list([BlobBlock(blockId)]) + block_list = await blob.get_block_list() + + # Assert + self.assertIsNotNone(resp) + assert 'content_md5' in resp + assert 'content_crc64' in resp + assert 'request_id' in resp + self.assertIsNotNone(block_list) + self.assertEqual(len(block_list), 2) + self.assertEqual(len(block_list[1]), 0) + self.assertEqual(len(block_list[0]), 1) + self.assertEqual(payload_dropping_policy.put_block_counter, 1) + self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @pytest.mark.skip(reason="This takes really long time") + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_create_largest_blob_from_path(self, resource_group, location, storage_account, storage_account_key): + await self._setup(storage_account, storage_account_key) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + FILE_PATH = 'largest_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4())) + with open(FILE_PATH, 'wb') as stream: + largeStream = LargeStream(LARGEST_BLOCK_SIZE, 100 * 1024 * 1024) + chunk = largeStream.read() + while chunk: + stream.write(chunk) + chunk = largeStream.read() + + # Act + with open(FILE_PATH, 'rb') as stream: + await blob.upload_blob(stream, max_concurrency=2) + + # Assert + self._teardown(FILE_PATH) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_create_largest_blob_from_path_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + await self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + FILE_PATH = 'largest_blob_from_path.temp.{}.dat'.format(str(uuid.uuid4())) + with open(FILE_PATH, 'wb') as stream: + largeStream = LargeStream(LARGEST_BLOCK_SIZE, 100 * 1024 * 1024) + chunk = largeStream.read() + while chunk: + stream.write(chunk) + chunk = largeStream.read() + + # Act + with open(FILE_PATH, 'rb') as stream: + await blob.upload_blob(stream, max_concurrency=2) + + # Assert + self._teardown(FILE_PATH) + self.assertEqual(payload_dropping_policy.put_block_counter, 1) + self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.skip(reason="This takes really long time") + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_create_largest_blob_from_stream_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + await self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy]) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + + number_of_blocks = 50000 + + stream = LargeStream(LARGEST_BLOCK_SIZE*number_of_blocks) + + # Act + await blob.upload_blob(stream, max_concurrency=1) + + # Assert + self.assertEqual(payload_dropping_policy.put_block_counter, number_of_blocks) + 
self.assertEqual(payload_dropping_policy.put_block_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + @GlobalStorageAccountPreparer() + @AsyncStorageTestCase.await_prepared_test + async def test_create_largest_blob_from_stream_single_upload_without_network(self, resource_group, location, storage_account, storage_account_key): + payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([storage_account.name, "dummy"], storage_account_key) + await self._setup(storage_account, storage_account_key, [payload_dropping_policy, credential_policy], + max_single_put_size=LARGEST_SINGLE_UPLOAD_SIZE) + blob_name = self._get_blob_reference() + blob = self.bsc.get_blob_client(self.container_name, blob_name) + + stream = LargeStream(LARGEST_SINGLE_UPLOAD_SIZE) + + # Act + await blob.upload_blob(stream, length=LARGEST_SINGLE_UPLOAD_SIZE, max_concurrency=1) + + # Assert + self.assertEqual(payload_dropping_policy.put_block_counter, 0) + self.assertEqual(payload_dropping_policy.put_blob_counter, 1) + + +class LargeStream(BytesIO): + def __init__(self, length, initial_buffer_length=1024 * 1024): + super().__init__() + self._base_data = urandom(initial_buffer_length) + self._base_data_length = initial_buffer_length + self._position = 0 + self._remaining = length + self._closed = False + + def read(self, size=None): + if self._remaining == 0: + return b"" + + if size is None: + e = self._base_data_length + else: + e = size + e = min(e, self._remaining) + if e > self._base_data_length: + self._base_data = urandom(e) + self._base_data_length = e + self._remaining = self._remaining - e + return self._base_data[:e] + + def remaining(self): + return self._remaining + + def close(self): + self._closed = True + + +class PayloadDroppingPolicy(SansIOHTTPPolicy): + def __init__(self): + super().__init__() + self.put_block_counter = 0 + self.put_block_sizes = [] + self.put_blob_counter = 0 + self.put_blob_sizes = [] + + def on_request(self, request): # type: (PipelineRequest) -> Union[None, Awaitable[None]] + if _is_put_block_request(request): + if request.http_request.body: + self.put_block_counter = self.put_block_counter + 1 + self.put_block_sizes.append(_get_body_length(request)) + replacement = "dummy_body" + request.http_request.body = replacement + request.http_request.headers["Content-Length"] = str(len(replacement)) + elif _is_put_blob_request(request): + if request.http_request.body: + self.put_blob_counter = self.put_blob_counter + 1 + self.put_blob_sizes.append(_get_body_length(request)) + replacement = "dummy_body" + request.http_request.body = replacement + request.http_request.headers["Content-Length"] = str(len(replacement)) + + +def _is_put_block_request(request): + query = request.http_request.query + return query and "comp" in query and query["comp"] == "block" + +def _is_put_blob_request(request): + query = request.http_request.query + return request.http_request.method == "PUT" and not query + +def _get_body_length(request): + body = request.http_request.body + length = 0 + if hasattr(body, "read"): + chunk = body.read(10*1024*1024) + while chunk: + length = length + len(chunk) + chunk = body.read(10 * 1024 * 1024) + else: + length = len(body) + return length + +# ------------------------------------------------------------------------------ diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_directory_client.py 
b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_directory_client.py index 90c525bedf99..184579cee26b 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_directory_client.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_directory_client.py @@ -230,8 +230,7 @@ def get_directory_properties(self, **kwargs): :dedent: 4 :caption: Getting the properties for a file/directory. """ - blob_properties = self._get_path_properties(**kwargs) - return DirectoryProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access + return self._get_path_properties(cls=DirectoryProperties._deserialize_dir_properties, **kwargs) # pylint: disable=protected-access def rename_directory(self, new_name, # type: str **kwargs): diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_file_client.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_file_client.py index e5973e85ff4a..488dffb0f023 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_file_client.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_data_lake_file_client.py @@ -239,8 +239,22 @@ def get_file_properties(self, **kwargs): :dedent: 4 :caption: Getting the properties for a file. """ - blob_properties = self._get_path_properties(**kwargs) - return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access + return self._get_path_properties(cls=FileProperties._deserialize_file_properties, **kwargs) # pylint: disable=protected-access + + def set_file_expiry(self, expiry_options, expires_on=None, **kwargs): + # type: (**Any) -> None + """Sets the time a file will expire and be deleted. + + :param str expiry_options: + Required. Indicates mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' + :param datetime expires_on: + The time to set the file to expiry + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + return self._blob_client._client.blob.set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access def _upload_options( # pylint:disable=too-many-statements self, data, # type: Union[Iterable[AnyStr], IO[AnyStr]] @@ -337,6 +351,9 @@ def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] The match condition to use upon the etag. :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword int chunk_size: + The maximum chunk size for uploading a file in chunks. + Defaults to 100*1024*1024, or 100MB. :return: response dict (Etag and last modified). 
""" options = self._upload_options( diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_configuration.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_configuration.py index 85feac4b8650..5fc3466c6b32 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_configuration.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_configuration.py @@ -52,7 +52,7 @@ def __init__(self, url, file_system, path1, **kwargs): self.file_system = file_system self.path1 = path1 self.resource = "filesystem" - self.version = "2019-02-02" + self.version = "2019-12-12" def _configure(self, **kwargs): self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_data_lake_storage_client.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_data_lake_storage_client.py index 54b8a0541654..dcc65ad95730 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_data_lake_storage_client.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/_data_lake_storage_client.py @@ -25,11 +25,11 @@ class DataLakeStorageClient(object): :ivar service: Service operations - :vartype service: azure.storage.file.datalake.operations.ServiceOperations + :vartype service: azure.storage.filedatalake.operations.ServiceOperations :ivar file_system: FileSystem operations - :vartype file_system: azure.storage.file.datalake.operations.FileSystemOperations + :vartype file_system: azure.storage.filedatalake.operations.FileSystemOperations :ivar path: Path operations - :vartype path: azure.storage.file.datalake.operations.PathOperations + :vartype path: azure.storage.filedatalake.operations.PathOperations :param url: The URL of the service account, container, or blob that is the targe of the desired operation. 
@@ -47,7 +47,7 @@ def __init__(self, url, file_system, path1, **kwargs): self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2018-11-09' + self.api_version = '2019-12-12' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) @@ -58,6 +58,8 @@ def __init__(self, url, file_system, path1, **kwargs): self.path = PathOperations( self._client, self._config, self._serialize, self._deserialize) + def close(self): + self._client.close() def __enter__(self): self._client.__enter__() return self diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_configuration_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_configuration_async.py index 713b7ae41cb0..5aaa28bacb43 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_configuration_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_configuration_async.py @@ -39,8 +39,6 @@ def __init__(self, url, file_system, path1, **kwargs): if url is None: raise ValueError("Parameter 'url' must not be None.") - # if file_system is None: - # raise ValueError("Parameter 'file_system' must not be None.") super(DataLakeStorageClientConfiguration, self).__init__(**kwargs) self._configure(**kwargs) @@ -53,7 +51,7 @@ def __init__(self, url, file_system, path1, **kwargs): self.file_system = file_system self.path1 = path1 self.resource = "filesystem" - self.version = "2019-02-02" + self.version = "2019-12-12" def _configure(self, **kwargs): self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs) diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_data_lake_storage_client_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_data_lake_storage_client_async.py index 3f41f1bd7566..929fece9b8e9 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_data_lake_storage_client_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/_data_lake_storage_client_async.py @@ -25,11 +25,11 @@ class DataLakeStorageClient(object): :ivar service: Service operations - :vartype service: azure.storage.file.datalake.aio.operations_async.ServiceOperations + :vartype service: azure.storage.filedatalake.aio.operations_async.ServiceOperations :ivar file_system: FileSystem operations - :vartype file_system: azure.storage.file.datalake.aio.operations_async.FileSystemOperations + :vartype file_system: azure.storage.filedatalake.aio.operations_async.FileSystemOperations :ivar path: Path operations - :vartype path: azure.storage.file.datalake.aio.operations_async.PathOperations + :vartype path: azure.storage.filedatalake.aio.operations_async.PathOperations :param url: The URL of the service account, container, or blob that is the targe of the desired operation. 
@@ -48,7 +48,7 @@ def __init__( self._client = AsyncPipelineClient(base_url=base_url, config=self._config, **kwargs) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '2018-11-09' + self.api_version = '2019-12-12' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) @@ -59,14 +59,10 @@ def __init__( self.path = PathOperations( self._client, self._config, self._serialize, self._deserialize) + async def close(self): + await self._client.close() async def __aenter__(self): await self._client.__aenter__() return self async def __aexit__(self, *exc_details): await self._client.__aexit__(*exc_details) - - async def close(self): - """ This method is to close the sockets opened by the client. - It need not be used when using with a context manager. - """ - await self._client.close() diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_file_system_operations_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_file_system_operations_async.py index 032a4a4b308c..f1af068beb07 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_file_system_operations_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_file_system_operations_async.py @@ -67,7 +67,7 @@ async def create(self, properties=None, request_id=None, timeout=None, *, cls=No :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL @@ -144,13 +144,13 @@ async def set_properties(self, properties=None, request_id=None, timeout=None, m :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) if_modified_since = None @@ -226,7 +226,7 @@ async def get_properties(self, request_id=None, timeout=None, *, cls=None, **kwa :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL @@ -298,13 +298,13 @@ async def delete(self, request_id=None, timeout=None, modified_access_conditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) if_modified_since = None @@ -398,9 +398,9 @@ async def list_paths(self, recursive, continuation=None, path=None, max_results= :param callable cls: A custom type or function that will be passed the direct response :return: PathList or the result of 
cls(response) - :rtype: ~azure.storage.file.datalake.models.PathList + :rtype: ~azure.storage.filedatalake.models.PathList :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_path_operations_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_path_operations_async.py index 04ceffa81d7f..0e8a10986fdd 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_path_operations_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_path_operations_async.py @@ -51,7 +51,7 @@ async def create(self, resource=None, continuation=None, mode=None, rename_sourc The value must be "file" or "directory". Possible values include: 'directory', 'file' :type resource: str or - ~azure.storage.file.datalake.models.PathResourceType + ~azure.storage.filedatalake.models.PathResourceType :param continuation: Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token @@ -63,7 +63,7 @@ async def create(self, resource=None, continuation=None, mode=None, rename_sourc parameter determines the behavior of the rename operation. The value must be "legacy" or "posix", and the default value will be "posix". Possible values include: 'legacy', 'posix' - :type mode: str or ~azure.storage.file.datalake.models.PathRenameMode + :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode :param rename_source: An optional file or directory to be renamed. The value must have the following format: "/{filesystem}/{path}". 
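Since list_paths on the generated FileSystem operations is documented to return the PathList model directly, a hedged async sketch of consuming it may help; the import path, placeholder names, and omission of authentication are all assumptions of this sketch:

```python
# Hedged sketch; authentication and error handling omitted, names are placeholders.
import asyncio
from azure.storage.filedatalake._generated.aio import DataLakeStorageClient

async def show_paths():
    async with DataLakeStorageClient(
            "https://myaccount.dfs.core.windows.net", "my-filesystem", None) as client:
        listing = await client.file_system.list_paths(recursive=True)
        for path in listing.paths or []:
            print(path.name, path.is_directory, path.content_length)

asyncio.run(show_paths())
```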
If "x-ms-properties" is specified, the properties will overwrite the @@ -115,25 +115,25 @@ async def create(self, resource=None, continuation=None, mode=None, rename_sourc :type timeout: int :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: - ~azure.storage.file.datalake.models.SourceModifiedAccessConditions + ~azure.storage.filedatalake.models.SourceModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) cache_control = None @@ -264,7 +264,7 @@ async def create(self, resource=None, continuation=None, mode=None, rename_sourc return cls(response, None, response_headers) create.metadata = {'url': '/{filesystem}/{path}'} - async def update(self, action, body, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): + async def update(self, action, body, mode=None, max_records=None, continuation=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): """Append Data | Flush Data | Set Properties | Set Access Control. Uploads data to be appended to a file, flushes (writes) previously @@ -276,19 +276,41 @@ async def update(self, action, body, position=None, retain_uncommitted_data=None :param action: The action must be "append" to upload data to be appended to a file, "flush" to flush previously uploaded data to a - file, "setProperties" to set the properties of a file or directory, or + file, "setProperties" to set the properties of a file or directory, "setAccessControl" to set the owner, group, permissions, or access - control list for a file or directory. Note that Hierarchical - Namespace must be enabled for the account in order to use access - control. Also note that the Access Control List (ACL) includes + control list for a file or directory, or "setAccessControlRecursive" + to set the access control list for a directory recursively. Note that + Hierarchical Namespace must be enabled for the account in order to use + access control. Also note that the Access Control List (ACL) includes permissions for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers are mutually exclusive. 
Possible values include: 'append', 'flush', 'setProperties', - 'setAccessControl' + 'setAccessControl', 'setAccessControlRecursive' :type action: str or - ~azure.storage.file.datalake.models.PathUpdateAction + ~azure.storage.filedatalake.models.PathUpdateAction :param body: Initial data :type body: Generator + :param mode: Optional. Valid and Required for + "SetAccessControlRecursive" operation. Mode "set" sets POSIX access + control rights on files and directories, "modify" modifies one or more + POSIX access control rights that pre-exist on files and directories, + "remove" removes one or more POSIX access control rights that were + present earlier on files and directories. Possible values include: + 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param max_records: Optional. Valid for "SetAccessControlRecursive" + operation. It specifies the maximum number of files or directories on + which the acl change will be applied. If omitted or greater than + 2,000, the request will process up to 2,000 items + :type max_records: int + :param continuation: Optional. The number of paths processed with each + invocation is limited. If the number of paths to be processed exceeds + this limit, a continuation token is returned in the response header + x-ms-continuation. When a continuation token is returned in the + response, it must be percent-encoded and specified in a subsequent + invocation of setAcessControlRecursive operation. + :type continuation: str :param position: This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file. It is required when uploading data to be appended to the file and when @@ -366,21 +388,23 @@ async def update(self, action, body, position=None, retain_uncommitted_data=None :type timeout: int :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response - :return: None or the result of cls(response) - :rtype: None + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) content_md5 = None @@ -427,6 +451,12 @@ async def update(self, action, body, position=None, retain_uncommitted_data=None # Construct parameters query_parameters = {} query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = 
self._serialize.query("continuation", continuation, 'str') if position is not None: query_parameters['position'] = self._serialize.query("position", position, 'long') if retain_uncommitted_data is not None: @@ -438,6 +468,7 @@ async def update(self, action, body, position=None, retain_uncommitted_data=None # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/octet-stream' if content_length is not None: header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) @@ -488,8 +519,11 @@ async def update(self, action, body, position=None, retain_uncommitted_data=None map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - if cls: - response_headers = { + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'ETag': self._deserialize('str', response.headers.get('ETag')), 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), @@ -503,11 +537,16 @@ async def update(self, action, body, position=None, retain_uncommitted_data=None 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } - return cls(response, None, response_headers) + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized update.metadata = {'url': '/{filesystem}/{path}'} async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): @@ -533,7 +572,7 @@ async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_br release a lease. Possible values include: 'acquire', 'break', 'change', 'renew', 'release' :type x_ms_lease_action: str or - ~azure.storage.file.datalake.models.PathLeaseAction + ~azure.storage.filedatalake.models.PathLeaseAction :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. 
The lease duration must be between 15 and 60 seconds or -1 for infinite @@ -561,17 +600,17 @@ async def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_br :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -679,17 +718,17 @@ async def read(self, range=None, x_ms_range_get_content_md5=None, request_id=Non :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: object or the result of cls(response) :rtype: Generator :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -827,7 +866,7 @@ async def get_properties(self, action=None, upn=None, request_id=None, timeout=N otherwise the properties are returned. Possible values include: 'getAccessControl', 'getStatus' :type action: str or - ~azure.storage.file.datalake.models.PathGetPropertiesAction + ~azure.storage.filedatalake.models.PathGetPropertiesAction :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. 
If "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be @@ -849,17 +888,17 @@ async def get_properties(self, action=None, upn=None, request_id=None, timeout=N :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -978,17 +1017,17 @@ async def delete(self, recursive=None, continuation=None, request_id=None, timeo :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -1091,17 +1130,17 @@ async def set_access_control(self, timeout=None, owner=None, group=None, permiss :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -1180,6 +1219,108 @@ async def set_access_control(self, timeout=None, owner=None, group=None, permiss return cls(response, None, response_headers) set_access_control.metadata = {'url': '/{filesystem}/{path}'} + async def set_access_control_recursive(self, mode, timeout=None, continuation=None, max_records=None, acl=None, request_id=None, *, cls=None, **kwargs): + """Set the access control list for a path and subpaths. + + :param mode: Mode "set" sets POSIX access control rights on files and + directories, "modify" modifies one or more POSIX access control rights + that pre-exist on files and directories, "remove" removes one or more + POSIX access control rights that were present earlier on files and + directories. 
Possible values include: 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param continuation: Optional. When deleting a directory, the number + of paths that are deleted with each invocation is limited. If the + number of paths to be deleted exceeds this limit, a continuation token + is returned in this response header. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the delete operation to continue deleting the directory. + :type continuation: str + :param max_records: Optional. It specifies the maximum number of files + or directories on which the acl change will be applied. If omitted or + greater than 2,000, the request will process up to 2,000 items + :type max_records: int + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + action = "setAccessControlRecursive" + + # Construct URL + url = self.set_access_control_recursive.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} + async def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, *, cls=None, **kwargs): """Set the owner, group, permissions, or access control list for a path. @@ -1232,21 +1373,21 @@ async def flush_data(self, timeout=None, position=None, retain_uncommitted_data= :type request_id: str :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) content_md5 = None @@ -1387,17 +1528,17 @@ async def append_data(self, body, position=None, timeout=None, content_length=No :type request_id: str :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) transactional_content_hash = None diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_service_operations_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_service_operations_async.py index 2ac56b413907..b4cb9c5a7ee2 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_service_operations_async.py +++ 
b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/aio/operations_async/_service_operations_async.py @@ -68,9 +68,9 @@ async def list_file_systems(self, prefix=None, continuation=None, max_results=No :param callable cls: A custom type or function that will be passed the direct response :return: FileSystemList or the result of cls(response) - :rtype: ~azure.storage.file.datalake.models.FileSystemList + :rtype: ~azure.storage.filedatalake.models.FileSystemList :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/__init__.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/__init__.py index ee37ff1ea870..4a3401ab7992 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/__init__.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/__init__.py @@ -10,6 +10,7 @@ # -------------------------------------------------------------------------- try: + from ._models_py3 import AclFailedEntry from ._models_py3 import FileSystem from ._models_py3 import FileSystemList from ._models_py3 import LeaseAccessConditions @@ -17,10 +18,12 @@ from ._models_py3 import Path from ._models_py3 import PathHTTPHeaders from ._models_py3 import PathList + from ._models_py3 import SetAccessControlRecursiveResponse from ._models_py3 import SourceModifiedAccessConditions from ._models_py3 import StorageError, StorageErrorException from ._models_py3 import StorageErrorError except (SyntaxError, ImportError): + from ._models import AclFailedEntry from ._models import FileSystem from ._models import FileSystemList from ._models import LeaseAccessConditions @@ -28,6 +31,7 @@ from ._models import Path from ._models import PathHTTPHeaders from ._models import PathList + from ._models import SetAccessControlRecursiveResponse from ._models import SourceModifiedAccessConditions from ._models import StorageError, StorageErrorException from ._models import StorageErrorError @@ -36,10 +40,12 @@ PathLeaseAction, PathRenameMode, PathResourceType, + PathSetAccessControlRecursiveMode, PathUpdateAction, ) __all__ = [ + 'AclFailedEntry', 'FileSystem', 'FileSystemList', 'LeaseAccessConditions', @@ -47,12 +53,14 @@ 'Path', 'PathHTTPHeaders', 'PathList', + 'SetAccessControlRecursiveResponse', 'SourceModifiedAccessConditions', 'StorageError', 'StorageErrorException', 'StorageErrorError', 'PathResourceType', 'PathRenameMode', 'PathUpdateAction', + 'PathSetAccessControlRecursiveMode', 'PathLeaseAction', 'PathGetPropertiesAction', ] diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_data_lake_storage_client_enums.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_data_lake_storage_client_enums.py index 9b844fae3485..35a1a57c853a 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_data_lake_storage_client_enums.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_data_lake_storage_client_enums.py @@ -30,6 +30,14 @@ class PathUpdateAction(str, Enum): flush = "flush" set_properties = "setProperties" set_access_control = "setAccessControl" + set_access_control_recursive = "setAccessControlRecursive" + + +class PathSetAccessControlRecursiveMode(str, 
Enum): + + set = "set" + modify = "modify" + remove = "remove" class PathLeaseAction(str, Enum): diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models.py index d59fa3d3a1d6..2f442792ba45 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models.py @@ -13,6 +13,30 @@ from azure.core.exceptions import HttpResponseError +class AclFailedEntry(Model): + """AclFailedEntry. + + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AclFailedEntry, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type = kwargs.get('type', None) + self.error_message = kwargs.get('error_message', None) + + class FileSystem(Model): """FileSystem. @@ -41,7 +65,7 @@ class FileSystemList(Model): """FileSystemList. :param filesystems: - :type filesystems: list[~azure.storage.file.datalake.models.FileSystem] + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] """ _attribute_map = { @@ -127,7 +151,7 @@ class Path(Model): 'name': {'key': 'name', 'type': 'str'}, 'is_directory': {'key': 'isDirectory', 'type': 'bool'}, 'last_modified': {'key': 'lastModified', 'type': 'str'}, - 'etag': {'key': 'etag', 'type': 'str'}, + 'e_tag': {'key': 'eTag', 'type': 'str'}, 'content_length': {'key': 'contentLength', 'type': 'long'}, 'owner': {'key': 'owner', 'type': 'str'}, 'group': {'key': 'group', 'type': 'str'}, @@ -139,7 +163,7 @@ def __init__(self, **kwargs): self.name = kwargs.get('name', None) self.is_directory = kwargs.get('is_directory', False) self.last_modified = kwargs.get('last_modified', None) - self.etag = kwargs.get('etag', None) + self.e_tag = kwargs.get('e_tag', None) self.content_length = kwargs.get('content_length', None) self.owner = kwargs.get('owner', None) self.group = kwargs.get('group', None) @@ -201,7 +225,7 @@ class PathList(Model): """PathList. :param paths: - :type paths: list[~azure.storage.file.datalake.models.Path] + :type paths: list[~azure.storage.filedatalake.models.Path] """ _attribute_map = { @@ -213,6 +237,35 @@ def __init__(self, **kwargs): self.paths = kwargs.get('paths', None) +class SetAccessControlRecursiveResponse(Model): + """SetAccessControlRecursiveResponse. 
+ + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: + list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__(self, **kwargs): + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = kwargs.get('directories_successful', None) + self.files_successful = kwargs.get('files_successful', None) + self.failure_count = kwargs.get('failure_count', None) + self.failed_entries = kwargs.get('failed_entries', None) + + class SourceModifiedAccessConditions(Model): """Additional parameters for create operation. @@ -249,7 +302,7 @@ class StorageError(Model): """StorageError. :param error: The service error response object. - :type error: ~azure.storage.file.datalake.models.StorageErrorError + :type error: ~azure.storage.filedatalake.models.StorageErrorError """ _attribute_map = { diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models_py3.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models_py3.py index 7fcabd5aecab..3ca8d843fd38 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models_py3.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/models/_models_py3.py @@ -13,6 +13,30 @@ from azure.core.exceptions import HttpResponseError +class AclFailedEntry(Model): + """AclFailedEntry. + + :param name: + :type name: str + :param type: + :type type: str + :param error_message: + :type error_message: str + """ + + _attribute_map = { + 'name': {'key': 'name', 'type': 'str'}, + 'type': {'key': 'type', 'type': 'str'}, + 'error_message': {'key': 'errorMessage', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, type: str=None, error_message: str=None, **kwargs) -> None: + super(AclFailedEntry, self).__init__(**kwargs) + self.name = name + self.type = type + self.error_message = error_message + + class FileSystem(Model): """FileSystem. @@ -41,7 +65,7 @@ class FileSystemList(Model): """FileSystemList. :param filesystems: - :type filesystems: list[~azure.storage.file.datalake.models.FileSystem] + :type filesystems: list[~azure.storage.filedatalake.models.FileSystem] """ _attribute_map = { @@ -201,7 +225,7 @@ class PathList(Model): """PathList. :param paths: - :type paths: list[~azure.storage.file.datalake.models.Path] + :type paths: list[~azure.storage.filedatalake.models.Path] """ _attribute_map = { @@ -213,6 +237,35 @@ def __init__(self, *, paths=None, **kwargs) -> None: self.paths = paths +class SetAccessControlRecursiveResponse(Model): + """SetAccessControlRecursiveResponse. 
+ + :param directories_successful: + :type directories_successful: int + :param files_successful: + :type files_successful: int + :param failure_count: + :type failure_count: int + :param failed_entries: + :type failed_entries: + list[~azure.storage.filedatalake.models.AclFailedEntry] + """ + + _attribute_map = { + 'directories_successful': {'key': 'directoriesSuccessful', 'type': 'int'}, + 'files_successful': {'key': 'filesSuccessful', 'type': 'int'}, + 'failure_count': {'key': 'failureCount', 'type': 'int'}, + 'failed_entries': {'key': 'failedEntries', 'type': '[AclFailedEntry]'}, + } + + def __init__(self, *, directories_successful: int=None, files_successful: int=None, failure_count: int=None, failed_entries=None, **kwargs) -> None: + super(SetAccessControlRecursiveResponse, self).__init__(**kwargs) + self.directories_successful = directories_successful + self.files_successful = files_successful + self.failure_count = failure_count + self.failed_entries = failed_entries + + class SourceModifiedAccessConditions(Model): """Additional parameters for create operation. @@ -249,7 +302,7 @@ class StorageError(Model): """StorageError. :param error: The service error response object. - :type error: ~azure.storage.file.datalake.models.StorageErrorError + :type error: ~azure.storage.filedatalake.models.StorageErrorError """ _attribute_map = { diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_file_system_operations.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_file_system_operations.py index 1e1f59d282ad..b0d17ffff699 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_file_system_operations.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_file_system_operations.py @@ -67,7 +67,7 @@ def create(self, properties=None, request_id=None, timeout=None, cls=None, **kwa :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL @@ -144,13 +144,13 @@ def set_properties(self, properties=None, request_id=None, timeout=None, modifie :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) if_modified_since = None @@ -226,7 +226,7 @@ def get_properties(self, request_id=None, timeout=None, cls=None, **kwargs): :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL @@ -298,13 +298,13 @@ def delete(self, request_id=None, timeout=None, modified_access_conditions=None, :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result 
of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) if_modified_since = None @@ -398,9 +398,9 @@ def list_paths(self, recursive, continuation=None, path=None, max_results=None, :param callable cls: A custom type or function that will be passed the direct response :return: PathList or the result of cls(response) - :rtype: ~azure.storage.file.datalake.models.PathList + :rtype: ~azure.storage.filedatalake.models.PathList :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_path_operations.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_path_operations.py index 7f1380e5be9d..58e7d7e77321 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_path_operations.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_path_operations.py @@ -51,7 +51,7 @@ def create(self, resource=None, continuation=None, mode=None, rename_source=None The value must be "file" or "directory". Possible values include: 'directory', 'file' :type resource: str or - ~azure.storage.file.datalake.models.PathResourceType + ~azure.storage.filedatalake.models.PathResourceType :param continuation: Optional. When deleting a directory, the number of paths that are deleted with each invocation is limited. If the number of paths to be deleted exceeds this limit, a continuation token @@ -63,7 +63,7 @@ def create(self, resource=None, continuation=None, mode=None, rename_source=None parameter determines the behavior of the rename operation. The value must be "legacy" or "posix", and the default value will be "posix". Possible values include: 'legacy', 'posix' - :type mode: str or ~azure.storage.file.datalake.models.PathRenameMode + :type mode: str or ~azure.storage.filedatalake.models.PathRenameMode :param rename_source: An optional file or directory to be renamed. The value must have the following format: "/{filesystem}/{path}". 
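The new response and failure models are plain msrest models, so partial failures can be inspected field by field. A small sketch constructing them directly (all values below are invented) to show the shape a caller receives back:

```python
# Hedged sketch using the models added in this change; the values are invented.
from azure.storage.filedatalake._generated.models import (
    AclFailedEntry,
    SetAccessControlRecursiveResponse,
)

result = SetAccessControlRecursiveResponse(
    directories_successful=10,
    files_successful=120,
    failure_count=1,
    failed_entries=[AclFailedEntry(name="dir/locked.txt", type="FILE",
                                   error_message="Permission denied")],
)

if result.failure_count:
    for entry in result.failed_entries:
        print(f"ACL change failed for {entry.name} ({entry.type}): {entry.error_message}")
```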
If "x-ms-properties" is specified, the properties will overwrite the @@ -115,25 +115,25 @@ def create(self, resource=None, continuation=None, mode=None, rename_source=None :type timeout: int :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param source_modified_access_conditions: Additional parameters for the operation :type source_modified_access_conditions: - ~azure.storage.file.datalake.models.SourceModifiedAccessConditions + ~azure.storage.filedatalake.models.SourceModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) cache_control = None @@ -264,7 +264,7 @@ def create(self, resource=None, continuation=None, mode=None, rename_source=None return cls(response, None, response_headers) create.metadata = {'url': '/{filesystem}/{path}'} - def update(self, action, body, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): + def update(self, action, body, mode=None, max_records=None, continuation=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, properties=None, owner=None, group=None, permissions=None, acl=None, request_id=None, timeout=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): """Append Data | Flush Data | Set Properties | Set Access Control. Uploads data to be appended to a file, flushes (writes) previously @@ -276,19 +276,41 @@ def update(self, action, body, position=None, retain_uncommitted_data=None, clos :param action: The action must be "append" to upload data to be appended to a file, "flush" to flush previously uploaded data to a - file, "setProperties" to set the properties of a file or directory, or + file, "setProperties" to set the properties of a file or directory, "setAccessControl" to set the owner, group, permissions, or access - control list for a file or directory. Note that Hierarchical - Namespace must be enabled for the account in order to use access - control. Also note that the Access Control List (ACL) includes + control list for a file or directory, or "setAccessControlRecursive" + to set the access control list for a directory recursively. Note that + Hierarchical Namespace must be enabled for the account in order to use + access control. Also note that the Access Control List (ACL) includes permissions for the owner, owning group, and others, so the x-ms-permissions and x-ms-acl request headers are mutually exclusive. 
Possible values include: 'append', 'flush', 'setProperties', - 'setAccessControl' + 'setAccessControl', 'setAccessControlRecursive' :type action: str or - ~azure.storage.file.datalake.models.PathUpdateAction + ~azure.storage.filedatalake.models.PathUpdateAction :param body: Initial data :type body: Generator + :param mode: Optional. Valid and Required for + "SetAccessControlRecursive" operation. Mode "set" sets POSIX access + control rights on files and directories, "modify" modifies one or more + POSIX access control rights that pre-exist on files and directories, + "remove" removes one or more POSIX access control rights that were + present earlier on files and directories. Possible values include: + 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param max_records: Optional. Valid for "SetAccessControlRecursive" + operation. It specifies the maximum number of files or directories on + which the acl change will be applied. If omitted or greater than + 2,000, the request will process up to 2,000 items + :type max_records: int + :param continuation: Optional. The number of paths processed with each + invocation is limited. If the number of paths to be processed exceeds + this limit, a continuation token is returned in the response header + x-ms-continuation. When a continuation token is returned in the + response, it must be percent-encoded and specified in a subsequent + invocation of setAcessControlRecursive operation. + :type continuation: str :param position: This parameter allows the caller to upload data in parallel and control the order in which it is appended to the file. It is required when uploading data to be appended to the file and when @@ -366,21 +388,23 @@ def update(self, action, body, position=None, retain_uncommitted_data=None, clos :type timeout: int :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response - :return: None or the result of cls(response) - :rtype: None + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) content_md5 = None @@ -427,6 +451,12 @@ def update(self, action, body, position=None, retain_uncommitted_data=None, clos # Construct parameters query_parameters = {} query_parameters['action'] = self._serialize.query("action", action, 'PathUpdateAction') + if mode is not None: + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + if continuation is not None: + query_parameters['continuation'] = 
self._serialize.query("continuation", continuation, 'str') if position is not None: query_parameters['position'] = self._serialize.query("position", position, 'long') if retain_uncommitted_data is not None: @@ -438,6 +468,7 @@ def update(self, action, body, position=None, retain_uncommitted_data=None, clos # Construct headers header_parameters = {} + header_parameters['Accept'] = 'application/json' header_parameters['Content-Type'] = 'application/octet-stream' if content_length is not None: header_parameters['Content-Length'] = self._serialize.header("content_length", content_length, 'long', minimum=0) @@ -488,8 +519,11 @@ def update(self, action, body, position=None, retain_uncommitted_data=None, clos map_error(status_code=response.status_code, response=response, error_map=error_map) raise models.StorageErrorException(response, self._deserialize) - if cls: - response_headers = { + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), 'ETag': self._deserialize('str', response.headers.get('ETag')), 'Last-Modified': self._deserialize('rfc-1123', response.headers.get('Last-Modified')), @@ -503,11 +537,16 @@ def update(self, action, body, position=None, retain_uncommitted_data=None, clos 'Content-Type': self._deserialize('str', response.headers.get('Content-Type')), 'Content-MD5': self._deserialize('str', response.headers.get('Content-MD5')), 'x-ms-properties': self._deserialize('str', response.headers.get('x-ms-properties')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), 'x-ms-error-code': self._deserialize('str', response.headers.get('x-ms-error-code')), } - return cls(response, None, response_headers) + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized update.metadata = {'url': '/{filesystem}/{path}'} def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_period=None, proposed_lease_id=None, request_id=None, timeout=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): @@ -533,7 +572,7 @@ def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_pe release a lease. Possible values include: 'acquire', 'break', 'change', 'renew', 'release' :type x_ms_lease_action: str or - ~azure.storage.file.datalake.models.PathLeaseAction + ~azure.storage.filedatalake.models.PathLeaseAction :param x_ms_lease_duration: The lease duration is required to acquire a lease, and specifies the duration of the lease in seconds. 
The lease duration must be between 15 and 60 seconds or -1 for infinite @@ -561,17 +600,17 @@ def lease(self, x_ms_lease_action, x_ms_lease_duration=None, x_ms_lease_break_pe :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -679,17 +718,17 @@ def read(self, range=None, x_ms_range_get_content_md5=None, request_id=None, tim :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: object or the result of cls(response) :rtype: Generator :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -826,7 +865,7 @@ def get_properties(self, action=None, upn=None, request_id=None, timeout=None, l otherwise the properties are returned. Possible values include: 'getAccessControl', 'getStatus' :type action: str or - ~azure.storage.file.datalake.models.PathGetPropertiesAction + ~azure.storage.filedatalake.models.PathGetPropertiesAction :param upn: Optional. Valid only when Hierarchical Namespace is enabled for the account. 
If "true", the user identity values returned in the x-ms-owner, x-ms-group, and x-ms-acl response headers will be @@ -848,17 +887,17 @@ def get_properties(self, action=None, upn=None, request_id=None, timeout=None, l :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -977,17 +1016,17 @@ def delete(self, recursive=None, continuation=None, request_id=None, timeout=Non :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -1090,17 +1129,17 @@ def set_access_control(self, timeout=None, owner=None, group=None, permissions=N :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) lease_id = None @@ -1179,6 +1218,108 @@ def set_access_control(self, timeout=None, owner=None, group=None, permissions=N return cls(response, None, response_headers) set_access_control.metadata = {'url': '/{filesystem}/{path}'} + def set_access_control_recursive(self, mode, timeout=None, continuation=None, max_records=None, acl=None, request_id=None, cls=None, **kwargs): + """Set the access control list for a path and subpaths. + + :param mode: Mode "set" sets POSIX access control rights on files and + directories, "modify" modifies one or more POSIX access control rights + that pre-exist on files and directories, "remove" removes one or more + POSIX access control rights that were present earlier on files and + directories. 
Possible values include: 'set', 'modify', 'remove' + :type mode: str or + ~azure.storage.filedatalake.models.PathSetAccessControlRecursiveMode + :param timeout: The timeout parameter is expressed in seconds. For + more information, see Setting + Timeouts for Blob Service Operations. + :type timeout: int + :param continuation: Optional. When deleting a directory, the number + of paths that are deleted with each invocation is limited. If the + number of paths to be deleted exceeds this limit, a continuation token + is returned in this response header. When a continuation token is + returned in the response, it must be specified in a subsequent + invocation of the delete operation to continue deleting the directory. + :type continuation: str + :param max_records: Optional. It specifies the maximum number of files + or directories on which the acl change will be applied. If omitted or + greater than 2,000, the request will process up to 2,000 items + :type max_records: int + :param acl: Sets POSIX access control rights on files and directories. + The value is a comma-separated list of access control entries. Each + access control entry (ACE) consists of a scope, a type, a user or + group identifier, and permissions in the format + "[scope:][type]:[id]:[permissions]". + :type acl: str + :param request_id: Provides a client-generated, opaque value with a 1 + KB character limit that is recorded in the analytics logs when storage + analytics logging is enabled. + :type request_id: str + :param callable cls: A custom type or function that will be passed the + direct response + :return: SetAccessControlRecursiveResponse or the result of + cls(response) + :rtype: + ~azure.storage.filedatalake.models.SetAccessControlRecursiveResponse + :raises: + :class:`StorageErrorException` + """ + error_map = kwargs.pop('error_map', None) + action = "setAccessControlRecursive" + + # Construct URL + url = self.set_access_control_recursive.metadata['url'] + path_format_arguments = { + 'url': self._serialize.url("self._config.url", self._config.url, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'int', minimum=0) + if continuation is not None: + query_parameters['continuation'] = self._serialize.query("continuation", continuation, 'str') + query_parameters['mode'] = self._serialize.query("mode", mode, 'PathSetAccessControlRecursiveMode') + if max_records is not None: + query_parameters['maxRecords'] = self._serialize.query("max_records", max_records, 'int', minimum=1) + query_parameters['action'] = self._serialize.query("action", action, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Accept'] = 'application/json' + if acl is not None: + header_parameters['x-ms-acl'] = self._serialize.header("acl", acl, 'str') + if request_id is not None: + header_parameters['x-ms-client-request-id'] = self._serialize.header("request_id", request_id, 'str') + header_parameters['x-ms-version'] = self._serialize.header("self._config.version", self._config.version, 'str') + + # Construct and send request + request = self._client.patch(url, query_parameters, header_parameters) + pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) + response = pipeline_response.http_response + + if response.status_code not in [200]: + map_error(status_code=response.status_code, response=response, 
error_map=error_map) + raise models.StorageErrorException(response, self._deserialize) + + header_dict = {} + deserialized = None + if response.status_code == 200: + deserialized = self._deserialize('SetAccessControlRecursiveResponse', response) + header_dict = { + 'Date': self._deserialize('rfc-1123', response.headers.get('Date')), + 'x-ms-client-request-id': self._deserialize('str', response.headers.get('x-ms-client-request-id')), + 'x-ms-continuation': self._deserialize('str', response.headers.get('x-ms-continuation')), + 'x-ms-request-id': self._deserialize('str', response.headers.get('x-ms-request-id')), + 'x-ms-version': self._deserialize('str', response.headers.get('x-ms-version')), + } + + if cls: + return cls(response, deserialized, header_dict) + + return deserialized + set_access_control_recursive.metadata = {'url': '/{filesystem}/{path}'} + def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, close=None, content_length=None, request_id=None, path_http_headers=None, lease_access_conditions=None, modified_access_conditions=None, cls=None, **kwargs): """Set the owner, group, permissions, or access control list for a path. @@ -1231,21 +1372,21 @@ def flush_data(self, timeout=None, position=None, retain_uncommitted_data=None, :type request_id: str :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param modified_access_conditions: Additional parameters for the operation :type modified_access_conditions: - ~azure.storage.file.datalake.models.ModifiedAccessConditions + ~azure.storage.filedatalake.models.ModifiedAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) content_md5 = None @@ -1386,17 +1527,17 @@ def append_data(self, body, position=None, timeout=None, content_length=None, re :type request_id: str :param path_http_headers: Additional parameters for the operation :type path_http_headers: - ~azure.storage.file.datalake.models.PathHTTPHeaders + ~azure.storage.filedatalake.models.PathHTTPHeaders :param lease_access_conditions: Additional parameters for the operation :type lease_access_conditions: - ~azure.storage.file.datalake.models.LeaseAccessConditions + ~azure.storage.filedatalake.models.LeaseAccessConditions :param callable cls: A custom type or function that will be passed the direct response :return: None or the result of cls(response) :rtype: None :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) transactional_content_hash = None diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_service_operations.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_service_operations.py index ad6b7d3ecd50..540079a2c736 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_service_operations.py +++ 
b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/operations/_service_operations.py @@ -68,9 +68,9 @@ def list_file_systems(self, prefix=None, continuation=None, max_results=None, re :param callable cls: A custom type or function that will be passed the direct response :return: FileSystemList or the result of cls(response) - :rtype: ~azure.storage.file.datalake.models.FileSystemList + :rtype: ~azure.storage.filedatalake.models.FileSystemList :raises: - :class:`StorageErrorException` + :class:`StorageErrorException` """ error_map = kwargs.pop('error_map', None) # Construct URL diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/version.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/version.py index 3d08b008d2d6..be045899fa00 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/version.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_generated/version.py @@ -9,5 +9,5 @@ # regenerated. # -------------------------------------------------------------------------- -VERSION = "2018-11-09" +VERSION = "2019-12-12" diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py index 79dc32d8b4d6..13e35f4e55ee 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_models.py @@ -17,7 +17,7 @@ from azure.storage.blob import AccessPolicy as BlobAccessPolicy from azure.storage.blob._generated.models import StorageErrorException from azure.storage.blob._models import ContainerPropertiesPaged -from ._deserialize import return_headers_and_deserialized_path_list +from ._deserialize import return_headers_and_deserialized_path_list, deserialize_metadata from ._generated.models import Path from ._shared.models import DictMixin from ._shared.response_handlers import process_storage_error @@ -129,33 +129,24 @@ class DirectoryProperties(DictMixin): :var ~azure.storage.filedatalake.ContentSettings content_settings: """ def __init__(self, **kwargs): - super(DirectoryProperties, self).__init__( - **kwargs - ) - self.name = None - self.etag = None + self.name = kwargs.get('name') + self.etag = kwargs.get('ETag') self.deleted = None - self.metadata = None - self.lease = None - self.last_modified = None - self.creation_time = None + self.metadata = kwargs.get('metadata') + self.lease = LeaseProperties(**kwargs) + self.last_modified = kwargs.get('Last-Modified') + self.creation_time = kwargs.get('x-ms-creation-time') self.deleted_time = None self.remaining_retention_days = None @classmethod - def _from_blob_properties(cls, blob_properties): - directory_props = DirectoryProperties() - directory_props.name = blob_properties.name - directory_props.etag = blob_properties.etag - directory_props.deleted = blob_properties.deleted - directory_props.metadata = blob_properties.metadata - directory_props.lease = blob_properties.lease - directory_props.lease.__class__ = LeaseProperties - directory_props.last_modified = blob_properties.last_modified - directory_props.creation_time = blob_properties.creation_time - directory_props.deleted_time = blob_properties.deleted_time - directory_props.remaining_retention_days = blob_properties.remaining_retention_days - return directory_props + def _deserialize_dir_properties(cls, response, obj, headers): + 
metadata = deserialize_metadata(response, obj, headers) + dir_properties = cls( + metadata=metadata, + **headers + ) + return dir_properties class FileProperties(DictMixin): @@ -177,37 +168,32 @@ class FileProperties(DictMixin): :var ~azure.storage.filedatalake.ContentSettings content_settings: """ def __init__(self, **kwargs): - super(FileProperties, self).__init__( - **kwargs - ) - self.name = None - self.etag = None + self.name = kwargs.get('name') + self.etag = kwargs.get('ETag') self.deleted = None - self.metadata = None - self.lease = None - self.last_modified = None - self.creation_time = None - self.size = None + self.metadata = kwargs.get('metadata') + self.lease = LeaseProperties(**kwargs) + self.last_modified = kwargs.get('Last-Modified') + self.creation_time = kwargs.get('x-ms-creation-time') + self.size = kwargs.get('Content-Length') self.deleted_time = None + self.expiry_time = kwargs.get("x-ms-expiry-time") self.remaining_retention_days = None - self.content_settings = None + self.content_settings = ContentSettings(**kwargs) @classmethod - def _from_blob_properties(cls, blob_properties): - file_props = FileProperties() - file_props.name = blob_properties.name - file_props.etag = blob_properties.etag - file_props.deleted = blob_properties.deleted - file_props.metadata = blob_properties.metadata - file_props.lease = blob_properties.lease - file_props.lease.__class__ = LeaseProperties - file_props.last_modified = blob_properties.last_modified - file_props.creation_time = blob_properties.creation_time - file_props.size = blob_properties.size - file_props.deleted_time = blob_properties.deleted_time - file_props.remaining_retention_days = blob_properties.remaining_retention_days - file_props.content_settings = blob_properties.content_settings - return file_props + def _deserialize_file_properties(cls, response, obj, headers): + metadata = deserialize_metadata(response, obj, headers) + file_properties = cls( + metadata=metadata, + **headers + ) + if 'Content-Range' in headers: + if 'x-ms-blob-content-md5' in headers: + file_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5'] + else: + file_properties.content_settings.content_md5 = None + return file_properties class PathProperties(object): @@ -326,10 +312,10 @@ class LeaseProperties(BlobLeaseProperties): :ivar str duration: When a file is leased, specifies whether the lease is of infinite or fixed duration. """ - def __init__(self): - self.status = None - self.state = None - self.duration = None + def __init__(self, **kwargs): + super(LeaseProperties, self).__init__( + **kwargs + ) class ContentSettings(BlobContentSettings): diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_path_client.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_path_client.py index f905a3241e8c..9c18d4da2ab8 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_path_client.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_path_client.py @@ -529,7 +529,6 @@ def _get_path_properties(self, **kwargs): :caption: Getting the properties for a file/directory. 
""" path_properties = self._blob_client.get_blob_properties(**kwargs) - path_properties.__class__ = DirectoryProperties return path_properties def set_metadata(self, metadata, # type: Dict[str, str] diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client.py index d5aa27fab499..53348f8ecc22 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client.py @@ -246,6 +246,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs) ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, Pipeline(config.transport, policies=policies) def _batch_send( diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client_async.py index 177225191739..d252ad063fb6 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_shared/base_client_async.py @@ -102,6 +102,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs), ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, AsyncPipeline(config.transport, policies=policies) async def _batch_send( diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_upload_helper.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_upload_helper.py index bf29cfc250a7..d1a98dd427fc 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_upload_helper.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/_upload_helper.py @@ -41,6 +41,7 @@ def upload_datalake_file( # pylint: disable=unused-argument permissions = kwargs.pop('permissions', None) path_http_headers = kwargs.pop('path_http_headers', None) modified_access_conditions = kwargs.pop('modified_access_conditions', None) + chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) if not overwrite: # if customers didn't specify access conditions, they cannot flush data to existing file @@ -71,7 +72,7 @@ def upload_datalake_file( # pylint: disable=unused-argument service=client, uploader_class=DataLakeFileChunkUploader, total_size=length, - chunk_size=100 * 1024 * 1024, + chunk_size=chunk_size, stream=stream, max_concurrency=max_concurrency, validate_content=validate_content, diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py index f655adc0f791..b0495bf32434 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_directory_client_async.py @@ -198,8 +198,7 @@ async def get_directory_properties(self, **kwargs): :dedent: 4 :caption: Getting the properties for a file/directory. 
""" - blob_properties = await self._get_path_properties(**kwargs) - return DirectoryProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access + return await self._get_path_properties(cls=DirectoryProperties._deserialize_dir_properties, **kwargs) # pylint: disable=protected-access async def rename_directory(self, new_name, # type: str **kwargs): diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_file_client_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_file_client_async.py index 98f96b1f67cc..d014716d0d6c 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_file_client_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_data_lake_file_client_async.py @@ -201,8 +201,22 @@ async def get_file_properties(self, **kwargs): :dedent: 4 :caption: Getting the properties for a file. """ - blob_properties = await self._get_path_properties(**kwargs) - return FileProperties._from_blob_properties(blob_properties) # pylint: disable=protected-access + return await self._get_path_properties(cls=FileProperties._deserialize_file_properties, **kwargs) # pylint: disable=protected-access + + async def set_file_expiry(self, expiry_options, expires_on=None, **kwargs): + # type: (**Any) -> None + """Sets the time a file will expire and be deleted. + + :param str expiry_options: + Required. Indicates mode of the expiry time. + Possible values include: 'NeverExpire', 'RelativeToCreation', 'RelativeToNow', 'Absolute' + :param datetime expires_on: + The time to set the file to expiry + :keyword int timeout: + The timeout parameter is expressed in seconds. + :rtype: None + """ + return await self._blob_client._client.blob.set_expiry(expiry_options, expires_on=expires_on, **kwargs) # pylint: disable=protected-access async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[AnyStr]] length=None, # type: Optional[int] @@ -255,6 +269,9 @@ async def upload_data(self, data, # type: Union[AnyStr, Iterable[AnyStr], IO[An The match condition to use upon the etag. :keyword int timeout: The timeout parameter is expressed in seconds. + :keyword int chunk_size: + The maximum chunk size for uploading a file in chunks. + Defaults to 100*1024*1024, or 100MB. :return: response dict (Etag and last modified). 
""" options = self._upload_options( diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_path_client_async.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_path_client_async.py index 6f9693090422..1094c04ffee0 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_path_client_async.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_path_client_async.py @@ -357,7 +357,6 @@ async def _get_path_properties(self, **kwargs): :rtype: DirectoryProperties or FileProperties """ path_properties = await self._blob_client.get_blob_properties(**kwargs) - path_properties.__class__ = DirectoryProperties return path_properties async def set_metadata(self, metadata, # type: Dict[str, str] diff --git a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py index b2f10df34ec3..93da7bf4e03f 100644 --- a/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py +++ b/sdk/storage/azure-storage-file-datalake/azure/storage/filedatalake/aio/_upload_helper.py @@ -41,6 +41,7 @@ async def upload_datalake_file( # pylint: disable=unused-argument permissions = kwargs.pop('permissions', None) path_http_headers = kwargs.pop('path_http_headers', None) modified_access_conditions = kwargs.pop('modified_access_conditions', None) + chunk_size = kwargs.pop('chunk_size', 100 * 1024 * 1024) if not overwrite: # if customers didn't specify access conditions, they cannot flush data to existing file @@ -71,7 +72,7 @@ async def upload_datalake_file( # pylint: disable=unused-argument service=client, uploader_class=DataLakeFileChunkUploader, total_size=length, - chunk_size=100 * 1024 * 1024, + chunk_size=chunk_size, stream=stream, max_concurrency=max_concurrency, validate_content=validate_content, diff --git a/sdk/storage/azure-storage-file-datalake/swagger/README.md b/sdk/storage/azure-storage-file-datalake/swagger/README.md index 2cb79feeb6ab..3897b09b4370 100644 --- a/sdk/storage/azure-storage-file-datalake/swagger/README.md +++ b/sdk/storage/azure-storage-file-datalake/swagger/README.md @@ -19,7 +19,7 @@ autorest --use=C:/work/autorest.python --version=2.0.4280 ### Settings ``` yaml -input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/storage-dataplane-preview/specification/storage/data-plane/Microsoft.StorageDataLake/stable/2018-11-09/DataLakeStorage.json +input-file: https://raw.githubusercontent.com/Azure/azure-rest-api-specs/storage-dataplane-preview/specification/storage/data-plane/Microsoft.StorageDataLake/stable/2019-12-12/DataLakeStorage.json output-folder: ../azure/storage/filedatalake/_generated namespace: azure.storage.filedatalake no-namespace-folders: true diff --git a/sdk/storage/azure-storage-file-datalake/tests/recordings/test_file.test_set_expiry.yaml b/sdk/storage/azure-storage-file-datalake/tests/recordings/test_file.test_set_expiry.yaml new file mode 100644 index 000000000000..f21edffa71c6 --- /dev/null +++ b/sdk/storage/azure-storage-file-datalake/tests/recordings/test_file.test_set_expiry.yaml @@ -0,0 +1,208 @@ +interactions: +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 
08a85f0a-9f22-11ea-b31d-001a7dda7113 + x-ms-date: + - Tue, 26 May 2020 07:25:11 GMT + x-ms-properties: + - '' + x-ms-version: + - '2019-12-12' + method: PUT + uri: https://storagename.dfs.core.windows.net/filesystem84ed0a59/directory84ed0a59?resource=directory + response: + body: + string: '' + headers: + Content-Length: + - '0' + Date: + - Tue, 26 May 2020 07:25:11 GMT + ETag: + - '"0x8D80145ED25E619"' + Last-Modified: + - Tue, 26 May 2020 07:25:11 GMT + Server: + - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0 + x-ms-request-id: + - 9388e921-901f-0066-392e-330280000000 + x-ms-version: + - '2019-12-12' + status: + code: 201 + message: Created +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 08fa93dc-9f22-11ea-bf47-001a7dda7113 + x-ms-content-disposition: + - inline + x-ms-content-language: + - spanish + x-ms-date: + - Tue, 26 May 2020 07:25:11 GMT + x-ms-properties: + - hello=d29ybGQ=,number=NDI= + x-ms-version: + - '2019-12-12' + method: PUT + uri: https://storagename.dfs.core.windows.net/filesystem84ed0a59/directory84ed0a59%2Fnewfile?resource=file + response: + body: + string: '' + headers: + Content-Length: + - '0' + Date: + - Tue, 26 May 2020 07:25:11 GMT + ETag: + - '"0x8D80145ED335B0A"' + Last-Modified: + - Tue, 26 May 2020 07:25:11 GMT + Server: + - Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0 + x-ms-request-id: + - 9388e922-901f-0066-3a2e-330280000000 + x-ms-version: + - '2019-12-12' + status: + code: 201 + message: Created +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 09086e8c-9f22-11ea-bd32-001a7dda7113 + x-ms-date: + - Tue, 26 May 2020 07:25:11 GMT + x-ms-expiry-option: + - Absolute + x-ms-expiry-time: + - Tue, 26 May 2020 08:25:11 GMT + x-ms-version: + - '2019-12-12' + method: PUT + uri: https://storagename.blob.core.windows.net/filesystem84ed0a59/directory84ed0a59/newfile?comp=expiry + response: + body: + string: '' + headers: + Content-Length: + - '0' + Date: + - Tue, 26 May 2020 07:25:12 GMT + ETag: + - '"0x8D80145ED335B0A"' + Last-Modified: + - Tue, 26 May 2020 07:25:11 GMT + Server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + x-ms-request-id: + - 8528af15-701e-009a-2a2e-33d379000000 + x-ms-version: + - '2019-12-12' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 098639e8-9f22-11ea-8208-001a7dda7113 + x-ms-date: + - Tue, 26 May 2020 07:25:12 GMT + x-ms-version: + - '2019-12-12' + method: HEAD + uri: https://storagename.blob.core.windows.net/filesystem84ed0a59/directory84ed0a59/newfile + response: + body: + string: '' + headers: + Accept-Ranges: + - bytes + Content-Disposition: + - inline + Content-Language: + - spanish + Content-Length: + - '0' + Content-Type: + - application/octet-stream + Date: + - Tue, 26 May 2020 07:25:12 GMT + ETag: + - '"0x8D80145ED335B0A"' + Last-Modified: + - Tue, 26 May 2020 07:25:11 GMT + Server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + x-ms-access-tier: + - Hot + 
x-ms-access-tier-inferred: + - 'true' + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Tue, 26 May 2020 07:25:11 GMT + x-ms-expiry-time: + - Tue, 26 May 2020 08:25:11 GMT + x-ms-lease-state: + - available + x-ms-lease-status: + - unlocked + x-ms-meta-hello: + - world + x-ms-meta-number: + - '42' + x-ms-request-id: + - 8528af6d-701e-009a-792e-33d379000000 + x-ms-server-encrypted: + - 'true' + x-ms-version: + - '2019-12-12' + status: + code: 200 + message: OK +version: 1 diff --git a/sdk/storage/azure-storage-file-datalake/tests/recordings/test_file_async.test_set_expiry_async.yaml b/sdk/storage/azure-storage-file-datalake/tests/recordings/test_file_async.test_set_expiry_async.yaml new file mode 100644 index 000000000000..eb65df676e12 --- /dev/null +++ b/sdk/storage/azure-storage-file-datalake/tests/recordings/test_file_async.test_set_expiry_async.yaml @@ -0,0 +1,140 @@ +interactions: +- request: + body: null + headers: + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 107e1ee6-9f22-11ea-b27b-001a7dda7113 + x-ms-date: + - Tue, 26 May 2020 07:25:24 GMT + x-ms-properties: + - '' + x-ms-version: + - '2019-12-12' + method: PUT + uri: https://storagename.dfs.core.windows.net/filesystem217a0f53/directory217a0f53?resource=directory + response: + body: + string: '' + headers: + Content-Length: '0' + Date: Tue, 26 May 2020 07:25:23 GMT + Etag: '"0x8D80145F4DF209F"' + Last-Modified: Tue, 26 May 2020 07:25:24 GMT + Server: Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0 + x-ms-request-id: c7f29fe2-401f-0028-752e-332c08000000 + x-ms-version: '2019-12-12' + status: + code: 201 + message: Created + url: https://emilyhnseuap.dfs.core.windows.net/filesystem217a0f53/directory217a0f53?resource=directory +- request: + body: null + headers: + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 10b47fba-9f22-11ea-993d-001a7dda7113 + x-ms-content-disposition: + - inline + x-ms-content-language: + - spanish + x-ms-date: + - Tue, 26 May 2020 07:25:24 GMT + x-ms-properties: + - hello=d29ybGQ=,number=NDI= + x-ms-version: + - '2019-12-12' + method: PUT + uri: https://storagename.dfs.core.windows.net/filesystem217a0f53/directory217a0f53%2Fnewfile?resource=file + response: + body: + string: '' + headers: + Content-Length: '0' + Date: Tue, 26 May 2020 07:25:24 GMT + Etag: '"0x8D80145F4ECE3A1"' + Last-Modified: Tue, 26 May 2020 07:25:24 GMT + Server: Windows-Azure-HDFS/1.0 Microsoft-HTTPAPI/2.0 + x-ms-request-id: c7f29fe3-401f-0028-762e-332c08000000 + x-ms-version: '2019-12-12' + status: + code: 201 + message: Created + url: https://emilyhnseuap.dfs.core.windows.net/filesystem217a0f53/directory217a0f53%2Fnewfile?resource=file +- request: + body: null + headers: + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 10c24164-9f22-11ea-8bbd-001a7dda7113 + x-ms-date: + - Tue, 26 May 2020 07:25:24 GMT + x-ms-expiry-option: + - Absolute + x-ms-expiry-time: + - Tue, 26 May 2020 08:25:24 GMT + x-ms-version: + - '2019-12-12' + method: PUT + uri: https://storagename.blob.core.windows.net/filesystem217a0f53/directory217a0f53/newfile?comp=expiry + response: + body: + string: '' + headers: + Content-Length: '0' + Date: Tue, 26 May 2020 07:25:25 GMT + Etag: '"0x8D80145F4ECE3A1"' + Last-Modified: Tue, 26 May 2020 07:25:24 GMT + Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + x-ms-request-id: 
5cbafaac-901e-0059-6d2e-33ca23000000 + x-ms-version: '2019-12-12' + status: + code: 200 + message: OK + url: https://emilyhnseuap.blob.core.windows.net/filesystem217a0f53/directory217a0f53/newfile?comp=expiry +- request: + body: null + headers: + User-Agent: + - azsdk-python-storage-dfs/12.0.2 Python/3.7.3 (Windows-10-10.0.18362-SP0) + x-ms-client-request-id: + - 113b38ca-9f22-11ea-9baf-001a7dda7113 + x-ms-date: + - Tue, 26 May 2020 07:25:25 GMT + x-ms-version: + - '2019-12-12' + method: HEAD + uri: https://storagename.blob.core.windows.net/filesystem217a0f53/directory217a0f53/newfile + response: + body: + string: '' + headers: + Accept-Ranges: bytes + Content-Disposition: inline + Content-Language: spanish + Content-Length: '0' + Content-Type: application/octet-stream + Date: Tue, 26 May 2020 07:25:25 GMT + Etag: '"0x8D80145F4ECE3A1"' + Last-Modified: Tue, 26 May 2020 07:25:24 GMT + Server: Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + x-ms-access-tier: Hot + x-ms-access-tier-inferred: 'true' + x-ms-blob-type: BlockBlob + x-ms-creation-time: Tue, 26 May 2020 07:25:24 GMT + x-ms-expiry-time: Tue, 26 May 2020 08:25:24 GMT + x-ms-lease-state: available + x-ms-lease-status: unlocked + x-ms-meta-hello: world + x-ms-meta-number: '42' + x-ms-request-id: 5cbafb24-901e-0059-502e-33ca23000000 + x-ms-server-encrypted: 'true' + x-ms-version: '2019-12-12' + status: + code: 200 + message: OK + url: https://emilyhnseuap.blob.core.windows.net/filesystem217a0f53/directory217a0f53/newfile +version: 1 diff --git a/sdk/storage/azure-storage-file-datalake/tests/test_file.py b/sdk/storage/azure-storage-file-datalake/tests/test_file.py index bf831b563b66..38af0e638e92 100644 --- a/sdk/storage/azure-storage-file-datalake/tests/test_file.py +++ b/sdk/storage/azure-storage-file-datalake/tests/test_file.py @@ -558,6 +558,24 @@ def test_get_properties(self): self.assertEqual(properties.metadata['hello'], metadata['hello']) self.assertEqual(properties.content_settings.content_language, content_settings.content_language) + @record + def test_set_expiry(self): + # Arrange + directory_client = self._create_directory_and_return_client() + + metadata = {'hello': 'world', 'number': '42'} + content_settings = ContentSettings( + content_language='spanish', + content_disposition='inline') + expires_on = datetime.utcnow() + timedelta(hours=1) + file_client = directory_client.create_file("newfile", metadata=metadata, content_settings=content_settings) + file_client.set_file_expiry("Absolute", expires_on=expires_on) + properties = file_client.get_file_properties() + + # Assert + self.assertTrue(properties) + self.assertIsNotNone(properties.expiry_time) + @record def test_rename_file_with_non_used_name(self): file_client = self._create_file_and_return_client() diff --git a/sdk/storage/azure-storage-file-datalake/tests/test_file_async.py b/sdk/storage/azure-storage-file-datalake/tests/test_file_async.py index 8bd1ef819a14..759d996fadd2 100644 --- a/sdk/storage/azure-storage-file-datalake/tests/test_file_async.py +++ b/sdk/storage/azure-storage-file-datalake/tests/test_file_async.py @@ -676,6 +676,28 @@ def test_get_properties_async(self): loop = asyncio.get_event_loop() loop.run_until_complete(self._test_get_properties()) + async def _test_set_expiry(self): + # Arrange + directory_client = await self._create_directory_and_return_client() + + metadata = {'hello': 'world', 'number': '42'} + content_settings = ContentSettings( + content_language='spanish', + content_disposition='inline') + expires_on = datetime.utcnow() + 
timedelta(hours=1) + file_client = await directory_client.create_file("newfile", metadata=metadata, content_settings=content_settings) + await file_client.set_file_expiry("Absolute", expires_on=expires_on) + properties = await file_client.get_file_properties() + + # Assert + self.assertTrue(properties) + self.assertIsNotNone(properties.expiry_time) + + @record + def test_set_expiry_async(self): + loop = asyncio.get_event_loop() + loop.run_until_complete(self._test_set_expiry()) + async def _test_rename_file_with_non_used_name(self): file_client = await self._create_file_and_return_client() data_bytes = b"abc" diff --git a/sdk/storage/azure-storage-file-datalake/tests/test_large_file.py b/sdk/storage/azure-storage-file-datalake/tests/test_large_file.py new file mode 100644 index 000000000000..272dd233b814 --- /dev/null +++ b/sdk/storage/azure-storage-file-datalake/tests/test_large_file.py @@ -0,0 +1,180 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import unittest +from os import urandom + +import pytest +import re +from azure.core.pipeline.policies import HTTPPolicy + +from azure.core.exceptions import ResourceExistsError +from azure.storage.blob._shared.base_client import format_shared_key_credential +from azure.storage.filedatalake import DataLakeServiceClient +from testcase import ( + StorageTestCase, + record, + TestMode +) + +# ------------------------------------------------------------------------------ +TEST_DIRECTORY_PREFIX = 'directory' +TEST_FILE_PREFIX = 'file' +FILE_PATH = 'file_output.temp.dat' +LARGEST_BLOCK_SIZE = 4000 * 1024 * 1024 +# ------------------------------------------------------------------------------ + + +class LargeFileTest(StorageTestCase): + def setUp(self): + super(LargeFileTest, self).setUp() + url = self._get_account_url() + self.payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([self.settings.STORAGE_DATA_LAKE_ACCOUNT_NAME, "dummy"], + self.settings.STORAGE_DATA_LAKE_ACCOUNT_KEY) + self.dsc = DataLakeServiceClient(url, + credential=self.settings.STORAGE_DATA_LAKE_ACCOUNT_KEY, + logging_enable=True, + _additional_pipeline_policies=[self.payload_dropping_policy, credential_policy]) + self.config = self.dsc._config + + self.file_system_name = self.get_resource_name('filesystem') + + if not self.is_playback(): + file_system = self.dsc.get_file_system_client(self.file_system_name) + try: + file_system.create_file_system(timeout=5) + except ResourceExistsError: + pass + + def tearDown(self): + if not self.is_playback(): + try: + self.dsc.delete_file_system(self.file_system_name) + except: + pass + + return super(LargeFileTest, self).tearDown() + + @pytest.mark.live_test_only + def test_append_large_stream_without_network(self): + directory_name = self.get_resource_name(TEST_DIRECTORY_PREFIX) + + # Create a directory to put the file under that + directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name) + directory_client.create_directory() + + file_client = directory_client.get_file_client('filename') + file_client.create_file() + + data = LargeStream(LARGEST_BLOCK_SIZE) + + # Act + response = file_client.append_data(data, 0, LARGEST_BLOCK_SIZE) + + self.assertIsNotNone(response) + 
self.assertEqual(self.payload_dropping_policy.append_counter, 1) + self.assertEqual(self.payload_dropping_policy.append_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + def test_upload_large_stream_without_network(self): + directory_name = self.get_resource_name(TEST_DIRECTORY_PREFIX) + + # Create a directory to put the file under that + directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name) + directory_client.create_directory() + + file_client = directory_client.get_file_client('filename') + file_client.create_file() + + length = 2*LARGEST_BLOCK_SIZE + data = LargeStream(length) + + # Act + response = file_client.upload_data(data, length, overwrite=True, chunk_size = LARGEST_BLOCK_SIZE) + + self.assertIsNotNone(response) + self.assertEqual(self.payload_dropping_policy.append_counter, 2) + self.assertEqual(self.payload_dropping_policy.append_sizes[0], LARGEST_BLOCK_SIZE) + self.assertEqual(self.payload_dropping_policy.append_sizes[1], LARGEST_BLOCK_SIZE) + + +class LargeStream: + def __init__(self, length, initial_buffer_length=1024*1024): + self._base_data = urandom(initial_buffer_length) + self._base_data_length = initial_buffer_length + self._position = 0 + self._remaining = length + + def read(self, size=None): + if self._remaining == 0: + return b"" + + if size is None: + e = self._base_data_length + else: + e = size + e = min(e, self._remaining) + if e > self._base_data_length: + self._base_data = urandom(e) + self._base_data_length = e + self._remaining = self._remaining - e + return self._base_data[:e] + + def remaining(self): + return self._remaining + + +class PayloadDroppingPolicy(HTTPPolicy): + def __init__(self): + super().__init__() + self.append_counter = 0 + self.append_sizes = [] + self.dummy_body = "dummy_body" + + def send(self, request): # type: (PipelineRequest) -> PipelineResponse + if _is_append_request(request): + if request.http_request.body: + position = self.append_counter*len(self.dummy_body) + request.http_request.url = re.sub(r'position=\d+', "position=" + str(position), request.http_request.url) + self.append_sizes.append(_get_body_length(request)) + replacement = self.dummy_body + request.http_request.body = replacement + request.http_request.headers["Content-Length"] = str(len(replacement)) + self.append_counter = self.append_counter + 1 + if _is_flush_request(request): + position = self.append_counter * len(self.dummy_body) + request.http_request.url = re.sub(r'position=\d+', "position=" + str(position), request.http_request.url) + return self.next.send(request) + + +def _is_append_request(request): + query = request.http_request.query + return query and "action" in query and query["action"] == "append" + + +def _is_flush_request(request): + query = request.http_request.query + return query and "action" in query and query["action"] == "flush" + + +def _get_body_length(request): + body = request.http_request.body + length = 0 + if hasattr(body, "read"): + chunk = body.read(10*1024*1024) + while chunk: + length = length + len(chunk) + chunk = body.read(10 * 1024 * 1024) + else: + length = len(body) + return length + + +# ------------------------------------------------------------------------------ +if __name__ == '__main__': + unittest.main() diff --git a/sdk/storage/azure-storage-file-datalake/tests/test_large_file_async.py b/sdk/storage/azure-storage-file-datalake/tests/test_large_file_async.py new file mode 100644 index 000000000000..2bfdef448ab2 --- /dev/null +++ 
b/sdk/storage/azure-storage-file-datalake/tests/test_large_file_async.py @@ -0,0 +1,207 @@ +# coding: utf-8 + +# ------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# -------------------------------------------------------------------------- +import re +import unittest +import asyncio +from io import BytesIO +from os import urandom + +import pytest + +from azure.core.exceptions import ResourceExistsError +from azure.core.pipeline.policies import SansIOHTTPPolicy +from azure.storage.blob._shared.base_client import format_shared_key_credential +from azure.storage.filedatalake.aio import DataLakeServiceClient +from testcase import ( + StorageTestCase, + record, + TestMode +) + +# ------------------------------------------------------------------------------ +TEST_DIRECTORY_PREFIX = 'directory' +TEST_FILE_PREFIX = 'file' +FILE_PATH = 'file_output.temp.dat' +LARGEST_BLOCK_SIZE = 4000 * 1024 * 1024 +# ------------------------------------------------------------------------------ + + +class LargeFileTest(StorageTestCase): + def setUp(self): + super(LargeFileTest, self).setUp() + url = self._get_account_url() + self.payload_dropping_policy = PayloadDroppingPolicy() + credential_policy = format_shared_key_credential([self.settings.STORAGE_DATA_LAKE_ACCOUNT_NAME, "dummy"], + self.settings.STORAGE_DATA_LAKE_ACCOUNT_KEY) + self.dsc = DataLakeServiceClient(url, + credential=self.settings.STORAGE_DATA_LAKE_ACCOUNT_KEY, + _additional_pipeline_policies=[self.payload_dropping_policy, credential_policy]) + loop = asyncio.get_event_loop() + loop.run_until_complete(self.dsc.__aenter__()) + + self.config = self.dsc._config + + self.file_system_name = self.get_resource_name('filesystem') + + if not self.is_playback(): + file_system = self.dsc.get_file_system_client(self.file_system_name) + try: + loop = asyncio.get_event_loop() + loop.run_until_complete(file_system.create_file_system(timeout=5)) + + except ResourceExistsError: + pass + + def tearDown(self): + if not self.is_playback(): + try: + loop = asyncio.get_event_loop() + loop.run_until_complete(self.dsc.delete_file_system(self.file_system_name)) + loop.run_until_complete(self.dsc.__aexit__()) + except: + pass + + return super(LargeFileTest, self).tearDown() + + # --Helpers----------------------------------------------------------------- + def _get_directory_reference(self, prefix=TEST_DIRECTORY_PREFIX): + directory_name = self.get_resource_name(prefix) + return directory_name + + # --Helpers----------------------------------------------------------------- + + async def _test_append_large_stream_without_network(self): + directory_name = self._get_directory_reference() + + # Create a directory to put the file under that + directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name) + await directory_client.create_directory() + + file_client = directory_client.get_file_client('filename') + await file_client.create_file() + + data = LargeStream(LARGEST_BLOCK_SIZE) + + # Act + response = await file_client.append_data(data, 0, LARGEST_BLOCK_SIZE) + + self.assertIsNotNone(response) + self.assertEqual(self.payload_dropping_policy.append_counter, 1) + self.assertEqual(self.payload_dropping_policy.append_sizes[0], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + def test_append_large_stream_without_network(self): + loop = asyncio.get_event_loop() + 
loop.run_until_complete(self._test_append_large_stream_without_network()) + + async def _test_upload_large_stream_without_network(self): + directory_name = self.get_resource_name(TEST_DIRECTORY_PREFIX) + + # Create a directory to put the file under that + directory_client = self.dsc.get_directory_client(self.file_system_name, directory_name) + await directory_client.create_directory() + + file_client = directory_client.get_file_client('filename') + await file_client.create_file() + + length = 2*LARGEST_BLOCK_SIZE + data = LargeStream(length) + + # Act + response = await file_client.upload_data(data, length, overwrite=True, chunk_size = LARGEST_BLOCK_SIZE) + + self.assertIsNotNone(response) + self.assertEqual(self.payload_dropping_policy.append_counter, 2) + self.assertEqual(self.payload_dropping_policy.append_sizes[0], LARGEST_BLOCK_SIZE) + self.assertEqual(self.payload_dropping_policy.append_sizes[1], LARGEST_BLOCK_SIZE) + + @pytest.mark.live_test_only + def test_upload_large_stream_without_network(self): + loop = asyncio.get_event_loop() + loop.run_until_complete(self._test_upload_large_stream_without_network()) + + +class LargeStream(BytesIO): + def __init__(self, length, initial_buffer_length=1024 * 1024): + super().__init__() + self._base_data = urandom(initial_buffer_length) + self._base_data_length = initial_buffer_length + self._position = 0 + self._remaining = length + self._closed = False + + def read(self, size=None): + if self._remaining == 0: + return b"" + + if size is None: + e = self._base_data_length + else: + e = size + e = min(e, self._remaining) + if e > self._base_data_length: + self._base_data = urandom(e) + self._base_data_length = e + self._remaining = self._remaining - e + return self._base_data[:e] + + def remaining(self): + return self._remaining + + def close(self): + self._closed = True + + +class PayloadDroppingPolicy(SansIOHTTPPolicy): + def __init__(self): + super().__init__() + self.append_counter = 0 + self.append_sizes = [] + self.dummy_body = "dummy_body" + + def on_request(self, request): # type: (PipelineRequest) -> Union[None, Awaitable[None]] + if _is_append_request(request): + if request.http_request.body: + position = self.append_counter*len(self.dummy_body) + request.http_request.url = re.sub(r'position=\d+', "position=" + str(position), request.http_request.url) + self.append_sizes.append(_get_body_length(request)) + replacement = self.dummy_body + request.http_request.body = replacement + request.http_request.headers["Content-Length"] = str(len(replacement)) + self.append_counter = self.append_counter + 1 + elif _is_flush_request(request): + position = self.append_counter * len(self.dummy_body) + request.http_request.url = re.sub(r'position=\d+', "position=" + str(position), request.http_request.url) + + +def _is_append_request(request): + query = request.http_request.query + return query and "action" in query and query["action"] == "append" + + +def _is_flush_request(request): + query = request.http_request.query + return query and "action" in query and query["action"] == "flush" + + +def _get_body_length(request): + body = request.http_request.body + length = 0 + if hasattr(body, "read"): + chunk = body.read(10*1024*1024) + while chunk: + length = length + len(chunk) + chunk = body.read(10 * 1024 * 1024) + else: + length = len(body) + return length + + +# ------------------------------------------------------------------------------ +if __name__ == '__main__': + unittest.main() diff --git 
a/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client.py b/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client.py index d5aa27fab499..53348f8ecc22 100644 --- a/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client.py +++ b/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client.py @@ -246,6 +246,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs) ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, Pipeline(config.transport, policies=policies) def _batch_send( diff --git a/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client_async.py b/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client_async.py index 177225191739..d252ad063fb6 100644 --- a/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client_async.py +++ b/sdk/storage/azure-storage-file-share/azure/storage/fileshare/_shared/base_client_async.py @@ -102,6 +102,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs), ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, AsyncPipeline(config.transport, policies=policies) async def _batch_send( diff --git a/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client.py b/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client.py index b81ced71e5ee..f7656f89b73f 100644 --- a/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client.py +++ b/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client.py @@ -246,6 +246,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs) ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, Pipeline(config.transport, policies=policies) def _batch_send( diff --git a/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client_async.py b/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client_async.py index 177225191739..d252ad063fb6 100644 --- a/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client_async.py +++ b/sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client_async.py @@ -102,6 +102,8 @@ def _create_pipeline(self, credential, **kwargs): DistributedTracingPolicy(**kwargs), HttpLoggingPolicy(**kwargs), ] + if kwargs.get("_additional_pipeline_policies"): + policies = policies + kwargs.get("_additional_pipeline_policies") return config, AsyncPipeline(config.transport, policies=policies) async def _batch_send(
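Taken together, the changes above add a `chunk_size` keyword to `upload_data`, a `set_file_expiry` method on the file client, and an `expiry_time` field surfaced on `FileProperties`. The sketch below is a minimal illustration of how those pieces combine, mirroring the flow of the new `test_set_expiry` test; the account URL, credential, and resource names are placeholders and not values taken from this change.

```python
from datetime import datetime, timedelta
from os import urandom

from azure.storage.filedatalake import DataLakeServiceClient

service_client = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",  # placeholder
    credential="<account-key>")                            # placeholder

file_system_client = service_client.create_file_system("sample-filesystem")
directory_client = file_system_client.get_directory_client("sample-dir")
directory_client.create_directory()
file_client = directory_client.create_file("newfile")

# chunk_size is new here: it caps the size of each append instead of the
# previously hard-coded 100 MiB chunk size in the upload helper.
data = urandom(8 * 1024 * 1024)
file_client.upload_data(data, length=len(data), overwrite=True,
                        chunk_size=4 * 1024 * 1024)

# Schedule the file to expire one hour from now ('Absolute' mode).
file_client.set_file_expiry("Absolute",
                            expires_on=datetime.utcnow() + timedelta(hours=1))

# expiry_time is now populated on FileProperties from the
# x-ms-expiry-time response header.
properties = file_client.get_file_properties()
print(properties.expiry_time)
```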
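The `_additional_pipeline_policies` hook added to the shared base clients is what lets the new large-file tests splice `PayloadDroppingPolicy` into the request pipeline. The sketch below shows the same pattern with a trivial request-counting policy; the keyword is underscore-prefixed (internal, primarily intended for tests), and the endpoint, credential, and file system name are placeholders, with the file system assumed to already exist.

```python
from azure.core.pipeline.policies import SansIOHTTPPolicy
from azure.storage.filedatalake import DataLakeServiceClient


class RequestCountingPolicy(SansIOHTTPPolicy):
    """Count requests that pass through the client pipeline."""

    def __init__(self):
        self.count = 0

    def on_request(self, request):
        self.count += 1


counter = RequestCountingPolicy()
service_client = DataLakeServiceClient(
    account_url="https://<account>.dfs.core.windows.net",  # placeholder
    credential="<account-key>",                            # placeholder
    _additional_pipeline_policies=[counter])

# Path operations go through the DFS pipeline the extra policy was appended to.
file_system_client = service_client.get_file_system_client("sample-filesystem")
file_client = file_system_client.get_file_client("sample-dir/sample-file")
file_client.create_file()   # assumes the file system already exists
print(counter.count)        # at least one request has been counted
```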