author     S. Solomon Darnell   2025-03-28 21:52:21 -0500
committer  S. Solomon Darnell   2025-03-28 21:52:21 -0500
commit     4a52a71956a8d46fcb7294ac71734504bb09bcc2 (patch)
tree       ee3dc5af3b6313e921cd920906356f5d4febc4ed /.venv/lib/python3.12/site-packages/azure/storage/blob/aio
parent     cc961e04ba734dd72309fb548a2f97d67d578813 (diff)
download   gn-ai-master.tar.gz

two version of R2R are here (HEAD, master)
Diffstat (limited to '.venv/lib/python3.12/site-packages/azure/storage/blob/aio')
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py                    |  166
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py          | 3215
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py  |  799
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py     | 1611
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py             |  872
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py           |   72
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py                |  346
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py          |  249
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py                     |  199
-rw-r--r--  .venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py             |  334
10 files changed, 7863 insertions(+), 0 deletions(-)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py
new file mode 100644
index 00000000..a755e6a2
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/__init__.py
@@ -0,0 +1,166 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import os
+
+from typing import Any, AnyStr, Dict, cast, IO, Iterable, Optional, Union, TYPE_CHECKING
+from ._list_blobs_helper import BlobPrefix
+from .._models import BlobType
+from .._shared.policies_async import ExponentialRetry, LinearRetry
+from ._blob_client_async import BlobClient
+from ._container_client_async import ContainerClient
+from ._blob_service_client_async import BlobServiceClient
+from ._lease_async import BlobLeaseClient
+from ._download_async import StorageStreamDownloader
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+
+
+async def upload_blob_to_url(
+    blob_url: str,
+    data: Union[Iterable[AnyStr], IO[AnyStr]],
+    credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+    **kwargs: Any
+) -> Dict[str, Any]:
+    """Upload data to a given URL
+
+    The data will be uploaded as a block blob.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param data:
+        The data to upload. This can be bytes, text, an iterable or a file-like object.
+    :type data: bytes or str or Iterable
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword bool overwrite:
+        Whether the blob to be uploaded should overwrite the current data.
+        If True, upload_blob_to_url will overwrite any existing data. If set to False, the
+        operation will fail with a ResourceExistsError.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to upload.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword dict(str,str) metadata:
+        Name-value pairs associated with the blob as metadata.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient upload algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :keyword str encoding:
+        Encoding to use if text is supplied as input. Defaults to UTF-8.
+    :returns: Blob-updated property dict (Etag and last modified)
+    :rtype: dict[str, Any]
+    """
+    async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        return await cast(BlobClient, client).upload_blob(
+            data=data,
+            blob_type=BlobType.BLOCKBLOB,
+            **kwargs)
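For context, a minimal sketch of how this helper is typically called; the account URL, container, blob name, and SAS token below are hypothetical placeholders:

```python
import asyncio

from azure.storage.blob.aio import upload_blob_to_url

# Hypothetical blob URL carrying a SAS token with write permission.
BLOB_SAS_URL = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas-token>"

async def main() -> None:
    # The helper opens and closes its own BlobClient and uploads the
    # payload as a block blob; overwrite=True replaces existing content.
    result = await upload_blob_to_url(BLOB_SAS_URL, b"hello world", overwrite=True)
    print(result["etag"], result["last_modified"])

asyncio.run(main())
```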
+
+
+# Download data to specified open file-handle.
+async def _download_to_stream(client, handle, **kwargs):
+    stream = await client.download_blob(**kwargs)
+    await stream.readinto(handle)
+
+
+async def download_blob_from_url(
+    blob_url: str,
+    output: str,
+    credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None, # pylint: disable=line-too-long
+    **kwargs: Any
+) -> None:
+    """Download the contents of a blob to a local file or stream.
+
+    :param str blob_url:
+        The full URI to the blob. This can also include a SAS token.
+    :param output:
+        Where the data should be downloaded to. This could be either a file path to write to,
+        or an open IO handle to write to.
+    :type output: str or writable stream
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        blob URL already has a SAS token or the blob is public. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :type credential:
+        ~azure.core.credentials.AzureNamedKeyCredential or
+        ~azure.core.credentials.AzureSasCredential or
+        ~azure.core.credentials.TokenCredential or
+        str or dict[str, str] or None
+    :keyword bool overwrite:
+        Whether the local file should be overwritten if it already exists. The default value is
+        `False` - in which case a ValueError will be raised if the file already exists. If set to
+        `True`, an attempt will be made to write to the existing file. If a stream handle is passed
+        in, this value is ignored.
+    :keyword int max_concurrency:
+        The number of parallel connections with which to download.
+    :keyword int offset:
+        Start of byte range to use for downloading a section of the blob.
+        Must be set if length is provided.
+    :keyword int length:
+        Number of bytes to read from the stream. This is optional, but
+        should be supplied for optimal performance.
+    :keyword bool validate_content:
+        If true, calculates an MD5 hash for each chunk of the blob. The storage
+        service checks the hash of the content that has arrived with the hash
+        that was sent. This is primarily valuable for detecting bitflips on
+        the wire if using http instead of https, as https (the default) will
+        already validate. Note that this MD5 hash is not stored with the
+        blob. Also note that if enabled, the memory-efficient algorithm
+        will not be used, because computing the MD5 hash requires buffering
+        entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+    :rtype: None
+    """
+    overwrite = kwargs.pop('overwrite', False)
+    async with BlobClient.from_blob_url(blob_url, credential=credential) as client:
+        if hasattr(output, 'write'):
+            await _download_to_stream(client, output, **kwargs)
+        else:
+            if not overwrite and os.path.isfile(output):
+                raise ValueError(f"The file '{output}' already exists.")
+            with open(output, 'wb') as file_handle:
+                await _download_to_stream(client, file_handle, **kwargs)
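And the mirror-image sketch for the download helper, again with a hypothetical SAS URL; note that `overwrite` only applies when `output` is a file path:

```python
import asyncio
import io

from azure.storage.blob.aio import download_blob_from_url

# Hypothetical blob URL carrying a SAS token with read permission.
BLOB_SAS_URL = "https://myaccount.blob.core.windows.net/mycontainer/hello.txt?<sas-token>"

async def main() -> None:
    # Download to a local path; without overwrite=True an existing file
    # raises ValueError, as implemented above.
    await download_blob_from_url(BLOB_SAS_URL, "hello_local.txt", overwrite=True)

    # Any object with a write() method is treated as a stream.
    buffer = io.BytesIO()
    await download_blob_from_url(BLOB_SAS_URL, buffer)
    print(buffer.getvalue())

asyncio.run(main())
```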
+
+
+__all__ = [
+    'upload_blob_to_url',
+    'download_blob_from_url',
+    'BlobServiceClient',
+    'BlobPrefix',
+    'ContainerClient',
+    'BlobClient',
+    'BlobLeaseClient',
+    'ExponentialRetry',
+    'LinearRetry',
+    'StorageStreamDownloader'
+]
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py
new file mode 100644
index 00000000..7cb07448
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_client_async.py
@@ -0,0 +1,3215 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import warnings
+from datetime import datetime
+from functools import partial
+from typing import (
+    Any, AnyStr, AsyncIterable, cast, Dict, IO, Iterable, List, Optional, overload, Tuple, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import ResourceNotFoundError, HttpResponseError, ResourceExistsError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from ._download_async import StorageStreamDownloader
+from ._lease_async import BlobLeaseClient
+from ._models import PageRangePaged
+from ._upload_helpers import (
+    upload_append_blob,
+    upload_block_blob,
+    upload_page_blob
+)
+from .._blob_client import StorageAccountHostsMixin
+from .._blob_client_helpers import (
+    _abort_copy_options,
+    _append_block_from_url_options,
+    _append_block_options,
+    _clear_page_options,
+    _commit_block_list_options,
+    _create_append_blob_options,
+    _create_page_blob_options,
+    _create_snapshot_options,
+    _delete_blob_options,
+    _download_blob_options,
+    _format_url,
+    _from_blob_url,
+    _get_blob_tags_options,
+    _get_block_list_result,
+    _get_page_ranges_options,
+    _parse_url,
+    _resize_blob_options,
+    _seal_append_blob_options,
+    _set_blob_metadata_options,
+    _set_blob_tags_options,
+    _set_http_headers_options,
+    _set_sequence_number_options,
+    _stage_block_from_url_options,
+    _stage_block_options,
+    _start_copy_from_url_options,
+    _upload_blob_from_url_options,
+    _upload_blob_options,
+    _upload_page_options,
+    _upload_pages_from_url_options
+)
+from .._deserialize import (
+    deserialize_blob_properties,
+    deserialize_pipeline_response_into_cls,
+    get_page_ranges_result,
+    parse_tags
+)
+from .._encryption import StorageEncryptionMixin, _ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import CpkInfo
+from .._models import BlobType, BlobBlock, BlobProperties, PageRange
+from .._serialize import get_access_conditions, get_api_version, get_modify_conditions, get_version_id
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.response_handlers import process_storage_error, return_response_headers
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.policies import AsyncHTTPPolicy
+    from azure.storage.blob.aio import ContainerClient
+    from .._models import (
+        ContentSettings,
+        ImmutabilityPolicy,
+        PremiumPageBlobTier,
+        SequenceNumberAction,
+        StandardBlobTier
+    )
+
+
+class BlobClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin, StorageEncryptionMixin):  # type: ignore [misc] # pylint: disable=too-many-public-methods
+    """A client to interact with a specific blob, although that blob may not yet exist.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the blob,
+        use the :func:`from_blob_url` classmethod.
+    :param container_name: The container name for the blob.
+    :type container_name: str
+    :param blob_name: The name of the blob with which to interact. If specified, this value will override
+        a blob value specified in the blob URL.
+    :type blob_name: str
+    :param str snapshot:
+        The optional blob snapshot on which to operate. This can be the snapshot ID string
+        or the response returned from :func:`create_snapshot`.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        any excess will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+        specifies the version of the blob to operate on.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_client]
+            :end-before: [END create_blob_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a URL to a public blob (no auth needed).
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_client_sas_url]
+            :end-before: [END create_blob_client_sas_url]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobClient from a SAS URL to a blob.
+    """
+    def __init__(
+            self, account_url: str,
+            container_name: str,
+            blob_name: str,
+            snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+            credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+            **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        parsed_url, sas_token, path_snapshot = _parse_url(
+            account_url=account_url,
+            container_name=container_name,
+            blob_name=blob_name)
+        self.container_name = container_name
+        self.blob_name = blob_name
+
+        if snapshot is not None and hasattr(snapshot, 'snapshot'):
+            self.snapshot = snapshot.snapshot
+        elif isinstance(snapshot, dict):
+            self.snapshot = snapshot['snapshot']
+        else:
+            self.snapshot = snapshot or path_snapshot
+        self.version_id = kwargs.pop('version_id', None)
+
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential, snapshot=self.snapshot)
+        super(BlobClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+        self._configure_encryption(kwargs)
+
+    def _format_url(self, hostname: str) -> str:
+        return _format_url(
+            container_name=self.container_name,
+            scheme=self.scheme,
+            blob_name=self.blob_name,
+            query_str=self._query_str,
+            hostname=hostname
+        )
+
+    @classmethod
+    def from_blob_url(
+        cls, blob_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobClient from a blob url. This doesn't support customized blob url with '/' in blob name.
+
+        :param str blob_url:
+            The full endpoint URL to the Blob, including SAS token and snapshot if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type blob_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]]  # pylint: disable=line-too-long
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`. If specified, this will override
+            the snapshot in the url.
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+        """
+        account_url, container_name, blob_name, path_snapshot = _from_blob_url(blob_url=blob_url, snapshot=snapshot)
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=path_snapshot, credential=credential, **kwargs
+        )
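A small sketch of the round trip this classmethod performs, assuming a hypothetical SAS URL; the container name, blob name, and any snapshot are parsed back out of the URL:

```python
from azure.storage.blob.aio import BlobClient

# Hypothetical URL; a SAS token or snapshot query parameter is honored.
client = BlobClient.from_blob_url(
    "https://myaccount.blob.core.windows.net/mycontainer/report.csv?<sas-token>"
)
print(client.container_name)  # mycontainer
print(client.blob_name)       # report.csv
```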
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        container_name: str,
+        blob_name: str,
+        snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name: The container name for the blob.
+        :type container_name: str
+        :param blob_name: The name of the blob with which to interact.
+        :type blob_name: str
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`create_snapshot`.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]]  # pylint: disable=line-too-long
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob client.
+        :rtype: ~azure.storage.blob.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_blob]
+                :end-before: [END auth_from_connection_string_blob]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, blob_name=blob_name,
+            snapshot=snapshot, credential=credential, **kwargs
+        )
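A minimal sketch, assuming the connection string is supplied via a hypothetical environment variable; `parse_connection_str` extracts the account URL and shared key from it:

```python
import os

from azure.storage.blob.aio import BlobClient

# AZURE_STORAGE_CONNECTION_STRING is a hypothetical variable name here.
client = BlobClient.from_connection_string(
    os.environ["AZURE_STORAGE_CONNECTION_STRING"],
    container_name="mycontainer",   # hypothetical container
    blob_name="report.csv",         # hypothetical blob
)
```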
+
+    @distributed_trace_async
+    async def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account in which the blob resides.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+        """
+        try:
+            return cast(Dict[str, str],
+                        await self._client.blob.get_account_info(cls=return_response_headers, **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
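A quick sketch of reading the two documented keys (URL hypothetical):

```python
import asyncio

from azure.storage.blob.aio import BlobClient

async def main() -> None:
    async with BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/report.csv?<sas-token>"  # hypothetical
    ) as blob:
        info = await blob.get_account_information()
        print(info["sku_name"], info["account_kind"])

asyncio.run(main())
```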
+
+    @distributed_trace_async
+    async def upload_blob_from_url(
+        self, source_url: str,
+        *,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        Creates a new Block Blob where the content of the blob is read from a given URL.
+        The content of an existing blob is overwritten with the new blob.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            The source must either be public or must be authenticated via a shared
+            access signature as part of the url or using the source_authorization keyword.
+            If the source is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :keyword dict(str, str) metadata:
+            Name-value pairs associated with the blob as metadata.
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError.
+        :keyword bool include_source_blob_properties:
+            Indicates if properties from the source blob should be copied. Defaults to True.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :paramtype tags: dict(str, str)
+        :keyword bytearray source_content_md5:
+            Specify the md5 that is used to verify the integrity of the source bytes.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword destination_lease:
+            The lease ID specified for this header must match the lease ID of the
+            destination blob. If the request does not include the lease ID or it is not
+            valid, the operation fails with status code 412 (Precondition Failed).
+        :paramtype destination_lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Response from creating a new block blob for a given URL.
+        :rtype: Dict[str, Any]
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_blob_from_url_options(
+            source_url=source_url,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.put_blob_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
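A sketch of a server-side copy into a new block blob using this method, with hypothetical source and destination URLs; the source here is assumed public, so no source_authorization is passed:

```python
import asyncio

from azure.storage.blob.aio import BlobClient

async def main() -> None:
    async with BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/copy.bin?<sas-token>"  # hypothetical
    ) as dest:
        # The service reads directly from source_url; no blob data flows
        # through this client.
        await dest.upload_blob_from_url(
            "https://otheraccount.blob.core.windows.net/public/source.bin",  # hypothetical
            overwrite=True,
            include_source_blob_properties=True,
        )

asyncio.run(main())
```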
+
+    @distributed_trace_async
+    async def upload_blob(
+        self, data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[bytes]],
+        blob_type: Union[str, BlobType] = BlobType.BLOCKBLOB,
+        length: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param data: The blob data to upload.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If set overwrite=True, then the existing
+            append blob will be deleted, and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            If specified, upload_blob only succeeds if the
+            blob's lease is active and matches this ID.
+            Required if the blob has an active lease.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+            Currently this parameter of upload_blob() API is for BlockBlob only.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+            Currently this parameter of upload_blob() API is for BlockBlob only.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the blob in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the blob or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the
+            service and the timeout will apply to each call individually.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START upload_a_blob]
+                :end-before: [END upload_a_blob]
+                :language: python
+                :dedent: 16
+                :caption: Upload a blob to the container.
+        """
+        if self.require_encryption and not self.key_encryption_key:
+            raise ValueError("Encryption required but no key was provided.")
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_blob_options(
+            data=data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            encryption_options={
+                'required': self.require_encryption,
+                'version': self.encryption_version,
+                'key': self.key_encryption_key,
+                'resolver': self.key_resolver_function
+            },
+            config=self._config,
+            sdk_moniker=self._sdk_moniker,
+            client=self._client,
+            **kwargs)
+        if blob_type == BlobType.BlockBlob:
+            return cast(Dict[str, Any], await upload_block_blob(**options))
+        if blob_type == BlobType.PageBlob:
+            return cast(Dict[str, Any], await upload_page_blob(**options))
+        return cast(Dict[str, Any], await upload_append_blob(**options))
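A minimal end-to-end sketch of this method with a file stream; the URL and file name are hypothetical, and max_concurrency only matters once the payload exceeds max_single_put_size and is chunked:

```python
import asyncio

from azure.storage.blob import BlobType
from azure.storage.blob.aio import BlobClient

async def main() -> None:
    async with BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/data.bin?<sas-token>"  # hypothetical
    ) as blob:
        with open("data.bin", "rb") as stream:  # hypothetical local file
            props = await blob.upload_blob(
                stream,
                blob_type=BlobType.BLOCKBLOB,
                overwrite=True,
                max_concurrency=4,  # parallel block uploads for large data
            )
        print(props["etag"])

asyncio.run(main())
```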
+
+    @overload
+    async def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: str,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[str]:
+        ...
+
+    @overload
+    async def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: None = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[bytes]:
+        ...
+
+    @distributed_trace_async
+    async def download_blob(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: Union[str, None] = None,
+        **kwargs: Any
+    ) -> Union[StorageStreamDownloader[str], StorageStreamDownloader[bytes]]:
+        """Downloads a blob to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the blob into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param int offset:
+            Start of byte range to use for downloading a section of the blob.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to download.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default), will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. If specified, download_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when transferring the blob in chunks.
+            This option does not affect the underlying connection pool, and may
+            require a separate configuration of the connection pool.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the total size of the download.
+        :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the
+            service and the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader)
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START download_a_blob]
+                :end-before: [END download_a_blob]
+                :language: python
+                :dedent: 16
+                :caption: Download a blob.
+        """
+        if self.require_encryption and not (self.key_encryption_key or self.key_resolver_function):
+            raise ValueError("Encryption required but no key was provided.")
+        if length is not None and offset is None:
+            raise ValueError("Offset value must not be None if length is set.")
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _download_blob_options(
+            blob_name=self.blob_name,
+            container_name=self.container_name,
+            version_id=get_version_id(self.version_id, kwargs),
+            offset=offset,
+            length=length,
+            encoding=encoding,
+            encryption_options={
+                'required': self.require_encryption,
+                'version': self.encryption_version,
+                'key': self.key_encryption_key,
+                'resolver': self.key_resolver_function
+            },
+            config=self._config,
+            sdk_moniker=self._sdk_moniker,
+            client=self._client,
+            **kwargs)
+        downloader = StorageStreamDownloader(**options)
+        await downloader._setup()  # pylint: disable=protected-access
+        return downloader
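A sketch of both read paths on the returned downloader (URL hypothetical): readall() buffers the whole blob, while chunks() streams it:

```python
import asyncio

from azure.storage.blob.aio import BlobClient

async def main() -> None:
    async with BlobClient.from_blob_url(
        "https://myaccount.blob.core.windows.net/mycontainer/data.bin?<sas-token>"  # hypothetical
    ) as blob:
        # Whole-blob read.
        downloader = await blob.download_blob(max_concurrency=4)
        data = await downloader.readall()
        print(len(data))

        # Chunked, memory-friendly read.
        downloader = await blob.download_blob()
        async for chunk in downloader.chunks():
            print(len(chunk))  # replace with real per-chunk handling

asyncio.run(main())
```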
+
+    @distributed_trace_async
+    async def delete_blob(self, delete_snapshots: Optional[str] = None, **kwargs: Any) -> None:
+        """Marks the specified blob for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob()
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob
+        and retains the blob for a specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        A soft-deleted blob is accessible through :func:`~ContainerClient.list_blobs()` by specifying the
+        `include=['deleted']` option, and can be restored using the :func:`undelete` operation.
+
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. If specified, delete_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value,
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START delete_blob]
+                :end-before: [END delete_blob]
+                :language: python
+                :dedent: 16
+                :caption: Delete a blob.
+        """
+        options = _delete_blob_options(
+            snapshot=self.snapshot,
+            version_id=get_version_id(self.version_id, kwargs),
+            delete_snapshots=delete_snapshots,
+            **kwargs)
+        try:
+            await self._client.blob.delete(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def undelete_blob(self, **kwargs: Any) -> None:
+        """Restores soft-deleted blobs or snapshots.
+
+        The operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        If blob versioning is enabled, the base blob cannot be restored using this
+        method. Instead use :func:`start_copy_from_url` with the URL of the blob version
+        you wish to promote to the current version.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START undelete_blob]
+                :end-before: [END undelete_blob]
+                :language: python
+                :dedent: 12
+                :caption: Undeleting a blob.
+        """
+        try:
+            await self._client.blob.undelete(timeout=kwargs.pop('timeout', None), **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if a blob exists with the defined parameters, and returns
+        False otherwise.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to check if it exists.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the blob exists, False otherwise.
+        :rtype: bool
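+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` is an illustrative name for
+            an already-constructed ``BlobClient``:
+
+            .. code-block:: python
+
+                # Check for the blob before reading its properties.
+                if await blob_client.exists():
+                    props = await blob_client.get_blob_properties()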
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        try:
+            await self._client.blob.get_properties(
+                snapshot=self.snapshot,
+                version_id=version_id,
+                **kwargs)
+            return True
+        # Encrypted with CPK
+        except ResourceExistsError:
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
+    @distributed_trace_async
+    async def get_blob_properties(self, **kwargs: Any) -> BlobProperties:
+        """Returns all user-defined metadata, standard HTTP properties, and
+        system properties for the blob. It does not return the content of the blob.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to get properties for.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: BlobProperties
+        :rtype: ~azure.storage.blob.BlobProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START get_blob_properties]
+                :end-before: [END get_blob_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting the properties for a blob.
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        version_id = get_version_id(self.version_id, kwargs)
+        cpk = kwargs.pop('cpk', None)
+        cpk_info = None
+        if cpk:
+            if self.scheme.lower() != 'https':
+                raise ValueError("Customer provided encryption key must be used over HTTPS.")
+            cpk_info = CpkInfo(encryption_key=cpk.key_value, encryption_key_sha256=cpk.key_hash,
+                               encryption_algorithm=cpk.algorithm)
+        try:
+            cls_method = kwargs.pop('cls', None)
+            if cls_method:
+                kwargs['cls'] = partial(deserialize_pipeline_response_into_cls, cls_method)
+            blob_props = await self._client.blob.get_properties(
+                timeout=kwargs.pop('timeout', None),
+                version_id=version_id,
+                snapshot=self.snapshot,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=kwargs.pop('cls', None) or deserialize_blob_properties,
+                cpk_info=cpk_info,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        blob_props.name = self.blob_name
+        if isinstance(blob_props, BlobProperties):
+            blob_props.container = self.container_name
+            blob_props.snapshot = self.snapshot
+        return cast(BlobProperties, blob_props)
+
+    @distributed_trace_async
+    async def set_http_headers(
+        self, content_settings: Optional["ContentSettings"] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Sets system properties on the blob.
+
+        If one property is set for the content_settings, all properties will be overridden.
+
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
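+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` and the header values are
+            illustrative assumptions, not part of this method's contract:
+
+            .. code-block:: python
+
+                from azure.storage.blob import ContentSettings
+
+                # Setting any field overwrites all system properties,
+                # so supply every field you want preserved.
+                await blob_client.set_http_headers(
+                    content_settings=ContentSettings(
+                        content_type="application/json",
+                        cache_control="max-age=3600"))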
+        """
+        options = _set_http_headers_options(content_settings=content_settings, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.blob.set_http_headers(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_blob_metadata(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets user-defined metadata for the blob as one or more name-value pairs.
+
+        :param metadata:
+            Dict containing name and value pairs. Each call to this operation
+            replaces all existing metadata attached to the blob. To remove all
+            metadata from the blob, call this operation with no metadata headers.
+        :type metadata: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Union[str, datetime]]
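+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` and the metadata values are
+            illustrative:
+
+            .. code-block:: python
+
+                # Replaces any metadata already attached to the blob.
+                await blob_client.set_blob_metadata(metadata={"category": "reports"})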
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _set_blob_metadata_options(metadata=metadata, **kwargs)
+        try:
+            return cast(Dict[str, Union[str, datetime]], await self._client.blob.set_metadata(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_immutability_policy(
+        self, immutability_policy: "ImmutabilityPolicy",
+        **kwargs: Any
+    ) -> Dict[str, str]:
+        """The Set Immutability Policy operation sets the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to set the immutability policy on.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Response headers as a key-value dictionary.
+        :rtype: Dict[str, str]
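+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` and the 7-day expiry are
+            illustrative assumptions:
+
+            .. code-block:: python
+
+                from datetime import datetime, timedelta, timezone
+                from azure.storage.blob import ImmutabilityPolicy
+
+                # An "Unlocked" policy can still be shortened or deleted later.
+                policy = ImmutabilityPolicy(
+                    expiry_time=datetime.now(timezone.utc) + timedelta(days=7),
+                    policy_mode="Unlocked")
+                await blob_client.set_immutability_policy(policy)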
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        kwargs['immutability_policy_expiry'] = immutability_policy.expiry_time
+        kwargs['immutability_policy_mode'] = immutability_policy.policy_mode
+        return cast(Dict[str, str], await self._client.blob.set_immutability_policy(
+            cls=return_response_headers, version_id=version_id, **kwargs))
+
+    @distributed_trace_async
+    async def delete_immutability_policy(self, **kwargs: Any) -> None:
+        """The Delete Immutability Policy operation deletes the immutability policy on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete the immutability policy on.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
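+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` is an illustrative name for
+            an existing ``BlobClient``:
+
+            .. code-block:: python
+
+                # Only an unlocked immutability policy can be deleted.
+                await blob_client.delete_immutability_policy()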
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        await self._client.blob.delete_immutability_policy(version_id=version_id, **kwargs)
+
+    @distributed_trace_async
+    async def set_legal_hold(self, legal_hold: bool, **kwargs: Any) -> Dict[str, Union[str, datetime, bool]]:
+        """The Set Legal Hold operation sets a legal hold on the blob.
+
+        .. versionadded:: 12.10.0
+            This operation was introduced in API version '2020-10-02'.
+
+        :param bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to set the legal hold on.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Response headers as a key-value dictionary.
+        :rtype: Dict[str, Union[str, datetime, bool]]
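+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` is illustrative:
+
+            .. code-block:: python
+
+                # Place a legal hold; pass False later to clear it.
+                await blob_client.set_legal_hold(True)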
+        """
+
+        version_id = get_version_id(self.version_id, kwargs)
+        return cast(Dict[str, Union[str, datetime, bool]], await self._client.blob.set_legal_hold(
+            legal_hold, version_id=version_id, cls=return_response_headers, **kwargs))
+
+    @distributed_trace_async
+    async def create_page_blob(
+        self, size: int,
+        content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        premium_page_blob_tier: Optional[Union[str, "PremiumPageBlobTier"]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a new Page Blob of the specified size.
+
+        :param int size:
+            This specifies the maximum size for the page blob, up to 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :param ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword int sequence_number:
+            Only for Page blobs. The sequence number is a user-controlled value that you can use to
+            track requests. The value of the sequence number must be between 0
+            and 2^63 - 1. The default value is 0.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Union[str, ~datetime.datetime]]
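+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` and the 1024-byte size
+            (a multiple of 512) are illustrative:
+
+            .. code-block:: python
+
+                # Creates an empty page blob; pages are written separately.
+                await blob_client.create_page_blob(size=1024)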
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_page_blob_options(
+            size=size,
+            content_settings=content_settings,
+            metadata=metadata,
+            premium_page_blob_tier=premium_page_blob_tier,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.create(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_append_blob(
+        self, content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a new Append Blob. This operation creates a new 0-length append blob. The content
+        of any existing blob is overwritten with the newly initialized append blob. To add content to
+        the append blob, call the :func:`append_block` or :func:`append_block_from_url` method.
+
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Union[str, ~datetime.datetime]]
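+
+        .. admonition:: Example:
+
+            A minimal usage sketch; ``blob_client`` and the appended bytes are
+            illustrative:
+
+            .. code-block:: python
+
+                # Create the empty append blob, then add content block by block.
+                await blob_client.create_append_blob()
+                await blob_client.append_block(b"first log entry")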
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_append_blob_options(
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Union[str, datetime]], await self._client.append_blob.create(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def create_snapshot(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Creates a snapshot of the blob.
+
+        A snapshot is a read-only version of a blob that's taken at a point in time.
+        It can be read, copied, or deleted, but not modified. Snapshots provide a way
+        to back up a blob as it appears at a moment in time.
+
+        A snapshot of a blob has the same name as the base blob from which the snapshot
+        is taken, with a DateTime value appended to indicate the time at which the
+        snapshot was taken.
+
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Snapshot ID, Etag, and last modified).
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START create_blob_snapshot]
+                :end-before: [END create_blob_snapshot]
+                :language: python
+                :dedent: 12
+                :caption: Create a snapshot of the blob.
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _create_snapshot_options(metadata=metadata, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.blob.create_snapshot(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def start_copy_from_url(
+        self, source_url: str,
+        metadata: Optional[Dict[str, str]] = None,
+        incremental_copy: bool = False,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Copies a blob from the given URL.
+
+        This operation returns a dictionary containing `copy_status` and `copy_id`,
+        which can be used to check the status of or abort the copy operation.
+        `copy_status` will be 'success' if the copy completed synchronously or
+        'pending' if the copy has been started asynchronously. For asynchronous copies,
+        the status can be checked by polling the :func:`get_blob_properties` method and
+        checking the copy status. Set `requires_sync` to True to force the copy to be synchronous.
+        The Blob service copies blobs on a best-effort basis.
+
+        The source blob for a copy operation may be a block blob, an append blob,
+        or a page blob. If the destination blob already exists, it must be of the
+        same blob type as the source blob. Any existing destination blob will be
+        overwritten. The destination blob cannot be modified while a copy operation
+        is in progress.
+
+        When copying from a page blob, the Blob service creates a destination page
+        blob of the source blob's length, initially containing all zeroes. Then
+        the source page ranges are enumerated, and non-empty ranges are copied.
+
+        For a block blob or an append blob, the Blob service creates a committed
+        blob of zero length before returning from this operation. When copying
+        from a block blob, all committed blocks and their block IDs are copied.
+        Uncommitted blocks are not copied. At the end of the copy operation, the
+        destination blob will have the same committed block count as the source.
+
+        When copying from an append blob, all committed blocks are copied. At the
+        end of the copy operation, the destination blob will have the same committed
+        block count as the source.
+
+        :param str source_url:
+            A URL of up to 2 KB in length that specifies a file or blob.
+            The value should be URL-encoded as it would appear in a request URI.
+            If the source is in another account, the source must either be public
+            or must be authenticated via a shared access signature. If the source
+            is public, no authentication is required.
+            Examples:
+            https://myaccount.blob.core.windows.net/mycontainer/myblob
+
+            https://myaccount.blob.core.windows.net/mycontainer/myblob?snapshot=<DateTime>
+
+            https://otheraccount.blob.core.windows.net/mycontainer/myblob?sastoken
+        :param metadata:
+            Name-value pairs associated with the blob as metadata. If no name-value
+            pairs are specified, the operation will copy the metadata from the
+            source blob or file to the destination blob. If one or more name-value
+            pairs are specified, the destination blob is created with the specified
+            metadata, and metadata is not copied from the source blob or file.
+        :type metadata: dict(str, str)
+        :param bool incremental_copy:
+            Copies the snapshot of the source page blob to a destination page blob.
+            The snapshot is copied such that only the differential changes between
+            it and the previously copied snapshot are transferred to the destination.
+            The copied snapshots are complete copies of the original snapshot and
+            can be read or copied from as usual. Defaults to False.
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags.  Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_).
+
+            The (case-sensitive) literal "COPY" can instead be passed to copy tags from the source blob.
+            This option is only available when `incremental_copy=False` and `requires_sync=True`.
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str) or Literal["COPY"]
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specifies whether a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source
+            blob has been modified since the specified date/time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only if the source blob
+            has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has been modified since the specified date/time.
+            If the destination blob has not been modified, the Blob service returns
+            status code 412 (Precondition Failed).
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this conditional header to copy the blob only
+            if the destination blob has not been modified since the specified
+            date/time. If the destination blob has been modified, the Blob service
+            returns status code 412 (Precondition Failed).
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword destination_lease:
+            The lease ID specified for this header must match the lease ID of the
+            destination blob. If the request does not include the lease ID or it is not
+            valid, the operation fails with status code 412 (Precondition Failed).
+        :paramtype destination_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword source_lease:
+            Specify this to perform the Copy Blob operation only if
+            the lease ID given matches the active lease ID of the source blob.
+        :paramtype source_lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword bool seal_destination_blob:
+            Seal the destination append blob. This operation is only for append blob.
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool requires_sync:
+            Enforces that the service will not return a response until the copy is complete.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string. This option is only available when `incremental_copy` is
+            set to False and `requires_sync` is set to True.
+
+            .. versionadded:: 12.9.0
+
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the sync copied blob. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.10.0
+
+        :returns: A dictionary of copy properties (etag, last_modified, copy_id, copy_status).
+        :rtype: dict[str, Union[str, ~datetime.datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START copy_blob_from_url]
+                :end-before: [END copy_blob_from_url]
+                :language: python
+                :dedent: 16
+                :caption: Copy a blob from a URL.
+        """
+        options = _start_copy_from_url_options(
+            source_url=source_url,
+            metadata=metadata,
+            incremental_copy=incremental_copy,
+            **kwargs)
+        try:
+            if incremental_copy:
+                return cast(Dict[str, Union[str, datetime]], await self._client.page_blob.copy_incremental(**options))
+            return cast(Dict[str, Union[str, datetime]], await self._client.blob.start_copy_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def abort_copy(
+        self, copy_id: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> None:
+        """Abort an ongoing copy operation.
+
+        This will leave a destination blob with zero length and full metadata.
+        This will raise an error if the copy operation has already ended.
+
+        :param copy_id:
+            The copy operation to abort. This can be either an ID string, a dictionary
+            of copy properties, or an instance of BlobProperties.
+        :type copy_id: str or Dict[str, Any] or ~azure.storage.blob.BlobProperties
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START abort_copy_blob_from_url]
+                :end-before: [END abort_copy_blob_from_url]
+                :language: python
+                :dedent: 16
+                :caption: Abort copying a blob from URL.
+        """
+        options = _abort_copy_options(copy_id, **kwargs)
+        try:
+            await self._client.blob.abort_copy_from_url(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def acquire_lease(
+        self, lease_duration: int = -1,
+        lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> BlobLeaseClient:
+        """Requests a new lease.
+
+        If the blob does not have an active lease, the Blob
+        Service creates a lease on the blob and returns a new lease.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The Blob Service
+            returns 400 (Invalid request) if the proposed lease ID is not
+            in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A BlobLeaseClient object.
+        :rtype: ~azure.storage.blob.aio.BlobLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START acquire_lease_on_blob]
+                :end-before: [END acquire_lease_on_blob]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on a blob.
+        """
+        lease = BlobLeaseClient(self, lease_id=lease_id)
+        await lease.acquire(lease_duration=lease_duration, **kwargs)
+        return lease
+
+    @distributed_trace_async
+    async def set_standard_blob_tier(self, standard_blob_tier: Union[str, "StandardBlobTier"], **kwargs: Any) -> None:
+        """This operation sets the tier on a block blob.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on the blob. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        version_id = get_version_id(self.version_id, kwargs)
+        if standard_blob_tier is None:
+            raise ValueError("A StandardBlobTier must be specified")
+        try:
+            await self._client.blob.set_tier(
+                tier=standard_blob_tier,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                lease_access_conditions=access_conditions,
+                version_id=version_id,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
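+
+    # A minimal usage sketch (not part of this module; `blob_client` is a
+    # hypothetical authenticated aio BlobClient): archive a block blob, then
+    # request a high-priority rehydration back to the hot tier.
+    #
+    #     from azure.storage.blob import RehydratePriority, StandardBlobTier
+    #     await blob_client.set_standard_blob_tier(StandardBlobTier.ARCHIVE)
+    #     await blob_client.set_standard_blob_tier(
+    #         StandardBlobTier.HOT, rehydrate_priority=RehydratePriority.HIGH)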
+
+    @distributed_trace_async
+    async def stage_block(
+        self, block_id: str,
+        data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new block to be committed as part of a blob.
+
+        :param str block_id: A string value that identifies the block.
+             The string should be less than or equal to 64 bytes in size.
+             For a given blob, the block_id must be the same size for each block.
+        :param data: The blob data.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param int length: Size of the block.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob property dict.
+        :rtype: Dict[str, Any]
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _stage_block_options(
+            block_id=block_id,
+            data=data,
+            length=length,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.stage_block(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
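+
+    # Sketch (assumes `blob_client` as above; IDs are illustrative): each call
+    # uploads one uncommitted block. For a given blob every block_id must have
+    # the same length, so fixed-width hex IDs are used here. Nothing becomes
+    # readable until commit_block_list is called.
+    #
+    #     import uuid
+    #     block_ids = [uuid.uuid4().hex, uuid.uuid4().hex]
+    #     await blob_client.stage_block(block_ids[0], b"first chunk")
+    #     await blob_client.stage_block(block_ids[1], b"second chunk")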
+
+    @distributed_trace_async
+    async def stage_block_from_url(
+        self, block_id: str,
+        source_url: str,
+        source_offset: Optional[int] = None,
+        source_length: Optional[int] = None,
+        source_content_md5: Optional[Union[bytes, bytearray]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """Creates a new block to be committed as part of a blob where
+        the contents are read from a URL.
+
+        :param str block_id: A string value that identifies the block.
+             The string should be less than or equal to 64 bytes in size.
+             For a given blob, the block_id must be the same size for each block.
+        :param str source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+            shared access signature attached.
+        :param int source_offset:
+            Start of byte range to use for the block.
+            Must be set if source length is provided.
+        :param int source_length: The size of the block in bytes.
+        :param bytearray source_content_md5:
+            Specify the MD5 hash calculated for the range of
+            bytes that must be read from the copy source.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Blob property dict.
+        :rtype: Dict[str, Any]
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _stage_block_from_url_options(
+            block_id=block_id,
+            source_url=source_url,
+            source_offset=source_offset,
+            source_length=source_length,
+            source_content_md5=source_content_md5,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.stage_block_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
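+
+    # Sketch (URL and SAS are placeholders): stage a 4 MiB block directly from
+    # a readable source URL so the data is copied server-side and never
+    # transits the client.
+    #
+    #     await blob_client.stage_block_from_url(
+    #         block_id="0" * 32,
+    #         source_url="https://<account>.blob.core.windows.net/src/big.bin?<SAS>",
+    #         source_offset=0,
+    #         source_length=4 * 1024 * 1024)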
+
+    @distributed_trace_async
+    async def get_block_list(
+        self, block_list_type: str = "committed",
+        **kwargs: Any
+    ) -> Tuple[List[BlobBlock], List[BlobBlock]]:
+        """The Get Block List operation retrieves the list of blocks that have
+        been uploaded as part of a block blob.
+
+        :param str block_list_type:
+            Specifies whether to return the list of committed
+            blocks, the list of uncommitted blocks, or both lists together.
+            Possible values include: 'committed', 'uncommitted', 'all'
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A tuple of two lists - committed and uncommitted blocks
+        :rtype: Tuple[List[BlobBlock], List[BlobBlock]]
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            blocks = await self._client.block_blob.get_block_list(
+                list_type=block_list_type,
+                snapshot=self.snapshot,
+                timeout=kwargs.pop('timeout', None),
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return _get_block_list_result(blocks)
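+
+    # Sketch: inspect which blocks are committed versus still pending
+    # (`blob_client` is hypothetical; each BlobBlock exposes `id` and `size`).
+    #
+    #     committed, uncommitted = await blob_client.get_block_list("all")
+    #     print([b.id for b in committed], [b.id for b in uncommitted])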
+
+    @distributed_trace_async
+    async def commit_block_list(
+        self, block_list: List[BlobBlock],
+        content_settings: Optional["ContentSettings"] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """The Commit Block List operation writes a blob by specifying the list of
+        block IDs that make up the blob.
+
+        :param list block_list:
+            List of BlobBlock objects identifying the blocks to commit.
+        :param ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict[str, str]
+        :keyword tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+
+            .. versionadded:: 12.4.0
+
+        :paramtype tags: dict(str, str)
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~azure.storage.blob.ImmutabilityPolicy immutability_policy:
+            Specifies the immutability policy of a blob, blob snapshot or blob version.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool legal_hold:
+            Specified if a legal hold should be set on the blob.
+
+            .. versionadded:: 12.10.0
+                This was introduced in API version '2020-10-02'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the page content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _commit_block_list_options(
+            block_list=block_list,
+            content_settings=content_settings,
+            metadata=metadata,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.block_blob.commit_block_list(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
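+
+    # Sketch of the full two-phase block upload (hypothetical `blob_client`
+    # and data): stage the blocks, then commit them in the order they should
+    # appear in the blob.
+    #
+    #     from azure.storage.blob import BlobBlock
+    #     ids = ["%032d" % i for i in range(2)]
+    #     for block_id, chunk in zip(ids, (b"part-1", b"part-2")):
+    #         await blob_client.stage_block(block_id, chunk)
+    #     await blob_client.commit_block_list([BlobBlock(block_id=i) for i in ids])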
+
+    @distributed_trace_async
+    async def set_premium_page_blob_tier(self, premium_page_blob_tier: "PremiumPageBlobTier", **kwargs: Any) -> None:
+        """Sets the page blob tiers on the blob. This API is only supported for page blobs on premium accounts.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :rtype: None
+        """
+        access_conditions = get_access_conditions(kwargs.pop('lease', None))
+        mod_conditions = get_modify_conditions(kwargs)
+        if premium_page_blob_tier is None:
+            raise ValueError("A PremiumPageBlobTiermust be specified")
+        try:
+            await self._client.blob.set_tier(
+                tier=premium_page_blob_tier,
+                timeout=kwargs.pop('timeout', None),
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
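+
+    # Sketch (requires a page blob on a premium storage account;
+    # `blob_client` is hypothetical): move the blob to the P30 tier.
+    #
+    #     from azure.storage.blob import PremiumPageBlobTier
+    #     await blob_client.set_premium_page_blob_tier(PremiumPageBlobTier.P30)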
+
+    @distributed_trace_async
+    async def set_blob_tags(self, tags: Optional[Dict[str, str]] = None, **kwargs: Any) -> Dict[str, Any]:
+        """The Set Tags operation enables users to set tags on a blob or specific blob version, but not snapshot.
+            Each call to this operation replaces all existing tags attached to the blob. To remove all
+            tags from the blob, call this operation with no tags set.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param tags:
+            Name-value pairs associated with the blob as tags. Tags are case-sensitive.
+            The tag set may contain at most 10 tags. Tag keys must be between 1 and 128 characters,
+            and tag values must be between 0 and 256 characters.
+            Valid tag key and value characters include: lowercase and uppercase letters, digits (0-9),
+            space (' '), plus (+), minus (-), period (.), solidus (/), colon (:), equals (=), underscore (_)
+        :type tags: dict(str, str)
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to set tags on.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the tags content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified)
+        :rtype: Dict[str, Any]
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        options = _set_blob_tags_options(version_id=version_id, tags=tags, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.blob.set_tags(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
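+
+    # Sketch (hypothetical `blob_client`): each call replaces the whole tag
+    # set, so the second call below removes every tag.
+    #
+    #     await blob_client.set_blob_tags({"project": "demo", "stage": "raw"})
+    #     await blob_client.set_blob_tags()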
+
+    @distributed_trace_async
+    async def get_blob_tags(self, **kwargs: Any) -> Dict[str, str]:
+        """The Get Tags operation enables users to get tags on a blob or specific blob version, but not snapshot.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to retrieve tags from.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Key value pairs of blob tags.
+        :rtype: Dict[str, str]
+        """
+        version_id = get_version_id(self.version_id, kwargs)
+        options = _get_blob_tags_options(version_id=version_id, snapshot=self.snapshot, **kwargs)
+        try:
+            _, tags = await self._client.blob.get_tags(**options)
+            return cast(Dict[str, str], parse_tags(tags))
+        except HttpResponseError as error:
+            process_storage_error(error)
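+
+    # Sketch: tags come back as a plain dict (hypothetical `blob_client`).
+    #
+    #     tags = await blob_client.get_blob_tags()
+    #     print(tags.get("project"))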
+
+    @distributed_trace_async
+    async def get_page_ranges(
+        self, offset: Optional[int] = None,
+        length: Optional[int] = None,
+        previous_snapshot_diff: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """DEPRECATED: Returns the list of valid page ranges for a Page Blob or snapshot
+        of a page blob.
+
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param str previous_snapshot_diff:
+            The snapshot diff parameter that contains an opaque DateTime value that
+            specifies a previous blob snapshot to be compared
+            against a more recent snapshot or the current blob.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges; the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        warnings.warn(
+            "get_page_ranges is deprecated, use list_page_ranges instead",
+            DeprecationWarning
+        )
+
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot_diff,
+            **kwargs)
+        try:
+            if previous_snapshot_diff:
+                ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+            else:
+                ranges = await self._client.page_blob.get_page_ranges(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
+
+    @distributed_trace
+    def list_page_ranges(
+        self,
+        *,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        previous_snapshot: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[PageRange]:
+        """Returns the list of valid page ranges for a Page Blob or snapshot
+        of a page blob. If `previous_snapshot` is specified, the result will be
+        a diff of changes between the target blob and the previous snapshot.
+
+        :keyword int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword previous_snapshot:
+            A snapshot value that specifies that the response will contain only pages that were changed
+            between target blob and previous snapshot. Changed pages include both updated and cleared
+            pages. The target blob may be a snapshot, as long as the snapshot specified by `previous_snapshot`
+            is the older of the two.
+        :paramtype previous_snapshot: str or Dict[str, Any]
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int results_per_page:
+            The maximum number of page ranges to retrieve per API call.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An async iterable (auto-paging) of PageRange.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.PageRange]
+        """
+        results_per_page = kwargs.pop('results_per_page', None)
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            previous_snapshot_diff=previous_snapshot,
+            **kwargs)
+
+        if previous_snapshot:
+            command = partial(
+                self._client.page_blob.get_page_ranges_diff,
+                **options)
+        else:
+            command = partial(
+                self._client.page_blob.get_page_ranges,
+                **options)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=PageRangePaged)
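+
+    # Sketch (hypothetical `blob_client` for a page blob): iterate ranges
+    # asynchronously; each PageRange exposes `start`, `end` and `cleared`.
+    #
+    #     async for page_range in blob_client.list_page_ranges():
+    #         print(page_range.start, page_range.end, page_range.cleared)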
+
+    @distributed_trace_async
+    async def get_page_range_diff_for_managed_disk(
+        self, previous_snapshot_url: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]:
+        """Returns the list of valid page ranges for a managed disk or snapshot.
+
+        .. note::
+            This operation is only available for managed disk accounts.
+
+        .. versionadded:: 12.2.0
+            This operation was introduced in API version '2019-07-07'.
+
+        :param str previous_snapshot_url:
+            Specifies the URL of a previous snapshot of the managed disk.
+            The response will only contain pages that were changed between the target blob and
+            its previous snapshot.
+        :param int offset:
+            Start of byte range to use for getting valid page ranges.
+            If no length is given, all bytes after the offset will be searched.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for getting valid page ranges.
+            If length is given, offset must be provided.
+            This range will return valid page ranges from the offset start up to
+            the specified length.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns:
+            A tuple of two lists of page ranges as dictionaries with 'start' and 'end' keys.
+            The first element is the filled page ranges; the second element is the cleared page ranges.
+        :rtype: tuple(list(dict(str, int)), list(dict(str, int)))
+        """
+        options = _get_page_ranges_options(
+            snapshot=self.snapshot,
+            offset=offset,
+            length=length,
+            prev_snapshot_url=previous_snapshot_url,
+            **kwargs)
+        try:
+            ranges = await self._client.page_blob.get_page_ranges_diff(**options)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return get_page_ranges_result(ranges)
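+
+    # Sketch (managed disk accounts only; the snapshot URL is a placeholder):
+    # diff the current disk blob against an earlier snapshot.
+    #
+    #     filled, cleared = await blob_client.get_page_range_diff_for_managed_disk(
+    #         "https://<account>.blob.core.windows.net/disks/disk1.vhd?snapshot=<time>")
+    #     print(len(filled), "changed ranges,", len(cleared), "cleared ranges")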
+
+    @distributed_trace_async
+    async def set_sequence_number(
+        self, sequence_number_action: Union[str, "SequenceNumberAction"],
+        sequence_number: Optional[str] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets the blob sequence number.
+
+        :param str sequence_number_action:
+            This property indicates how the service should modify the blob's sequence
+            number. See :class:`~azure.storage.blob.SequenceNumberAction` for more information.
+        :param str sequence_number:
+            This property sets the blob's sequence number. The sequence number is a
+            user-controlled property that you can use to track requests and manage
+            concurrency issues.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        options = _set_sequence_number_options(sequence_number_action, sequence_number=sequence_number, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.update_sequence_number(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
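+
+    # Sketch (hypothetical `blob_client` for a page blob): pin the sequence
+    # number to an explicit value for concurrency control.
+    #
+    #     from azure.storage.blob import SequenceNumberAction
+    #     await blob_client.set_sequence_number(SequenceNumberAction.UPDATE, "7")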
+
+    @distributed_trace_async
+    async def resize_blob(self, size: int, **kwargs: Any) -> Dict[str, Union[str, datetime]]:
+        """Resizes a page blob to the specified size.
+
+        If the specified value is less than the current size of the blob,
+        then all pages above the specified value are cleared.
+
+        :param int size:
+            The new size for the page blob. The maximum size for a page blob is 1 TB.
+            The page blob size must be aligned to a 512-byte boundary.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _resize_blob_options(size=size, **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.resize(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
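+
+    # Sketch (hypothetical `blob_client`): grow a page blob to 1 MiB; the new
+    # size must be a multiple of 512 bytes.
+    #
+    #     await blob_client.resize_blob(1024 * 1024)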
+
+    @distributed_trace_async
+    async def upload_page(
+        self, page: bytes,
+        offset: int,
+        length: int,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """The Upload Pages operation writes a range of pages to a page blob.
+
+        :param bytes page:
+            Content of the page.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the page content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire if using http instead of https, as https (the default)
+            will already validate. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_page_options(
+            page=page,
+            offset=offset,
+            length=length,
+            **kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.upload_pages(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
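+
+    # Sketch (hypothetical `blob_client`): write a single 512-byte page at the
+    # start of the blob; offset and length must both be multiples of 512.
+    #
+    #     await blob_client.upload_page(b"x" * 512, offset=0, length=512)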
+
+    @distributed_trace_async
+    async def upload_pages_from_url(
+        self, source_url: str,
+        offset: int,
+        length: int,
+        source_offset: int,
+        **kwargs: Any
+    ) -> Dict[str, Any]:
+        """
+        The Upload Pages operation writes a range of pages to a page blob where
+        the contents are read from a URL.
+
+        :param str source_url:
+            The URL of the source data. It can point to any Azure Blob or File that is either public or has a
+            shared access signature attached.
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+            The service will read the same number of bytes as the destination range (length-offset).
+        :keyword bytes source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Response after uploading pages from specified URL.
+        :rtype: Dict[str, Any]
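+
+        .. admonition:: Example:
+
+            A minimal usage sketch (illustrative only, not an SDK sample; it
+            assumes ``blob`` is an authenticated async ``BlobClient`` for an
+            existing page blob and ``src_url`` is a readable source URL, e.g.
+            one carrying its own SAS token)::
+
+                # Copy the first 512-byte page from the source into this blob.
+                await blob.upload_pages_from_url(
+                    source_url=src_url, offset=0, length=512, source_offset=0)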
+        """
+
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _upload_pages_from_url_options(
+            source_url=source_url,
+            offset=offset,
+            length=length,
+            source_offset=source_offset,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.upload_pages_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def clear_page(self, offset: int, length: int, **kwargs: Any) -> Dict[str, Union[str, datetime]]:
+        """Clears a range of pages.
+
+        :param int offset:
+            Start of byte range to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :param int length:
+            Number of bytes to use for writing to a section of the blob.
+            Pages must be aligned to 512-byte boundaries: the start offset
+            must be a multiple of 512 and the length must be a multiple of
+            512.
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int if_sequence_number_lte:
+            If the blob's sequence number is less than or equal to
+            the specified value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_lt:
+            If the blob's sequence number is less than the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword int if_sequence_number_eq:
+            If the blob's sequence number is equal to the specified
+            value, the request proceeds; otherwise it fails.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag and last modified).
+        :rtype: dict(str, Any)
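+
+        .. admonition:: Example:
+
+            A minimal usage sketch (illustrative only; assumes ``blob`` is an
+            authenticated async ``BlobClient`` for an existing page blob). This
+            zeroes out the first 512-byte page::
+
+                await blob.clear_page(offset=0, length=512)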
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _clear_page_options(
+            offset=offset,
+            length=length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.page_blob.clear_pages(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def append_block(
+        self, data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]],
+        length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime, int]]:
+        """Commits a new block of data to the end of the existing append blob.
+
+        :param data:
+            Content of the block.
+        :type data: Union[bytes, str, Iterable[AnyStr], IO[AnyStr]]
+        :param int length:
+            Size of the block in bytes.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash of the block content. The storage
+            service checks the hash of the content that has arrived
+            with the hash that was sent. This is primarily valuable for detecting
+            bitflips on the wire when using http instead of https, as https (the default)
+            already validates the content. Note that this MD5 hash is not stored with the
+            blob.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
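+
+        .. admonition:: Example:
+
+            A minimal usage sketch (illustrative only; assumes ``blob`` is an
+            authenticated async ``BlobClient`` for an existing append blob)::
+
+                resp = await blob.append_block(b"log entry 42")
+                # resp carries the new etag, last-modified time, append offset
+                # and committed block count.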
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _append_block_options(
+            data=data,
+            length=length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Any], await self._client.append_blob.append_block(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def append_block_from_url(
+        self, copy_source_url: str,
+        source_offset: Optional[int] = None,
+        source_length: Optional[int] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime, int]]:
+        """
+        Creates a new block to be committed as part of a blob, where the contents are read from a source url.
+
+        :param str copy_source_url:
+            The URL of the source data. It can point to any Azure Blob or File that
+            is either public or has a shared access signature attached.
+        :param int source_offset:
+            This indicates the start of the range of bytes (inclusive) that has to be taken from the copy source.
+        :param int source_length:
+            This indicates the end of the range of bytes that has to be taken from the copy source.
+        :keyword bytearray source_content_md5:
+            If given, the service will calculate the MD5 hash of the block content and compare against this value.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the
+            AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            The destination ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The destination match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~datetime.datetime source_if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the source resource has been modified since the specified time.
+        :keyword ~datetime.datetime source_if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the source resource has not been modified since the specified date/time.
+        :keyword str source_etag:
+            The source ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `source_match_condition` parameter.
+        :keyword ~azure.core.MatchConditions source_match_condition:
+            The source match condition to use upon the etag.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword str source_authorization:
+            Authenticate as a service principal using a client secret to access a source blob. Ensure "bearer " is
+            the prefix of the source_authorization string.
+        :returns: Result after appending a new block.
+        :rtype: Dict[str, Union[str, datetime, int]]
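+
+        .. admonition:: Example:
+
+            A minimal usage sketch (illustrative only; assumes ``blob`` is an
+            authenticated async ``BlobClient`` for an existing append blob and
+            ``src_url`` is a readable source URL)::
+
+                # Append a range starting at byte 0 of the source.
+                await blob.append_block_from_url(
+                    copy_source_url=src_url, source_offset=0, source_length=1024)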
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        if kwargs.get('cpk') and self.scheme.lower() != 'https':
+            raise ValueError("Customer provided encryption key must be used over HTTPS.")
+        options = _append_block_from_url_options(
+            copy_source_url=copy_source_url,
+            source_offset=source_offset,
+            source_length=source_length,
+            **kwargs
+        )
+        try:
+            return cast(Dict[str, Union[str, datetime, int]],
+                        await self._client.append_blob.append_block_from_url(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def seal_append_blob(self, **kwargs: Any) -> Dict[str, Union[str, datetime, int]]:
+        """The Seal operation seals the Append Blob to make it read-only.
+
+            .. versionadded:: 12.4.0
+
+        :keyword int appendpos_condition:
+            Optional conditional header, used only for the Append Block operation.
+            A number indicating the byte offset to compare. Append Block will
+            succeed only if the append position is equal to this number. If it
+            is not, the request will fail with the AppendPositionConditionNotMet error
+            (HTTP status code 412 - Precondition Failed).
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Blob-updated property dict (Etag, last modified, append offset, committed block count).
+        :rtype: dict(str, Any)
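+
+        .. admonition:: Example:
+
+            A minimal usage sketch (illustrative only; assumes ``blob`` is an
+            authenticated async ``BlobClient`` for an append blob). After
+            sealing, further ``append_block`` calls will fail::
+
+                await blob.seal_append_blob()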
+        """
+        if self.require_encryption or (self.key_encryption_key is not None):
+            raise ValueError(_ERROR_UNSUPPORTED_METHOD_FOR_ENCRYPTION)
+        options = _seal_append_blob_options(**kwargs)
+        try:
+            return cast(Dict[str, Any], await self._client.append_blob.seal(**options))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    def _get_container_client(self) -> "ContainerClient":
+        """Get a client to interact with the blob's parent container.
+
+        The container need not already exist. Defaults to the current blob's credentials.
+
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_container_client_from_blob_client]
+                :end-before: [END get_container_client_from_blob_client]
+                :language: python
+                :dedent: 12
+                :caption: Get container client from blob object.
+        """
+        from ._container_client_async import ContainerClient
+        if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access
+            _pipeline = AsyncPipeline(
+                transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+                policies=cast(Iterable["AsyncHTTPPolicy"],
+                              self._pipeline._impl_policies) # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline
+        return ContainerClient(
+            f"{self.scheme}://{self.primary_hostname}", container_name=self.container_name,
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py
new file mode 100644
index 00000000..8f76aa98
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_blob_service_client_async.py
@@ -0,0 +1,799 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import functools
+import warnings
+from typing import (
+    Any, cast, Dict, Iterable, List, Optional, Union,
+    TYPE_CHECKING
+)
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged
+from azure.core.exceptions import HttpResponseError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from ._blob_client_async import BlobClient
+from ._container_client_async import ContainerClient
+from ._models import ContainerPropertiesPaged, FilteredBlobPaged
+from .._blob_service_client_helpers import _parse_url
+from .._deserialize import service_properties_deserialize, service_stats_deserialize
+from .._encryption import StorageEncryptionMixin
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import StorageServiceProperties, KeyInfo
+from .._models import BlobProperties, ContainerProperties, CorsRule
+from .._serialize import get_api_version
+from .._shared.base_client import parse_query, StorageAccountHostsMixin
+from .._shared.base_client_async import parse_connection_str
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper
+from .._shared.response_handlers import (
+    parse_to_internal_user_delegation_key,
+    process_storage_error,
+    return_response_headers,
+)
+from .._shared.models import LocationMode
+from .._shared.parser import _to_utc_datetime
+from .._shared.policies_async import ExponentialRetry
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from azure.core.pipeline.policies import AsyncHTTPPolicy
+    from datetime import datetime
+    from ._lease_async import BlobLeaseClient
+    from .._models import (
+        BlobAnalyticsLogging,
+        FilteredBlob,
+        Metrics,
+        PublicAccess,
+        RetentionPolicy,
+        StaticWebsite
+    )
+    from .._shared.models import UserDelegationKey
+
+
+class BlobServiceClient(  # type: ignore [misc]
+    AsyncStorageAccountHostsMixin,
+    StorageAccountHostsMixin,
+    StorageEncryptionMixin
+):
+    """A client to interact with the Blob Service at the account level.
+
+    This client provides operations to retrieve and configure the account properties
+    as well as list, create and delete containers within the account.
+    For operations relating to a specific container or blob, clients for those entities
+    can also be retrieved using the `get_client` functions.
+
+    :param str account_url:
+        The URL to the blob storage account. Any other entities included
+        in the URL path (e.g. container or blob) will be discarded. This URL can be optionally
+        authenticated with a SAS token.
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredential class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call,
+        anything beyond that will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_service_client]
+            :end-before: [END create_blob_service_client]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with account url and credential.
+
+        .. literalinclude:: ../samples/blob_samples_authentication_async.py
+            :start-after: [START create_blob_service_client_oauth]
+            :end-before: [END create_blob_service_client_oauth]
+            :language: python
+            :dedent: 8
+            :caption: Creating the BlobServiceClient with Azure Identity credentials.
+    """
+
+    def __init__(
+        self, account_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        parsed_url, sas_token = _parse_url(account_url=account_url)
+        _, sas_token = parse_query(parsed_url.query)
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(BlobServiceClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        self._client._config.version = get_api_version(kwargs)  # type: ignore [assignment]
+        self._configure_encryption(kwargs)
+
+    def _format_url(self, hostname):
+        """Format the endpoint URL according to the current location
+        mode hostname.
+
+        :param str hostname:
+            The hostname of the current location mode.
+        :returns: A formatted endpoint URL including current location mode hostname.
+        :rtype: str
+        """
+        return f"{self.scheme}://{hostname}/{self._query_str}"
+
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create BlobServiceClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredential class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A Blob service client.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string]
+                :end-before: [END auth_from_connection_string]
+                :language: python
+                :dedent: 8
+                :caption: Creating the BlobServiceClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(account_url, credential=credential, **kwargs)
+
+    @distributed_trace_async
+    async def get_user_delegation_key(
+        self, key_start_time: "datetime",
+        key_expiry_time: "datetime",
+        **kwargs: Any
+    ) -> "UserDelegationKey":
+        """
+        Obtain a user delegation key for the purpose of signing SAS tokens.
+        A token credential must be present on the service object for this request to succeed.
+
+        :param ~datetime.datetime key_start_time:
+            A DateTime value. Indicates when the key becomes valid.
+        :param ~datetime.datetime key_expiry_time:
+            A DateTime value. Indicates when the key stops being valid.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: The user delegation key.
+        :rtype: ~azure.storage.blob.UserDelegationKey
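+
+        .. admonition:: Example:
+
+            A minimal usage sketch (illustrative only; assumes ``service`` is a
+            ``BlobServiceClient`` authenticated with a token credential). This
+            requests a key valid for one hour::
+
+                from datetime import datetime, timedelta, timezone
+
+                start = datetime.now(timezone.utc)
+                key = await service.get_user_delegation_key(
+                    key_start_time=start,
+                    key_expiry_time=start + timedelta(hours=1))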
+        """
+        key_info = KeyInfo(start=_to_utc_datetime(key_start_time), expiry=_to_utc_datetime(key_expiry_time))
+        timeout = kwargs.pop('timeout', None)
+        try:
+            user_delegation_key = await self._client.service.get_user_delegation_key(key_info=key_info,
+                                                                                     timeout=timeout,
+                                                                                     **kwargs)  # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+        return parse_to_internal_user_delegation_key(user_delegation_key)  # type: ignore
+
+    @distributed_trace_async
+    async def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_account_info]
+                :end-before: [END get_blob_service_account_info]
+                :language: python
+                :dedent: 12
+                :caption: Getting account information for the blob service.
+        """
+        try:
+            return await self._client.service.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_service_stats(self, **kwargs: Any) -> Dict[str, Any]:
+        """Retrieves statistics related to replication for the Blob service.
+
+        It is only available when read-access geo-redundant replication is enabled for
+        the storage account.
+
+        With geo-redundant replication, Azure Storage maintains your data durably
+        in two locations. In both locations, Azure Storage constantly maintains
+        multiple healthy replicas of your data. The location where you read,
+        create, update, or delete data is the primary storage account location.
+        The primary location exists in the region you choose at the time you
+        create an account via the Azure portal, for
+        example, North Central US. The location to which your data is replicated
+        is the secondary location. The secondary location is automatically
+        determined based on the location of the primary; it is in a second data
+        center that resides in the same region as the primary location. Read-only
+        access is available from the secondary location, if read-access geo-redundant
+        replication is enabled for your storage account.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: The blob service stats.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_stats]
+                :end-before: [END get_blob_service_stats]
+                :language: python
+                :dedent: 12
+                :caption: Getting service stats for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            stats = await self._client.service.get_statistics( # type: ignore
+                timeout=timeout, use_location=LocationMode.SECONDARY, **kwargs)
+            return service_stats_deserialize(stats)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_service_properties(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An object containing blob service properties such as
+            analytics logging, hour/minute metrics, cors rules, etc.
+        :rtype: Dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_properties]
+                :end-before: [END get_blob_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Getting service properties for the blob service.
+        """
+        timeout = kwargs.pop('timeout', None)
+        try:
+            service_props = await self._client.service.get_properties(timeout=timeout, **kwargs)
+            return service_properties_deserialize(service_props)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def set_service_properties(
+        self, analytics_logging: Optional["BlobAnalyticsLogging"] = None,
+        hour_metrics: Optional["Metrics"] = None,
+        minute_metrics: Optional["Metrics"] = None,
+        cors: Optional[List[CorsRule]] = None,
+        target_version: Optional[str] = None,
+        delete_retention_policy: Optional["RetentionPolicy"] = None,
+        static_website: Optional["StaticWebsite"] = None,
+        **kwargs: Any
+    ) -> None:
+        """Sets the properties of a storage account's Blob service, including
+        Azure Storage Analytics.
+
+        If an element (e.g. analytics_logging) is left as None, the
+        existing settings on the service for that functionality are preserved.
+
+        :param analytics_logging:
+            Groups the Azure Analytics Logging settings.
+        :type analytics_logging: ~azure.storage.blob.BlobAnalyticsLogging
+        :param hour_metrics:
+            The hour metrics settings provide a summary of request
+            statistics grouped by API in hourly aggregates for blobs.
+        :type hour_metrics: ~azure.storage.blob.Metrics
+        :param minute_metrics:
+            The minute metrics settings provide request statistics
+            for each minute for blobs.
+        :type minute_metrics: ~azure.storage.blob.Metrics
+        :param cors:
+            You can include up to five CorsRule elements in the
+            list. If an empty list is specified, all CORS rules will be deleted,
+            and CORS will be disabled for the service.
+        :type cors: list[~azure.storage.blob.CorsRule]
+        :param str target_version:
+            Indicates the default version to use for requests if an incoming
+            request's version is not specified.
+        :param delete_retention_policy:
+            The delete retention policy specifies whether to retain deleted blobs.
+            It also specifies the number of days and versions of blob to keep.
+        :type delete_retention_policy: ~azure.storage.blob.RetentionPolicy
+        :param static_website:
+            Specifies whether the static website feature is enabled,
+            and if yes, indicates the index document and 404 error document to use.
+        :type static_website: ~azure.storage.blob.StaticWebsite
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START set_blob_service_properties]
+                :end-before: [END set_blob_service_properties]
+                :language: python
+                :dedent: 12
+                :caption: Setting service properties for the blob service.
+        """
+        if all(parameter is None for parameter in [
+                    analytics_logging, hour_metrics, minute_metrics, cors,
+                    target_version, delete_retention_policy, static_website]):
+            raise ValueError("set_service_properties should be called with at least one parameter")
+
+        props = StorageServiceProperties(
+            logging=analytics_logging,
+            hour_metrics=hour_metrics,
+            minute_metrics=minute_metrics,
+            cors=CorsRule._to_generated(cors), # pylint: disable=protected-access
+            default_service_version=target_version,
+            delete_retention_policy=delete_retention_policy,
+            static_website=static_website
+        )
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.service.set_properties(props, timeout=timeout, **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace
+    def list_containers(
+        self, name_starts_with: Optional[str] = None,
+        include_metadata: bool = False,
+        **kwargs: Any
+    ) -> AsyncItemPaged[ContainerProperties]:
+        """Returns a generator to list the containers under the specified account.
+
+        The generator will lazily follow the continuation tokens returned by
+        the service and stop when all containers have been returned.
+
+        :param str name_starts_with:
+            Filters the results to return only containers whose names
+            begin with the specified prefix.
+        :param bool include_metadata:
+            Specifies that container metadata be returned in the response.
+            The default value is `False`.
+        :keyword bool include_deleted:
+            Specifies that deleted containers be returned in the response. This is only for accounts
+            enabled for container restore. The default value is `False`.
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool include_system:
+            Flag specifying that system containers should be included.
+
+            .. versionadded:: 12.10.0
+
+        :keyword int results_per_page:
+            The maximum number of container names to retrieve per API
+            call. If the request does not specify, the server will return up to 5,000 items.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timesouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) of ContainerProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.ContainerProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_list_containers]
+                :end-before: [END bsc_list_containers]
+                :language: python
+                :dedent: 16
+                :caption: Listing the containers in the blob service.
+        """
+        include = ['metadata'] if include_metadata else []
+        include_deleted = kwargs.pop('include_deleted', None)
+        if include_deleted:
+            include.append("deleted")
+        include_system = kwargs.pop('include_system', None)
+        if include_system:
+            include.append("system")
+        timeout = kwargs.pop('timeout', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        command = functools.partial(
+            self._client.service.list_containers_segment,
+            prefix=name_starts_with,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            page_iterator_class=ContainerPropertiesPaged
+        )
+
+    @distributed_trace
+    def find_blobs_by_tags(self, filter_expression: str, **kwargs: Any) -> AsyncItemPaged["FilteredBlob"]:
+        """The Filter Blobs operation enables callers to list blobs across all
+        containers whose tags match a given search expression.  Filter blobs
+        searches across all containers within a storage account but can be
+        scoped within the expression to a single container.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            e.g. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+            To specify a container, e.g. "@container='containerName' and \"Name\"='C'"
+        :keyword int results_per_page:
+            The max result per page when paginating.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timesouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob]
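+
+        .. admonition:: Example:
+
+            A minimal usage sketch (illustrative only; assumes ``service`` is an
+            authenticated async ``BlobServiceClient`` and some blobs carry a
+            ``project`` tag)::
+
+                async for blob in service.find_blobs_by_tags("\"project\"='alpha'"):
+                    print(blob.name, blob.container_name)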
+        """
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.service.filter_blobs,
+            where=filter_expression,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            page_iterator_class=FilteredBlobPaged)
+
+    @distributed_trace_async
+    async def create_container(
+        self, name: str,
+        metadata: Optional[Dict[str, str]] = None,
+        public_access: Optional[Union["PublicAccess", str]] = None,
+        **kwargs: Any
+    ) -> ContainerClient:
+        """Creates a new container under the specified account.
+
+        If the container with the same name already exists, a ResourceExistsError will
+        be raised. This method returns a client with which to interact with the newly
+        created container.
+
+        :param str name: The name of the container to create.
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: `{'Category':'test'}`
+        :type metadata: dict(str, str)
+        :param public_access:
+            Possible values include: 'container', 'blob'.
+        :type public_access: str or ~azure.storage.blob.PublicAccess
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A container client to interact with the newly created container.
+        :rtype: ~azure.storage.blob.aio.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_create_container]
+                :end-before: [END bsc_create_container]
+                :language: python
+                :dedent: 16
+                :caption: Creating a container in the blob service.
+        """
+        container = self.get_container_client(name)
+        timeout = kwargs.pop('timeout', None)
+        kwargs.setdefault('merge_span', True)
+        await container.create_container(
+            metadata=metadata, public_access=public_access, timeout=timeout, **kwargs)
+        return container
+
+    @distributed_trace_async
+    async def delete_container(
+        self, container: Union[ContainerProperties, str],
+        lease: Optional[Union["BlobLeaseClient", str]] = None,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified container for deletion.
+
+        The container and any blobs contained within it are later deleted during garbage collection.
+        If the container is not found, a ResourceNotFoundError will be raised.
+
+        :param container:
+            The container to delete. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :type lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_delete_container]
+                :end-before: [END bsc_delete_container]
+                :language: python
+                :dedent: 16
+                :caption: Deleting a container in the blob service.
+        """
+        container_client = self.get_container_client(container)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        await container_client.delete_container(
+            lease=lease,
+            timeout=timeout,
+            **kwargs)
+
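If the container holds an active lease, the call must carry it. A sketch, reusing the hypothetical `service` client from the previous example:

    # The lease ID below is a placeholder; pass a BlobLeaseClient or its ID.
    await service.delete_container(
        "my-container",
        lease="a1b2c3d4-0000-0000-0000-000000000000"
    )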
+    @distributed_trace_async
+    async def _rename_container(self, name: str, new_name: str, **kwargs: Any) -> ContainerClient:
+        """Renames a container.
+
+        The operation succeeds only if the source container exists.
+
+        :param str name:
+            The name of the container to rename.
+        :param str new_name:
+            The new container name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A container client for the renamed container.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        renamed_container = self.get_container_client(new_name)
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            await renamed_container._client.container.rename(name, **kwargs)   # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def undelete_container(
+        self, deleted_container_name: str,
+        deleted_container_version: str,
+        **kwargs: Any
+    ) -> ContainerClient:
+        """Restores soft-deleted container.
+
+        Operation will only be successful if used within the specified number of days
+        set in the delete retention policy.
+
+        .. versionadded:: 12.4.0
+            This operation was introduced in API version '2019-12-12'.
+
+        :param str deleted_container_name:
+            Specifies the name of the deleted container to restore.
+        :param str deleted_container_version:
+            Specifies the version of the deleted container to restore.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The recovered soft-deleted ContainerClient.
+        :rtype: ~azure.storage.blob.aio.ContainerClient
+        """
+        new_name = kwargs.pop('new_name', None)
+        if new_name:
+            warnings.warn("`new_name` is no longer supported.", DeprecationWarning)
+        container = self.get_container_client(new_name or deleted_container_name)
+        try:
+            await container._client.container.restore(deleted_container_name=deleted_container_name, # pylint: disable = protected-access
+                                                      deleted_container_version=deleted_container_version,
+                                                      timeout=kwargs.pop('timeout', None), **kwargs)
+            return container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
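A sketch of the restore flow, assuming soft delete is enabled on the account and that `service` is an existing BlobServiceClient; the `include_deleted` listing option and the `deleted`/`version` properties are assumptions about the companion list API, which is not shown in this diff:

    # Find the soft-deleted container and restore it by name and version.
    async for props in service.list_containers(include_deleted=True):
        if props.deleted and props.name == "my-container":
            restored = await service.undelete_container(props.name, props.version)
            break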
+    def get_container_client(self, container: Union[ContainerProperties, str]) -> ContainerClient:
+        """Get a client to interact with the specified container.
+
+        The container need not already exist.
+
+        :param container:
+            The container. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :returns: A ContainerClient.
+        :rtype: ~azure.storage.blob.aio.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_get_container_client]
+                :end-before: [END bsc_get_container_client]
+                :language: python
+                :dedent: 12
+                :caption: Getting the container client to interact with a specific container.
+        """
+        if isinstance(container, ContainerProperties):
+            container_name = container.name
+        else:
+            container_name = container
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies #type: ignore [arg-type] # pylint: disable = protected-access
+        )
+        return ContainerClient(
+            self.url, container_name=container_name,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
+
+    def get_blob_client(
+        self, container: Union[ContainerProperties, str],
+        blob: str,
+        snapshot: Optional[Union[Dict[str, Any], str]] = None,
+        *,
+        version_id: Optional[str] = None
+    ) -> BlobClient:
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param container:
+            The container that the blob is in. This can either be the name of the container,
+            or an instance of ContainerProperties.
+        :type container: str or ~azure.storage.blob.ContainerProperties
+        :param str blob:
+            The blob with which to interact.
+        :param snapshot:
+            The optional blob snapshot on which to operate. This can either be the ID of the snapshot,
+            or a dictionary output returned by
+            :func:`~azure.storage.blob.aio.BlobClient.create_snapshot()`.
+        :type snapshot: str or dict(str, Any)
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.aio.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START bsc_get_blob_client]
+                :end-before: [END bsc_get_blob_client]
+                :language: python
+                :dedent: 16
+                :caption: Getting the blob client to interact with a specific blob.
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+            blob_name = blob.name
+        else:
+            blob_name = blob
+        if isinstance(container, ContainerProperties):
+            container_name = container.name
+        else:
+            container_name = container
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=cast(Iterable["AsyncHTTPPolicy"],
+                          self._pipeline._impl_policies) # pylint: disable = protected-access
+        )
+        return BlobClient(
+            self.url, container_name=container_name, blob_name=blob_name, snapshot=snapshot,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function,
+            version_id=version_id)
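A sketch tying `get_blob_client` to an upload; the container and blob names are placeholders, and `service` is assumed to be an authenticated BlobServiceClient:

    # No network call happens here; the blob client is constructed locally.
    blob = service.get_blob_client("my-container", "folder/data.bin")
    await blob.upload_blob(b"hello world", overwrite=True)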
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py
new file mode 100644
index 00000000..306e3acf
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_container_client_async.py
@@ -0,0 +1,1611 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-many-lines, docstring-keyword-should-match-keyword-only
+
+import functools
+import warnings
+from datetime import datetime
+from typing import (
+    Any, AnyStr, AsyncIterable, AsyncIterator, cast, Dict, List, IO, Iterable, Optional, overload, Union,
+    TYPE_CHECKING
+)
+from urllib.parse import unquote, urlparse
+from typing_extensions import Self
+
+from azure.core.async_paging import AsyncItemPaged, AsyncList
+from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
+from azure.core.pipeline import AsyncPipeline
+from azure.core.pipeline.transport import AsyncHttpResponse  # pylint: disable=C4756
+from azure.core.tracing.decorator import distributed_trace
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from ._blob_client_async import BlobClient
+from ._download_async import StorageStreamDownloader
+from ._lease_async import BlobLeaseClient
+from ._list_blobs_helper import BlobNamesPaged, BlobPropertiesPaged, BlobPrefix
+from ._models import FilteredBlobPaged
+from .._container_client_helpers import (
+    _format_url,
+    _generate_delete_blobs_options,
+    _generate_set_tiers_options,
+    _parse_url
+)
+from .._deserialize import deserialize_container_properties
+from .._encryption import StorageEncryptionMixin
+from .._generated.aio import AzureBlobStorage
+from .._generated.models import SignedIdentifier
+from .._list_blobs_helper import IgnoreListBlobsDeserializer
+from .._models import ContainerProperties, BlobType, BlobProperties, FilteredBlob
+from .._serialize import get_modify_conditions, get_container_cpk_scope_info, get_api_version, get_access_conditions
+from .._shared.base_client import StorageAccountHostsMixin
+from .._shared.base_client_async import AsyncStorageAccountHostsMixin, AsyncTransportWrapper, parse_connection_str
+from .._shared.policies_async import ExponentialRetry
+from .._shared.request_handlers import add_metadata_headers, serialize_iso
+from .._shared.response_handlers import (
+    process_storage_error,
+    return_headers_and_deserialized,
+    return_response_headers
+)
+
+if TYPE_CHECKING:
+    from azure.core.credentials import AzureNamedKeyCredential, AzureSasCredential
+    from azure.core.credentials_async import AsyncTokenCredential
+    from ._blob_service_client_async import BlobServiceClient
+    from .._models import (
+        AccessPolicy,
+        StandardBlobTier,
+        PremiumPageBlobTier,
+        PublicAccess
+    )
+
+
+class ContainerClient(AsyncStorageAccountHostsMixin, StorageAccountHostsMixin, StorageEncryptionMixin):  # type: ignore [misc]  # pylint: disable=too-many-public-methods
+    """A client to interact with a specific container, although that container
+    may not yet exist.
+
+    For operations relating to a specific blob within this container, a blob client can be
+    retrieved using the :func:`~get_blob_client` function.
+
+    :param str account_url:
+        The URI to the storage account. In order to create a client given the full URI to the container,
+        use the :func:`from_container_url` classmethod.
+    :param container_name:
+        The name of the container for the blob.
+    :type container_name: str
+    :param credential:
+        The credentials with which to authenticate. This is optional if the
+        account URL already has a SAS token. The value can be a SAS token string,
+        an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+        an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+        If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+        - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+        If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+        should be the storage account key.
+    :keyword str api_version:
+        The Storage API version to use for requests. Default value is the most recent service version that is
+        compatible with the current SDK. Setting to an older version may result in reduced feature compatibility.
+
+        .. versionadded:: 12.2.0
+
+    :keyword str secondary_hostname:
+        The hostname of the secondary endpoint.
+    :keyword int max_block_size: The maximum chunk size for uploading a block blob in chunks.
+        Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_put_size: If the blob size is less than or equal to max_single_put_size, then the blob will be
+        uploaded with only one http PUT request. If the blob size is larger than max_single_put_size,
+        the blob will be uploaded in chunks. Defaults to 64*1024*1024, or 64MB.
+    :keyword int min_large_block_upload_threshold: The minimum chunk size required to use the memory efficient
+        algorithm when uploading a block blob. Defaults to 4*1024*1024+1.
+    :keyword bool use_byte_buffer: Use a byte buffer for block blob uploads. Defaults to False.
+    :keyword int max_page_size: The maximum chunk size for uploading a page blob. Defaults to 4*1024*1024, or 4MB.
+    :keyword int max_single_get_size: The maximum size for a blob to be downloaded in a single call;
+        anything larger will be downloaded in chunks (potentially in parallel). Defaults to 32*1024*1024, or 32MB.
+    :keyword int max_chunk_get_size: The maximum chunk size used for downloading a blob. Defaults to 4*1024*1024,
+        or 4MB.
+    :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+        authentication. Only has an effect when credential is of type TokenCredential. The value could be
+        https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+
+    .. admonition:: Example:
+
+        .. literalinclude:: ../samples/blob_samples_containers_async.py
+            :start-after: [START create_container_client_from_service]
+            :end-before: [END create_container_client_from_service]
+            :language: python
+            :dedent: 8
+            :caption: Get a ContainerClient from an existing BlobServiceClient.
+
+        .. literalinclude:: ../samples/blob_samples_containers_async.py
+            :start-after: [START create_container_client_sasurl]
+            :end-before: [END create_container_client_sasurl]
+            :language: python
+            :dedent: 12
+            :caption: Creating the container client directly.
+    """
+    def __init__(
+        self, account_url: str,
+        container_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> None:
+        kwargs['retry_policy'] = kwargs.get('retry_policy') or ExponentialRetry(**kwargs)
+        parsed_url, sas_token = _parse_url(account_url=account_url, container_name=container_name)
+
+        self.container_name = container_name
+        # This parameter is used for the hierarchy traversal. Give precedence to credential.
+        self._raw_credential = credential if credential else sas_token
+        self._query_str, credential = self._format_query_string(sas_token, credential)
+        super(ContainerClient, self).__init__(parsed_url, service='blob', credential=credential, **kwargs)
+        self._api_version = get_api_version(kwargs)
+        self._client = self._build_generated_client()
+        self._configure_encryption(kwargs)
+
+    def _build_generated_client(self) -> AzureBlobStorage:
+        client = AzureBlobStorage(self.url, base_url=self.url, pipeline=self._pipeline)
+        client._config.version = self._api_version  # type: ignore [assignment] # pylint: disable=protected-access
+        return client
+
+    def _format_url(self, hostname):
+        return _format_url(
+            container_name=self.container_name,
+            hostname=hostname,
+            scheme=self.scheme,
+            query_str=self._query_str
+        )
+
+    @classmethod
+    def from_container_url(
+        cls, container_url: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ContainerClient from a container url.
+
+        :param str container_url:
+            The full endpoint URL to the Container, including SAS token if used. This could be
+            either the primary endpoint, or the secondary endpoint depending on the current `location_mode`.
+        :type container_url: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            If the resource URI already contains a SAS token, this will be ignored in favor of an explicit credential
+            - except in the case of AzureSasCredential, where the conflicting SAS tokens will raise a ValueError.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        try:
+            if not container_url.lower().startswith('http'):
+                container_url = "https://" + container_url
+        except AttributeError as exc:
+            raise ValueError("Container URL must be a string.") from exc
+        parsed_url = urlparse(container_url)
+        if not parsed_url.netloc:
+            raise ValueError(f"Invalid URL: {container_url}")
+
+        container_path = parsed_url.path.strip('/').split('/')
+        account_path = ""
+        if len(container_path) > 1:
+            account_path = "/" + "/".join(container_path[:-1])
+        account_url = f"{parsed_url.scheme}://{parsed_url.netloc.rstrip('/')}{account_path}?{parsed_url.query}"
+        container_name = unquote(container_path[-1])
+        if not container_name:
+            raise ValueError("Invalid URL. Please provide a URL with a valid container name")
+        return cls(account_url, container_name=container_name, credential=credential, **kwargs)
+
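For example (a sketch; the SAS URL is a placeholder):

    from azure.storage.blob.aio import ContainerClient

    # The SAS token in the URL supplies the credential, so none is passed.
    container = ContainerClient.from_container_url(
        "https://myaccount.blob.core.windows.net/my-container?sv=...&sig=..."
    )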
+    @classmethod
+    def from_connection_string(
+        cls, conn_str: str,
+        container_name: str,
+        credential: Optional[Union[str, Dict[str, str], "AzureNamedKeyCredential", "AzureSasCredential", "AsyncTokenCredential"]] = None,  # pylint: disable=line-too-long
+        **kwargs: Any
+    ) -> Self:
+        """Create ContainerClient from a Connection String.
+
+        :param str conn_str:
+            A connection string to an Azure Storage account.
+        :param container_name:
+            The container name for the blob.
+        :type container_name: str
+        :param credential:
+            The credentials with which to authenticate. This is optional if the
+            account URL already has a SAS token, or the connection string already has shared
+            access key values. The value can be a SAS token string,
+            an instance of an AzureSasCredential or AzureNamedKeyCredential from azure.core.credentials,
+            an account shared access key, or an instance of a TokenCredentials class from azure.identity.
+            Credentials provided here will take precedence over those in the connection string.
+            If using an instance of AzureNamedKeyCredential, "name" should be the storage account name, and "key"
+            should be the storage account key.
+        :type credential:
+            ~azure.core.credentials.AzureNamedKeyCredential or
+            ~azure.core.credentials.AzureSasCredential or
+            ~azure.core.credentials_async.AsyncTokenCredential or
+            str or dict[str, str] or None
+        :keyword str audience: The audience to use when requesting tokens for Azure Active Directory
+            authentication. Only has an effect when credential is of type TokenCredential. The value could be
+            https://storage.azure.com/ (default) or https://<account>.blob.core.windows.net.
+        :returns: A container client.
+        :rtype: ~azure.storage.blob.ContainerClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_authentication.py
+                :start-after: [START auth_from_connection_string_container]
+                :end-before: [END auth_from_connection_string_container]
+                :language: python
+                :dedent: 8
+                :caption: Creating the ContainerClient from a connection string.
+        """
+        account_url, secondary, credential = parse_connection_str(conn_str, credential, 'blob')
+        if 'secondary_hostname' not in kwargs:
+            kwargs['secondary_hostname'] = secondary
+        return cls(
+            account_url, container_name=container_name, credential=credential, **kwargs)
+
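A sketch assuming the connection string is supplied through an environment variable of your choosing:

    import os
    from azure.storage.blob.aio import ContainerClient

    container = ContainerClient.from_connection_string(
        os.environ["AZURE_STORAGE_CONNECTION_STRING"],  # assumed variable name
        container_name="my-container"
    )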
+    @distributed_trace_async
+    async def create_container(
+        self, metadata: Optional[Dict[str, str]] = None,
+        public_access: Optional[Union["PublicAccess", str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """
+        Creates a new container under the specified account. If the container
+        with the same name already exists, the operation fails.
+
+        :param metadata:
+            A dict with name-value pairs to associate with the
+            container as metadata. Example: {'Category':'test'}
+        :type metadata: dict[str, str]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword container_encryption_scope:
+            Specifies the default encryption scope to set on the container and use for
+            all future writes.
+
+            .. versionadded:: 12.2.0
+
+        :paramtype container_encryption_scope: dict or ~azure.storage.blob.ContainerEncryptionScope
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A dictionary of response headers.
+        :rtype: Dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START create_container]
+                :end-before: [END create_container]
+                :language: python
+                :dedent: 16
+                :caption: Creating a container to store blobs.
+        """
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata)) # type: ignore
+        timeout = kwargs.pop('timeout', None)
+        container_cpk_scope_info = get_container_cpk_scope_info(kwargs)
+        try:
+            return await self._client.container.create( # type: ignore
+                timeout=timeout,
+                access=public_access,
+                container_cpk_scope_info=container_cpk_scope_info,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
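A sketch of creating the container with metadata and blob-level public access; `container` is assumed to be a ContainerClient built as above:

    await container.create_container(
        metadata={"category": "test"},
        public_access="blob"  # anonymous read access to blobs only
    )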
+    @distributed_trace_async
+    async def _rename_container(self, new_name: str, **kwargs: Any) -> "ContainerClient":
+        """Renames a container.
+
+        The operation succeeds only if the source container exists.
+
+        :param str new_name:
+            The new container name the user wants to rename to.
+        :keyword lease:
+            Specify this to perform the operation only if the lease ID given
+            matches the active lease ID of the source container.
+        :paramtype lease: ~azure.storage.blob.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: The renamed container.
+        :rtype: ~azure.storage.blob.ContainerClient
+        """
+        lease = kwargs.pop('lease', None)
+        try:
+            kwargs['source_lease_id'] = lease.id
+        except AttributeError:
+            kwargs['source_lease_id'] = lease
+        try:
+            renamed_container = ContainerClient(
+                f"{self.scheme}://{self.primary_hostname}", container_name=new_name,
+                credential=self.credential, api_version=self.api_version, _configuration=self._config,
+                _pipeline=self._pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+                require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+                key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function)
+            await renamed_container._client.container.rename(self.container_name, **kwargs)   # pylint: disable = protected-access
+            return renamed_container
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def delete_container(self, **kwargs: Any) -> None:
+        """
+        Marks the specified container for deletion. The container and any blobs
+        contained within it are later deleted during garbage collection.
+
+        :keyword lease:
+            If specified, delete_container only succeeds if the
+            container's lease is active and matches this ID.
+            Required if the container has an active lease.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START delete_container]
+                :end-before: [END delete_container]
+                :language: python
+                :dedent: 16
+                :caption: Delete a container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        mod_conditions = get_modify_conditions(kwargs)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            await self._client.container.delete(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def acquire_lease(
+        self, lease_duration: int = -1,
+        lease_id: Optional[str] = None,
+        **kwargs: Any
+    ) -> BlobLeaseClient:
+        """
+        Requests a new lease. If the container does not have an active lease,
+        the Blob service creates a lease on the container and returns a new
+        lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :param str lease_id:
+            Proposed lease ID, in a GUID string format. The Blob service returns
+            400 (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: A BlobLeaseClient object that can be used in a context manager.
+        :rtype: ~azure.storage.blob.aio.BlobLeaseClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START acquire_lease_on_container]
+                :end-before: [END acquire_lease_on_container]
+                :language: python
+                :dedent: 12
+                :caption: Acquiring a lease on the container.
+        """
+        lease = BlobLeaseClient(self, lease_id=lease_id) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        await lease.acquire(lease_duration=lease_duration, timeout=timeout, **kwargs)
+        return lease
+
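A sketch pairing a short lease with a guarded metadata update; the 15-second duration and the try/finally release pattern are illustrative choices:

    # Take a 15-second lease, operate under it, and always release it.
    lease = await container.acquire_lease(lease_duration=15)
    try:
        await container.set_container_metadata({"locked": "true"}, lease=lease)
    finally:
        await lease.release()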
+    @distributed_trace_async
+    async def get_account_information(self, **kwargs: Any) -> Dict[str, str]:
+        """Gets information related to the storage account.
+
+        The information can also be retrieved if the user has a SAS to a container or blob.
+        The keys in the returned dictionary include 'sku_name' and 'account_kind'.
+
+        :returns: A dict of account information (SKU and account type).
+        :rtype: dict(str, str)
+        """
+        try:
+            return await self._client.container.get_account_info(cls=return_response_headers, **kwargs) # type: ignore
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    @distributed_trace_async
+    async def get_container_properties(self, **kwargs: Any) -> ContainerProperties:
+        """Returns all user-defined metadata and system properties for the specified
+        container. The data returned does not include the container's list of blobs.
+
+        :keyword lease:
+            If specified, get_container_properties only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: Properties for the specified container within a container object.
+        :rtype: ~azure.storage.blob.ContainerProperties
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_container_properties]
+                :end-before: [END get_container_properties]
+                :language: python
+                :dedent: 16
+                :caption: Getting properties on the container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response = await self._client.container.get_properties(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=deserialize_container_properties,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        response.name = self.container_name
+        return response # type: ignore
+
+    @distributed_trace_async
+    async def exists(self, **kwargs: Any) -> bool:
+        """
+        Returns True if the container exists and False otherwise.
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: True if the container exists, False otherwise.
+        :rtype: bool
+        """
+        try:
+            await self._client.container.get_properties(**kwargs)
+            return True
+        except HttpResponseError as error:
+            try:
+                process_storage_error(error)
+            except ResourceNotFoundError:
+                return False
+
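A common guard built on this check (a sketch; note the check-then-create sequence is not atomic, so the create is still wrapped):

    from azure.core.exceptions import ResourceExistsError

    if not await container.exists():
        try:
            await container.create_container()
        except ResourceExistsError:
            pass  # another writer created it between the check and the call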
+    @distributed_trace_async
+    async def set_container_metadata(
+        self, metadata: Optional[Dict[str, str]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets one or more user-defined name-value pairs for the specified
+        container. Each call to this operation replaces all existing metadata
+        attached to the container. To remove all metadata from the container,
+        call this operation with no metadata dict.
+
+        :param metadata:
+            A dict containing name-value pairs to associate with the container as
+            metadata. Example: {'category':'test'}
+        :type metadata: dict[str, str]
+        :keyword lease:
+            If specified, set_container_metadata only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: Dict[str, Union[str, datetime]]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START set_container_metadata]
+                :end-before: [END set_container_metadata]
+                :language: python
+                :dedent: 16
+                :caption: Setting metadata on the container.
+        """
+        headers = kwargs.pop('headers', {})
+        headers.update(add_metadata_headers(metadata))
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        mod_conditions = get_modify_conditions(kwargs)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            return await self._client.container.set_metadata(  # type: ignore
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                headers=headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
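Because each call replaces the full metadata set, clearing it is just a call with no dict (sketch; the metadata values are placeholders):

    await container.set_container_metadata({"category": "archive", "owner": "data-team"})
    await container.set_container_metadata()  # replaces the set above with nothing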
+    @distributed_trace
+    def _get_blob_service_client(self) -> "BlobServiceClient":
+        """Get a client to interact with the container's parent service account.
+
+        Defaults to the current container's credentials.
+
+        :returns: A BlobServiceClient.
+        :rtype: ~azure.storage.blob.BlobServiceClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_service_async.py
+                :start-after: [START get_blob_service_client_from_container_client]
+                :end-before: [END get_blob_service_client_from_container_client]
+                :language: python
+                :dedent: 8
+                :caption: Get blob service client from container object.
+        """
+        from ._blob_service_client_async import BlobServiceClient
+        if not isinstance(self._pipeline._transport, AsyncTransportWrapper): # pylint: disable = protected-access
+            _pipeline = AsyncPipeline(
+                transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+                policies=self._pipeline._impl_policies #type: ignore [arg-type] # pylint: disable = protected-access
+            )
+        else:
+            _pipeline = self._pipeline
+        return BlobServiceClient(
+            f"{self.scheme}://{self.primary_hostname}",
+            credential=self._raw_credential, api_version=self.api_version, _configuration=self._config,
+            _location_mode=self._location_mode, _hosts=self._hosts, require_encryption=self.require_encryption,
+            encryption_version=self.encryption_version, key_encryption_key=self.key_encryption_key,
+            key_resolver_function=self.key_resolver_function, _pipeline=_pipeline)
+
+    @distributed_trace_async
+    async def get_container_access_policy(self, **kwargs: Any) -> Dict[str, Any]:
+        """Gets the permissions for the specified container.
+        The permissions indicate whether container data may be accessed publicly.
+
+        :keyword lease:
+            If specified, get_container_access_policy only succeeds if the
+            container's lease is active and matches this ID.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Access policy information in a dict.
+        :rtype: dict[str, Any]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_container_access_policy]
+                :end-before: [END get_container_access_policy]
+                :language: python
+                :dedent: 16
+                :caption: Getting the access policy on the container.
+        """
+        lease = kwargs.pop('lease', None)
+        access_conditions = get_access_conditions(lease)
+        timeout = kwargs.pop('timeout', None)
+        try:
+            response, identifiers = await self._client.container.get_access_policy(
+                timeout=timeout,
+                lease_access_conditions=access_conditions,
+                cls=return_headers_and_deserialized,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return {
+            'public_access': response.get('blob_public_access'),
+            'signed_identifiers': identifiers or []
+        }
+
+    @distributed_trace_async
+    async def set_container_access_policy(
+        self, signed_identifiers: Dict[str, "AccessPolicy"],
+        public_access: Optional[Union[str, "PublicAccess"]] = None,
+        **kwargs: Any
+    ) -> Dict[str, Union[str, datetime]]:
+        """Sets the permissions for the specified container or stored access
+        policies that may be used with Shared Access Signatures. The permissions
+        indicate whether blobs in a container may be accessed publicly.
+
+        :param signed_identifiers:
+            A dictionary of access policies to associate with the container. The
+            dictionary may contain up to 5 elements. An empty dictionary
+            will clear the access policies set on the service.
+        :type signed_identifiers: dict[str, ~azure.storage.blob.AccessPolicy]
+        :param ~azure.storage.blob.PublicAccess public_access:
+            Possible values include: 'container', 'blob'.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified date/time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A datetime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: Container-updated property dict (Etag and last modified).
+        :rtype: dict[str, str or ~datetime.datetime]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START set_container_access_policy]
+                :end-before: [END set_container_access_policy]
+                :language: python
+                :dedent: 16
+                :caption: Setting access policy on the container.
+        """
+        timeout = kwargs.pop('timeout', None)
+        lease = kwargs.pop('lease', None)
+        if len(signed_identifiers) > 5:
+            raise ValueError(
+                'Too many access policies provided. The server does not support setting '
+                'more than 5 access policies on a single resource.')
+        identifiers = []
+        for key, value in signed_identifiers.items():
+            if value:
+                value.start = serialize_iso(value.start)
+                value.expiry = serialize_iso(value.expiry)
+            identifiers.append(SignedIdentifier(id=key, access_policy=value)) # type: ignore
+        signed_identifiers = identifiers # type: ignore
+
+        mod_conditions = get_modify_conditions(kwargs)
+        access_conditions = get_access_conditions(lease)
+        try:
+            return cast(Dict[str, Union[str, datetime]], await self._client.container.set_access_policy(
+                container_acl=signed_identifiers or None,
+                timeout=timeout,
+                access=public_access,
+                lease_access_conditions=access_conditions,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs))
+        except HttpResponseError as error:
+            process_storage_error(error)
+
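A sketch defining a single stored access policy; AccessPolicy and ContainerSasPermissions come from the sync package's models, and the identifier name "read-only" is arbitrary:

    from datetime import datetime, timedelta, timezone
    from azure.storage.blob import AccessPolicy, ContainerSasPermissions

    policy = AccessPolicy(
        permission=ContainerSasPermissions(read=True),
        start=datetime.now(timezone.utc),
        expiry=datetime.now(timezone.utc) + timedelta(days=1)
    )
    await container.set_container_access_policy(signed_identifiers={"read-only": policy})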
+    @distributed_trace
+    def list_blobs(
+        self, name_starts_with: Optional[str] = None,
+        include: Optional[Union[str, List[str]]] = None,
+        **kwargs: Any
+    ) -> AsyncItemPaged[BlobProperties]:
+        """Returns a generator to list the blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :param include:
+            Specifies one or more additional datasets to include in the response.
+            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions',
+            'tags', 'versions', 'immutabilitypolicy', 'legalhold'.
+        :type include: list[str] or str
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START list_blobs_in_container]
+                :end-before: [END list_blobs_in_container]
+                :language: python
+                :dedent: 12
+                :caption: List the blobs in the container.
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_flat_segment,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            page_iterator_class=BlobPropertiesPaged
+        )
+
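Iteration is a plain `async for`; a sketch filtering by a hypothetical "logs/" prefix and pulling metadata alongside each blob:

    async for blob in container.list_blobs(name_starts_with="logs/", include="metadata"):
        print(blob.name, blob.size, blob.metadata)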
+    @distributed_trace
+    def list_blob_names(self, **kwargs: Any) -> AsyncItemPaged[str]:
+        """Returns a generator to list the names of blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        Note that no additional properties or metadata will be returned when using this API.
+        Additionally this API does not have an option to include additional blobs such as snapshots,
+        versions, soft-deleted blobs, etc. To get any of this data, use :func:`list_blobs()`.
+
+        :keyword str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of blob names as strings.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[str]
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        name_starts_with = kwargs.pop('name_starts_with', None)
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+
+        # For listing only names we need to create a one-off generated client and
+        # override its deserializer to prevent deserialization of the full response.
+        client = self._build_generated_client()
+        client.container._deserialize = IgnoreListBlobsDeserializer()  # pylint: disable=protected-access
+
+        command = functools.partial(
+            client.container.list_blob_flat_segment,
+            timeout=timeout,
+            **kwargs)
+        return AsyncItemPaged(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            page_iterator_class=BlobNamesPaged)
+
+    @distributed_trace
+    def walk_blobs(
+        self, name_starts_with: Optional[str] = None,
+        include: Optional[Union[List[str], str]] = None,
+        delimiter: str = "/",
+        **kwargs: Any
+    ) -> AsyncItemPaged[BlobProperties]:
+        """Returns a generator to list the blobs under the specified container.
+        The generator will lazily follow the continuation tokens returned by
+        the service. This operation will list blobs in accordance with a hierarchy,
+        as delimited by the specified delimiter character.
+
+        :param str name_starts_with:
+            Filters the results to return only blobs whose names
+            begin with the specified prefix.
+        :param include:
+            Specifies one or more additional datasets to include in the response.
+            Options include: 'snapshots', 'metadata', 'uncommittedblobs', 'copy', 'deleted', 'deletedwithversions',
+            'tags', 'versions', 'immutabilitypolicy', 'legalhold'.
+        :type include: list[str] or str
+        :param str delimiter:
+            When the request includes this parameter, the operation returns a BlobPrefix
+            element in the response body that acts as a placeholder for all blobs whose
+            names begin with the same substring up to the appearance of the delimiter
+            character. The delimiter may be a single character or a string.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of BlobProperties.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.BlobProperties]
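+
+        .. admonition:: Example:
+
+            A minimal usage sketch (assumes an existing ``container_client``). Top-level
+            items are either ``BlobProperties`` or ``BlobPrefix`` placeholders for
+            "virtual directories" that end in the delimiter::
+
+                async for item in container_client.walk_blobs(delimiter="/"):
+                    print(item.name)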
+        """
+        if kwargs.pop('prefix', None):
+            raise ValueError("Passing 'prefix' has no effect on filtering, " +
+                             "please use the 'name_starts_with' parameter instead.")
+
+        if include and not isinstance(include, list):
+            include = [include]
+
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.list_blob_hierarchy_segment,
+            delimiter=delimiter,
+            include=include,
+            timeout=timeout,
+            **kwargs)
+        return BlobPrefix(
+            command,
+            prefix=name_starts_with,
+            results_per_page=results_per_page,
+            container=self.container_name,
+            delimiter=delimiter)
+
+    @distributed_trace
+    def find_blobs_by_tags(
+        self, filter_expression: str,
+        **kwargs: Any
+    ) -> AsyncItemPaged[FilteredBlob]:
+        """Returns a generator to list the blobs under the specified container whose tags
+        match the given search expression.
+        The generator will lazily follow the continuation tokens returned by
+        the service.
+
+        :param str filter_expression:
+            The expression to find blobs whose tags match the specified condition.
+            eg. "\"yourtagname\"='firsttag' and \"yourtagname2\"='secondtag'"
+        :keyword int results_per_page:
+            The max result per page when paginating.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :returns: An iterable (auto-paging) response of FilteredBlob.
+        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.storage.blob.FilteredBlob]
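+
+        .. admonition:: Example:
+
+            A minimal usage sketch (assumes an existing ``container_client`` and blobs
+            tagged with the illustrative tag key ``project``)::
+
+                async for blob in container_client.find_blobs_by_tags("\"project\"='archive'"):
+                    print(blob.name)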
+        """
+        results_per_page = kwargs.pop('results_per_page', None)
+        timeout = kwargs.pop('timeout', None)
+        command = functools.partial(
+            self._client.container.filter_blobs,
+            timeout=timeout,
+            where=filter_expression,
+            **kwargs)
+        return AsyncItemPaged(
+            command, results_per_page=results_per_page,
+            container=self.container_name,
+            page_iterator_class=FilteredBlobPaged)
+
+    @distributed_trace_async
+    async def upload_blob(
+        self, name: str,
+        data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]],
+        blob_type: Union[str, BlobType] = BlobType.BLOCKBLOB,
+        length: Optional[int] = None,
+        metadata: Optional[Dict[str, str]] = None,
+        **kwargs
+    ) -> BlobClient:
+        """Creates a new blob from a data source with automatic chunking.
+
+        :param str name: The blob with which to interact.
+        :param data: The blob data to upload.
+        :type data: Union[bytes, str, Iterable[AnyStr], AsyncIterable[AnyStr], IO[AnyStr]]
+        :param ~azure.storage.blob.BlobType blob_type: The type of the blob. This can be
+            either BlockBlob, PageBlob or AppendBlob. The default value is BlockBlob.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :param metadata:
+            Name-value pairs associated with the blob as metadata.
+        :type metadata: dict(str, str)
+        :keyword bool overwrite: Whether the blob to be uploaded should overwrite the current data.
+            If True, upload_blob will overwrite the existing data. If set to False, the
+            operation will fail with ResourceExistsError. The exception to the above is with Append
+            blob types: if set to False and the data already exists, an error will not be raised
+            and the data will be appended to the existing blob. If overwrite is set to True, the
+            existing append blob will be deleted and a new one created. Defaults to False.
+        :keyword ~azure.storage.blob.ContentSettings content_settings:
+            ContentSettings object used to set blob properties. Used to set content type, encoding,
+            language, disposition, md5, and cache control.
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient upload algorithm
+            will not be used, because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the container has an active lease. Value can be a BlobLeaseClient object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :keyword ~azure.storage.blob.PremiumPageBlobTier premium_page_blob_tier:
+            A page blob tier value to set the blob to. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+        :keyword ~azure.storage.blob.StandardBlobTier standard_blob_tier:
+            A standard blob tier value to set the blob to. For this version of the library,
+            this is only applicable to block blobs on standard storage accounts.
+        :keyword int maxsize_condition:
+            Optional conditional header. The max length in bytes permitted for
+            the append blob. If the Append Block operation would cause the blob
+            to exceed that limit or if the blob size is already greater than the
+            value specified in this header, the request will fail with
+            MaxBlobSizeConditionNotMet error (HTTP status code 412 - Precondition Failed).
+        :keyword int max_concurrency:
+            Maximum number of parallel connections to use when the blob size exceeds
+            64MB.
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword str encryption_scope:
+            A predefined encryption scope used to encrypt the data on the service. An encryption
+            scope can be created using the Management API and referenced here by name. If a default
+            encryption scope has been defined at the container, this value will override it if the
+            container-level scope is configured to allow overrides. Otherwise an error will be raised.
+
+            .. versionadded:: 12.2.0
+
+        :keyword str encoding:
+            Defaults to UTF-8.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running upload. The signature is
+            function(current: int, total: Optional[int]) where current is the number of bytes transferred
+            so far, and total is the size of the blob or None if the size is unknown.
+        :paramtype progress_hook: Callable[[int, Optional[int]], Awaitable[None]]
+        :returns: A BlobClient to interact with the newly uploaded blob.
+        :rtype: ~azure.storage.blob.aio.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START upload_blob_to_container]
+                :end-before: [END upload_blob_to_container]
+                :language: python
+                :dedent: 12
+                :caption: Upload blob to the container.
+        """
+        if isinstance(name, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param name is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob = self.get_blob_client(name)
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        encoding = kwargs.pop('encoding', 'UTF-8')
+        await blob.upload_blob(
+            data,
+            blob_type=blob_type,
+            length=length,
+            metadata=metadata,
+            timeout=timeout,
+            encoding=encoding,
+            **kwargs
+        )
+        return blob
+
+    @distributed_trace_async
+    async def delete_blob(
+        self, blob: str,
+        delete_snapshots: Optional[str] = None,
+        **kwargs: Any
+    ) -> None:
+        """Marks the specified blob or snapshot for deletion.
+
+        The blob is later deleted during garbage collection.
+        Note that in order to delete a blob, you must delete all of its
+        snapshots. You can delete both at the same time with the delete_blob
+        operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blob or snapshot
+        and retains it for the specified number of days.
+        After the specified number of days, the blob's data is removed from the service during garbage collection.
+        Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` by specifying `include=["deleted"]`.
+        A soft-deleted blob or snapshot can be restored using :func:`~azure.storage.blob.aio.BlobClient.undelete()`.
+
+        :param str blob: The blob with which to interact.
+        :param str delete_snapshots:
+            Required if the blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to delete.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword lease:
+            Required if the blob has an active lease. Value can be a Lease object
+            or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
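+
+        .. admonition:: Example:
+
+            A minimal usage sketch (assumes an existing ``container_client``; the blob
+            name is illustrative)::
+
+                await container_client.delete_blob("my_blob", delete_snapshots="include")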
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob = self.get_blob_client(blob) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        timeout = kwargs.pop('timeout', None)
+        await blob.delete_blob( # type: ignore
+            delete_snapshots=delete_snapshots,
+            timeout=timeout,
+            **kwargs)
+
+    @overload
+    async def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: str,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[str]:
+        ...
+
+    @overload
+    async def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: None = None,
+        **kwargs: Any
+    ) -> StorageStreamDownloader[bytes]:
+        ...
+
+    @distributed_trace_async
+    async def download_blob(
+        self, blob: str,
+        offset: Optional[int] = None,
+        length: Optional[int] = None,
+        *,
+        encoding: Union[str, None] = None,
+        **kwargs: Any
+    ) -> Union[StorageStreamDownloader[str], StorageStreamDownloader[bytes]]:
+        """Downloads a blob to the StorageStreamDownloader. The readall() method must
+        be used to read all the content or readinto() must be used to download the blob into
+        a stream. Using chunks() returns an async iterator which allows the user to iterate over the content in chunks.
+
+        :param str blob: The blob with which to interact.
+        :param int offset:
+            Start of byte range to use for downloading a section of the blob.
+            Must be set if length is provided.
+        :param int length:
+            Number of bytes to read from the stream. This is optional, but
+            should be supplied for optimal performance.
+        :keyword str version_id:
+            The version id parameter is an opaque DateTime
+            value that, when present, specifies the version of the blob to download.
+
+            .. versionadded:: 12.4.0
+
+            This keyword argument was introduced in API version '2019-12-12'.
+
+        :keyword bool validate_content:
+            If true, calculates an MD5 hash for each chunk of the blob. The storage
+            service checks the hash of the content that has arrived with the hash
+            that was sent. This is primarily valuable for detecting bitflips on
+            the wire if using http instead of https, as https (the default) will
+            already validate. Note that this MD5 hash is not stored with the
+            blob. Also note that if enabled, the memory-efficient download algorithm
+            will not be used because computing the MD5 hash requires buffering
+            entire blocks, and doing so defeats the purpose of the memory-efficient algorithm.
+        :keyword lease:
+            Required if the blob has an active lease. If specified, download_blob only
+            succeeds if the blob's lease is active and matches this ID. Value can be a
+            BlobLeaseClient object or the lease ID as a string.
+        :paramtype lease: ~azure.storage.blob.aio.BlobLeaseClient or str
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword ~azure.storage.blob.CustomerProvidedEncryptionKey cpk:
+            Encrypts the data on the service-side with the given key.
+            Use of customer-provided keys must be done over HTTPS.
+            As the encryption key itself is provided in the request,
+            a secure connection must be established to transfer the key.
+        :keyword int max_concurrency:
+            The number of parallel connections with which to download.
+        :keyword str encoding:
+            Encoding to decode the downloaded bytes. Default is None, i.e. no decoding.
+        :keyword progress_hook:
+            An async callback to track the progress of a long running download. The signature is
+            function(current: int, total: int) where current is the number of bytes transferred
+            so far, and total is the total size of the download.
+        :paramtype progress_hook: Callable[[int, int], Awaitable[None]]
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__. This method may make multiple calls to the service and
+            the timeout will apply to each call individually.
+        :returns: A streaming object (StorageStreamDownloader).
+        :rtype: ~azure.storage.blob.aio.StorageStreamDownloader
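+
+        .. admonition:: Example:
+
+            A minimal usage sketch (assumes an existing ``container_client``; the blob
+            name is illustrative)::
+
+                stream = await container_client.download_blob("my_blob")
+                data = await stream.readall()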
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+        blob_client = self.get_blob_client(blob) # type: ignore
+        kwargs.setdefault('merge_span', True)
+        return await blob_client.download_blob(
+            offset=offset,
+            length=length,
+            encoding=encoding,
+            **kwargs)
+
+    @distributed_trace_async
+    async def delete_blobs(
+        self, *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """Marks the specified blobs or snapshots for deletion.
+
+        The blobs are later deleted during garbage collection.
+        Note that in order to delete blobs, you must delete all of their
+        snapshots. You can delete both at the same time with the delete_blobs operation.
+
+        If a delete retention policy is enabled for the service, then this operation soft deletes the blobs or snapshots
+        and retains them for the specified number of days.
+        After the specified number of days, the blobs' data is removed from the service during garbage collection.
+        Soft-deleted blobs or snapshots are accessible through :func:`list_blobs()` by specifying `include=["deleted"]`.
+        Soft-deleted blobs or snapshots can be restored using :func:`~azure.storage.blob.aio.BlobClient.undelete()`.
+
+        The maximum number of blobs that can be deleted in a single request is 256.
+
+        :param blobs:
+            The blobs to delete. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When a blob is given as a dict, the following key/value rules apply
+                (see the sketch at the end of this note):
+
+                blob name:
+                    key: 'name', value type: str
+                snapshot you want to delete:
+                    key: 'snapshot', value type: str
+                version id:
+                    key: 'version_id', value type: str
+                whether to delete snapshots when deleting blob:
+                    key: 'delete_snapshots', value: 'include' or 'only'
+                whether the blob has been modified or not:
+                    key: 'if_modified_since', 'if_unmodified_since', value type: datetime
+                etag:
+                    key: 'etag', value type: str
+                whether to match the etag or not:
+                    key: 'match_condition', value type: MatchConditions
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
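+                A dict-form entry might look like this (keys as listed above; values
+                are illustrative)::
+
+                    {"name": "my_blob", "delete_snapshots": "include", "timeout": 30}
+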
+        :type blobs: Union[str, Dict[str, Any], BlobProperties]
+        :keyword str delete_snapshots:
+            Required if a blob has associated snapshots. Values include:
+             - "only": Deletes only the blobs snapshots.
+             - "include": Deletes the blob along with all snapshots.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_common_async.py
+                :start-after: [START delete_multiple_blobs]
+                :end-before: [END delete_multiple_blobs]
+                :language: python
+                :dedent: 12
+                :caption: Deleting multiple blobs.
+        """
+        if len(blobs) == 0:
+            return AsyncList([])
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+
+        reqs, options = _generate_delete_blobs_options(
+            self._query_str,
+            self.container_name,
+            self._client,
+            *blobs,
+            **kwargs
+        )
+
+        return cast(AsyncIterator[AsyncHttpResponse], await self._batch_send(*reqs, **options))
+
+    @distributed_trace_async
+    async def set_standard_blob_tier_blobs(
+        self, standard_blob_tier: Union[str, 'StandardBlobTier'],
+        *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """This operation sets the tier on block blobs.
+
+        A block blob's tier determines Hot/Cool/Archive storage type.
+        This operation does not update the blob's ETag.
+
+        The maximum number of blobs that can be updated in a single request is 256.
+
+        :param standard_blob_tier:
+            Indicates the tier to be set on all blobs. Options include 'Hot', 'Cool',
+            'Archive'. The hot tier is optimized for storing data that is accessed
+            frequently. The cool storage tier is optimized for storing data that
+            is infrequently accessed and stored for at least a month. The archive
+            tier is optimized for storing data that is rarely accessed and stored
+            for at least six months with flexible latency requirements.
+
+            .. note::
+                If you want to set a different tier on different blobs, please set this positional parameter to None.
+                The blob tier specified on each BlobProperties will then be used.
+
+        :type standard_blob_tier: str or ~azure.storage.blob.StandardBlobTier
+        :param blobs:
+            The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When a blob is given as a dict, the following key/value rules apply:
+
+                blob name:
+                    key: 'name', value type: str
+                standard blob tier:
+                    key: 'blob_tier', value type: StandardBlobTier
+                rehydrate priority:
+                    key: 'rehydrate_priority', value type: RehydratePriority
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                tags match condition:
+                    key: 'if_tags_match_condition', value type: str
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+        :keyword ~azure.storage.blob.RehydratePriority rehydrate_priority:
+            Indicates the priority with which to rehydrate an archived blob.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blob with a matching value.
+            eg. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
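+
+        .. admonition:: Example:
+
+            A minimal usage sketch (assumes an existing ``container_client``; the blob
+            names and tier are illustrative)::
+
+                responses = await container_client.set_standard_blob_tier_blobs(
+                    "Cool", "blob1", "blob2")
+                async for response in responses:
+                    print(response.status_code)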
+        """
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+        reqs, options = _generate_set_tiers_options(
+            self._query_str,
+            self.container_name,
+            standard_blob_tier,
+            self._client,
+            *blobs,
+            **kwargs)
+
+        return cast(AsyncIterator[AsyncHttpResponse], await self._batch_send(*reqs, **options))
+
+    @distributed_trace_async
+    async def set_premium_page_blob_tier_blobs(
+        self, premium_page_blob_tier: Union[str, 'PremiumPageBlobTier'],
+        *blobs: Union[str, Dict[str, Any], BlobProperties],
+        **kwargs: Any
+    ) -> AsyncIterator[AsyncHttpResponse]:
+        """Sets the page blob tiers on the blobs. This API is only supported for page blobs on premium accounts.
+
+        The maximum number of blobs that can be updated in a single request is 256.
+
+        :param premium_page_blob_tier:
+            A page blob tier value to set on all blobs. The tier correlates to the size of the
+            blob and number of allowed IOPS. This is only applicable to page blobs on
+            premium storage accounts.
+
+            .. note::
+                If you want to set a different tier on different blobs, please set this positional parameter to None.
+                The blob tier specified on each BlobProperties will then be used.
+
+        :type premium_page_blob_tier: ~azure.storage.blob.PremiumPageBlobTier
+        :param blobs: The blobs with which to interact. This can be a single blob, or multiple values can
+            be supplied, where each value is either the name of the blob (str) or BlobProperties.
+
+            .. note::
+                When a blob is given as a dict, the following key/value rules apply:
+
+                blob name:
+                    key: 'name', value type: str
+                premium blob tier:
+                    key: 'blob_tier', value type: PremiumPageBlobTier
+                lease:
+                    key: 'lease_id', value type: Union[str, LeaseClient]
+                timeout for subrequest:
+                    key: 'timeout', value type: int
+
+        :type blobs: str or dict(str, Any) or ~azure.storage.blob.BlobProperties
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :keyword bool raise_on_any_failure:
+            This is a boolean param which defaults to True. When this is set, an exception
+            is raised even if there is a single operation failure. For optimal performance,
+            this should be set to False.
+        :return: An async iterator of responses, one for each blob in order.
+        :rtype: AsyncIterator[~azure.core.pipeline.transport.AsyncHttpResponse]
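+
+        .. admonition:: Example:
+
+            A minimal usage sketch (assumes an existing ``container_client`` on a premium
+            account; the blob name and tier are illustrative)::
+
+                responses = await container_client.set_premium_page_blob_tier_blobs(
+                    "P10", "my_page_blob")
+                async for response in responses:
+                    print(response.status_code)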
+        """
+        if self._is_localhost:
+            kwargs['url_prepend'] = self.account_name
+        reqs, options = _generate_set_tiers_options(
+            self._query_str,
+            self.container_name,
+            premium_page_blob_tier,
+            self._client,
+            *blobs,
+            **kwargs)
+
+        return cast(AsyncIterator[AsyncHttpResponse], await self._batch_send(*reqs, **options))
+
+    def get_blob_client(
+        self, blob: str,
+        snapshot: Optional[str] = None,
+        *,
+        version_id: Optional[str] = None
+    ) -> BlobClient:
+        """Get a client to interact with the specified blob.
+
+        The blob need not already exist.
+
+        :param str blob:
+            The blob with which to interact.
+        :param str snapshot:
+            The optional blob snapshot on which to operate. This can be the snapshot ID string
+            or the response returned from :func:`~BlobClient.create_snapshot()`.
+        :keyword str version_id: The version id parameter is an opaque DateTime value that, when present,
+            specifies the version of the blob to operate on.
+        :returns: A BlobClient.
+        :rtype: ~azure.storage.blob.aio.BlobClient
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_containers_async.py
+                :start-after: [START get_blob_client]
+                :end-before: [END get_blob_client]
+                :language: python
+                :dedent: 12
+                :caption: Get the blob client.
+        """
+        if isinstance(blob, BlobProperties):
+            warnings.warn(
+                "The use of a 'BlobProperties' instance for param blob is deprecated. " +
+                "Please use 'BlobProperties.name' or any other str input type instead.",
+                DeprecationWarning
+            )
+            blob_name = blob.get('name')
+        else:
+            blob_name = blob
+        _pipeline = AsyncPipeline(
+            transport=AsyncTransportWrapper(self._pipeline._transport), # pylint: disable = protected-access
+            policies=self._pipeline._impl_policies  # type: ignore [arg-type] # pylint: disable = protected-access
+        )
+        return BlobClient(
+            self.url, container_name=self.container_name, blob_name=blob_name, snapshot=snapshot,
+            credential=self.credential, api_version=self.api_version, _configuration=self._config,
+            _pipeline=_pipeline, _location_mode=self._location_mode, _hosts=self._hosts,
+            require_encryption=self.require_encryption, encryption_version=self.encryption_version,
+            key_encryption_key=self.key_encryption_key, key_resolver_function=self.key_resolver_function,
+            version_id=version_id)
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py
new file mode 100644
index 00000000..4676c2e6
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_download_async.py
@@ -0,0 +1,872 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=invalid-overridden-method
+# mypy: disable-error-code=override
+
+import asyncio
+import codecs
+import sys
+import warnings
+from io import BytesIO, StringIO
+from itertools import islice
+from typing import (
+    Any, AsyncIterator, Awaitable,
+    Generator, Callable, cast, Dict,
+    Generic, IO, Optional, overload,
+    Tuple, TypeVar, Union, TYPE_CHECKING
+)
+
+from azure.core.exceptions import DecodeError, HttpResponseError, IncompleteReadError
+
+from .._shared.request_handlers import validate_and_format_range_headers
+from .._shared.response_handlers import parse_length_from_content_range, process_storage_error
+from .._deserialize import deserialize_blob_properties, get_page_ranges_result
+from .._download import process_range_and_offset, _ChunkDownloader
+from .._encryption import (
+    adjust_blob_size_for_encryption,
+    decrypt_blob,
+    is_encryption_v2,
+    parse_encryption_data
+)
+
+if TYPE_CHECKING:
+    from codecs import IncrementalDecoder
+    from .._encryption import _EncryptionData
+    from .._generated.aio import AzureBlobStorage
+    from .._models import BlobProperties
+    from .._shared.models import StorageConfiguration
+
+
+T = TypeVar('T', bytes, str)
+
+
+async def process_content(data: Any, start_offset: int, end_offset: int, encryption: Dict[str, Any]) -> bytes:
+    if data is None:
+        raise ValueError("Response cannot be None.")
+    await data.response.load_body()
+    content = cast(bytes, data.response.body())
+    if encryption.get('key') is not None or encryption.get('resolver') is not None:
+        try:
+            return decrypt_blob(
+                encryption.get('required') or False,
+                encryption.get('key'),
+                encryption.get('resolver'),
+                content,
+                start_offset,
+                end_offset,
+                data.response.headers)
+        except Exception as error:
+            raise HttpResponseError(
+                message="Decryption failed.",
+                response=data.response,
+                error=error) from error
+    return content
+
+
+class _AsyncChunkDownloader(_ChunkDownloader):
+    def __init__(self, **kwargs: Any) -> None:
+        super(_AsyncChunkDownloader, self).__init__(**kwargs)
+        self.stream_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+        self.progress_lock_async = asyncio.Lock() if kwargs.get('parallel') else None
+
+    async def process_chunk(self, chunk_start: int) -> None:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        chunk_data, _ = await self._download_chunk(chunk_start, chunk_end - 1)
+        length = chunk_end - chunk_start
+        if length > 0:
+            await self._write_to_stream(chunk_data, chunk_start)
+            await self._update_progress(length)
+
+    async def yield_chunk(self, chunk_start: int) -> Tuple[bytes, int]:
+        chunk_start, chunk_end = self._calculate_range(chunk_start)
+        return await self._download_chunk(chunk_start, chunk_end - 1)
+
+    async def _update_progress(self, length: int) -> None:
+        if self.progress_lock_async:
+            async with self.progress_lock_async:
+                self.progress_total += length
+        else:
+            self.progress_total += length
+
+        if self.progress_hook:
+            await cast(Callable[[int, Optional[int]], Awaitable[Any]], self.progress_hook)(
+                self.progress_total, self.total_size)
+
+    async def _write_to_stream(self, chunk_data: bytes, chunk_start: int) -> None:
+        if self.stream_lock_async:
+            async with self.stream_lock_async:
+                self.stream.seek(self.stream_start + (chunk_start - self.start_index))
+                self.stream.write(chunk_data)
+        else:
+            self.stream.write(chunk_data)
+
+    async def _download_chunk(self, chunk_start: int, chunk_end: int) -> Tuple[bytes, int]:
+        if self.encryption_options is None:
+            raise ValueError("Required argument is missing: encryption_options")
+        download_range, offset = process_range_and_offset(
+            chunk_start, chunk_end, chunk_end, self.encryption_options, self.encryption_data
+        )
+
+        # No need to download an empty chunk from the server when there is no data in the
+        # requested range; instead, optimize by creating the empty chunk locally.
+        if self._do_optimize(download_range[0], download_range[1]):
+            content_length = download_range[1] - download_range[0] + 1
+            chunk_data = b"\x00" * content_length
+        else:
+            range_header, range_validation = validate_and_format_range_headers(
+                download_range[0],
+                download_range[1],
+                check_content_md5=self.validate_content
+            )
+
+            retry_active = True
+            retry_total = 3
+            while retry_active:
+                try:
+                    _, response = await cast(Awaitable[Any], self.client.download(
+                        range=range_header,
+                        range_get_content_md5=range_validation,
+                        validate_content=self.validate_content,
+                        data_stream_total=self.total_size,
+                        download_stream_current=self.progress_total,
+                        **self.request_options
+                    ))
+                except HttpResponseError as error:
+                    process_storage_error(error)
+
+                try:
+                    chunk_data = await process_content(response, offset[0], offset[1], self.encryption_options)
+                    retry_active = False
+                except (IncompleteReadError, HttpResponseError, DecodeError) as error:
+                    retry_total -= 1
+                    if retry_total <= 0:
+                        raise HttpResponseError(error, error=error) from error
+                    await asyncio.sleep(1)
+            content_length = response.content_length
+
+            # This makes sure that if_match is set so that we can validate
+            # that subsequent downloads are to an unmodified blob
+            if self.request_options.get('modified_access_conditions'):
+                self.request_options['modified_access_conditions'].if_match = response.properties.etag
+
+        return chunk_data, content_length
+
+
+class _AsyncChunkIterator(object):
+    """Async iterator for chunks in blob download stream."""
+
+    def __init__(self, size: int, content: bytes, downloader: Optional[_AsyncChunkDownloader], chunk_size: int) -> None:
+        self.size = size
+        self._chunk_size = chunk_size
+        self._current_content = content
+        self._iter_downloader = downloader
+        self._iter_chunks: Optional[Generator[int, None, None]] = None
+        self._complete = size == 0
+
+    def __len__(self) -> int:
+        return self.size
+
+    def __iter__(self) -> None:
+        raise TypeError("Async stream must be iterated asynchronously.")
+
+    def __aiter__(self) -> AsyncIterator[bytes]:
+        return self
+
+    # Iterate through responses.
+    async def __anext__(self) -> bytes:
+        if self._complete:
+            raise StopAsyncIteration("Download complete")
+        if not self._iter_downloader:
+            # cut the data obtained from initial GET into chunks
+            if len(self._current_content) > self._chunk_size:
+                return self._get_chunk_data()
+            self._complete = True
+            return self._current_content
+
+        if not self._iter_chunks:
+            self._iter_chunks = self._iter_downloader.get_chunk_offsets()
+
+        # initial GET result still has more than _chunk_size bytes of data
+        if len(self._current_content) >= self._chunk_size:
+            return self._get_chunk_data()
+
+        try:
+            chunk = next(self._iter_chunks)
+            self._current_content += (await self._iter_downloader.yield_chunk(chunk))[0]
+        except StopIteration as exc:
+            self._complete = True
+            # it's likely that there is some data left in self._current_content
+            if self._current_content:
+                return self._current_content
+            raise StopAsyncIteration("Download complete") from exc
+
+        return self._get_chunk_data()
+
+    def _get_chunk_data(self) -> bytes:
+        chunk_data = self._current_content[: self._chunk_size]
+        self._current_content = self._current_content[self._chunk_size:]
+        return chunk_data
+
+
+class StorageStreamDownloader(Generic[T]):  # pylint: disable=too-many-instance-attributes
+    """
+    A streaming object to download from Azure Storage.
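+
+    Instances are returned by download operations such as ``download_blob()`` rather
+    than constructed directly. A minimal consumption sketch (assumes an existing
+    ``blob_client``)::
+
+        stream = await blob_client.download_blob()
+        data = await stream.readall()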
+    """
+
+    name: str
+    """The name of the blob being downloaded."""
+    container: str
+    """The name of the container where the blob is."""
+    properties: "BlobProperties"
+    """The properties of the blob being downloaded. If only a range of the data is being
+    downloaded, this will be reflected in the properties."""
+    size: int
+    """The size of the total data in the stream. This will be the byte range if specified,
+    otherwise the total size of the blob."""
+
+    def __init__(
+        self,
+        clients: "AzureBlobStorage" = None,  # type: ignore [assignment]
+        config: "StorageConfiguration" = None,  # type: ignore [assignment]
+        start_range: Optional[int] = None,
+        end_range: Optional[int] = None,
+        validate_content: bool = None,  # type: ignore [assignment]
+        encryption_options: Dict[str, Any] = None,  # type: ignore [assignment]
+        max_concurrency: int = 1,
+        name: str = None,  # type: ignore [assignment]
+        container: str = None,  # type: ignore [assignment]
+        encoding: Optional[str] = None,
+        download_cls: Optional[Callable] = None,
+        **kwargs: Any
+    ) -> None:
+        self.name = name
+        self.container = container
+        self.size = 0
+
+        self._clients = clients
+        self._config = config
+        self._start_range = start_range
+        self._end_range = end_range
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        self._validate_content = validate_content
+        self._encryption_options = encryption_options or {}
+        self._progress_hook = kwargs.pop('progress_hook', None)
+        self._request_options = kwargs
+        self._response = None
+        self._location_mode = None
+        self._current_content: Union[str, bytes] = b''
+        self._file_size = 0
+        self._non_empty_ranges = None
+        self._encryption_data: Optional["_EncryptionData"] = None
+
+        # The content download offset, after any processing (decryption), in bytes
+        self._download_offset = 0
+        # The raw download offset, before processing (decryption), in bytes
+        self._raw_download_offset = 0
+        # The offset the stream has been read to in bytes or chars depending on mode
+        self._read_offset = 0
+        # The offset into current_content that has been consumed in bytes or chars depending on mode
+        self._current_content_offset = 0
+
+        self._text_mode: Optional[bool] = None
+        self._decoder: Optional["IncrementalDecoder"] = None
+        # Whether the current content is the first chunk of download content or not
+        self._first_chunk = True
+        self._download_start = self._start_range or 0
+
+        # The cls is passed in via download_cls to avoid conflicting arg name with Generic.__new__
+        # but needs to be changed to cls in the request options.
+        self._request_options['cls'] = download_cls
+
+    def __len__(self):
+        return self.size
+
+    async def _get_encryption_data_request(self) -> None:
+        # Save current request cls
+        download_cls = self._request_options.pop('cls', None)
+        # Adjust cls for get_properties
+        self._request_options['cls'] = deserialize_blob_properties
+
+        properties = cast("BlobProperties", await self._clients.blob.get_properties(**self._request_options))
+        # This will return None if there is no encryption metadata or there are parsing errors.
+        # That is acceptable here, the proper error will be caught and surfaced when attempting
+        # to decrypt the blob.
+        self._encryption_data = parse_encryption_data(properties.metadata)
+
+        # Restore cls for download
+        self._request_options['cls'] = download_cls
+
+    async def _setup(self) -> None:
+        if self._encryption_options.get("key") is not None or self._encryption_options.get("resolver") is not None:
+            await self._get_encryption_data_request()
+
+        # The service only provides transactional MD5s for chunks under 4MB.
+        # If validate_content is on, get only self.MAX_CHUNK_GET_SIZE for the first
+        # chunk so a transactional MD5 can be retrieved.
+        first_get_size = (
+            self._config.max_single_get_size if not self._validate_content else self._config.max_chunk_get_size
+        )
+        initial_request_start = self._start_range if self._start_range is not None else 0
+        if self._end_range is not None and self._end_range - initial_request_start < first_get_size:
+            initial_request_end = self._end_range
+        else:
+            initial_request_end = initial_request_start + first_get_size - 1
+
+        # pylint: disable-next=attribute-defined-outside-init
+        self._initial_range, self._initial_offset = process_range_and_offset(
+            initial_request_start,
+            initial_request_end,
+            self._end_range,
+            self._encryption_options,
+            self._encryption_data
+        )
+
+        self._response = await self._initial_request()
+        self.properties = cast("BlobProperties", self._response.properties)  # type: ignore [attr-defined]
+        self.properties.name = self.name
+        self.properties.container = self.container
+
+        # Set the content length to the download size instead of the size of the last range
+        self.properties.size = self.size
+        self.properties.content_range = (f"bytes {self._download_start}-"
+                                         f"{self._end_range if self._end_range is not None else self._file_size - 1}/"
+                                         f"{self._file_size}")
+
+        # Overwrite the content MD5 as it is the MD5 for the last range instead
+        # of the stored MD5
+        # TODO: Set to the stored MD5 when the service returns this
+        self.properties.content_md5 = None  # type: ignore [attr-defined]
+
+    @property
+    def _download_complete(self):
+        if is_encryption_v2(self._encryption_data):
+            return self._download_offset >= self.size
+        return self._raw_download_offset >= self.size
+
+    async def _initial_request(self):
+        range_header, range_validation = validate_and_format_range_headers(
+            self._initial_range[0],
+            self._initial_range[1],
+            start_range_required=False,
+            end_range_required=False,
+            check_content_md5=self._validate_content
+        )
+
+        retry_active = True
+        retry_total = 3
+        while retry_active:
+            try:
+                location_mode, response = cast(Tuple[Optional[str], Any], await self._clients.blob.download(
+                    range=range_header,
+                    range_get_content_md5=range_validation,
+                    validate_content=self._validate_content,
+                    data_stream_total=None,
+                    download_stream_current=0,
+                    **self._request_options
+                ))
+
+                # Check the location we read from to ensure we use the same one
+                # for subsequent requests.
+                self._location_mode = location_mode
+
+                # Parse the total file size and adjust the download size if ranges
+                # were specified
+                self._file_size = parse_length_from_content_range(response.properties.content_range)
+                if self._file_size is None:
+                    raise ValueError("Required Content-Range response header is missing or malformed.")
+                # Remove any extra encryption data size from blob size
+                self._file_size = adjust_blob_size_for_encryption(self._file_size, self._encryption_data)
+
+                if self._end_range is not None and self._start_range is not None:
+                    # Use the length unless it is over the end of the file
+                    self.size = min(self._file_size - self._start_range, self._end_range - self._start_range + 1)
+                elif self._start_range is not None:
+                    self.size = self._file_size - self._start_range
+                else:
+                    self.size = self._file_size
+
+            except HttpResponseError as error:
+                if self._start_range is None and error.response and error.status_code == 416:
+                    # Get range will fail on an empty file. If the user did not
+                    # request a range, do a regular get request in order to get
+                    # any properties.
+                    try:
+                        _, response = cast(Tuple[Optional[Any], Any], await self._clients.blob.download(
+                            validate_content=self._validate_content,
+                            data_stream_total=0,
+                            download_stream_current=0,
+                            **self._request_options))
+                    except HttpResponseError as e:
+                        process_storage_error(e)
+
+                    # Set the download size to empty
+                    self.size = 0
+                    self._file_size = 0
+                else:
+                    process_storage_error(error)
+
+            try:
+                if self.size == 0:
+                    self._current_content = b""
+                else:
+                    self._current_content = await process_content(
+                        response,
+                        self._initial_offset[0],
+                        self._initial_offset[1],
+                        self._encryption_options
+                    )
+                retry_active = False
+            except (IncompleteReadError, HttpResponseError, DecodeError) as error:
+                retry_total -= 1
+                if retry_total <= 0:
+                    raise HttpResponseError(error, error=error) from error
+                await asyncio.sleep(1)
+        self._download_offset += len(self._current_content)
+        self._raw_download_offset += response.content_length
+
+        # get page ranges to optimize downloading sparse page blob
+        if response.properties.blob_type == 'PageBlob':
+            try:
+                page_ranges = await self._clients.page_blob.get_page_ranges()
+                self._non_empty_ranges = get_page_ranges_result(page_ranges)[0]
+            except HttpResponseError:
+                pass
+
+        if not self._download_complete and self._request_options.get("modified_access_conditions"):
+            self._request_options["modified_access_conditions"].if_match = response.properties.etag
+
+        return response
+
+    def chunks(self) -> AsyncIterator[bytes]:
+        """
+        Iterate over chunks in the download stream. Note that the returned iterator
+        covers the entire download content, regardless of any data that was
+        previously read, so data that has already been read may be downloaded again.
+
+        :returns: An async iterator of the chunks in the download stream.
+        :rtype: AsyncIterator[bytes]
+
+        .. admonition:: Example:
+
+            .. literalinclude:: ../samples/blob_samples_hello_world_async.py
+                :start-after: [START download_a_blob_in_chunk]
+                :end-before: [END download_a_blob_in_chunk]
+                :language: python
+                :dedent: 16
+                :caption: Download a blob using chunks().
+        """
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. chunks is not supported in text mode.")
+        if self._encoding:
+            warnings.warn("Encoding is ignored with chunks as only bytes are supported.")
+
+        iter_downloader = None
+        # If we still have the first chunk buffered, use it. Otherwise, download all content again
+        if not self._first_chunk or not self._download_complete:
+            if self._first_chunk:
+                start = self._download_start + len(self._current_content)
+                current_progress = len(self._current_content)
+            else:
+                start = self._download_start
+                current_progress = 0
+
+            end = self._download_start + self.size
+
+            iter_downloader = _AsyncChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=current_progress,
+                start_range=start,
+                end_range=end,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                encryption_data=self._encryption_data,
+                use_location=self._location_mode,
+                **self._request_options
+            )
+
+        initial_content = self._current_content if self._first_chunk else b''
+        return _AsyncChunkIterator(
+            size=self.size,
+            content=cast(bytes, initial_content),
+            downloader=iter_downloader,
+            chunk_size=self._config.max_chunk_get_size)
+
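+    # Usage sketch (illustrative, not shipped code; assumes an async BlobClient
+    # named `blob_client`): chunks() streams a large blob without buffering the
+    # entire payload in memory.
+    #
+    #     downloader = await blob_client.download_blob()
+    #     async for chunk in downloader.chunks():
+    #         sink.write(chunk)  # `sink` is any writable bytes target (assumed)
+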
+    @overload
+    async def read(self, size: int = -1) -> T:
+        ...
+
+    @overload
+    async def read(self, *, chars: Optional[int] = None) -> T:
+        ...
+
+    # pylint: disable-next=too-many-statements,too-many-branches
+    async def read(self, size: int = -1, *, chars: Optional[int] = None) -> T:
+        """
+        Read the specified bytes or chars from the stream. If `encoding`
+        was specified on `download_blob`, it is recommended to use the
+        chars parameter to read a specific number of chars to avoid decoding
+        errors. If size/chars is unspecified or negative, all bytes will be read.
+
+        :param int size:
+            The number of bytes to download from the stream. Leave unspecified
+            or set negative to download all bytes.
+        :keyword Optional[int] chars:
+            The number of chars to download from the stream. Leave unspecified
+            or set negative to download all chars. Note, this can only be used
+            when encoding is specified on `download_blob`.
+        :returns:
+            The requested data as bytes or a string if encoding was specified. If
+            the return value is empty, there is no more data to read.
+        :rtype: T
+        """
+        if size > -1 and self._encoding:
+            warnings.warn(
+                "Size parameter specified with text encoding enabled. It is recommended to use chars "
+                "to read a specific number of characters instead."
+            )
+        if size > -1 and chars is not None:
+            raise ValueError("Cannot specify both size and chars.")
+        if not self._encoding and chars is not None:
+            raise ValueError("Must specify encoding to read chars.")
+        if self._text_mode and size > -1:
+            raise ValueError("Stream has been partially read in text mode. Please use chars.")
+        if self._text_mode is False and chars is not None:
+            raise ValueError("Stream has been partially read in bytes mode. Please use size.")
+
+        # Empty blob or already read to the end
+        if (size == 0 or chars == 0 or
+                (self._download_complete and self._current_content_offset >= len(self._current_content))):
+            return b'' if not self._encoding else ''  # type: ignore [return-value]
+
+        if not self._text_mode and chars is not None and self._encoding is not None:
+            self._text_mode = True
+            self._decoder = codecs.getincrementaldecoder(self._encoding)('strict')
+            self._current_content = self._decoder.decode(
+                cast(bytes, self._current_content), final=self._download_complete)
+        elif self._text_mode is None:
+            self._text_mode = False
+
+        output_stream: Union[BytesIO, StringIO]
+        if self._text_mode:
+            output_stream = StringIO()
+            size = sys.maxsize if chars is None or chars <= 0 else chars
+        else:
+            output_stream = BytesIO()
+            size = size if size > 0 else sys.maxsize
+        readall = size == sys.maxsize
+        count = 0
+
+        # Start by reading from current_content
+        start = self._current_content_offset
+        length = min(len(self._current_content) - self._current_content_offset, size - count)
+        read = output_stream.write(self._current_content[start:start + length])  # type: ignore [arg-type]
+
+        count += read
+        self._current_content_offset += read
+        self._read_offset += read
+        await self._check_and_report_progress()
+
+        remaining = size - count
+        if remaining > 0 and not self._download_complete:
+            # Create a downloader that can download the rest of the file
+            start = self._download_start + self._download_offset
+            end = self._download_start + self.size
+
+            parallel = self._max_concurrency > 1
+            downloader = _AsyncChunkDownloader(
+                client=self._clients.blob,
+                non_empty_ranges=self._non_empty_ranges,
+                total_size=self.size,
+                chunk_size=self._config.max_chunk_get_size,
+                current_progress=self._read_offset,
+                start_range=start,
+                end_range=end,
+                stream=output_stream,
+                parallel=parallel,
+                validate_content=self._validate_content,
+                encryption_options=self._encryption_options,
+                encryption_data=self._encryption_data,
+                use_location=self._location_mode,
+                progress_hook=self._progress_hook,
+                **self._request_options
+            )
+            self._first_chunk = False
+
+            # When reading all data, have the downloader read everything into the stream.
+            # Else, read one chunk at a time (using the downloader as an iterator) until
+            # the requested size is reached.
+            chunks_iter = downloader.get_chunk_offsets()
+            if readall and not self._text_mode:
+                running_futures: Any = [
+                    asyncio.ensure_future(downloader.process_chunk(d))
+                    for d in islice(chunks_iter, 0, self._max_concurrency)
+                ]
+                while running_futures:
+                    # Wait for some download to finish before adding a new one
+                    done, running_futures = await asyncio.wait(
+                        running_futures, return_when=asyncio.FIRST_COMPLETED)
+                    try:
+                        for task in done:
+                            task.result()
+                    except HttpResponseError as error:
+                        process_storage_error(error)
+                    try:
+                        for _ in range(0, len(done)):
+                            next_chunk = next(chunks_iter)
+                            running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+                    except StopIteration:
+                        break
+
+                if running_futures:
+                    # Wait for the remaining downloads to finish
+                    done, _running_futures = await asyncio.wait(running_futures)
+                    try:
+                        for task in done:
+                            task.result()
+                    except HttpResponseError as error:
+                        process_storage_error(error)
+
+                self._complete_read()
+
+            else:
+                while (chunk := next(chunks_iter, None)) is not None and remaining > 0:
+                    chunk_data, content_length = await downloader.yield_chunk(chunk)
+                    self._download_offset += len(chunk_data)
+                    self._raw_download_offset += content_length
+                    if self._text_mode and self._decoder is not None:
+                        self._current_content = self._decoder.decode(chunk_data, final=self._download_complete)
+                    else:
+                        self._current_content = chunk_data
+
+                    if remaining < len(self._current_content):
+                        read = output_stream.write(self._current_content[:remaining])  # type: ignore [arg-type]
+                    else:
+                        read = output_stream.write(self._current_content)  # type: ignore [arg-type]
+
+                    self._current_content_offset = read
+                    self._read_offset += read
+                    remaining -= read
+                    await self._check_and_report_progress()
+
+        data = output_stream.getvalue()
+        if not self._text_mode and self._encoding:
+            try:
+                # This is technically incorrect to do, but we have it for backwards compatibility.
+                data = cast(bytes, data).decode(self._encoding)
+            except UnicodeDecodeError:
+                warnings.warn(
+                    "Encountered a decoding error while decoding blob data from a partial read. "
+                    "Try using the `chars` keyword instead to read in text mode."
+                )
+                raise
+
+        return data  # type: ignore [return-value]
+
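+    # Usage sketch (illustrative; assumes `blob_client`): when `encoding` is
+    # passed to download_blob, read by `chars` to avoid splitting multi-byte
+    # sequences; without an encoding, read by `size` in bytes.
+    #
+    #     downloader = await blob_client.download_blob(encoding='utf-8')
+    #     first_chars = await downloader.read(chars=1024)
+    #
+    #     downloader = await blob_client.download_blob()
+    #     header_bytes = await downloader.read(512)
+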
+    async def readall(self) -> T:
+        """
+        Read the entire contents of this blob.
+        This operation is blocking until all data is downloaded.
+
+        :returns: The requested data as bytes or a string if encoding was specified.
+        :rtype: T
+        """
+        return await self.read()
+
+    async def readinto(self, stream: IO[bytes]) -> int:
+        """Download the contents of this blob to a stream.
+
+        :param IO[bytes] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :returns: The number of bytes read.
+        :rtype: int
+        """
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. readinto is not supported in text mode.")
+        if self._encoding:
+            warnings.warn("Encoding is ignored with readinto as only byte streams are supported.")
+
+        # the stream must be seekable if parallel download is required
+        parallel = self._max_concurrency > 1
+        if parallel:
+            error_message = "Target stream handle must be seekable."
+            if not stream.seekable():
+                raise ValueError(error_message)
+
+            try:
+                stream.seek(stream.tell())
+            except (NotImplementedError, AttributeError) as exc:
+                raise ValueError(error_message) from exc
+
+        # If some data has been streamed using `read`, only stream the remaining data
+        remaining_size = self.size - self._read_offset
+        # Already read to the end
+        if remaining_size <= 0:
+            return 0
+
+        # Write the current content to the user stream
+        current_remaining = len(self._current_content) - self._current_content_offset
+        start = self._current_content_offset
+        count = stream.write(cast(bytes, self._current_content[start:start + current_remaining]))
+
+        self._current_content_offset += count
+        self._read_offset += count
+        if self._progress_hook:
+            await self._progress_hook(self._read_offset, self.size)
+
+        # If all the data was already downloaded/buffered
+        if self._download_complete:
+            return remaining_size
+
+        data_start = self._download_start + self._read_offset
+        data_end = self._download_start + self.size
+
+        downloader = _AsyncChunkDownloader(
+            client=self._clients.blob,
+            non_empty_ranges=self._non_empty_ranges,
+            total_size=self.size,
+            chunk_size=self._config.max_chunk_get_size,
+            current_progress=self._read_offset,
+            start_range=data_start,
+            end_range=data_end,
+            stream=stream,
+            parallel=parallel,
+            validate_content=self._validate_content,
+            encryption_options=self._encryption_options,
+            encryption_data=self._encryption_data,
+            use_location=self._location_mode,
+            progress_hook=self._progress_hook,
+            **self._request_options
+        )
+
+        dl_tasks = downloader.get_chunk_offsets()
+        running_futures = {
+            asyncio.ensure_future(downloader.process_chunk(d))
+            for d in islice(dl_tasks, 0, self._max_concurrency)
+        }
+        while running_futures:
+            # Wait for some download to finish before adding a new one
+            done, running_futures = await asyncio.wait(
+                running_futures, return_when=asyncio.FIRST_COMPLETED)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+            try:
+                for _ in range(0, len(done)):
+                    next_chunk = next(dl_tasks)
+                    running_futures.add(asyncio.ensure_future(downloader.process_chunk(next_chunk)))
+            except StopIteration:
+                break
+
+        if running_futures:
+            # Wait for the remaining downloads to finish
+            done, _running_futures = await asyncio.wait(running_futures)
+            try:
+                for task in done:
+                    task.result()
+            except HttpResponseError as error:
+                process_storage_error(error)
+
+        self._complete_read()
+        return remaining_size
+
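+    # Usage sketch (illustrative; assumes `blob_client`): readinto() writes
+    # straight into a writable stream (which must be seekable when
+    # max_concurrency > 1, as checked above).
+    #
+    #     with open('blob.bin', 'wb') as file_handle:
+    #         downloader = await blob_client.download_blob(max_concurrency=4)
+    #         bytes_read = await downloader.readinto(file_handle)
+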
+    def _complete_read(self):
+        """Adjusts all offsets to the end of the download."""
+        self._download_offset = self.size
+        self._raw_download_offset = self.size
+        self._read_offset = self.size
+        self._current_content_offset = len(self._current_content)
+
+    async def _check_and_report_progress(self):
+        """Reports progress if necessary."""
+        # Only report progress at the end of each chunk and use download_offset to always report
+        # progress in terms of (approximate) byte count.
+        if self._progress_hook and self._current_content_offset == len(self._current_content):
+            await self._progress_hook(self._download_offset, self.size)
+
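+    # Usage sketch (illustrative; assumes `blob_client` and that the
+    # `progress_hook` keyword of download_blob is available): the hook is an
+    # async callable invoked with (bytes_so_far, total_bytes) at chunk
+    # boundaries, as implemented above.
+    #
+    #     async def on_progress(current, total):
+    #         print(f"{current}/{total} bytes")
+    #
+    #     downloader = await blob_client.download_blob(progress_hook=on_progress)
+    #     data = await downloader.readall()
+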
+    async def content_as_bytes(self, max_concurrency=1):
+        """DEPRECATED: Download the contents of this file.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The contents of the blob as bytes.
+        :rtype: bytes
+        """
+        warnings.warn(
+            "content_as_bytes is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "content_as_bytes is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        return await self.readall()
+
+    async def content_as_text(self, max_concurrency=1, encoding="UTF-8"):
+        """DEPRECATED: Download the contents of this blob, and decode as text.
+
+        This operation is blocking until all data is downloaded.
+
+        This method is deprecated, use :func:`readall` instead.
+
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :param str encoding:
+            Text encoding used to decode the downloaded bytes. Default is UTF-8.
+        :returns: The content of the blob as a str.
+        :rtype: str
+        """
+        warnings.warn(
+            "content_as_text is deprecated, use readall instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "content_as_text is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        self._encoding = encoding
+        return await self.readall()
+
+    async def download_to_stream(self, stream, max_concurrency=1):
+        """DEPRECATED: Download the contents of this blob to a stream.
+
+        This method is deprecated, use :func:`readinto` instead.
+
+        :param IO[T] stream:
+            The stream to download to. This can be an open file-handle,
+            or any writable stream. The stream must be seekable if the download
+            uses more than one parallel connection.
+        :param int max_concurrency:
+            The number of parallel connections with which to download.
+        :returns: The properties of the downloaded blob.
+        :rtype: Any
+        """
+        warnings.warn(
+            "download_to_stream is deprecated, use readinto instead",
+            DeprecationWarning
+        )
+        if self._text_mode:
+            raise ValueError("Stream has been partially read in text mode. "
+                             "download_to_stream is not supported in text mode.")
+
+        self._max_concurrency = max_concurrency
+        await self.readinto(stream)
+        return self.properties
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py
new file mode 100644
index 00000000..97334d96
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_encryption_async.py
@@ -0,0 +1,72 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import inspect
+import sys
+from io import BytesIO
+from typing import IO
+
+from .._encryption import _GCM_REGION_DATA_LENGTH, encrypt_data_v2
+
+
+class GCMBlobEncryptionStream:
+    """
+    An async stream that performs AES-GCM encryption on the given data as
+    it's streamed. Data is read and encrypted in regions. The stream
+    will use the same encryption key and will generate a guaranteed unique
+    nonce for each encryption region.
+    """
+    def __init__(
+        self, content_encryption_key: bytes,
+        data_stream: IO[bytes],
+    ) -> None:
+        """
+        :param bytes content_encryption_key: The encryption key to use.
+        :param IO[bytes] data_stream: The data stream to read data from.
+        """
+        self.content_encryption_key = content_encryption_key
+        self.data_stream = data_stream
+
+        self.offset = 0
+        self.current = b''
+        self.nonce_counter = 0
+
+    async def read(self, size: int = -1) -> bytes:
+        """
+        Read data from the stream. Specify -1 to read all available data.
+
+        :param int size: The amount of data to read. Defaults to -1 for all data.
+        :return: The bytes read.
+        :rtype: bytes
+        """
+        result = BytesIO()
+        remaining = sys.maxsize if size == -1 else size
+
+        while remaining > 0:
+            # Start by reading from current
+            if len(self.current) > 0:
+                read = min(remaining, len(self.current))
+                result.write(self.current[:read])
+
+                self.current = self.current[read:]
+                self.offset += read
+                remaining -= read
+
+            if remaining > 0:
+                # Read one region of data and encrypt it
+                data = self.data_stream.read(_GCM_REGION_DATA_LENGTH)
+                if inspect.isawaitable(data):
+                    data = await data
+
+                if len(data) == 0:
+                    # No more data to read
+                    break
+
+                self.current = encrypt_data_v2(data, self.nonce_counter, self.content_encryption_key)
+                # IMPORTANT: Must increment the nonce each time.
+                self.nonce_counter += 1
+
+        return result.getvalue()
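+
+# Usage sketch (illustrative; GCMBlobEncryptionStream is internal plumbing for
+# client-side encryption v2, so the key below is a stand-in for the real
+# content encryption key produced by the encryption pipeline):
+#
+#     import os
+#     from io import BytesIO
+#
+#     stream = GCMBlobEncryptionStream(os.urandom(32), BytesIO(b"plaintext"))
+#     ciphertext = await stream.read()  # call from within a coroutine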
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py
new file mode 100644
index 00000000..b5bfad95
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_lease_async.py
@@ -0,0 +1,346 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=docstring-keyword-should-match-keyword-only
+
+import uuid
+from typing import Any, Optional, Union, TYPE_CHECKING
+
+from azure.core.exceptions import HttpResponseError
+from azure.core.tracing.decorator_async import distributed_trace_async
+
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from .._serialize import get_modify_conditions
+
+if TYPE_CHECKING:
+    from azure.storage.blob.aio import BlobClient, ContainerClient
+    from datetime import datetime
+
+
+class BlobLeaseClient(): # pylint: disable=client-accepts-api-version-keyword
+    """Creates a new BlobLeaseClient.
+
+    This client provides lease operations on a BlobClient or ContainerClient.
+
+    :param client: The client of the blob or container to lease.
+    :type client: Union[BlobClient, ContainerClient]
+    :param lease_id: A string representing the lease ID of an existing lease. This value does not need to be
+        specified in order to acquire a new lease, or break one.
+    :type lease_id: Optional[str]
+    """
+
+    id: str
+    """The ID of the lease currently being maintained, or the proposed lease ID
+    if no lease has yet been acquired."""
+    etag: Optional[str]
+    """The ETag of the lease currently being maintained. This will be `None` if no
+    lease has yet been acquired or modified."""
+    last_modified: Optional["datetime"]
+    """The last modified timestamp of the lease currently being maintained.
+    This will be `None` if no lease has yet been acquired or modified."""
+
+    def __init__( # pylint: disable=missing-client-constructor-parameter-credential, missing-client-constructor-parameter-kwargs
+        self, client: Union["BlobClient", "ContainerClient"],
+        lease_id: Optional[str] = None
+    ) -> None:
+        self.id = lease_id or str(uuid.uuid4())
+        self.last_modified = None
+        self.etag = None
+        if hasattr(client, 'blob_name'):
+            self._client = client._client.blob
+        elif hasattr(client, 'container_name'):
+            self._client = client._client.container
+        else:
+            raise TypeError("Lease must use either BlobClient or ContainerClient.")
+
+    def __enter__(self):
+        raise TypeError("Async lease must use 'async with'.")
+
+    def __exit__(self, *args):
+        self.release()
+
+    async def __aenter__(self):
+        return self
+
+    async def __aexit__(self, *args):
+        await self.release()
+
+    @distributed_trace_async
+    async def acquire(self, lease_duration: int = -1, **kwargs: Any) -> None:
+        """Requests a new lease.
+
+        If the container or blob does not have an active lease, the Blob service
+        creates a lease on it and returns a new lease ID.
+
+        :param int lease_duration:
+            Specifies the duration of the lease, in seconds, or negative one
+            (-1) for a lease that never expires. A non-infinite lease can be
+            between 15 and 60 seconds. A lease duration cannot be changed
+            using renew or change. Default is -1 (infinite lease).
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :rtype: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.acquire_lease(
+                timeout=kwargs.pop('timeout', None),
+                duration=lease_duration,
+                proposed_lease_id=self.id,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+        self.etag = response.get('etag')
+
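+    # Usage sketch (illustrative; assumes an async `blob_client`): a 15-second
+    # lease on a blob. BlobClient.acquire_lease() constructs this class and
+    # calls acquire() on your behalf.
+    #
+    #     lease = await blob_client.acquire_lease(lease_duration=15)
+    #     await blob_client.upload_blob(b"data", overwrite=True, lease=lease)
+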
+    @distributed_trace_async
+    async def renew(self, **kwargs: Any) -> None:
+        """Renews the lease.
+
+        The lease can be renewed if the lease ID specified in the
+        lease client matches that associated with the container or blob. Note that
+        the lease may be renewed even if it has expired as long as the container
+        or blob has not been leased again since the expiration of that lease. When you
+        renew a lease, the lease duration clock resets.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.renew_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def release(self, **kwargs: Any) -> None:
+        """Release the lease.
+
+        The lease may be released if the client lease id specified matches
+        that associated with the container or blob. Releasing the lease allows another client
+        to immediately acquire the lease for the container or blob as soon as the release is complete.
+
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.release_lease(
+                lease_id=self.id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def change(self, proposed_lease_id: str, **kwargs: Any) -> None:
+        """Change the lease ID of an active lease.
+
+        :param str proposed_lease_id:
+            Proposed lease ID, in a GUID string format. The Blob service returns 400
+            (Invalid request) if the proposed lease ID is not in the correct format.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str etag:
+            An ETag value, or the wildcard character (*). Used to check if the resource has changed,
+            and act according to the condition specified by the `match_condition` parameter.
+        :keyword ~azure.core.MatchConditions match_condition:
+            The match condition to use upon the etag.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: None
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.change_lease(
+                lease_id=self.id,
+                proposed_lease_id=proposed_lease_id,
+                timeout=kwargs.pop('timeout', None),
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        self.etag = response.get('etag')
+        self.id = response.get('lease_id')
+        self.last_modified = response.get('last_modified')
+
+    @distributed_trace_async
+    async def break_lease(self, lease_break_period: Optional[int] = None, **kwargs: Any) -> int:
+        """Break the lease, if the container or blob has an active lease.
+
+        Once a lease is broken, it cannot be renewed. Any authorized request can break the lease;
+        the request is not required to specify a matching lease ID. When a lease
+        is broken, the lease break period is allowed to elapse, during which time
+        no lease operation except break and release can be performed on the container or blob.
+        When a lease is successfully broken, the response indicates the interval
+        in seconds until a new lease can be acquired.
+
+        :param int lease_break_period:
+            This is the proposed duration, in seconds, that the lease
+            should continue before it is broken, between 0 and 60 seconds. This
+            break period is only used if it is shorter than the time remaining
+            on the lease. If longer, the time remaining on the lease is used.
+            A new lease will not be available before the break period has
+            expired, but the lease may be held for longer than the break
+            period. If this header does not appear with a break
+            operation, a fixed-duration lease breaks after the remaining lease
+            period elapses, and an infinite lease breaks immediately.
+        :keyword ~datetime.datetime if_modified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only
+            if the resource has been modified since the specified time.
+        :keyword ~datetime.datetime if_unmodified_since:
+            A DateTime value. Azure expects the date value passed in to be UTC.
+            If timezone is included, any non-UTC datetimes will be converted to UTC.
+            If a date is passed in without timezone info, it is assumed to be UTC.
+            Specify this header to perform the operation only if
+            the resource has not been modified since the specified date/time.
+        :keyword str if_tags_match_condition:
+            Specify a SQL where clause on blob tags to operate only on blobs with a matching value.
+            e.g. ``\"\\\"tagname\\\"='my tag'\"``
+
+            .. versionadded:: 12.4.0
+
+        :keyword int timeout:
+            Sets the server-side timeout for the operation in seconds. For more details see
+            https://learn.microsoft.com/rest/api/storageservices/setting-timeouts-for-blob-service-operations.
+            This value is not tracked or validated on the client. To configure client-side network timeouts
+            see `here <https://github.com/Azure/azure-sdk-for-python/tree/main/sdk/storage/azure-storage-blob
+            #other-client--per-operation-configuration>`__.
+        :return: Approximate time remaining in the lease period, in seconds.
+        :rtype: int
+        """
+        mod_conditions = get_modify_conditions(kwargs)
+        try:
+            response: Any = await self._client.break_lease(
+                timeout=kwargs.pop('timeout', None),
+                break_period=lease_break_period,
+                modified_access_conditions=mod_conditions,
+                cls=return_response_headers,
+                **kwargs)
+        except HttpResponseError as error:
+            process_storage_error(error)
+        return response.get('lease_time') # type: ignore
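+
+# Usage sketch (illustrative; assumes an async `blob_client`): the async
+# context manager above releases the lease on exit, while break_lease() can be
+# issued without holding the lease ID.
+#
+#     async with await blob_client.acquire_lease(lease_duration=60) as lease:
+#         await lease.renew()
+#
+#     seconds_left = await BlobLeaseClient(blob_client).break_lease(lease_break_period=10)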
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py
new file mode 100644
index 00000000..1731a318
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_list_blobs_helper.py
@@ -0,0 +1,249 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+from typing import Callable, List, Optional
+from urllib.parse import unquote
+
+from azure.core.async_paging import AsyncItemPaged, AsyncPageIterator
+from azure.core.exceptions import HttpResponseError
+
+from .._deserialize import (
+    get_blob_properties_from_generated_code,
+    load_many_xml_nodes,
+    load_xml_int,
+    load_xml_string
+)
+from .._generated.models import BlobItemInternal, BlobPrefix as GenBlobPrefix
+from .._models import BlobProperties
+from .._shared.models import DictMixin
+from .._shared.response_handlers import (
+    process_storage_error,
+    return_context_and_deserialized,
+    return_raw_deserialized
+)
+
+
+class BlobPropertiesPaged(AsyncPageIterator):
+    """An Iterable of Blob properties."""
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+    delimiter: Optional[str]
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        delimiter: Optional[str] = None,
+        location_mode: Optional[str] = None,
+    ) -> None:
+        super(BlobPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.delimiter = delimiter
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.container = self._response.container_name
+        self.current_page = [self._build_item(item) for item in self._response.segment.blob_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    def _build_item(self, item):
+        if isinstance(item, BlobProperties):
+            return item
+        if isinstance(item, BlobItemInternal):
+            blob = get_blob_properties_from_generated_code(item)
+            blob.container = self.container  # type: ignore [assignment]
+            return blob
+        return item
+
+
+class BlobNamesPaged(AsyncPageIterator):
+    """An Iterable of Blob names."""
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of blobs to retrieve per call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[str]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+    delimiter: Optional[str]
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        prefix: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        location_mode: Optional[str] = None
+    ) -> None:
+        super(BlobNamesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.container = container
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                prefix=self.prefix,
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_raw_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.get('ServiceEndpoint')
+        self.prefix = load_xml_string(self._response, 'Prefix')
+        self.marker = load_xml_string(self._response, 'Marker')
+        self.results_per_page = load_xml_int(self._response, 'MaxResults')
+        self.container = self._response.get('ContainerName')
+
+        blobs = load_many_xml_nodes(self._response, 'Blob', wrapper='Blobs')
+        self.current_page = [load_xml_string(blob, 'Name') for blob in blobs]
+
+        next_marker = load_xml_string(self._response, 'NextMarker')
+        return next_marker or None, self.current_page
+
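+# Usage sketch (illustrative; assumes an async `container_client`): this pager
+# backs ContainerClient.list_blob_names(), which yields plain strings and is
+# cheaper than list_blobs() when only names are needed.
+#
+#     async for name in container_client.list_blob_names(name_starts_with="logs/"):
+#         print(name)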
+
+class BlobPrefix(AsyncItemPaged, DictMixin):
+    """An Iterable of Blob properties.
+
+    Returned from walk_blobs when a delimiter is used.
+    Can be thought of as a virtual blob directory."""
+
+    name: str
+    """The prefix, or "directory name" of the blob."""
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: str
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    next_marker: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: str
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List[BlobProperties]]
+    """The current page of listed results."""
+    delimiter: str
+    """A delimiting character used for hierarchy listing."""
+    command: Callable
+    """Function to retrieve the next page of items."""
+    container: str
+    """The name of the container."""
+
+    def __init__(self, *args, **kwargs):
+        super(BlobPrefix, self).__init__(*args, page_iterator_class=BlobPrefixPaged, **kwargs)
+        self.name = kwargs.get('prefix')
+        self.prefix = kwargs.get('prefix')
+        self.results_per_page = kwargs.get('results_per_page')
+        self.container = kwargs.get('container')
+        self.delimiter = kwargs.get('delimiter')
+        self.location_mode = kwargs.get('location_mode')
+
+
+class BlobPrefixPaged(BlobPropertiesPaged):
+    def __init__(self, *args, **kwargs):
+        super(BlobPrefixPaged, self).__init__(*args, **kwargs)
+        self.name = self.prefix
+
+    async def _extract_data_cb(self, get_next_return):
+        continuation_token, _ = await super(BlobPrefixPaged, self)._extract_data_cb(get_next_return)
+        self.current_page = self._response.segment.blob_prefixes + self._response.segment.blob_items
+        self.current_page = [self._build_item(item) for item in self.current_page]
+        self.delimiter = self._response.delimiter
+
+        return continuation_token, self.current_page
+
+    def _build_item(self, item):
+        item = super(BlobPrefixPaged, self)._build_item(item)
+        if isinstance(item, GenBlobPrefix):
+            if item.name.encoded:
+                name = unquote(item.name.content)
+            else:
+                name = item.name.content
+            return BlobPrefix(
+                self._command,
+                container=self.container,
+                prefix=name,
+                results_per_page=self.results_per_page,
+                location_mode=self.location_mode)
+        return item
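+
+# Usage sketch (illustrative; assumes an async `container_client`): walk_blobs()
+# with a delimiter yields BlobPrefix entries for each virtual directory
+# alongside BlobProperties for blobs at the current level.
+#
+#     async for item in container_client.walk_blobs(delimiter="/"):
+#         if isinstance(item, BlobPrefix):
+#             print("dir: ", item.name)
+#         else:
+#             print("blob:", item.name)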
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py
new file mode 100644
index 00000000..27d1d8fa
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_models.py
@@ -0,0 +1,199 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+# pylint: disable=too-few-public-methods
+
+from typing import Callable, List, Optional, TYPE_CHECKING
+
+from azure.core.async_paging import AsyncPageIterator
+from azure.core.exceptions import HttpResponseError
+
+from .._deserialize import parse_tags
+from .._generated.models import FilterBlobItem
+from .._models import ContainerProperties, FilteredBlob, parse_page_list
+from .._shared.response_handlers import process_storage_error, return_context_and_deserialized
+
+if TYPE_CHECKING:
+    from .._models import BlobProperties
+
+
+class ContainerPropertiesPaged(AsyncPageIterator):
+    """An Iterable of Container properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] prefix: Filters the results to return only containers whose names
+        begin with the specified prefix.
+    :param Optional[int] results_per_page: The maximum number of container names to retrieve per
+        call.
+    :param Optional[str] continuation_token: An opaque continuation token.
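+
+    Example (a minimal sketch; this pager is normally created for you by
+    ``BlobServiceClient.list_containers`` rather than constructed directly;
+    ``blob_service_client`` is assumed to be an existing async client)::
+
+        async for container in blob_service_client.list_containers(name_starts_with="app-"):
+            print(container.name)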
+    """
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A container name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: List[ContainerProperties]
+    """The current page of listed results."""
+
+    def __init__(self, command, prefix=None, results_per_page=None, continuation_token=None):
+        super(ContainerPropertiesPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.prefix = prefix
+        self.marker = None
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.prefix = self._response.prefix
+        self.marker = self._response.marker
+        self.results_per_page = self._response.max_results
+        self.current_page = [self._build_item(item) for item in self._response.container_items]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        return ContainerProperties._from_generated(item)  # pylint: disable=protected-access
+
+
+class FilteredBlobPaged(AsyncPageIterator):
+    """An Iterable of Blob properties.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[str] container: The name of the container.
+    :param Optional[int] results_per_page: The maximum number of blobs to retrieve per
+        call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    :param Optional[str] location_mode:
+        Specifies the location the request should be sent to. This mode only applies for RA-GRS accounts
+        which allow secondary read access. Options include 'primary' or 'secondary'.
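+
+    Example (a minimal sketch; this pager is normally returned by
+    ``BlobServiceClient.find_blobs_by_tags`` rather than constructed directly;
+    ``blob_service_client`` is assumed to be an existing async client)::
+
+        async for blob in blob_service_client.find_blobs_by_tags("\"env\"='prod'"):
+            print(blob.name, blob.tags)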
+    """
+
+    service_endpoint: Optional[str]
+    """The service URL."""
+    prefix: Optional[str]
+    """A blob name prefix being used to filter the list."""
+    marker: Optional[str]
+    """The continuation token of the current page of results."""
+    results_per_page: Optional[int]
+    """The maximum number of results retrieved per API call."""
+    continuation_token: Optional[str]
+    """The continuation token to retrieve the next page of results."""
+    location_mode: Optional[str]
+    """The location mode being used to list results. The available
+    options include "primary" and "secondary"."""
+    current_page: Optional[List["BlobProperties"]]
+    """The current page of listed results."""
+    container: Optional[str]
+    """The container that the blobs are listed from."""
+
+    def __init__(
+        self, command: Callable,
+        container: Optional[str] = None,
+        results_per_page: Optional[int] = None,
+        continuation_token: Optional[str] = None,
+        location_mode: Optional[str] = None
+    ) -> None:
+        super(FilteredBlobPaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.service_endpoint = None
+        self.marker = continuation_token
+        self.results_per_page = results_per_page
+        self.container = container
+        self.current_page = None
+        self.location_mode = location_mode
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.service_endpoint = self._response.service_endpoint
+        self.marker = self._response.next_marker
+        self.current_page = [self._build_item(item) for item in self._response.blobs]
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_item(item):
+        if isinstance(item, FilterBlobItem):
+            tags = parse_tags(item.tags)
+            blob = FilteredBlob(name=item.name, container_name=item.container_name, tags=tags)
+            return blob
+        return item
+
+
+class PageRangePaged(AsyncPageIterator):
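+    """An Iterable of Page Ranges.
+
+    :param Callable command: Function to retrieve the next page of items.
+    :param Optional[int] results_per_page: The maximum number of page ranges to retrieve per call.
+    :param Optional[str] continuation_token: An opaque continuation token.
+    """
+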
+    def __init__(self, command, results_per_page=None, continuation_token=None):
+        super(PageRangePaged, self).__init__(
+            get_next=self._get_next_cb,
+            extract_data=self._extract_data_cb,
+            continuation_token=continuation_token or ""
+        )
+        self._command = command
+        self.results_per_page = results_per_page
+        self.location_mode = None
+        self.current_page = []
+
+    async def _get_next_cb(self, continuation_token):
+        try:
+            return await self._command(
+                marker=continuation_token or None,
+                maxresults=self.results_per_page,
+                cls=return_context_and_deserialized,
+                use_location=self.location_mode)
+        except HttpResponseError as error:
+            process_storage_error(error)
+
+    async def _extract_data_cb(self, get_next_return):
+        self.location_mode, self._response = get_next_return
+        self.current_page = self._build_page(self._response)
+
+        return self._response.next_marker or None, self.current_page
+
+    @staticmethod
+    def _build_page(response):
+        if not response:
+            # StopIteration raised inside a coroutine is converted to a
+            # RuntimeError (PEP 479); StopAsyncIteration is the correct
+            # end-of-iteration signal from an async pager callback.
+            raise StopAsyncIteration
+
+        return parse_page_list(response)
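+
+
+# Hedged sketch (editor note, not SDK code): PageRangePaged is normally
+# returned by BlobClient.list_page_ranges on a page blob; "page_blob_client"
+# is assumed to be an existing async BlobClient.
+#
+#     async for page_range in page_blob_client.list_page_ranges():
+#         print(page_range.start, page_range.end, page_range.cleared)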
diff --git a/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py
new file mode 100644
index 00000000..794beee3
--- /dev/null
+++ b/.venv/lib/python3.12/site-packages/azure/storage/blob/aio/_upload_helpers.py
@@ -0,0 +1,334 @@
+# -------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for
+# license information.
+# --------------------------------------------------------------------------
+
+import inspect
+from io import SEEK_SET, UnsupportedOperation
+from typing import Any, cast, Dict, IO, Optional, TypeVar, TYPE_CHECKING
+
+from azure.core.exceptions import HttpResponseError, ResourceModifiedError
+
+from ._encryption_async import GCMBlobEncryptionStream
+from .._encryption import (
+    encrypt_blob,
+    get_adjusted_upload_size,
+    get_blob_encryptor_and_padder,
+    generate_blob_encryption_data,
+    _ENCRYPTION_PROTOCOL_V1,
+    _ENCRYPTION_PROTOCOL_V2
+)
+from .._generated.models import (
+    AppendPositionAccessConditions,
+    BlockLookupList,
+    ModifiedAccessConditions
+)
+from .._shared.response_handlers import process_storage_error, return_response_headers
+from .._shared.uploads_async import (
+    AppendBlobChunkUploader,
+    BlockBlobChunkUploader,
+    PageBlobChunkUploader,
+    upload_data_chunks,
+    upload_substream_blocks
+)
+from .._upload_helpers import _any_conditions, _convert_mod_error
+
+if TYPE_CHECKING:
+    from .._generated.aio.operations import AppendBlobOperations, BlockBlobOperations, PageBlobOperations
+    from .._shared.models import StorageConfiguration
+    BlobLeaseClient = TypeVar("BlobLeaseClient")
+
+
+async def upload_block_blob(  # pylint: disable=too-many-locals, too-many-statements
+    client: "BlockBlobOperations",
+    stream: IO,
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    validate_content: bool,
+    max_concurrency: Optional[int],
+    length: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        adjusted_count = length
+        if (encryption_options.get('key') is not None) and (adjusted_count is not None):
+            adjusted_count = get_adjusted_upload_size(adjusted_count, encryption_options['version'])
+        blob_headers = kwargs.pop('blob_headers', None)
+        tier = kwargs.pop('standard_blob_tier', None)
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+
+        immutability_policy = kwargs.pop('immutability_policy', None)
+        immutability_policy_expiry = None if immutability_policy is None else immutability_policy.expiry_time
+        immutability_policy_mode = None if immutability_policy is None else immutability_policy.policy_mode
+        legal_hold = kwargs.pop('legal_hold', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        # Do single put if the size is no larger than config.max_single_put_size
+        if adjusted_count is not None and (adjusted_count <= blob_settings.max_single_put_size):
+            data = stream.read(length or -1)
+            if inspect.isawaitable(data):
+                data = await data
+            if not isinstance(data, bytes):
+                raise TypeError('Blob data should be of type bytes.')
+
+            if encryption_options.get('key'):
+                encryption_data, data = encrypt_blob(data, encryption_options['key'], encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_data
+
+            response = cast(Dict[str, Any], await client.upload(
+                body=data,  # type: ignore [arg-type]
+                content_length=adjusted_count,
+                blob_http_headers=blob_headers,
+                headers=headers,
+                cls=return_response_headers,
+                validate_content=validate_content,
+                data_stream_total=adjusted_count,
+                upload_stream_current=0,
+                tier=tier.value if tier else None,
+                blob_tags_string=blob_tags_string,
+                immutability_policy_expiry=immutability_policy_expiry,
+                immutability_policy_mode=immutability_policy_mode,
+                legal_hold=legal_hold,
+                **kwargs))
+
+            if progress_hook:
+                await progress_hook(adjusted_count, adjusted_count)
+
+            return response
+
+        # Use the buffered (chunked read) upload path when content validation or
+        # encryption is required, blocks are small, or the stream is not seekable.
+        use_original_upload_path = blob_settings.use_byte_buffer or \
+            validate_content or encryption_options.get('required') or \
+            blob_settings.max_block_size < blob_settings.min_large_block_upload_threshold or \
+            (hasattr(stream, 'seekable') and not stream.seekable()) or \
+            not hasattr(stream, 'seek') or not hasattr(stream, 'tell')
+
+        if use_original_upload_path:
+            total_size = length
+            encryptor, padder = None, None
+            if encryption_options and encryption_options.get('key'):
+                cek, iv, encryption_metadata = generate_blob_encryption_data(
+                    encryption_options['key'],
+                    encryption_options['version'])
+                headers['x-ms-meta-encryptiondata'] = encryption_metadata
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                    encryptor, padder = get_blob_encryptor_and_padder(cek, iv, True)
+
+                if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V2:
+                    # Adjust total_size for encryption V2
+                    total_size = adjusted_count
+                    # V2 wraps the data stream with an encryption stream
+                    if cek is None:
+                        raise ValueError("Generate encryption metadata failed. 'cek' is None.")
+                    stream = GCMBlobEncryptionStream(cek, stream)  # type: ignore [assignment]
+
+            block_ids = await upload_data_chunks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=total_size,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                encryptor=encryptor,
+                padder=padder,
+                headers=headers,
+                **kwargs
+            )
+        else:
+            block_ids = await upload_substream_blocks(
+                service=client,
+                uploader_class=BlockBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                max_concurrency=max_concurrency,
+                stream=stream,
+                validate_content=validate_content,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs
+            )
+
+        block_lookup = BlockLookupList(committed=[], uncommitted=[], latest=[])
+        block_lookup.latest = block_ids
+        return cast(Dict[str, Any], await client.commit_block_list(
+            block_lookup,
+            blob_http_headers=blob_headers,
+            cls=return_response_headers,
+            validate_content=validate_content,
+            headers=headers,
+            tier=tier.value if tier else None,
+            blob_tags_string=blob_tags_string,
+            immutability_policy_expiry=immutability_policy_expiry,
+            immutability_policy_mode=immutability_policy_mode,
+            legal_hold=legal_hold,
+            **kwargs))
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
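+# Hedged sketch (editor note, not SDK code): upload_block_blob backs
+# BlobClient.upload_blob for BlobType.BLOCKBLOB (the default); small payloads
+# take the single-put path above, larger ones are committed as blocks.
+# "blob_client" is assumed to be an existing async BlobClient.
+#
+#     await blob_client.upload_blob(b"payload", overwrite=True)
+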
+
+async def upload_page_blob(
+    client: "PageBlobOperations",
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    stream: IO,
+    length: Optional[int] = None,
+    validate_content: Optional[bool] = None,
+    max_concurrency: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if not overwrite and not _any_conditions(**kwargs):
+            kwargs['modified_access_conditions'].if_none_match = '*'
+        if length is None or length < 0:
+            raise ValueError("A content length must be specified for a Page Blob.")
+        if length % 512 != 0:
+            raise ValueError(f"Invalid page blob size: {length}. "
+                             "The size must be aligned to a 512-byte boundary.")
+        tier = None
+        if kwargs.get('premium_page_blob_tier'):
+            premium_page_blob_tier = kwargs.pop('premium_page_blob_tier')
+            try:
+                tier = premium_page_blob_tier.value
+            except AttributeError:
+                tier = premium_page_blob_tier
+
+        if encryption_options and encryption_options.get('key'):
+            cek, iv, encryption_data = generate_blob_encryption_data(
+                encryption_options['key'],
+                encryption_options['version'])
+            headers['x-ms-meta-encryptiondata'] = encryption_data
+
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        response = cast(Dict[str, Any], await client.create(
+            content_length=0,
+            blob_content_length=length,
+            blob_sequence_number=None,  # type: ignore [arg-type]
+            blob_http_headers=kwargs.pop('blob_headers', None),
+            blob_tags_string=blob_tags_string,
+            tier=tier,
+            cls=return_response_headers,
+            headers=headers,
+            **kwargs))
+        if length == 0:
+            return cast(Dict[str, Any], response)
+
+        if encryption_options and encryption_options.get('key'):
+            if encryption_options['version'] == _ENCRYPTION_PROTOCOL_V1:
+                encryptor, padder = get_blob_encryptor_and_padder(cek, iv, False)
+                kwargs['encryptor'] = encryptor
+                kwargs['padder'] = padder
+
+        kwargs['modified_access_conditions'] = ModifiedAccessConditions(if_match=response['etag'])
+        return cast(Dict[str, Any], await upload_data_chunks(
+            service=client,
+            uploader_class=PageBlobChunkUploader,
+            total_size=length,
+            chunk_size=blob_settings.max_page_size,
+            stream=stream,
+            max_concurrency=max_concurrency,
+            validate_content=validate_content,
+            progress_hook=progress_hook,
+            headers=headers,
+            **kwargs))
+
+    except HttpResponseError as error:
+        try:
+            process_storage_error(error)
+        except ResourceModifiedError as mod_error:
+            if not overwrite:
+                _convert_mod_error(mod_error)
+            raise
+
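+# Hedged sketch (editor note, not SDK code): upload_page_blob backs
+# BlobClient.upload_blob(..., blob_type=BlobType.PAGEBLOB); content must be
+# 512-byte aligned. "blob_client" is assumed to be an existing async BlobClient.
+#
+#     await blob_client.upload_blob(b"\x00" * 1024, blob_type=BlobType.PAGEBLOB, overwrite=True)
+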
+
+async def upload_append_blob(  # pylint: disable=unused-argument
+    client: "AppendBlobOperations",
+    overwrite: bool,
+    encryption_options: Dict[str, Any],
+    blob_settings: "StorageConfiguration",
+    headers: Dict[str, Any],
+    stream: IO,
+    length: Optional[int] = None,
+    validate_content: Optional[bool] = None,
+    max_concurrency: Optional[int] = None,
+    **kwargs: Any
+) -> Dict[str, Any]:
+    try:
+        if length == 0:
+            return {}
+        blob_headers = kwargs.pop('blob_headers', None)
+        append_conditions = AppendPositionAccessConditions(
+            max_size=kwargs.pop('maxsize_condition', None),
+            append_position=None)
+        blob_tags_string = kwargs.pop('blob_tags_string', None)
+        progress_hook = kwargs.pop('progress_hook', None)
+
+        try:
+            if overwrite:
+                await client.create(
+                    content_length=0,
+                    blob_http_headers=blob_headers,
+                    headers=headers,
+                    blob_tags_string=blob_tags_string,
+                    **kwargs)
+            return cast(Dict[str, Any], await upload_data_chunks(
+                service=client,
+                uploader_class=AppendBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                append_position_access_conditions=append_conditions,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs))
+        except HttpResponseError as error:
+            if error.response.status_code != 404:  # type: ignore [union-attr]
+                raise
+            # rewind the request body if it is a stream
+            if hasattr(stream, 'read'):
+                try:
+                    # attempt to rewind the body to the initial position
+                    stream.seek(0, SEEK_SET)
+                except UnsupportedOperation as exc:
+                    # if body is not seekable, then retry would not work
+                    raise error from exc
+            await client.create(
+                content_length=0,
+                blob_http_headers=blob_headers,
+                headers=headers,
+                blob_tags_string=blob_tags_string,
+                **kwargs)
+            return cast(Dict[str, Any], await upload_data_chunks(
+                service=client,
+                uploader_class=AppendBlobChunkUploader,
+                total_size=length,
+                chunk_size=blob_settings.max_block_size,
+                stream=stream,
+                max_concurrency=max_concurrency,
+                validate_content=validate_content,
+                append_position_access_conditions=append_conditions,
+                progress_hook=progress_hook,
+                headers=headers,
+                **kwargs))
+    except HttpResponseError as error:
+        process_storage_error(error)
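+
+
+# Hedged sketch (editor note, not SDK code): upload_append_blob backs
+# BlobClient.upload_blob(..., blob_type=BlobType.APPENDBLOB); on a 404 the
+# helper above creates the blob, rewinds the stream when possible, and retries.
+# "blob_client" is assumed to be an existing async BlobClient.
+#
+#     await blob_client.upload_blob(b"log line\n", blob_type=BlobType.APPENDBLOB)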